From dca3779e8dae071003ebd7708bcf0516741c97f7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 Apr 2026 02:28:35 +0000 Subject: [PATCH 01/13] deps: Bump softprops/action-gh-release from 2 to 3 Bumps [softprops/action-gh-release](https://github.com/softprops/action-gh-release) from 2 to 3. - [Release notes](https://github.com/softprops/action-gh-release/releases) - [Changelog](https://github.com/softprops/action-gh-release/blob/master/CHANGELOG.md) - [Commits](https://github.com/softprops/action-gh-release/compare/v2...v3) --- updated-dependencies: - dependency-name: softprops/action-gh-release dependency-version: '3' dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] --- .github/workflows/release.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index f4ffba5cf..11915875a 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -34,7 +34,7 @@ jobs: run: ./hack/build-all.sh ${{ steps.version.outputs.VERSION }} - name: Create Release - uses: softprops/action-gh-release@v2 + uses: softprops/action-gh-release@v3 with: files: | dist/ctx-* From 53ae0a8a0d20fe3a61a35f20457fb5b4b875888e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 Apr 2026 02:28:44 +0000 Subject: [PATCH 02/13] deps: Bump golang.org/x/tools from 0.43.0 to 0.44.0 Bumps [golang.org/x/tools](https://github.com/golang/tools) from 0.43.0 to 0.44.0. - [Release notes](https://github.com/golang/tools/releases) - [Commits](https://github.com/golang/tools/compare/v0.43.0...v0.44.0) --- updated-dependencies: - dependency-name: golang.org/x/tools dependency-version: 0.44.0 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 10 +++++----- go.sum | 20 ++++++++++---------- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/go.mod b/go.mod index 85ba021f0..0af61fa74 100644 --- a/go.mod +++ b/go.mod @@ -6,7 +6,7 @@ require ( github.com/hashicorp/raft v1.7.3 github.com/hashicorp/raft-boltdb/v2 v2.3.1 github.com/spf13/cobra v1.10.2 - golang.org/x/tools v0.43.0 + golang.org/x/tools v0.44.0 google.golang.org/grpc v1.80.0 gopkg.in/yaml.v3 v3.0.1 ) @@ -25,11 +25,11 @@ require ( github.com/mattn/go-isatty v0.0.14 // indirect github.com/spf13/pflag v1.0.10 // indirect go.etcd.io/bbolt v1.3.5 // indirect - golang.org/x/mod v0.34.0 // indirect - golang.org/x/net v0.52.0 // indirect + golang.org/x/mod v0.35.0 // indirect + golang.org/x/net v0.53.0 // indirect golang.org/x/sync v0.20.0 // indirect - golang.org/x/sys v0.42.0 // indirect - golang.org/x/text v0.35.0 // indirect + golang.org/x/sys v0.43.0 // indirect + golang.org/x/text v0.36.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20260120221211-b8f7ae30c516 // indirect google.golang.org/protobuf v1.36.11 // indirect ) diff --git a/go.sum b/go.sum index 9047a3a2b..d84977914 100644 --- a/go.sum +++ b/go.sum @@ -170,16 +170,16 @@ go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/mod v0.34.0 h1:xIHgNUUnW6sYkcM5Jleh05DvLOtwc6RitGHbDk4akRI= -golang.org/x/mod v0.34.0/go.mod h1:ykgH52iCZe79kzLLMhyCUzhMci+nQj+0XkbXpNYtVjY= +golang.org/x/mod v0.35.0 h1:Ww1D637e6Pg+Zb2KrWfHQUnH2dQRLBQyAtpr/haaJeM= +golang.org/x/mod v0.35.0/go.mod h1:+GwiRhIInF8wPm+4AoT6L0FA1QWAad3OMdTRx4tFYlU= golang.org/x/net 
v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.52.0 h1:He/TN1l0e4mmR3QqHMT2Xab3Aj3L9qjbhRm78/6jrW0= -golang.org/x/net v0.52.0/go.mod h1:R1MAz7uMZxVMualyPXb+VaqGSa3LIaUqk0eEt3w36Sw= +golang.org/x/net v0.53.0 h1:d+qAbo5L0orcWAr0a9JweQpjXF19LMXJE8Ey7hwOdUA= +golang.org/x/net v0.53.0/go.mod h1:JvMuJH7rrdiCfbeHoo3fCQU24Lf5JJwT9W3sJFulfgs= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -205,15 +205,15 @@ golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.42.0 h1:omrd2nAlyT5ESRdCLYdm3+fMfNFE/+Rf4bDIQImRJeo= -golang.org/x/sys v0.42.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw= +golang.org/x/sys v0.43.0 h1:Rlag2XtaFTxp19wS8MXlJwTvoh8ArU6ezoyFsMyCTNI= +golang.org/x/sys v0.43.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw= 
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/text v0.35.0 h1:JOVx6vVDFokkpaq1AEptVzLTpDe9KGpj5tR4/X+ybL8= -golang.org/x/text v0.35.0/go.mod h1:khi/HExzZJ2pGnjenulevKNX1W67CUy0AsXcNubPGCA= +golang.org/x/text v0.36.0 h1:JfKh3XmcRPqZPKevfXVpI1wXPTqbkE5f7JA92a55Yxg= +golang.org/x/text v0.36.0/go.mod h1:NIdBknypM8iqVmPiuco0Dh6P5Jcdk8lJL0CUebqK164= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.43.0 h1:12BdW9CeB3Z+J/I/wj34VMl8X+fEXBxVR90JeMX5E7s= -golang.org/x/tools v0.43.0/go.mod h1:uHkMso649BX2cZK6+RpuIPXS3ho2hZo4FVwfoy1vIk0= +golang.org/x/tools v0.44.0 h1:UP4ajHPIcuMjT1GqzDWRlalUEoY+uzoZKnhOjbIPD2c= +golang.org/x/tools v0.44.0/go.mod h1:KA0AfVErSdxRZIsOVipbv3rQhVXTnlU6UhKxHd1seDI= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gonum.org/v1/gonum v0.17.0 h1:VbpOemQlsSMrYmn7T2OUvQ4dqxQXU+ouZFQsZOx50z4= gonum.org/v1/gonum v0.17.0/go.mod h1:El3tOrEuMpv2UdMrbNlKEh9vd86bmQ6vqIcDwxEOc1E= From edaac81786c9379333b352dae0d55df0ae0f72bb Mon Sep 17 00:00:00 2001 From: ersan bilik Date: Fri, 10 Apr 2026 01:07:12 +0300 Subject: [PATCH 03/13] feat: full Copilot CLI skill parity with Claude integration MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Port all 40 Claude skills to Copilot CLI format with proper YAML frontmatter (tools array instead of allowed-tools string). Includes lifecycle hook scripts (bash + PowerShell), agent instructions, and hook configuration. 
Contents: - 40 skill SKILL.md files under integrations/copilot-cli/skills/ - 8 hook scripts (session-start/end, pre/post-tool-use × bash/ps1) - INSTRUCTIONS.md agent bootstrap instructions - ctx-hooks.json lifecycle hook configuration - Updated embed.go to include integrations assets - Parity spec document (specs/copilot-feature-parity-kit.md) Signed-off-by: ersan bilik Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> Signed-off-by: ersan bilik --- .../integrations/copilot-cli/INSTRUCTIONS.md | 94 +++++ .../integrations/copilot-cli/ctx-hooks.json | 70 +++- .../copilot-cli/scripts/post-tool-use.ps1 | 12 + .../copilot-cli/scripts/post-tool-use.sh | 14 + .../copilot-cli/scripts/pre-tool-use.ps1 | 11 + .../copilot-cli/scripts/pre-tool-use.sh | 15 + .../copilot-cli/scripts/session-end.ps1 | 8 + .../copilot-cli/scripts/session-end.sh | 10 + .../copilot-cli/scripts/session-start.ps1 | 5 + .../copilot-cli/scripts/session-start.sh | 10 + .../skills/_ctx-alignment-audit/SKILL.md | 61 +++ .../skills/ctx-add-convention/SKILL.md | 47 +++ .../skills/ctx-add-decision/SKILL.md | 47 +++ .../skills/ctx-add-learning/SKILL.md | 45 +++ .../copilot-cli/skills/ctx-add-task/SKILL.md | 54 +++ .../copilot-cli/skills/ctx-agent/SKILL.md | 43 ++ .../skills/ctx-architecture/SKILL.md | 69 ++++ .../copilot-cli/skills/ctx-archive/SKILL.md | 46 +++ .../skills/ctx-blog-changelog/SKILL.md | 132 +++++++ .../copilot-cli/skills/ctx-blog/SKILL.md | 57 +++ .../skills/ctx-brainstorm/SKILL.md | 132 +++++++ .../skills/ctx-check-links/SKILL.md | 136 +++++++ .../copilot-cli/skills/ctx-commit/SKILL.md | 90 +++++ .../skills/ctx-consolidate/SKILL.md | 57 +++ .../copilot-cli/skills/ctx-doctor/SKILL.md | 78 ++++ .../copilot-cli/skills/ctx-drift/SKILL.md | 237 ++--------- .../copilot-cli/skills/ctx-implement/SKILL.md | 129 ++++++ .../skills/ctx-import-plans/SKILL.md | 56 +++ .../skills/ctx-journal-enrich-all/SKILL.md | 59 +++ .../skills/ctx-journal-enrich/SKILL.md | 60 +++ 
.../skills/ctx-journal-normalize/SKILL.md | 46 +++ .../copilot-cli/skills/ctx-loop/SKILL.md | 62 +++ .../copilot-cli/skills/ctx-next/SKILL.md | 45 +-- .../copilot-cli/skills/ctx-pad/SKILL.md | 140 +++++++ .../copilot-cli/skills/ctx-pause/SKILL.md | 47 +++ .../skills/ctx-prompt-audit/SKILL.md | 54 +++ .../copilot-cli/skills/ctx-prompt/SKILL.md | 49 +++ .../copilot-cli/skills/ctx-recall/SKILL.md | 38 +- .../copilot-cli/skills/ctx-reflect/SKILL.md | 75 ++++ .../copilot-cli/skills/ctx-remember/SKILL.md | 69 ++++ .../copilot-cli/skills/ctx-remind/SKILL.md | 46 +++ .../copilot-cli/skills/ctx-resume/SKILL.md | 35 ++ .../skills/ctx-sanitize-permissions/SKILL.md | 67 ++++ .../skills/ctx-skill-audit/SKILL.md | 78 ++++ .../skills/ctx-skill-creator/SKILL.md | 76 ++++ .../copilot-cli/skills/ctx-spec/SKILL.md | 76 ++++ .../copilot-cli/skills/ctx-status/SKILL.md | 87 +--- .../copilot-cli/skills/ctx-verify/SKILL.md | 52 +++ .../copilot-cli/skills/ctx-worktree/SKILL.md | 170 ++++++++ .../copilot-cli/skills/ctx-wrap-up/SKILL.md | 121 ++++++ specs/copilot-feature-parity-kit.md | 373 ++++++++++++++++++ 51 files changed, 3337 insertions(+), 353 deletions(-) create mode 100644 internal/assets/integrations/copilot-cli/INSTRUCTIONS.md create mode 100644 internal/assets/integrations/copilot-cli/scripts/post-tool-use.ps1 create mode 100644 internal/assets/integrations/copilot-cli/scripts/post-tool-use.sh create mode 100644 internal/assets/integrations/copilot-cli/scripts/pre-tool-use.ps1 create mode 100644 internal/assets/integrations/copilot-cli/scripts/pre-tool-use.sh create mode 100644 internal/assets/integrations/copilot-cli/scripts/session-end.ps1 create mode 100644 internal/assets/integrations/copilot-cli/scripts/session-end.sh create mode 100644 internal/assets/integrations/copilot-cli/scripts/session-start.ps1 create mode 100644 internal/assets/integrations/copilot-cli/scripts/session-start.sh create mode 100644 
internal/assets/integrations/copilot-cli/skills/_ctx-alignment-audit/SKILL.md create mode 100644 internal/assets/integrations/copilot-cli/skills/ctx-add-convention/SKILL.md create mode 100644 internal/assets/integrations/copilot-cli/skills/ctx-add-decision/SKILL.md create mode 100644 internal/assets/integrations/copilot-cli/skills/ctx-add-learning/SKILL.md create mode 100644 internal/assets/integrations/copilot-cli/skills/ctx-add-task/SKILL.md create mode 100644 internal/assets/integrations/copilot-cli/skills/ctx-agent/SKILL.md create mode 100644 internal/assets/integrations/copilot-cli/skills/ctx-architecture/SKILL.md create mode 100644 internal/assets/integrations/copilot-cli/skills/ctx-archive/SKILL.md create mode 100644 internal/assets/integrations/copilot-cli/skills/ctx-blog-changelog/SKILL.md create mode 100644 internal/assets/integrations/copilot-cli/skills/ctx-blog/SKILL.md create mode 100644 internal/assets/integrations/copilot-cli/skills/ctx-brainstorm/SKILL.md create mode 100644 internal/assets/integrations/copilot-cli/skills/ctx-check-links/SKILL.md create mode 100644 internal/assets/integrations/copilot-cli/skills/ctx-commit/SKILL.md create mode 100644 internal/assets/integrations/copilot-cli/skills/ctx-consolidate/SKILL.md create mode 100644 internal/assets/integrations/copilot-cli/skills/ctx-doctor/SKILL.md create mode 100644 internal/assets/integrations/copilot-cli/skills/ctx-implement/SKILL.md create mode 100644 internal/assets/integrations/copilot-cli/skills/ctx-import-plans/SKILL.md create mode 100644 internal/assets/integrations/copilot-cli/skills/ctx-journal-enrich-all/SKILL.md create mode 100644 internal/assets/integrations/copilot-cli/skills/ctx-journal-enrich/SKILL.md create mode 100644 internal/assets/integrations/copilot-cli/skills/ctx-journal-normalize/SKILL.md create mode 100644 internal/assets/integrations/copilot-cli/skills/ctx-loop/SKILL.md create mode 100644 internal/assets/integrations/copilot-cli/skills/ctx-pad/SKILL.md create mode 
100644 internal/assets/integrations/copilot-cli/skills/ctx-pause/SKILL.md create mode 100644 internal/assets/integrations/copilot-cli/skills/ctx-prompt-audit/SKILL.md create mode 100644 internal/assets/integrations/copilot-cli/skills/ctx-prompt/SKILL.md create mode 100644 internal/assets/integrations/copilot-cli/skills/ctx-reflect/SKILL.md create mode 100644 internal/assets/integrations/copilot-cli/skills/ctx-remember/SKILL.md create mode 100644 internal/assets/integrations/copilot-cli/skills/ctx-remind/SKILL.md create mode 100644 internal/assets/integrations/copilot-cli/skills/ctx-resume/SKILL.md create mode 100644 internal/assets/integrations/copilot-cli/skills/ctx-sanitize-permissions/SKILL.md create mode 100644 internal/assets/integrations/copilot-cli/skills/ctx-skill-audit/SKILL.md create mode 100644 internal/assets/integrations/copilot-cli/skills/ctx-skill-creator/SKILL.md create mode 100644 internal/assets/integrations/copilot-cli/skills/ctx-spec/SKILL.md create mode 100644 internal/assets/integrations/copilot-cli/skills/ctx-verify/SKILL.md create mode 100644 internal/assets/integrations/copilot-cli/skills/ctx-worktree/SKILL.md create mode 100644 internal/assets/integrations/copilot-cli/skills/ctx-wrap-up/SKILL.md create mode 100644 specs/copilot-feature-parity-kit.md diff --git a/internal/assets/integrations/copilot-cli/INSTRUCTIONS.md b/internal/assets/integrations/copilot-cli/INSTRUCTIONS.md new file mode 100644 index 000000000..9419a3dc7 --- /dev/null +++ b/internal/assets/integrations/copilot-cli/INSTRUCTIONS.md @@ -0,0 +1,94 @@ +# ctx Agent Instructions — Copilot CLI + + + + +## IMPORTANT: You Have Persistent Memory + +This project uses Context (`ctx`) for context persistence across sessions. +**Your memory is NOT ephemeral**: it lives in the context directory. + +## On Session Start + +1. **Run `ctx system bootstrap`**: CRITICAL, not optional. + This tells you where the context directory is. 
If it fails or returns + no context_dir, STOP and warn the user. +2. **Read AGENT_PLAYBOOK.md** from the context directory: it explains + how to use this system +3. **Run `ctx agent --budget 4000`** for a content summary + +## When Asked "Do You Remember?" + +When the user asks "Do you remember?", "What were we working on?", or any +memory-related question: + +**Do this FIRST (silently):** +- Read TASKS.md, DECISIONS.md, and LEARNINGS.md from the context directory +- Run `ctx recall list --limit 5` for recent session history + +**Then respond with a structured readback:** + +1. **Last session**: cite the most recent session topic and date +2. **Active work**: list pending or in-progress tasks +3. **Recent context**: mention 1-2 recent decisions or learnings +4. **Next step**: offer to continue or ask what to focus on + +**Never** lead with "I don't have memory", "Let me check if there are files", +or narrate your discovery process. The context files are your memory. +Read them silently, then present what you found as recall, not as a search. + +## Quick Context Load + +```bash +# Get AI-optimized context packet (what you should know) +ctx agent --budget 4000 + +# Or see full status +ctx status +``` + +## Context Files + +| File | Purpose | +|-----------------|----------------------------------------| +| CONSTITUTION.md | Hard rules — NEVER violate | +| TASKS.md | Current work items | +| DECISIONS.md | Architectural decisions with rationale | +| LEARNINGS.md | Gotchas, tips, lessons learned | +| CONVENTIONS.md | Code patterns and standards | + +All files live in the context directory reported by `ctx system bootstrap`. 
+ +## Context Updates During Work + +Proactively update context files as you work: + +| Event | Action | +|-----------------------------|-------------------------------------| +| Made architectural decision | Add to `.context/DECISIONS.md` | +| Discovered gotcha/bug | Add to `.context/LEARNINGS.md` | +| Established new pattern | Add to `.context/CONVENTIONS.md` | +| Completed task | Mark [x] in `.context/TASKS.md` | + +## Self-Check + +Periodically ask yourself: + +> "If this session ended right now, would the next session know what happened?" + +If no — save a session file or update context files before continuing. + +## Session Persistence + +After completing meaningful work, save a session summary to +`.context/sessions/`. Use the `ctx-wrap-up` skill for the full ceremony. + +## Build Commands + +```bash +make build # or: go build ./cmd/ctx/... +make lint # or: golangci-lint run +make test # or: go test ./... +``` + + diff --git a/internal/assets/integrations/copilot-cli/ctx-hooks.json b/internal/assets/integrations/copilot-cli/ctx-hooks.json index 2aa48f448..b4ae75012 100644 --- a/internal/assets/integrations/copilot-cli/ctx-hooks.json +++ b/internal/assets/integrations/copilot-cli/ctx-hooks.json @@ -1,40 +1,68 @@ { - "version": 1, "hooks": { "sessionStart": [ { - "type": "command", - "bash": ".github/hooks/scripts/ctx-sessionStart.sh", - "powershell": ".github/hooks/scripts/ctx-sessionStart.ps1", - "cwd": ".", - "timeoutSec": 10 + "description": "Bootstrap ctx context on session start", + "command": "ctx system bootstrap" + }, + { + "description": "Load AI-optimized context packet", + "command": "ctx agent --budget 4000" } ], "preToolUse": [ { - "type": "command", - "bash": ".github/hooks/scripts/ctx-preToolUse.sh", - "powershell": ".github/hooks/scripts/ctx-preToolUse.ps1", - "cwd": ".", - "timeoutSec": 5 + "description": "Context load gate — ensure context is loaded before work", + "command": "ctx system context-load-gate" + }, + { + "description": "Block 
dangerous non-path ctx commands", + "matcher": "bash", + "command": "ctx system block-non-path-ctx" + }, + { + "description": "QA reminder nudge", + "matcher": "bash", + "command": "ctx system qa-reminder" } ], "postToolUse": [ { - "type": "command", - "bash": ".github/hooks/scripts/ctx-postToolUse.sh", - "powershell": ".github/hooks/scripts/ctx-postToolUse.ps1", - "cwd": ".", - "timeoutSec": 5 + "description": "Post-commit context persistence check", + "matcher": "bash", + "command": "ctx system post-commit" + }, + { + "description": "Check if a task was just completed", + "matcher": "edit", + "command": "ctx system check-task-completion" + }, + { + "description": "Check if a task was just completed (write)", + "matcher": "write", + "command": "ctx system check-task-completion" } ], "sessionEnd": [ { - "type": "command", - "bash": ".github/hooks/scripts/ctx-sessionEnd.sh", - "powershell": ".github/hooks/scripts/ctx-sessionEnd.ps1", - "cwd": ".", - "timeoutSec": 15 + "description": "Check context size for budget drift", + "command": "ctx system check-context-size" + }, + { + "description": "Persistence check — unsaved decisions/learnings", + "command": "ctx system check-persistence" + }, + { + "description": "Journal export check", + "command": "ctx system check-journal" + }, + { + "description": "Version freshness check", + "command": "ctx system check-version" + }, + { + "description": "Heartbeat — record session activity", + "command": "ctx system heartbeat" } ] } diff --git a/internal/assets/integrations/copilot-cli/scripts/post-tool-use.ps1 b/internal/assets/integrations/copilot-cli/scripts/post-tool-use.ps1 new file mode 100644 index 000000000..4dceed315 --- /dev/null +++ b/internal/assets/integrations/copilot-cli/scripts/post-tool-use.ps1 @@ -0,0 +1,12 @@ +# ctx post-tool-use hook for Copilot CLI (PowerShell) +# Checks for post-commit context and task completion + +$Tool = $args[0] + +if ($Tool -eq "bash" -or $Tool -eq "powershell") { + try { ctx system 
post-commit 2>$null } catch {} +} + +if ($Tool -eq "edit" -or $Tool -eq "write") { + try { ctx system check-task-completion 2>$null } catch {} +} diff --git a/internal/assets/integrations/copilot-cli/scripts/post-tool-use.sh b/internal/assets/integrations/copilot-cli/scripts/post-tool-use.sh new file mode 100644 index 000000000..7d9ccb906 --- /dev/null +++ b/internal/assets/integrations/copilot-cli/scripts/post-tool-use.sh @@ -0,0 +1,14 @@ +#!/bin/bash +# ctx post-tool-use hook for Copilot CLI +# Checks for post-commit context and task completion +set -euo pipefail + +TOOL="${1:-}" + +if [ "$TOOL" = "bash" ] || [ "$TOOL" = "powershell" ]; then + ctx system post-commit 2>/dev/null || true +fi + +if [ "$TOOL" = "edit" ] || [ "$TOOL" = "write" ]; then + ctx system check-task-completion 2>/dev/null || true +fi diff --git a/internal/assets/integrations/copilot-cli/scripts/pre-tool-use.ps1 b/internal/assets/integrations/copilot-cli/scripts/pre-tool-use.ps1 new file mode 100644 index 000000000..c7fed6f7b --- /dev/null +++ b/internal/assets/integrations/copilot-cli/scripts/pre-tool-use.ps1 @@ -0,0 +1,11 @@ +# ctx pre-tool-use hook for Copilot CLI (PowerShell) +# Ensures context is loaded and blocks dangerous commands + +$Tool = $args[0] + +try { ctx system context-load-gate 2>$null } catch {} + +if ($Tool -eq "bash" -or $Tool -eq "powershell") { + try { ctx system block-non-path-ctx 2>$null } catch {} + try { ctx system qa-reminder 2>$null } catch {} +} diff --git a/internal/assets/integrations/copilot-cli/scripts/pre-tool-use.sh b/internal/assets/integrations/copilot-cli/scripts/pre-tool-use.sh new file mode 100644 index 000000000..cd9cd926c --- /dev/null +++ b/internal/assets/integrations/copilot-cli/scripts/pre-tool-use.sh @@ -0,0 +1,15 @@ +#!/bin/bash +# ctx pre-tool-use hook for Copilot CLI +# Ensures context is loaded and blocks dangerous commands +set -euo pipefail + +TOOL="${1:-}" + +# Always check context load gate +ctx system context-load-gate 2>/dev/null || true 
+ +# Bash-specific hooks +if [ "$TOOL" = "bash" ] || [ "$TOOL" = "powershell" ]; then + ctx system block-non-path-ctx 2>/dev/null || true + ctx system qa-reminder 2>/dev/null || true +fi diff --git a/internal/assets/integrations/copilot-cli/scripts/session-end.ps1 b/internal/assets/integrations/copilot-cli/scripts/session-end.ps1 new file mode 100644 index 000000000..8b840f2bb --- /dev/null +++ b/internal/assets/integrations/copilot-cli/scripts/session-end.ps1 @@ -0,0 +1,8 @@ +# ctx session end hook for Copilot CLI (PowerShell) +# Checks for unsaved context and records heartbeat + +try { ctx system check-context-size 2>$null } catch {} +try { ctx system check-persistence 2>$null } catch {} +try { ctx system check-journal 2>$null } catch {} +try { ctx system check-version 2>$null } catch {} +try { ctx system heartbeat 2>$null } catch {} diff --git a/internal/assets/integrations/copilot-cli/scripts/session-end.sh b/internal/assets/integrations/copilot-cli/scripts/session-end.sh new file mode 100644 index 000000000..776ddcfd5 --- /dev/null +++ b/internal/assets/integrations/copilot-cli/scripts/session-end.sh @@ -0,0 +1,10 @@ +#!/bin/bash +# ctx session end hook for Copilot CLI +# Checks for unsaved context and records heartbeat +set -euo pipefail + +ctx system check-context-size 2>/dev/null || true +ctx system check-persistence 2>/dev/null || true +ctx system check-journal 2>/dev/null || true +ctx system check-version 2>/dev/null || true +ctx system heartbeat 2>/dev/null || true diff --git a/internal/assets/integrations/copilot-cli/scripts/session-start.ps1 b/internal/assets/integrations/copilot-cli/scripts/session-start.ps1 new file mode 100644 index 000000000..8cbbd22fb --- /dev/null +++ b/internal/assets/integrations/copilot-cli/scripts/session-start.ps1 @@ -0,0 +1,5 @@ +# ctx session start hook for Copilot CLI (PowerShell) +# Bootstraps context and loads the agent packet + +try { ctx system bootstrap 2>$null } catch {} +try { ctx agent --budget 4000 2>$null } 
catch {} diff --git a/internal/assets/integrations/copilot-cli/scripts/session-start.sh b/internal/assets/integrations/copilot-cli/scripts/session-start.sh new file mode 100644 index 000000000..1206e14c3 --- /dev/null +++ b/internal/assets/integrations/copilot-cli/scripts/session-start.sh @@ -0,0 +1,10 @@ +#!/bin/bash +# ctx session start hook for Copilot CLI +# Bootstraps context and loads the agent packet +set -euo pipefail + +# Bootstrap ctx context +ctx system bootstrap 2>/dev/null || true + +# Load AI-optimized context packet +ctx agent --budget 4000 2>/dev/null || true diff --git a/internal/assets/integrations/copilot-cli/skills/_ctx-alignment-audit/SKILL.md b/internal/assets/integrations/copilot-cli/skills/_ctx-alignment-audit/SKILL.md new file mode 100644 index 000000000..26dd5576d --- /dev/null +++ b/internal/assets/integrations/copilot-cli/skills/_ctx-alignment-audit/SKILL.md @@ -0,0 +1,61 @@ +--- +name: _ctx-alignment-audit +description: "Audit alignment between docs and agent instructions. Use when docs make claims about agent behavior that may not be backed by the playbook or skills." +tools: [bash, read, glob, grep] +--- + +Audit whether behavioral claims in documentation are backed by +actual agent instructions. + +## When to Use + +- After writing or updating documentation +- After modifying the Agent Playbook or skills +- When a doc makes claims about proactive agent behavior +- Periodically to catch drift between docs and instructions + +## When NOT to Use + +- For code-level drift (use `ctx-drift` instead) +- For context file staleness (use `ctx-status`) +- When reviewing docs for prose quality (not behavioral claims) + +## Process + +### Step 1: Collect Claims + +Read target docs. Extract every behavioral claim — statements +describing what an agent "will do", "may do", or "offers to do". + +### Step 2: Trace Each Claim + +Search for matching instructions in: +1. **AGENT_PLAYBOOK.md**: primary behavioral source +2. 
**skills/*/SKILL.md**: skill-specific instructions +3. **INSTRUCTIONS.md**: project-level instructions + +For each claim, determine: +- **Covered**: matching instruction exists +- **Partial**: related but incomplete +- **Gap**: no instruction exists + +### Step 3: Report + +| Claim (file:line) | Status | Backing instruction | Gap | +|---|---|---|---| +| "agent creates tasks" | Gap | None | Not taught | +| "agent saves learnings" | Covered | Playbook | — | + +### Step 4: Fix (if requested) + +For each gap, propose: +- **Playbook addition**: if behavior applies broadly +- **Skill addition**: if specific to one skill +- **Doc correction**: if the claim overpromises + +## Quality Checklist + +- [ ] Every behavioral claim was traced +- [ ] Each claim has clear status (Covered/Partial/Gap) +- [ ] Gaps have proposed fixes +- [ ] No new claims introduced without backing diff --git a/internal/assets/integrations/copilot-cli/skills/ctx-add-convention/SKILL.md b/internal/assets/integrations/copilot-cli/skills/ctx-add-convention/SKILL.md new file mode 100644 index 000000000..dcf929503 --- /dev/null +++ b/internal/assets/integrations/copilot-cli/skills/ctx-add-convention/SKILL.md @@ -0,0 +1,47 @@ +--- +name: ctx-add-convention +description: "Record a coding convention. Use when a repeated pattern should be codified so all sessions follow it consistently." +tools: [bash] +--- + +Record a coding convention in CONVENTIONS.md. 
+ +## When to Use + +- When a pattern has been used 2-3 times and should be standardized +- When establishing a naming, formatting, or structural rule +- When a new contributor would need to know "how we do things here" +- When the user says "codify that" or "make that a convention" + +## When NOT to Use + +- One-off implementation details (use code comments instead) +- Architectural decisions with trade-offs (use `ctx-add-decision`) +- Debugging insights or gotchas (use `ctx-add-learning`) +- Rules that are already enforced by linters or formatters + +## Gathering Information + +Conventions are simpler than decisions or learnings. You need: + +1. **Name**: What is the convention called? +2. **Rule**: What is the rule? One clear sentence. +3. **Section**: Where does it belong in CONVENTIONS.md? + +If the user provides only a description, infer the section from the +topic. Check existing sections in CONVENTIONS.md first to place it +correctly: don't create a new section if an existing one fits. + +## Execution + +```bash +ctx add convention "Use kebab-case for all CLI flag names" --section "Naming" +``` + +## Quality Checklist + +- [ ] The rule is clear enough that someone unfamiliar could follow it +- [ ] It is specific to this project (not a general rule) +- [ ] It is not already in CONVENTIONS.md (check first) +- [ ] The section matches an existing section, or a new one is needed +- [ ] It describes a pattern, not a one-time choice (that's a decision) diff --git a/internal/assets/integrations/copilot-cli/skills/ctx-add-decision/SKILL.md b/internal/assets/integrations/copilot-cli/skills/ctx-add-decision/SKILL.md new file mode 100644 index 000000000..82f3698f0 --- /dev/null +++ b/internal/assets/integrations/copilot-cli/skills/ctx-add-decision/SKILL.md @@ -0,0 +1,47 @@ +--- +name: ctx-add-decision +description: "Record architectural decision. Use when a trade-off is resolved or a non-obvious design choice is made that future sessions need to know." 
+tools: [bash] +--- + +Record an architectural decision in DECISIONS.md. + +## When to Use + +- After resolving a trade-off between alternatives +- When making a non-obvious design choice +- When the "why" behind a choice needs to be preserved + +## When NOT to Use + +- Minor implementation details (use code comments instead) +- Routine maintenance or bug fixes +- When there was no real alternative to consider + +## Decision Formats + +### Quick Format (Y-Statement) + +> "In the context of **[situation]**, facing **[constraint]**, we decided +> for **[choice]** and against **[alternatives]**, to achieve +> **[benefit]**, accepting that **[trade-off]**." + +### Full Format + +Gather: Context, Alternatives, Decision, Rationale, Consequence. + +## Execution + +```bash +ctx add decision "Use Cobra for CLI framework" \ + --context "Need CLI framework for Go project" \ + --rationale "Better subcommand support, team familiarity" \ + --consequence "More boilerplate, but clearer command structure" +``` + +## Quality Checklist + +- [ ] Context explains the problem clearly +- [ ] At least one alternative was considered +- [ ] Rationale addresses why alternatives were rejected +- [ ] Consequence includes both benefits and trade-offs diff --git a/internal/assets/integrations/copilot-cli/skills/ctx-add-learning/SKILL.md b/internal/assets/integrations/copilot-cli/skills/ctx-add-learning/SKILL.md new file mode 100644 index 000000000..7ac679724 --- /dev/null +++ b/internal/assets/integrations/copilot-cli/skills/ctx-add-learning/SKILL.md @@ -0,0 +1,45 @@ +--- +name: ctx-add-learning +description: "Record a learning. Use when discovering gotchas, bugs, or unexpected behavior that future sessions should know about." +tools: [bash] +--- + +Record a learning in LEARNINGS.md. + +## Before Recording + +Three questions: if any answer is "no", don't record: + +1. **"Could someone Google this in 5 minutes?"** → If yes, skip it +2. **"Is this specific to this codebase?"** → If no, skip it +3. 
**"Did it take real effort to discover?"** → If no, skip it + +Learnings should capture **principles and heuristics**, not code snippets. + +## When to Use + +- After discovering a gotcha or unexpected behavior +- When a debugging session reveals root cause +- When finding a pattern that will help future work + +## When NOT to Use + +- General programming knowledge (not specific to this project) +- One-off workarounds that won't recur +- Things already documented in the codebase + +## Execution + +```bash +ctx add learning "Title" \ + --context "What were you doing when you discovered this?" \ + --lesson "What's the key insight?" \ + --application "How should we handle this going forward?" +``` + +## Quality Checklist + +- [ ] Context explains what happened (not just what you learned) +- [ ] Lesson is a principle, not a code snippet +- [ ] Application gives actionable guidance for next time +- [ ] Not already in LEARNINGS.md (check first) diff --git a/internal/assets/integrations/copilot-cli/skills/ctx-add-task/SKILL.md b/internal/assets/integrations/copilot-cli/skills/ctx-add-task/SKILL.md new file mode 100644 index 000000000..7090eaabe --- /dev/null +++ b/internal/assets/integrations/copilot-cli/skills/ctx-add-task/SKILL.md @@ -0,0 +1,54 @@ +--- +name: ctx-add-task +description: "Add a task. Use when follow-up work is identified or when breaking down complex work into subtasks." +tools: [bash] +--- + +Add a task to TASKS.md. + +## Before Recording + +Three questions: if any answer is "no", don't record: + +1. **"Is this actionable?"** → If it's a vague wish, clarify first +2. **"Would someone else know what to do?"** → If not, add more detail +3. **"Is this tracked elsewhere?"** → If yes, don't duplicate + +Tasks should describe **what to do and why**, not just a topic. 
+ +## When to Use + +- When follow-up work is identified during a session +- When breaking down a complex task into subtasks +- When the user mentions something that should be tracked + +## When NOT to Use + +- Vague ideas without clear scope (discuss first, then add) +- Work already completed (mark existing tasks done instead) +- One-line fixes you can do right now (just do it) + +## Execution + +```bash +ctx add task "Task description" [--priority high|medium|low] [--section "Phase N"] +``` + +**Good examples:** +```bash +ctx add task "Add --cooldown flag to ctx agent" --priority medium +ctx add task "Investigate ctx init overwriting user content" --priority high +``` + +**Bad examples (too shallow):** +```bash +ctx add task "Fix bug" # What bug? Where? +ctx add task "Improve performance" # Of what? How? +``` + +## Quality Checklist + +- [ ] Task starts with a verb (Add, Fix, Implement, Investigate, Update) +- [ ] Someone unfamiliar with the session could act on it +- [ ] Not a duplicate of an existing task in TASKS.md (check first) +- [ ] Priority set if the user indicated urgency diff --git a/internal/assets/integrations/copilot-cli/skills/ctx-agent/SKILL.md b/internal/assets/integrations/copilot-cli/skills/ctx-agent/SKILL.md new file mode 100644 index 000000000..2475bc6ba --- /dev/null +++ b/internal/assets/integrations/copilot-cli/skills/ctx-agent/SKILL.md @@ -0,0 +1,43 @@ +--- +name: ctx-agent +description: "Load full context packet. Use at session start or when context seems stale or incomplete." +tools: [bash] +--- + +Load the full context packet for AI consumption. 
+ +## When to Use + +- At the start of a session to load all context +- When context seems stale or incomplete +- When switching between different areas of work + +## When NOT to Use + +- The session start hook already runs `ctx agent` automatically: + you rarely need to invoke this manually +- Don't run it just to "refresh" if you already have context loaded + +## After Loading + +**Read the files listed in "Read These Files (in order)"**: the +packet is a summary, not a substitute. In particular, read +CONVENTIONS.md before writing any code. + +Confirm to the user: "I have read the required context files and +I'm following project conventions." + +## Flags + +| Flag | Default | Description | +|--------------|---------|-----------------------------------------------| +| `--budget` | 8000 | Token budget for context packet | +| `--format` | md | Output format: `md` or `json` | +| `--cooldown` | 10m | Suppress repeated output within this duration | +| `--session` | (none) | Session ID for cooldown isolation | + +## Execution + +```bash +ctx agent --budget 4000 +``` diff --git a/internal/assets/integrations/copilot-cli/skills/ctx-architecture/SKILL.md b/internal/assets/integrations/copilot-cli/skills/ctx-architecture/SKILL.md new file mode 100644 index 000000000..0667d8c02 --- /dev/null +++ b/internal/assets/integrations/copilot-cli/skills/ctx-architecture/SKILL.md @@ -0,0 +1,69 @@ +--- +name: ctx-architecture +description: "Build and maintain ARCHITECTURE.md and DETAILED_DESIGN.md. Use when working on structure, adding packages, or tracing flow." +tools: [bash, read, write, glob, grep] +--- + +Build and maintain architecture documentation with incremental +coverage tracking. 
+ +## When to Use + +- Working on system structure or adding packages +- Tracing data flow across the codebase +- Onboarding to understand the system +- After significant structural changes + +## When NOT to Use + +- For code-level changes within a single package +- When ARCHITECTURE.md is already up-to-date +- For documentation-only projects + +## Process + +### 1. Scan the codebase + +```bash +ctx status +``` + +Read the existing ARCHITECTURE.md if it exists. +Scan the directory tree to identify: +- Top-level packages and their responsibilities +- Data flow between components +- External dependencies + +### 2. Build or update ARCHITECTURE.md + +Structure: +- **Overview**: 2-3 sentence system description +- **Package Map**: table of packages → responsibilities +- **Data Flow**: how data moves through the system +- **Key Interfaces**: important boundaries +- **Dependencies**: external deps and why they're used + +### 3. Build DETAILED_DESIGN.md (optional) + +Deeper dive into internals for complex packages: +- Function-level documentation +- State machines +- Error handling patterns +- Concurrency model + +### 4. Coverage tracking + +Track which packages have been documented: + +``` +Coverage: 18/24 packages documented (75%) +Missing: internal/hub, internal/crypto, ... +``` + +## Quality Checklist + +- [ ] Every top-level package mentioned +- [ ] Data flow is traceable end-to-end +- [ ] External dependencies listed with rationale +- [ ] Coverage percentage reported +- [ ] No stale references to removed packages diff --git a/internal/assets/integrations/copilot-cli/skills/ctx-archive/SKILL.md b/internal/assets/integrations/copilot-cli/skills/ctx-archive/SKILL.md new file mode 100644 index 000000000..90920e161 --- /dev/null +++ b/internal/assets/integrations/copilot-cli/skills/ctx-archive/SKILL.md @@ -0,0 +1,46 @@ +--- +name: ctx-archive +description: "Archive completed tasks. Use when TASKS.md has many completed items cluttering the view." 
+tools: [bash] +--- + +Move completed tasks from TASKS.md to the archive. + +## Before Archiving + +1. **"Are the completed tasks cluttering the view?"** → If TASKS.md is + still easy to scan, there's no urgency +2. **"Are all `[x]` items truly done?"** → Verify nothing was checked + off prematurely + +## When to Use + +- When TASKS.md has many completed `[x]` tasks +- When the task list is hard to navigate +- Periodically to keep context clean + +## When NOT to Use + +- When there are only a few completed tasks +- When you're unsure if tasks are truly complete (verify first) +- **Never delete tasks**: only archive (CONSTITUTION invariant) + +## Execution + +```bash +# Preview first (recommended) +ctx tasks archive --dry-run + +# Archive after confirming the preview +ctx tasks archive +``` + +Archived tasks go to `archive/tasks-YYYY-MM-DD.md` in the context +directory, preserving Phase headers for traceability. + +## Quality Checklist + +- [ ] Previewed with `--dry-run` before archiving +- [ ] All archived items are truly complete +- [ ] No tasks were deleted (only archived) +- [ ] Reported how many tasks were archived diff --git a/internal/assets/integrations/copilot-cli/skills/ctx-blog-changelog/SKILL.md b/internal/assets/integrations/copilot-cli/skills/ctx-blog-changelog/SKILL.md new file mode 100644 index 000000000..3366a7625 --- /dev/null +++ b/internal/assets/integrations/copilot-cli/skills/ctx-blog-changelog/SKILL.md @@ -0,0 +1,132 @@ +--- +name: ctx-blog-changelog +description: "Generate themed blog post from commits. Use when writing about changes between releases or documenting a development arc." +tools: [bash, read, write, edit, glob, grep] +--- + +Generate a blog post about changes since a specific commit, with a given theme. + +## Before Writing + +Two questions; if any answer is "no", reconsider: + +1. **"Is there enough change to tell a story?"** → A handful of typo + fixes doesn't warrant a post +2. 
**"Is the theme clear?"** → If the commit range covers unrelated
+ work, narrow the scope or split into multiple posts
+
+## When to Use
+
+- When documenting changes between releases
+- When writing about a development arc or theme
+- When the user wants to explain "what changed and why"
+
+## When NOT to Use
+
+- For general project updates without a commit range (use `/ctx-blog`)
+- When the changes are minor or routine maintenance
+- When there's no unifying theme across the commits
+
+## Input
+
+Required:
+- **Commit hash**: Starting point (e.g., `040ce99`, `HEAD~50`, `v0.1.0`)
+- **Theme**: The narrative angle (e.g., "human-assisted refactoring",
+ "the recall system")
+
+Optional:
+- **Reference post**: An existing post to match the style
+
+## Process
+
+1. **Analyze the commit range**:
+```bash
+git --no-pager log --oneline <commit>..HEAD
+git --no-pager diff --stat <commit>..HEAD
+git --no-pager log --format="%s" <commit>..HEAD | head -50
+```
+
+2. **Gather supporting context**:
+```bash
+# Files most changed
+git --no-pager diff --stat <commit>..HEAD | sort -t'|' -k2 -rn | head -20
+
+# Journal entries from this period
+ctx recall list
+```
+
+3. **Draft the narrative** following the theme
+4. Save to `docs/blog/YYYY-MM-DD-slug.md`
+5. **Update `docs/blog/index.md`** with an entry at the top:
+
+```markdown
+### [Post Title](YYYY-MM-DD-slug.md)
+
+*Author / Date*
+
+2-3 sentence blurb. 
+
+**Topics**: topic-one, topic-two, topic-three
+
+---
+
+## Blog Structure
+
+### Frontmatter
+
+```yaml
+---
+title: "[Theme]: [Specific Angle]"
+date: YYYY-MM-DD
+author: [Ask user]
+topics:
+ - topic-one
+ - topic-two
+ - topic-three
+---
+```
+
+### Body
+
+```markdown
+# [Title]
+
+![ctx](../images/ctx-banner.png)
+
+> [Hook related to theme]
+
+## The Starting Point
+[State of codebase at <commit>, what prompted the change]
+
+## The Journey
+[Narrative of changes, organized by theme not chronology]
+
+## Before and After
+[Comparison table or code diff showing improvement]
+
+## Key Commits
+
+| Commit | Change |
+|--------|-------------|
+| abc123 | Description |
+
+## Lessons Learned
+[Insights from this work]
+
+## What's Next
+[Future work enabled by these changes]
+```
+
+## Style Guidelines
+
+- **Personal voice**: Use "I", "we", share the journey
+- **Show don't tell**: Include actual code, commits, diffs
+- **Tables for comparisons**: Before/after, key commits
+- **Honest about failures**: Include what went wrong and why
+- **Concrete examples**: Reference specific files, commits, decisions
+- **No em-dashes**: Use `:`, `;`, or restructure the sentence instead
+- **Straight quotes only**: Use "dumb quotes" (`"`, `'`), never
+ typographic/curly quotes
+- **80-character line width**: Wrap prose at ~80 characters; exceptions
+ for tables, code blocks, and URLs
diff --git a/internal/assets/integrations/copilot-cli/skills/ctx-blog/SKILL.md b/internal/assets/integrations/copilot-cli/skills/ctx-blog/SKILL.md
new file mode 100644
index 000000000..bae7e5adf
--- /dev/null
+++ b/internal/assets/integrations/copilot-cli/skills/ctx-blog/SKILL.md
@@ -0,0 +1,57 @@
+---
+name: ctx-blog
+description: "Generate blog post drafts from project activity. Use to communicate progress, decisions, or technical insights."
+tools: [bash, read, write]
+---
+
+Generate blog post drafts from project activity. 
+ +## When to Use + +- After completing a significant feature +- When a decision or learning is worth sharing publicly +- For project updates and changelogs +- When the user says "write a blog post about..." + +## When NOT to Use + +- For internal context (use learnings/decisions instead) +- When there's nothing noteworthy to share + +## Process + +### 1. Gather material + +- Recent commits: `git log --oneline -20` +- Recent decisions from DECISIONS.md +- Recent learnings from LEARNINGS.md +- Completed tasks from TASKS.md + +### 2. Identify the narrative + +What's the story? Options: +- Feature announcement +- Technical deep-dive +- Lessons learned +- Project update / changelog + +### 3. Draft the post + +Structure: +- **Title**: clear and engaging +- **Introduction**: what and why (2-3 sentences) +- **Body**: the story with technical details +- **Conclusion**: what's next + +### 4. Write to blog directory + +Target: `site/blog/{date}-{slug}/index.html` or +`docs/blog/{date}-{slug}.md` per project convention. + +## Quality Checklist + +- [ ] Title is clear and engaging +- [ ] Technical accuracy verified +- [ ] No sensitive information exposed +- [ ] Proper frontmatter/metadata +- [ ] Links to relevant specs/docs where appropriate diff --git a/internal/assets/integrations/copilot-cli/skills/ctx-brainstorm/SKILL.md b/internal/assets/integrations/copilot-cli/skills/ctx-brainstorm/SKILL.md new file mode 100644 index 000000000..e2b5ba45e --- /dev/null +++ b/internal/assets/integrations/copilot-cli/skills/ctx-brainstorm/SKILL.md @@ -0,0 +1,132 @@ +--- +name: ctx-brainstorm +description: "Design before implementation. Use before any creative or constructive work to transform vague ideas into validated designs." +tools: [bash, read, write] +--- + +Transform raw ideas into **clear, validated designs** through +structured dialogue **before any implementation begins**. + +## Before Brainstorming + +1. **Check if design is needed**: is the change complex enough? +2. 
**Review prior art**: check `.context/DECISIONS.md` for related + past decisions +3. **Identify what exists**: read relevant code before asking + questions the codebase already answers + +## When to Use + +- Before implementing a new feature +- Before architectural changes +- Before significant behavior modifications +- When an idea is vague and needs shaping + +## When NOT to Use + +- Bug fixes with clear solutions +- Routine maintenance tasks +- Well-defined requirements +- Small, isolated changes +- When the user explicitly wants to jump to code + +## Operating Mode + +Design facilitator, not builder. No implementation while +brainstorming. + +## The Process + +### 1. Understand Current Context + +Before asking questions: +- Review project state, docs, prior decisions +- Identify what exists vs what is proposed +- Note implicit constraints + +### 2. Clarify the Idea + +Rules: +- Ask **one question per message** +- Prefer **multiple-choice** when possible + +Focus on: +- Purpose: why does this need to exist? +- Users: who benefits? +- Constraints: what limits apply? +- Success criteria: how do we know it works? +- Non-goals: what is explicitly out of scope? + +### 3. Non-Functional Requirements + +Clarify or propose assumptions for: +- Performance, scale, security, reliability, maintenance + +### 4. Understanding Lock (Gate) + +Before proposing any design, provide: + +**Understanding Summary** (5-7 bullets): +- What is being built, why, for whom, constraints, non-goals + +**Assumptions**: list all explicitly. + +**Open Questions**: list unresolved items. + +> "Does this accurately reflect your intent? Confirm or correct +> before we move to design." + +**Do NOT proceed until confirmed.** + +### 5. Explore Design Approaches + +- Propose **2-3 viable approaches** +- Lead with recommended option +- Explain trade-offs + +### 6. 
Stress-Test the Chosen Approach + +After the user picks an approach: +- Surface assumptions and dependencies +- Identify failure modes +- Steel-man an alternative + +> "These are the risks I see. Do they change your preference?" + +### 7. Present the Design + +Break into digestible sections. Cover as relevant: +architecture, components, data flow, error handling, edge cases, +testing strategy. + +### 8. Decision Log + +Maintain a running log: + +| Decision | Alternatives | Rationale | +|----------|--------------|-----------| +| ... | ... | ... | + +## After the Design + +### Persist to Context + +```bash +ctx add decision "..." --context "..." --rationale "..." +``` + +### Implementation Handoff + +Only after documentation, ask: +> "Ready to begin implementation?" + +## Quality Checklist + +Exit brainstorming **only when**: +- [ ] Understanding Lock confirmed by the user +- [ ] At least one design approach accepted +- [ ] Stress-test completed +- [ ] Major assumptions documented +- [ ] Key risks acknowledged +- [ ] Decision Log complete +- [ ] Decisions persisted to context diff --git a/internal/assets/integrations/copilot-cli/skills/ctx-check-links/SKILL.md b/internal/assets/integrations/copilot-cli/skills/ctx-check-links/SKILL.md new file mode 100644 index 000000000..dac6a0e72 --- /dev/null +++ b/internal/assets/integrations/copilot-cli/skills/ctx-check-links/SKILL.md @@ -0,0 +1,136 @@ +--- +name: ctx-check-links +description: "Audit docs for dead links. Use before releases, after restructuring docs, or when running a documentation audit." +tools: [bash, read, glob, grep] +--- + +Scan markdown files for broken links. Two passes: +internal (file targets) and external (HTTP URLs). + +## Scope Discovery + +Determine which directories to scan: + +1. If the user specifies a path, use that +2. Otherwise, glob for common doc directories: `docs/`, `doc/`, + `documentation/`, `site/` +3. 
If none exist, fall back to scanning all `.md` files in the + project root (excluding `node_modules/`, `.git/`, `vendor/`) + +Report which directories are being scanned at the start of output. + +## When to Use + +- Before releases or doc deployments +- After renaming, moving, or deleting doc pages +- After restructuring documentation directories or nav +- When `/_ctx-audit` runs (audit check #12) +- When a user reports a 404 on the site + +## When NOT to Use + +- When editing a single doc (just eyeball links in that file) +- When offline and only external checks would matter + +## Execution + +### Pass 1: Internal Links + +Scan every `.md` file in the discovered scope for markdown links +pointing to other files: `[text](target.md)`, +`[text](../path/file.md)`, `[text](path/file.md#anchor)`. + +For each link: + +1. Resolve the target **relative to the source file's directory** +2. Strip any `#anchor` fragment before checking file existence +3. Skip external URLs (`http://`, `https://`, `mailto:`) +4. Skip bare anchors (`#section-name`): these are intra-page +5. Verify the target file exists on disk + +Collect all broken internal links as: + +``` +BROKEN: source-file.md:LINE → target.md (file not found) +``` + +### Pass 2: External Links + +Scan every `.md` file in the discovered scope for `http://` and +`https://` URLs in markdown link syntax. + +For each URL: + +1. Send an HTTP HEAD request with a 10-second timeout +2. If HEAD fails or returns 405, retry with GET +3. Record the HTTP status code + +Report failures as: + +``` +WARN: source-file.md:LINE → https://example.com (HTTP 404) +WARN: source-file.md:LINE → https://example.com (timeout) +``` + +**Do not treat external failures as errors.** Network partitions, +rate limiting, and transient outages are common. Report them but +do not fail the check. 
+ +Exceptions: skip these URLs: +- `localhost` / `127.0.0.1` URLs (local dev servers) +- `example.com` / `example.org` (placeholder domains) + +### Pass 3: Image References + +Scan for image links: `![alt](path/to/image.png)` and +`![alt](images/file.jpg)`. + +Verify the image file exists on disk. Same resolution rules as +internal links. + +## Output Format + +``` +## Link Check Report + +### Internal Links +- N broken links found (or "All clear") +- [list of broken links with file:line and target] + +### External Links +- N warnings (or "All reachable") +- [list of failures with file:line, URL, and reason] + +### Images +- N missing images (or "All present") +- [list of missing images with file:line and target] + +### Summary +Internal: N broken / M total +External: N unreachable / M total +Images: N missing / M total +``` + +## Fixing + +For broken internal links, offer specific fixes: + +- If the target was renamed, suggest the new path +- If the target was deleted, suggest removing the link or + pointing to an alternative +- If the target is a typo (close match exists), suggest the + correction + +For external links, just report. The user decides whether to +update, remove, or ignore. + +## Quality Checklist + +After running the check: +- [ ] All `.md` files in the discovered scope were scanned +- [ ] Relative path resolution accounts for subdirectories +- [ ] Anchors stripped before file existence check +- [ ] External check used timeouts (not hanging on slow hosts) +- [ ] localhost/example URLs were skipped +- [ ] Report distinguishes errors (internal) from warnings + (external) diff --git a/internal/assets/integrations/copilot-cli/skills/ctx-commit/SKILL.md b/internal/assets/integrations/copilot-cli/skills/ctx-commit/SKILL.md new file mode 100644 index 000000000..aa63466a4 --- /dev/null +++ b/internal/assets/integrations/copilot-cli/skills/ctx-commit/SKILL.md @@ -0,0 +1,90 @@ +--- +name: ctx-commit +description: "Commit with context persistence. 
Use instead of raw git commit to capture decisions and learnings alongside code changes." +tools: [bash, read, write, edit] +--- + +Commit code changes, then prompt for decisions and learnings +worth persisting. Bridges the gap between committing code and +recording the context behind it. + +## When to Use + +- When committing after meaningful work (feature, bugfix, + refactor) +- When the commit involves a design choice or trade-off +- When the user says "commit" or "commit this" + +## When NOT to Use + +- For trivial commits (typo, formatting): just commit normally +- When the user explicitly says "just commit, no context" +- When nothing has changed + +## Process + +### 1. Pre-commit checks + +Unless the user says "skip checks": + +- Run `git diff --name-only` to see what changed +- If Go files changed, run `go build ./cmd/ctx/...` +- If build fails, stop and report: do not commit broken code + +### 2. Stage and commit + +- Review unstaged changes with `git status` +- Stage relevant files (prefer specific files over `git add -A`) +- Craft a concise commit message: + - If the user provided a message, use it + - If not, draft one based on the changes +- Commit with trailers as required by project conventions + +### 3. Context prompt + +After a successful commit, ask the user: + +> **Any context to capture?** +> +> - **Decision**: Did you make a design choice or trade-off? +> - **Learning**: Did you hit a gotcha or discover something? +> - **Neither**: No context to capture. + +If they provide a decision or learning, record it: + +```bash +ctx add decision "..." +ctx add learning --context "..." --lesson "..." --application "..." +``` + +### 4. Doc drift check + +If committed files include source code that could affect docs, +offer to check for doc drift. + +### 5. Reflect (optional) + +If the commit represents a significant milestone, suggest: + +> This looks like a good checkpoint. Want me to run a quick +> reflection to capture the bigger picture? 
+ +## Commit Message Style + +- Focus on **why**, not what (the diff shows what) +- Concise (1-2 sentences) +- Follow the repository's existing commit style +- Include required trailers (Spec:, Co-authored-by:, etc.) + +## Quality Checklist + +Before committing: +- [ ] Build passes (if Go files changed) +- [ ] Commit message explains the why +- [ ] No secrets in staged changes +- [ ] Specific files staged (not blind `git add -A`) + +After committing: +- [ ] Context prompt was presented +- [ ] Any decisions/learnings were recorded +- [ ] Doc drift check offered if source code changed diff --git a/internal/assets/integrations/copilot-cli/skills/ctx-consolidate/SKILL.md b/internal/assets/integrations/copilot-cli/skills/ctx-consolidate/SKILL.md new file mode 100644 index 000000000..4a36f7280 --- /dev/null +++ b/internal/assets/integrations/copilot-cli/skills/ctx-consolidate/SKILL.md @@ -0,0 +1,57 @@ +--- +name: ctx-consolidate +description: "Consolidate redundant entries in LEARNINGS.md or DECISIONS.md. Use when context files grow large with overlapping entries." +tools: [bash, read, write, edit] +--- + +Find and merge duplicate or overlapping entries in context files. + +## When to Use + +- Context files have grown large (50+ entries) +- Multiple entries cover the same topic from different sessions +- After a long project phase where many similar learnings accumulated +- When `ctx status` shows high token counts for context files + +## When NOT to Use + +- Files are small and manageable +- Entries are all distinct +- Just after a fresh `ctx init` + +## Process + +### 1. Read the target file + +Read the full content of the file to consolidate +(LEARNINGS.md or DECISIONS.md). + +### 2. Identify clusters + +Group entries by topic. Look for: +- Same subject with different wording +- Entries that build on each other chronologically +- Contradictory entries (later one supersedes) + +### 3. 
Propose merges
+
+For each cluster, propose a consolidated entry:
+
+> **Cluster: [topic]** (N entries → 1)
+> - Entry A: "..."
+> - Entry B: "..."
+> - **Merged**: "..."
+>
+> Approve?
+
+### 4. Apply approved merges
+
+Replace the cluster entries with the merged version.
+Archive originals if requested.
+
+## Quality Checklist
+
+- [ ] No information lost in merges
+- [ ] Contradictions resolved (latest wins)
+- [ ] User approved each merge
+- [ ] File is valid markdown after edits
diff --git a/internal/assets/integrations/copilot-cli/skills/ctx-doctor/SKILL.md b/internal/assets/integrations/copilot-cli/skills/ctx-doctor/SKILL.md
new file mode 100644
index 000000000..710f38f31
--- /dev/null
+++ b/internal/assets/integrations/copilot-cli/skills/ctx-doctor/SKILL.md
@@ -0,0 +1,78 @@
+---
+name: ctx-doctor
+description: "Troubleshoot ctx behavior. Runs structural health checks, analyzes event log patterns, and presents findings with suggested actions."
+tools: [bash, read]
+---
+
+Diagnose ctx problems by combining structural health checks with
+event log analysis.
+
+## When to Use
+
+- User says "doctor", "diagnose", "troubleshoot", "health check"
+- User asks "why didn't my hook fire?"
+- User says "hooks seem broken" or "context seems stale"
+
+## When NOT to Use
+
+- User wants a quick status check (use `ctx-status`)
+- User wants to fix drift (use `ctx-drift`)
+- User wants to pause hooks (use `ctx-pause`)
+
+## Diagnostic Playbook
+
+### Phase 1: Structural Baseline
+
+```bash
+ctx doctor --json
+```
+
+Parse the JSON output. Note any warnings or errors.
+
+### Phase 2: Event Log Analysis (if available)
+
+```bash
+ctx system events --json --last 100
+```
+
+For specific hooks:
+```bash
+ctx system events --hook <hook-name> --json --last 20
+```
+
+If event logging is not enabled, note: "Enable `event_log: true`
+ +### Phase 3: Targeted Investigation + +Based on findings, check: +- Hook config for hook registration +- Custom messages: `ctx system message list` +- RC config: read `.ctxrc` +- Reminders: `ctx remind list` + +### Phase 4: Present Findings + +``` +## Doctor Report + +### Structural health +- Summarize ctx doctor results + +### Event analysis (if available) +- Patterns, gaps, or anomalies + +### Suggested actions +- [ ] Actionable items based on findings +``` + +### Phase 5: Suggest, Don't Fix + +Present actionable next steps but do NOT auto-fix anything. + +## Quality Checklist + +- [ ] Ran `ctx doctor` for structural checks +- [ ] Checked event log if available +- [ ] Presented findings in structured format +- [ ] Suggested actions without auto-applying diff --git a/internal/assets/integrations/copilot-cli/skills/ctx-drift/SKILL.md b/internal/assets/integrations/copilot-cli/skills/ctx-drift/SKILL.md index 59cd6f907..c678e8b3d 100644 --- a/internal/assets/integrations/copilot-cli/skills/ctx-drift/SKILL.md +++ b/internal/assets/integrations/copilot-cli/skills/ctx-drift/SKILL.md @@ -1,250 +1,59 @@ --- name: ctx-drift description: "Detect and fix context drift. Use to find stale paths, broken references, and constitution violations in context files." +tools: [bash, read, write, edit, glob, grep] --- Detect context drift at two layers: **structural** (stale paths, missing files, constitution violations) via `ctx drift`, and **semantic** (outdated conventions, superseded decisions, -irrelevant learnings) via agent analysis. The semantic layer is -where the real value is: the CLI cannot do it. +irrelevant learnings) via agent analysis. 
## When to Use - At session start to verify context health before working - After refactors, renames, or major structural changes -- When the user asks "is our context clean?", "anything - stale?", or "check for drift" -- Proactively when you notice a path in ARCHITECTURE.md or - CONVENTIONS.md that does not match the actual file tree -- Before a release or milestone to ensure context is accurate +- When the user asks "is our context clean?" or "check for drift" +- Before a release or milestone ## When NOT to Use -- When you just ran `/ctx-status` and everything looked fine - (status already shows drift warnings) -- Repeatedly in the same session without changes in between -- When the user is mid-flow on a task; do not interrupt with - unsolicited maintenance - -## Usage Examples - -```text -/ctx-drift -/ctx-drift (after the refactor) -``` +- When you just ran status and everything looked fine +- Repeatedly in the same session without changes +- Mid-flow when the user is focused on a task ## Execution -Drift detection has two layers: **structural** (programmatic) and -**semantic** (agent-driven). Always do both. - ### Layer 1: Structural Checks -Run the CLI tool for fast, programmatic checks: - ```bash ctx drift ``` -This catches dead paths, missing files, staleness indicators, -and constitution violations. These are necessary but insufficient: -they only detect structural problems. +Catches dead paths, missing files, staleness indicators. ### Layer 2: Semantic Analysis -After the structural check, read the context files yourself and -compare them to what you know about the codebase. This is where -you add real value: the CLI tool cannot do this. - -Check for: - -- **Outdated conventions**: Does CONVENTIONS.md describe patterns - the code no longer follows? Read a few source files in the - relevant area to verify. -- **Superseded decisions**: Does DECISIONS.md contain entries that - were implicitly overridden by later work? 
Look for decisions - whose rationale no longer applies. -- **Stale architecture descriptions**: Does ARCHITECTURE.md - describe module purposes that have changed? A path can still - exist while its description is wrong. -- **Irrelevant learnings**: Does LEARNINGS.md contain entries - about bugs that were since fixed or patterns that no longer - apply? -- **Contradictions**: Do any context files contradict each other - or contradict the actual code? - -### Reporting - -After both layers, do **not** dump raw output. Instead: - -1. **Summarize findings** by severity (structural warnings, - semantic issues) in plain language -2. **Explain each finding**: what file, what line, why it - matters -3. **Distinguish structural from semantic**: structural issues - can be auto-fixed; semantic issues need the user's judgment -4. **Offer to auto-fix** structural issues: - "I can run `ctx drift --fix` to clean up the dead path - references. Want me to?" -5. **Propose specific edits** for semantic issues: - "CONVENTIONS.md still says 'use fmt.Printf for output' but - we switched to cmd.Printf three weeks ago. Want me to - update it?" -6. 
**Suggest follow-up commands** when appropriate: - - Many stale paths after a refactor → suggest `ctx sync` - - Heavy task clutter → suggest `ctx compact --archive` - - Old files untouched for weeks → suggest reviewing content - -## Interpreting Results - -| Finding | What It Means | Suggested Action | -|-------------------------------|--------------------------------------------------|------------------------------------------------------| -| Path does not exist | Context references a deleted file/dir | Remove reference or update path | -| Directory is empty | Referenced dir exists but has no files | Remove reference or populate directory | -| Many completed tasks | TASKS.md is cluttered | Run `ctx compact --archive` | -| File not modified in 30+ days | Content may be outdated | Review and update or confirm current | -| Constitution violation | A hard rule may be broken | Fix immediately | -| Missing packages | An `internal/` package is not in ARCHITECTURE.md | Add it with `/ctx-architecture` or document manually | -| Required file missing | A core context file does not exist | Create it with `ctx init` or manually | - -## Auto-Fix - -When the user agrees to auto-fix: - -```bash -ctx drift --fix -``` - -After fixing, run `ctx drift` again to confirm remaining -issues need manual attention. Report what was fixed and what -still needs the user's judgment. - -## Skill Template Drift - -After running `ctx drift`, check whether the project's -installed skills (`.claude/skills/`) match the canonical -templates shipped with `ctx`. - -### Procedure - -1. Create a temp directory and run `ctx init --force` inside - it to get the latest templates: - - ```bash - CTX_TPL_DIR=$(mktemp -d) - cd "$CTX_TPL_DIR" && ctx init --force 2>/dev/null - ``` - -2. Compare each skill in the project against the template: - - ```bash - diff -ru "$CTX_TPL_DIR/.claude/skills/" .claude/skills/ 2>/dev/null - ``` - -3. 
Clean up the temp directory: - - ```bash - rm -rf "$CTX_TPL_DIR" - ``` - -### Interpreting Skill Drift +After structural check, read context files and compare to the +codebase. Check for: -| Finding | Action | -|--------------------------------------|---------------------------------------------------| -| Skill missing from project | Offer to install: copy from template | -| Skill differs from template | Show the diff; offer to update to latest template | -| Project has extra skills (no match) | These are custom: leave them alone | -| No differences | Skills are up to date; report clean | +- **Outdated conventions**: patterns the code no longer follows +- **Superseded decisions**: entries overridden by later work +- **Stale architecture**: module descriptions that have changed +- **Irrelevant learnings**: entries about fixed bugs +- **Contradictions**: context files contradicting each other -When reporting skill drift, distinguish between: - -- **ctx-managed skills** (present in the template): these - should generally match; differences mean the user's copy - is outdated or was customized intentionally -- **Custom skills** (only in the project): these are user - additions and should not be flagged as drift - -If a skill was intentionally customized, note it and move on. -Offer to update only ctx-managed skills, and always show the -diff before overwriting. - -## Permission Drift - -After checking skills, verify that `.claude/settings.local.json` -has the expected ctx permissions. This file is gitignored, so it -drifts independently from the codebase. - -### Procedure - -1. Read `.claude/settings.local.json` and extract the allow list. - -2. Check for **missing ctx defaults**. Every entry in - `DefaultAllowPermissions()` (defined in - `internal/assets/permissions/allow.txt`) should be present. 
The current - expected set is: - - - `Bash(ctx:*)`: covers all ctx subcommands - - `Skill(ctx-*)`: one entry per ctx-shipped skill - - To get the authoritative list: - - ```bash - ctx init --force 2>/dev/null # in a temp dir - ``` - - Then compare permissions from the generated - `settings.local.json` against the project's copy. - -3. Check for **stale skill permissions**. If a `Skill(ctx-*)` - entry references a skill that no longer exists in - `.claude/skills/`, flag it. - -4. Check for **missing skill permissions**. If a `ctx-*` skill - exists in `.claude/skills/` but has no corresponding - `Skill(ctx-*)` in the allow list, flag it. - -### Interpreting Permission Drift - -| Finding | Action | -|----------------------------------|---------------------------------------------------------------------| -| Missing `Bash(ctx:*)` | Suggest adding: required for ctx to work | -| Missing `Skill(ctx-*)` entry | Suggest adding: skill will prompt every time | -| Stale `Skill(ctx-*)` entry | Suggest removing: dead reference | -| Granular `Bash(ctx :*)` | Suggest consolidating to `Bash(ctx:*)` | -| One-off / session debris entries | Note as hygiene issue (see `hack/runbooks/sanitize-permissions.md`) | - -### Important - -Do **not** edit `settings.local.json` directly. Report findings -and let the user make changes. This file controls agent -permissions: self-modification is a security concern. Refer -users to `hack/runbooks/sanitize-permissions.md` for the manual cleanup -procedure. - -## Proactive Use - -Run drift detection without being asked when: - -- You load context at session start and notice a path - reference that does not match the file tree -- The user just completed a refactor that renamed or moved - files -- TASKS.md has obviously heavy clutter (20+ completed items - visible when you read it) - -When running proactively, keep the report brief: +### Reporting -> I ran a quick drift check after the refactor. Two stale -> path references in ARCHITECTURE.md. 
Want me to clean -> them up? +1. Summarize findings by severity +2. Explain each finding: what file, why it matters +3. Distinguish structural from semantic +4. Offer to auto-fix structural: `ctx drift --fix` +5. Propose specific edits for semantic issues ## Quality Checklist -After running drift detection, verify: -- [ ] Summarized findings in plain language (did not just - paste raw CLI output) +- [ ] Summarized findings (did not dump raw output) - [ ] Explained why each finding matters -- [ ] Offered auto-fix for fixable issues before running it -- [ ] Suggested appropriate follow-up commands +- [ ] Offered auto-fix before running it - [ ] Did not run `--fix` without user confirmation diff --git a/internal/assets/integrations/copilot-cli/skills/ctx-implement/SKILL.md b/internal/assets/integrations/copilot-cli/skills/ctx-implement/SKILL.md new file mode 100644 index 000000000..6e13828bb --- /dev/null +++ b/internal/assets/integrations/copilot-cli/skills/ctx-implement/SKILL.md @@ -0,0 +1,129 @@ +--- +name: ctx-implement +description: "Execute a plan step-by-step with verification. Use when you have a plan document and need disciplined, checkpointed implementation." +tools: [bash, read, write, edit, glob, grep] +--- + +Take a plan (inline text, file path, or from the conversation) +and execute it step-by-step with build/test verification between +steps. + +## When to Use + +- When the user provides a plan document or file and says + "implement this" +- When a multi-step task has been planned and needs disciplined + execution +- When the user wants checkpointed progress with verification + at each step +- After `ctx-brainstorm` or plan mode produces an approved plan + +## When NOT to Use + +- For single-step tasks: just do them directly +- When the plan is vague or incomplete: use `ctx-brainstorm` + first to refine it +- When the user wants to explore or discuss, not execute +- When changes are trivial (typo fix, config tweak) + +## Process + +### 1. 
Load the plan + +- If a file path is provided, read it +- If inline text is provided, use it directly +- If neither, look back in the conversation for the most + recent plan or approved design +- If no plan can be found, ask the user for one + +### 2. Break into steps + +Parse the plan into discrete, checkable steps. Each step +should be: +- **Atomic**: one logical change (a file, a function, a test) +- **Verifiable**: has a clear pass/fail check +- **Ordered**: dependencies respected (create before use, + test after implement) + +Present the step list to the user for confirmation: + +> **Implementation plan** (N steps): +> +> 1. [Step description] - verify: [check] +> 2. [Step description] - verify: [check] +> 3. ... +> +> Ready to start? + +### 3. Execute step-by-step + +For each step: + +1. **Announce** what you're doing (one line) +2. **Think through** the change before writing code +3. **Implement** the change +4. **Verify** with the appropriate check: + - Go code changed → `go build ./cmd/ctx/...` + - Tests affected → `go test ./...` + - Config/template changed → build to verify embeds + - Docs only → no verification needed +5. **Report** step result: pass or fail +6. **If failed**: stop, diagnose, fix, re-verify before + moving to the next step + +### 4. Checkpoint progress + +After every 3-5 steps (or after a significant milestone): +- Summarize what has been completed +- Note any deviations from the plan +- Ask the user if they want to continue, adjust, or stop + +### 5. 
Wrap up + +After all steps complete: +- Run a final full verification +- Summarize what was implemented +- Note any deviations from the original plan +- Suggest context to persist (decisions, learnings, tasks) + +## Step Verification Map + +| Change type | Verification command | +|--------------------|--------------------------------| +| Go source code | `go build ./cmd/ctx/...` | +| Test files | `go test ./...` | +| Templates/embeds | `go build ./cmd/ctx/...` | +| Makefile | Run the changed target | +| Skill files | Build to verify embed | +| Docs/markdown only | None required | +| Shell scripts | `bash -n script.sh` | + +## Handling Failures + +When a step fails verification: + +1. Read the error output carefully +2. Reason through the failure before attempting a fix +3. Fix the issue in the current step +4. Re-verify the fix +5. Only then move to the next step + +If a step fails repeatedly (3+ attempts), stop and ask the +user for guidance. + +## Quality Checklist + +Before starting: +- [ ] Plan exists and is clear enough to execute +- [ ] Steps are broken down and presented to the user +- [ ] User confirmed readiness to proceed + +During execution: +- [ ] Each step is verified before moving on +- [ ] Failures are fixed in place, not deferred +- [ ] Checkpoints happen every 3-5 steps + +After completion: +- [ ] Final full verification passes +- [ ] Deviations from plan are noted +- [ ] Context persistence is suggested if warranted diff --git a/internal/assets/integrations/copilot-cli/skills/ctx-import-plans/SKILL.md b/internal/assets/integrations/copilot-cli/skills/ctx-import-plans/SKILL.md new file mode 100644 index 000000000..7f72bb741 --- /dev/null +++ b/internal/assets/integrations/copilot-cli/skills/ctx-import-plans/SKILL.md @@ -0,0 +1,56 @@ +--- +name: ctx-import-plans +description: "Import plan files into project specs directory. Use to convert external plans into project-tracked specs." 
+tools: [bash, read, write] +--- + +Import plan files into the project's `specs/` directory. + +## When to Use + +- When plan files exist outside the project (e.g., from AI + tool plan modes) +- When converting external design docs to project specs +- When the user says "import that plan" + +## When NOT to Use + +- Plan is already in `specs/` +- Plan is too vague to be a spec (brainstorm first) + +## Process + +### 1. Locate the plan + +If path provided, read it. Otherwise, check common locations: +- Current conversation context +- Session workspace files + +### 2. Convert to spec format + +Map plan sections to the spec template structure: +- Problem → Problem +- Steps/Tasks → Implementation +- Goals → Happy Path +- Risks → Edge Cases + +### 3. Handle conflicts + +If `specs/{name}.md` already exists: +- Compare contents +- Offer to merge, replace, or rename + +### 4. Write the spec + +Write to `specs/{name}.md`. + +### 5. Create tasks (optional) + +Offer to break the spec into tasks in TASKS.md. + +## Quality Checklist + +- [ ] Spec follows project template structure +- [ ] No conflicts with existing specs +- [ ] File written to correct location +- [ ] Tasks offered if applicable diff --git a/internal/assets/integrations/copilot-cli/skills/ctx-journal-enrich-all/SKILL.md b/internal/assets/integrations/copilot-cli/skills/ctx-journal-enrich-all/SKILL.md new file mode 100644 index 000000000..cbf9396de --- /dev/null +++ b/internal/assets/integrations/copilot-cli/skills/ctx-journal-enrich-all/SKILL.md @@ -0,0 +1,59 @@ +--- +name: ctx-journal-enrich-all +description: "Batch journal pipeline: export unexported sessions then enrich all unenriched entries." +tools: [bash, read, write, edit] +--- + +Full journal pipeline: import sessions and batch-enrich entries. 
+ +## When to Use + +- Backlog of unenriched journal entries +- After many sessions without journal maintenance +- When running periodic journal housekeeping + +## When NOT to Use + +- No journal entries exist +- All entries are already enriched +- Single entry (use `ctx-journal-enrich` instead) + +## Process + +### 1. Import unexported sessions + +```bash +ctx recall export --all +``` + +### 2. List unenriched entries + +```bash +ctx journal list --unenriched +``` + +### 3. Batch enrich + +For each unenriched entry: +1. Read the entry content +2. Generate appropriate frontmatter +3. Write the enriched version +4. Report progress + +For large backlogs (20+ entries), use heuristic enrichment: +derive metadata from filename patterns and entry headings +without reading full content. + +### 4. Report + +``` +Enriched: 15/15 entries +Skipped: 3 (already enriched) +``` + +## Quality Checklist + +- [ ] All unexported sessions imported first +- [ ] Each enriched entry has valid frontmatter +- [ ] Progress reported during batch +- [ ] No entries corrupted or lost diff --git a/internal/assets/integrations/copilot-cli/skills/ctx-journal-enrich/SKILL.md b/internal/assets/integrations/copilot-cli/skills/ctx-journal-enrich/SKILL.md new file mode 100644 index 000000000..d9a27f0f1 --- /dev/null +++ b/internal/assets/integrations/copilot-cli/skills/ctx-journal-enrich/SKILL.md @@ -0,0 +1,60 @@ +--- +name: ctx-journal-enrich +description: "Enrich a journal entry with YAML frontmatter metadata. Use to add type, outcome, topics, and technologies to session records." +tools: [bash, read, write, edit] +--- + +Enrich individual journal entries with structured metadata. + +## When to Use + +- After exporting a session to the journal +- When journal entries lack metadata for search/filter +- When `ctx journal` shows unenriched entries + +## When NOT to Use + +- Entry is already fully enriched +- No journal entries exist + +## Process + +### 1. 
Identify the entry + +If not specified, find unenriched entries: + +```bash +ctx journal list --unenriched +``` + +### 2. Read the entry + +Read the full session content to understand what happened. + +### 3. Generate frontmatter + +Add or update YAML frontmatter with: + +```yaml +--- +type: feature|bugfix|refactor|research|planning|review +outcome: completed|partial|blocked|abandoned +topics: [topic1, topic2] +technologies: [go, typescript, ...] +summary: "One-line summary of the session" +--- +``` + +### 4. Write enriched entry + +Update the file with the new frontmatter while preserving +the body content. + +## Quality Checklist + +- [ ] Frontmatter is valid YAML +- [ ] Type matches the actual work done +- [ ] Outcome is accurate +- [ ] Topics are specific, not generic +- [ ] Summary is one clear sentence +- [ ] Body content is preserved unchanged diff --git a/internal/assets/integrations/copilot-cli/skills/ctx-journal-normalize/SKILL.md b/internal/assets/integrations/copilot-cli/skills/ctx-journal-normalize/SKILL.md new file mode 100644 index 000000000..298f5dc28 --- /dev/null +++ b/internal/assets/integrations/copilot-cli/skills/ctx-journal-normalize/SKILL.md @@ -0,0 +1,46 @@ +--- +name: ctx-journal-normalize +description: "Normalize journal source markdown for clean rendering. Use after journal site shows rendering issues: fence nesting, metadata formatting, broken lists." +tools: [bash, read, write, edit] +--- + +Reconstruct journal entries as clean markdown from stripped plain text. + +## When to Use + +- After `ctx journal site` shows rendering issues +- When journal entries have fence nesting problems +- When metadata blocks render as raw `**Key**: value` +- Before running `ctx-journal-enrich` (clean markdown improves extraction) + +## When NOT to Use + +- On entries already normalized (check `.state.json`) +- When the site renders correctly +- On non-journal markdown files + +## Output Rules + +1. **Fences**: Always use backtick fences. 
Innermost code gets + 3 backticks. Each nesting level adds 1. +2. **Metadata**: `**Key**: value` blocks become collapsed `<details>`. +3. **Tool output**: Collapse into `<details>` when > 10 lines. +4. **Lists**: 2-space indent per level. +5. **No invented content**: Every word in output traces to input. + +## Process + +1. **Backup first**: copy journal directory to `.bak` sibling +2. Identify files to normalize (skip already-normalized via `.state.json`) +3. Process files turn-by-turn (not whole file at once) +4. Write back the fixed files +5. Mark normalized: `ctx system mark-journal normalized` +6. Regenerate site: `ctx journal site --build` +7. Report what changed + +## Quality Checklist + +- [ ] Backup created before modifying +- [ ] Already-normalized files skipped +- [ ] No content was invented or lost +- [ ] State file updated for processed entries diff --git a/internal/assets/integrations/copilot-cli/skills/ctx-loop/SKILL.md b/internal/assets/integrations/copilot-cli/skills/ctx-loop/SKILL.md new file mode 100644 index 000000000..0d0edde19 --- /dev/null +++ b/internal/assets/integrations/copilot-cli/skills/ctx-loop/SKILL.md @@ -0,0 +1,62 @@ +--- +name: ctx-loop +description: "Generate autonomous iteration loop scripts for headless AI tool runs with configurable completion signals." +tools: [bash, read, write] +--- + +Generate shell scripts for autonomous AI iteration loops. + +## When to Use + +- Setting up CI-driven AI workflows +- When a task needs autonomous iteration with checks +- For batch processing with verification gates + +## When NOT to Use + +- Interactive work (just do it in the session) +- Simple single-run tasks +- When safety checks aren't defined + +## Process + +### 1. Define the loop + +Gather: +- **Command**: what to run each iteration +- **Completion signal**: how to detect "done" (exit code, output pattern, file exists) +- **Max iterations**: safety limit (default: 10) +- **Checkpoint command**: what to run between iterations + +### 2. 
Generate the script + +```bash +#!/bin/bash +set -euo pipefail +MAX_ITER=${1:-10} +for i in $(seq 1 "$MAX_ITER"); do + echo "=== Iteration $i/$MAX_ITER ===" + # Run the task + {command} + # Check completion + if {completion_check}; then + echo "✅ Complete after $i iterations" + exit 0 + fi + # Checkpoint + {checkpoint} +done +echo "❌ Max iterations reached" +exit 1 +``` + +### 3. Write and verify + +Write to the requested location. Verify with `bash -n`. + +## Quality Checklist + +- [ ] Max iterations has a sane default +- [ ] Completion signal is well-defined +- [ ] Script has `set -euo pipefail` +- [ ] Script passes `bash -n` syntax check diff --git a/internal/assets/integrations/copilot-cli/skills/ctx-next/SKILL.md b/internal/assets/integrations/copilot-cli/skills/ctx-next/SKILL.md index 58181d247..e17e48034 100644 --- a/internal/assets/integrations/copilot-cli/skills/ctx-next/SKILL.md +++ b/internal/assets/integrations/copilot-cli/skills/ctx-next/SKILL.md @@ -1,6 +1,7 @@ --- name: ctx-next description: "Suggest what to work on next. Use when starting a session, finishing a task, or when unsure what to prioritize." +tools: [bash, read, glob, grep] --- Analyze current tasks and recent session activity, then suggest @@ -19,13 +20,6 @@ Analyze current tasks and recent session activity, then suggest - When actively mid-task (don't interrupt flow with suggestions) - When no context directory exists (nothing to analyze) -## Usage Examples - -```text -/ctx-next -/ctx-next (just finished the auth refactor) -``` - ## Process Do all of this **silently**: do not narrate the steps: @@ -35,7 +29,7 @@ Do all of this **silently**: do not narrate the steps: 2. **Check recent sessions** to understand what was just worked on and avoid suggesting already-completed work: ```bash - ctx journal source --limit 3 + ctx recall list --limit 3 ``` 3. 
**Read the most recent session file** (if any) to understand what was accomplished and what follow-up items were noted @@ -101,41 +95,6 @@ useful)* - If an in-progress task exists, it should almost always be recommendation #1 (don't abandon unfinished work) -## Examples - -### Good Output - -> ### Recommended Next -> -> **1. Fix `block-non-path-ctx` hook** `#priority:high` -> > Still open from yesterday's session. The hook is too -> > aggressive: it blocks `git -C path` commands that don't -> > invoke ctx. Quick fix, clears a blocker. -> -> **2. Add `Context.File(name)` method** `#priority:high` -> > Eliminates 10+ linear scan boilerplate instances across -> > 5 packages. High impact, low effort: good consolidation -> > target. -> -> **3. Topics system (T1.1)** `#priority:medium` -> > Journal site's most impactful remaining feature. Metadata -> > is already in place from the enrichment work. -> -> --- -> -> *Based on 24 pending tasks across 3 phases. Last session: -> doc-drift-cleanup (2026-02-11).* - -### Bad Output - -> "You have many tasks. Here are some options: -> - Do some stuff with hooks -> - Maybe work on tests -> - There's also some docs to write" - -(Too vague, no priorities, no rationale, no connection to -context.) - ## Quality Checklist Before presenting recommendations, verify: diff --git a/internal/assets/integrations/copilot-cli/skills/ctx-pad/SKILL.md b/internal/assets/integrations/copilot-cli/skills/ctx-pad/SKILL.md new file mode 100644 index 000000000..9f4e3ccab --- /dev/null +++ b/internal/assets/integrations/copilot-cli/skills/ctx-pad/SKILL.md @@ -0,0 +1,140 @@ +--- +name: ctx-pad +description: "Manage encrypted scratchpad. Use for short, sensitive one-liners that travel with the project." +tools: [bash] +--- + +Manage the encrypted scratchpad via `ctx pad` commands using +natural language. Translate what the user says into the right +command. 
+ +## When to Use + +- User wants to jot down a quick note, reminder, or sensitive value +- User asks to see, add, remove, edit, or reorder scratchpad entries +- User mentions "scratchpad", "pad", "notes", or "sticky notes" +- User says "jot down", "remember this", "note to self" + +## When NOT to Use + +- For structured tasks (use `ctx add task` instead) +- For architectural decisions (use `ctx add decision` instead) +- For lessons learned (use `ctx add learning` instead) + +## Command Mapping + +| User intent | Command | +|------------------------------------------------------------|--------------------------------------------| +| "show my scratchpad" / "what's on my pad" | `ctx pad` | +| "show me entry 3" / "what's in entry 3" | `ctx pad show 3` | +| "add a note: check DNS" / "jot down: check DNS" | `ctx pad add "check DNS"` | +| "delete the third one" / "remove entry 3" | `ctx pad rm 3` | +| "change entry 2 to ..." / "replace entry 2 with ..." | `ctx pad edit 2 "new text"` | +| "append '-- important' to entry 3" / "add to entry 3: ..." 
| `ctx pad edit 3 --append "-- important"` | +| "prepend 'URGENT:' to entry 1" | `ctx pad edit 1 --prepend "URGENT:"` | +| "move entry 4 to the top" / "prioritize entry 4" | `ctx pad mv 4 1` | +| "move entry 1 to the bottom" | `ctx pad mv 1 N` (where N = last position) | +| "import my notes from notes.txt" | `ctx pad import notes.txt` | +| "import from stdin" / pipe into pad | `cmd | ctx pad import -` | +| "export all blobs" / "extract blobs to DIR" | `ctx pad export [DIR]` | +| "export blobs, overwrite existing" | `ctx pad export --force [DIR]` | +| "merge entries from another pad" | `ctx pad merge FILE...` | +| "merge with a different key" | `ctx pad merge --key /path/to/key FILE` | + +## Execution + +**List entries:** +```bash +ctx pad +``` + +**Show a single entry (raw text, pipe-friendly):** +```bash +ctx pad show 3 +``` + +**Add an entry:** +```bash +ctx pad add "remember to check DNS config on staging" +``` + +**Remove an entry:** +```bash +ctx pad rm 2 +``` + +**Replace an entry:** +```bash +ctx pad edit 1 "updated note text" +``` + +**Append to an entry:** +```bash +ctx pad edit 3 --append " - this is important" +``` + +**Prepend to an entry:** +```bash +ctx pad edit 1 --prepend "URGENT: " +``` + +**Move an entry:** +```bash +ctx pad mv 3 1 # move entry 3 to position 1 +``` + +**Compose entries (pipe show into edit):** +```bash +ctx pad edit 1 --append "$(ctx pad show 3)" +``` + +**Import lines from a file:** +```bash +ctx pad import notes.txt +``` + +**Import from stdin:** +```bash +grep TODO *.go | ctx pad import - +``` + +**Export blobs to a directory:** +```bash +ctx pad export ./ideas +ctx pad export --dry-run # preview without writing +ctx pad export --force ./backup # overwrite existing files +``` + +**Merge entries from another scratchpad:** +```bash +ctx pad merge worktree/.context/scratchpad.enc +ctx pad merge --key /path/to/other.key foreign.enc +ctx pad merge --dry-run pad-a.enc pad-b.md +``` + +## Interpreting User Intent + +When the user's 
intent is ambiguous: + +- "update entry 2" with new text → **replace** (full rewrite) +- "add X to entry 2" → **append** (partial update) +- "put X before entry 2's text" → **prepend** +- "prioritize" / "bump up" / "move to top" → **mv N 1** +- "deprioritize" / "move to bottom" → **mv N last** + +When the user says "add": check context: +- "add a note" / "add to my pad" → `ctx pad add` (new entry) +- "add to entry 3" / "add this to the third one" → `ctx pad edit 3 --append` (modify existing) + +## Important Notes + +- Keep the encryption key path (`~/.ctx/.ctx.key`) internal to + `ctx pad` commands: exposing it grants full decryption access + to all pad entries +- Always use `ctx pad` to access entries: reading `scratchpad.enc` + directly yields unreadable ciphertext +- If the user gets a "no key" error, tell them to obtain the + key file from a teammate +- Entries are one-liners; do not add multi-line content +- After modifying, show the updated scratchpad so the user can + verify the change diff --git a/internal/assets/integrations/copilot-cli/skills/ctx-pause/SKILL.md b/internal/assets/integrations/copilot-cli/skills/ctx-pause/SKILL.md new file mode 100644 index 000000000..a0c85ddf4 --- /dev/null +++ b/internal/assets/integrations/copilot-cli/skills/ctx-pause/SKILL.md @@ -0,0 +1,47 @@ +--- +name: ctx-pause +description: "Pause context nudge/reminder hooks for the session. Security hooks remain active." +tools: [bash] +--- + +Temporarily pause context nudge and reminder hooks while keeping +security hooks active. 
+ +## When to Use + +- When hooks are too noisy during focused work +- When doing rapid iteration and nudges interrupt flow +- When the user says "pause hooks" or "too many reminders" + +## When NOT to Use + +- At session start (hooks haven't fired yet) +- When the user wants to disable security hooks (not supported) + +## Process + +```bash +ctx system pause-hooks +``` + +This suppresses: +- Ceremony checks (remember, wrap-up) +- Persistence nudges +- Task completion checks +- Journal reminders + +This does NOT suppress: +- Dangerous command blocking +- Context load gate +- Version checks + +## Resuming + +Use `ctx-resume` to re-enable hooks, or they automatically +resume at next session start. + +## Quality Checklist + +- [ ] User confirmed they want to pause +- [ ] Security hooks remain active +- [ ] Informed user how to resume diff --git a/internal/assets/integrations/copilot-cli/skills/ctx-prompt-audit/SKILL.md b/internal/assets/integrations/copilot-cli/skills/ctx-prompt-audit/SKILL.md new file mode 100644 index 000000000..c3547bb14 --- /dev/null +++ b/internal/assets/integrations/copilot-cli/skills/ctx-prompt-audit/SKILL.md @@ -0,0 +1,54 @@ +--- +name: ctx-prompt-audit +description: "Analyze session transcripts to identify vague prompts that caused unnecessary clarification cycles." +tools: [bash, read, write] +--- + +Analyze session history to find prompts that led to wasted work +due to ambiguity, and suggest improvements. + +## When to Use + +- After a session with many back-and-forth clarifications +- When improving prompt discipline +- During periodic workflow reviews + +## When NOT to Use + +- No session history exists +- Sessions were straightforward + +## Process + +### 1. Load recent sessions + +```bash +ctx recall list --limit 5 +``` + +### 2. 
Scan for patterns + +Look for: +- Multiple clarifying questions before work began +- Misunderstood instructions leading to rework +- Vague requests like "fix it" or "make it better" +- Missing context that was discovered mid-task + +### 3. Categorize findings + +| Pattern | Example | Improvement | +|---------|---------|-------------| +| Vague scope | "Fix the tests" | "Fix TestFoo in internal/cli — it's failing on empty input" | +| Missing context | "Add a feature" | "Add JSON output to ctx status (see spec in specs/)" | +| Ambiguous reference | "Update that file" | "Update internal/config/mcp/tool/tool.go" | + +### 4. Present recommendations + +Provide actionable suggestions for clearer prompts. + +## Quality Checklist + +- [ ] At least 3 sessions analyzed +- [ ] Patterns categorized with examples +- [ ] Concrete improvements suggested +- [ ] No session data exposed inappropriately diff --git a/internal/assets/integrations/copilot-cli/skills/ctx-prompt/SKILL.md b/internal/assets/integrations/copilot-cli/skills/ctx-prompt/SKILL.md new file mode 100644 index 000000000..3cd99bc47 --- /dev/null +++ b/internal/assets/integrations/copilot-cli/skills/ctx-prompt/SKILL.md @@ -0,0 +1,49 @@ +--- +name: ctx-prompt +description: "Apply, list, and manage saved prompt templates from .context/prompts/. Use when the user asks to apply, list, or create a reusable template like code-review or refactor." +tools: [bash, read, write] +--- + +Apply reusable prompt templates from `.context/prompts/`. 
+ +## When to Use + +- User says "use the code-review prompt" or "apply the refactor template" +- User asks to list, create, or manage prompt templates +- User mentions "prompt template" or "reusable prompt" + +## When NOT to Use + +- For structured context entries (use `ctx add` instead) +- For full workflow automation (use a dedicated skill instead) +- For scratchpad notes (use `ctx pad` instead) + +## Command Mapping + +| User intent | Command | +|----------------------------------|---------------------------------| +| "list my prompts" | `ctx prompt list` | +| "show the code-review prompt" | `ctx prompt show code-review` | +| "create a new prompt" | `ctx prompt add --stdin` | +| "delete the debug prompt" | `ctx prompt rm debug` | + +## Execution + +**When no name is given:** +```bash +ctx prompt list +``` + +**When a name is given:** +```bash +ctx prompt show +``` + +Read the prompt content, then follow the instructions in the +prompt applied to the user's current context. + +## Quality Checklist + +- [ ] Used correct subcommand for user intent +- [ ] Prompt content was applied, not just displayed +- [ ] If prompt not found, suggested `ctx prompt list` diff --git a/internal/assets/integrations/copilot-cli/skills/ctx-recall/SKILL.md b/internal/assets/integrations/copilot-cli/skills/ctx-recall/SKILL.md index 5db979773..2ec6e56b9 100644 --- a/internal/assets/integrations/copilot-cli/skills/ctx-recall/SKILL.md +++ b/internal/assets/integrations/copilot-cli/skills/ctx-recall/SKILL.md @@ -1,6 +1,7 @@ --- name: ctx-recall description: "Browse session history. Use when referencing past discussions or finding context from previous work." +tools: [bash] --- Browse, inspect, and export AI session history. @@ -9,34 +10,53 @@ Browse, inspect, and export AI session history. - When the user asks "what did we do last time?" 
- When looking for context from previous work sessions
-- When exporting sessions to the journal
+- When exporting sessions to the journal for enrichment
- When searching for a specific session by topic or date

## When NOT to Use

-- When the user just wants current context (use ctx-status instead)
+- When the user just wants current context (use `ctx-status` or
+  `ctx-agent` instead)
- For modifying session content (recall is read-only)

-## Execution
+## Subcommands

-List recent sessions:
+### `ctx recall list`

```bash
ctx recall list --limit 5
```

-Show details of a specific session:
+### `ctx recall show`

```bash
+ctx recall show <session-id>
ctx recall show --latest
-ctx recall show <session-id>
```

-Export sessions to journal markdown:
+### `ctx recall export`
+```bash
+ctx recall export --all              # Export new sessions only
+ctx recall export --all --regenerate # Re-export all
+```
+
+## Typical Workflows
+
+**"What did we work on recently?"**
+```bash
+ctx recall list --limit 5
+```
+
+**"Export everything to the journal"**
```bash
ctx recall export --all
```

-After listing sessions, summarize relevant findings rather than
-dumping raw output.
+Then suggest `ctx-journal-enrich-all` for enrichment.
+
+## Quality Checklist
+
+- [ ] Used the right subcommand for user intent
+- [ ] Applied filters if user mentioned project, date, or topic
+- [ ] For export, mentioned the normalize/enrich pipeline
diff --git a/internal/assets/integrations/copilot-cli/skills/ctx-reflect/SKILL.md b/internal/assets/integrations/copilot-cli/skills/ctx-reflect/SKILL.md
new file mode 100644
index 000000000..5f83f3ef0
--- /dev/null
+++ b/internal/assets/integrations/copilot-cli/skills/ctx-reflect/SKILL.md
@@ -0,0 +1,75 @@
+---
+name: ctx-reflect
+description: "Reflect on session progress. Use at natural breakpoints, after unexpected behavior, or when shifting to a different task."
+tools: [bash, read]
+---
+
+Pause and reflect on this session. Review what has been
+accomplished and identify context worth persisting.
+ +## When to Use + +- At natural breakpoints (feature complete, bug fixed, task done) +- After unexpected behavior or a debugging detour +- When shifting from one task to a different one +- When the session may end soon +- When the user explicitly asks to reflect + +## When NOT to Use + +- At the very start of a session (nothing to reflect on) +- After trivial changes (a typo fix does not need reflection) +- When the user is in flow: do not interrupt + +## Reflection Checklist + +Step back and reason through the session as a whole before +listing items. + +### 1. Learnings + +- Did we discover any gotchas or unexpected behavior? +- Did we learn something about the codebase or tools? +- Would this help a future session avoid problems? +- Is it specific to this project? + +### 2. Decisions + +- Did we make any architectural or design choices? +- Did we choose between alternatives? What was the trade-off? +- Should the rationale be captured? + +### 3. Tasks + +- Did we complete any tasks? (Mark done in TASKS.md) +- Did we start any tasks not yet finished? +- Should new tasks be added for follow-up work? + +### 4. Session Notes + +- Was this a significant session worth a full snapshot? +- Are there open threads a future session needs to pick up? + +## Output Format + +1. **Summary**: what was accomplished (2-3 sentences) +2. **Suggested persists**: list what should be saved, with + the specific command for each item +3. **Offer**: ask the user which items to persist + +## Persistence Commands + +| What to persist | Command | +|------------------|-----------------------------------------------------------------------| +| Learning | `ctx add learning --context "..." --lesson "..." 
--application "..."` | +| Decision | `ctx add decision "..."` | +| Task completed | Edit TASKS.md directly | +| New task | `ctx add task "..."` | + +## Quality Checklist + +- [ ] Every suggested persist has a concrete command +- [ ] Learnings are project-specific, not general knowledge +- [ ] Decisions include trade-off rationale +- [ ] No empty checklist categories +- [ ] User is asked before anything is persisted diff --git a/internal/assets/integrations/copilot-cli/skills/ctx-remember/SKILL.md b/internal/assets/integrations/copilot-cli/skills/ctx-remember/SKILL.md new file mode 100644 index 000000000..a0b8f991b --- /dev/null +++ b/internal/assets/integrations/copilot-cli/skills/ctx-remember/SKILL.md @@ -0,0 +1,69 @@ +--- +name: ctx-remember +description: "Recall project context and present structured readback. Use when the user asks 'do you remember?', at session start, or when context seems lost." +tools: [bash, read] +--- + +Recall project context and present a structured readback as if +remembering, not searching. + +## Before Recalling + +Check that the context directory exists. If it does not, tell the +user: "No context directory found. Run `ctx init` to set up context +tracking, then there will be something to remember." + +## When to Use + +- The user asks "do you remember?", "what were we working on?", + or any memory-related question +- At the start of a session when context is not yet loaded +- When context seems lost or stale mid-session + +## When NOT to Use + +- Context was already loaded this session: don't re-fetch +- Mid-session when actively working and context is fresh +- When asking about a specific past session by name: use + `ctx-recall` instead + +## Process + +Do all of this **silently** — no narration of the steps: + +1. **Load context packet**: + ```bash + ctx agent --budget 4000 + ``` +2. **Read the files** listed in the packet's "Read These Files" + section (TASKS.md, DECISIONS.md, LEARNINGS.md, etc.) +3. 
**List recent sessions**: + ```bash + ctx recall list --limit 3 + ``` +4. **Present the structured readback** + +## Readback Format + +**Last session**: Topic, date, and what was accomplished. + +**Active work**: Pending and in-progress tasks from TASKS.md. + +**Recent context**: 1-2 recent decisions or learnings. + +**Next step**: Suggest what to work on next or ask for direction. + +## Readback Rules + +- Open directly with the readback: not "I don't have memory" +- Skip preamble like "Let me check": go straight to readback +- Present findings as recall, not discovery +- Be honest about the mechanism only if explicitly asked + +## Quality Checklist + +- [ ] Context packet was loaded +- [ ] Files from the read order were actually read +- [ ] Structured readback has all four sections +- [ ] No narration of the discovery process +- [ ] Readback feels like recall, not a file system tour diff --git a/internal/assets/integrations/copilot-cli/skills/ctx-remind/SKILL.md b/internal/assets/integrations/copilot-cli/skills/ctx-remind/SKILL.md new file mode 100644 index 000000000..1d2c9b806 --- /dev/null +++ b/internal/assets/integrations/copilot-cli/skills/ctx-remind/SKILL.md @@ -0,0 +1,46 @@ +--- +name: ctx-remind +description: "Manage session reminders. Use when the user says 'remind me to...' or asks about pending reminders." +tools: [bash] +--- + +Manage session-scoped reminders via `ctx remind` commands. + +## When to Use + +- User says "remind me to..." or "remind me about..." +- User asks "what reminders do I have?" 
+- User wants to dismiss or clear reminders + +## When NOT to Use + +- For structured tasks with status tracking (use `ctx add task`) +- For sensitive values or quick notes (use `ctx pad`) +- Create a reminder only when the user explicitly says "remind me" + +## Command Mapping + +| User intent | Command | +|----------------------------------|--------------------------------------------| +| "remind me to refactor swagger" | `ctx remind "refactor swagger"` | +| "remind me tomorrow to check CI" | `ctx remind "check CI" --after YYYY-MM-DD` | +| "what reminders do I have?" | `ctx remind list` | +| "dismiss reminder 3" | `ctx remind dismiss 3` | +| "clear all reminders" | `ctx remind dismiss --all` | + +## Natural Language Date Handling + +The CLI only accepts `YYYY-MM-DD` for `--after`. Convert natural +language dates to this format: + +| User says | You run | +|----------------|-------------------------------------------------| +| "next session" | `ctx remind "..."` (no `--after`) | +| "tomorrow" | `ctx remind "..." --after YYYY-MM-DD` | +| "next week" | `ctx remind "..." --after YYYY-MM-DD` (+7 days) | + +## Important Notes + +- Reminders fire **every session** until dismissed: no throttle +- The `--after` flag gates when a reminder starts appearing +- Reminders are stored in `.context/reminders.json` (committed to git) diff --git a/internal/assets/integrations/copilot-cli/skills/ctx-resume/SKILL.md b/internal/assets/integrations/copilot-cli/skills/ctx-resume/SKILL.md new file mode 100644 index 000000000..6a60267c3 --- /dev/null +++ b/internal/assets/integrations/copilot-cli/skills/ctx-resume/SKILL.md @@ -0,0 +1,35 @@ +--- +name: ctx-resume +description: "Resume context hooks after a pause." +tools: [bash] +--- + +Re-enable context hooks that were paused with `ctx-pause`. 
+ +## When to Use + +- After a focused work period where hooks were paused +- When the user is ready for nudges and reminders again +- When the user says "resume hooks" + +## When NOT to Use + +- Hooks are not currently paused +- At session start (hooks auto-resume) + +## Process + +```bash +ctx system resume-hooks +``` + +This re-enables all non-security hooks: +- Ceremony checks +- Persistence nudges +- Task completion checks +- Journal reminders + +## Quality Checklist + +- [ ] Hooks were actually paused before resuming +- [ ] Confirmed hooks are active again diff --git a/internal/assets/integrations/copilot-cli/skills/ctx-sanitize-permissions/SKILL.md b/internal/assets/integrations/copilot-cli/skills/ctx-sanitize-permissions/SKILL.md new file mode 100644 index 000000000..e4761062c --- /dev/null +++ b/internal/assets/integrations/copilot-cli/skills/ctx-sanitize-permissions/SKILL.md @@ -0,0 +1,67 @@ +--- +name: ctx-sanitize-permissions +description: "Audit tool permissions for dangerous or overly broad entries. Use to ensure safe agent configuration." +tools: [bash, read, write] +--- + +Audit agent permission configurations for dangerous patterns. + +## When to Use + +- After initial project setup +- When reviewing security posture +- When permissions seem overly broad +- Before sharing a project configuration + +## When NOT to Use + +- No permission config exists +- Already audited recently + +## Categories to Check + +### 1. Hook bypass permissions +Permissions that disable safety hooks entirely. + +### 2. Destructive command permissions +Allow patterns that cover `rm -rf`, `git push --force`, +`git reset --hard`, etc. + +### 3. Injection vectors +Overly broad shell permissions that could allow arbitrary +command execution. + +### 4. Overly broad wildcards +Permissions like `Bash(*)` or `Write(*)` that grant +unrestricted access. + +## Process + +1. Read the permission configuration file +2. Check each entry against the four categories +3. 
Flag dangerous entries with severity level +4. Propose safer alternatives +5. Apply fixes with user approval + +## Output Format + +``` +## Permission Audit Results + +### 🔴 Critical (N) +1. `Bash(*)` — unrestricted shell access + → Suggest: scope to specific commands + +### 🟡 Warning (N) +1. `Write(/etc/*)` — write access to system dirs + → Suggest: remove or scope to project + +### ✅ Clean (N entries passed) +``` + +## Quality Checklist + +- [ ] All permission entries reviewed +- [ ] Critical items flagged +- [ ] Safer alternatives proposed +- [ ] No changes made without user approval diff --git a/internal/assets/integrations/copilot-cli/skills/ctx-skill-audit/SKILL.md b/internal/assets/integrations/copilot-cli/skills/ctx-skill-audit/SKILL.md new file mode 100644 index 000000000..fbe310653 --- /dev/null +++ b/internal/assets/integrations/copilot-cli/skills/ctx-skill-audit/SKILL.md @@ -0,0 +1,78 @@ +--- +name: ctx-skill-audit +description: "Audit skills against prompting best practices. Check for quality, consistency, and common anti-patterns." +tools: [bash, read, glob, grep] +--- + +Audit skill files for quality, consistency, and adherence to +prompting best practices. + +## When to Use + +- After creating or modifying skills +- During periodic quality reviews +- When skills seem to underperform + +## When NOT to Use + +- No skills exist yet +- Just after a fresh skill creation (let it settle first) + +## Audit Dimensions + +### 1. Positive framing +Instructions should say what TO do, not just what NOT to do. + +### 2. Motivation over mandates +Explain WHY a rule exists, not just the rule. + +### 3. Structure +Uses clear sections: When to Use, When NOT to Use, Process, +Quality Checklist. + +### 4. Examples +Includes good and bad examples for clarity. + +### 5. Scope +Skill is focused on one task, not a catch-all. + +### 6. Description quality +Frontmatter description is clear and actionable. + +### 7. 
Overtriggering guard +"When NOT to Use" section prevents false activations. + +### 8. Phantom references +No references to tools, files, or commands that don't exist. + +### 9. Tool declarations +Tools listed in frontmatter match what the skill actually uses. + +## Process + +1. Glob all skill files: `internal/assets/**/skills/*/SKILL.md` +2. Read each skill +3. Score against the 9 dimensions (pass/fail/partial) +4. Report findings per skill with actionable fixes + +## Output Format + +``` +## Skill Audit Report + +| Skill | Score | Issues | +|-------|-------|--------| +| ctx-implement | 8/9 | Missing bad example | +| ctx-commit | 9/9 | Clean | +| ctx-reflect | 7/9 | Phantom ref to /ctx-update-docs | + +### Details +... +``` + +## Quality Checklist + +- [ ] All skill files scanned +- [ ] Each dimension checked per skill +- [ ] Actionable fixes provided for failures +- [ ] No false positives (verify references exist) diff --git a/internal/assets/integrations/copilot-cli/skills/ctx-skill-creator/SKILL.md b/internal/assets/integrations/copilot-cli/skills/ctx-skill-creator/SKILL.md new file mode 100644 index 000000000..f8b05ac9a --- /dev/null +++ b/internal/assets/integrations/copilot-cli/skills/ctx-skill-creator/SKILL.md @@ -0,0 +1,76 @@ +--- +name: ctx-skill-creator +description: "Create, improve, test, and deploy skills. Full skill lifecycle from intent to working skill file." +tools: [bash, read, write, edit, glob, grep] +--- + +Create new skills or improve existing ones through a structured +workflow. + +## When to Use + +- Creating a new skill from scratch +- Improving an underperforming skill +- Porting a skill from one integration to another + +## When NOT to Use + +- Quick one-off automations (just script it) +- When the need is too vague (brainstorm first) + +## Process + +### 1. Intent capture + +Gather: +- What should this skill do? +- When should it trigger? +- What tools does it need? +- What's the expected output? + +### 2. 
Draft the SKILL.md + +Use the standard structure: + +```yaml +--- +name: ctx-{name} +description: "..." +tools: [bash, read, write, ...] +--- +``` + +Sections: When to Use, When NOT to Use, Process, Quality Checklist. + +### 3. Validate + +Check against skill audit dimensions: +- Positive framing +- Clear scope +- Good examples +- No phantom references +- Overtriggering guard + +### 4. Test + +If possible, do a dry run of the skill's workflow to verify +it works end-to-end. + +### 5. Deploy + +Write the file to the appropriate skills directory: +- Claude: `internal/assets/claude/skills/{name}/SKILL.md` +- Copilot CLI: `internal/assets/integrations/copilot-cli/skills/{name}/SKILL.md` + +### 6. Build + +Run `go build ./cmd/ctx/...` to verify the embed compiles. + +## Quality Checklist + +- [ ] Frontmatter is complete (name, description, tools) +- [ ] When to Use / When NOT to Use sections exist +- [ ] Process has numbered, actionable steps +- [ ] Quality Checklist at the end +- [ ] No phantom references +- [ ] Build passes with new skill embedded diff --git a/internal/assets/integrations/copilot-cli/skills/ctx-spec/SKILL.md b/internal/assets/integrations/copilot-cli/skills/ctx-spec/SKILL.md new file mode 100644 index 000000000..a8fc9445a --- /dev/null +++ b/internal/assets/integrations/copilot-cli/skills/ctx-spec/SKILL.md @@ -0,0 +1,76 @@ +--- +name: ctx-spec +description: "Scaffold a feature spec from the project template. Use when planning a new feature or when a task references a missing spec." +tools: [bash, read, write] +--- + +Scaffold a new spec from `specs/tpl/spec-template.md` and walk +through each section with the user. 
+ +## When to Use + +- Before implementing a non-trivial feature +- When a task says "Spec: `specs/X.md`" and the file doesn't exist +- When `ctx-brainstorm` produced a validated design that needs + a written artifact +- When the user says "let's spec this out" + +## When NOT to Use + +- Bug fixes or small changes +- When a spec already exists (read it instead) +- When the design is still vague (use `ctx-brainstorm` first) + +## Process + +### 1. Gather the Feature Name + +If not provided, ask. Derive filename: lowercase, hyphens. +Target: `specs/{feature-name}.md` + +### 2. Read the Template + +Read `specs/tpl/spec-template.md`. + +### 3. Walk Through Sections + +Work through each section **one at a time**: + +| Section | Prompt | +|----------------------|---------------------------------------------------------------| +| **Problem** | "What user-visible problem does this solve? Why now?" | +| **Approach** | "How does this work? Where does it fit?" | +| **Happy Path** | "Walk through what happens when everything goes right." | +| **Edge Cases** | "What could go wrong? (empty input, failures, duplicates)" | +| **Validation Rules** | "What input constraints are enforced?" | +| **Error Handling** | "For each error: user message and recovery?" | +| **Interface** | "CLI command? Skill? Both? Flags?" | +| **Implementation** | "Which files change? Key functions? Helpers to reuse?" | +| **Configuration** | "Any .ctxrc keys, env vars, or settings?" | +| **Testing** | "Unit, integration, edge case tests?" | +| **Non-Goals** | "What does this intentionally NOT do?" | + +**Spend extra time on Edge Cases and Error Handling.** + +### 4. Open Questions + +After all sections: +> "Anything unresolved? If not, I'll remove the Open Questions +> section." + +### 5. Write the Spec + +Write to `specs/{feature-name}.md`. + +### 6. 
Cross-Reference + +- If a Phase exists in TASKS.md, confirm the path matches +- If no tasks exist, offer to create them + +## Quality Checklist + +- [ ] Problem section explains *why*, not just *what* +- [ ] At least 3 edge cases with expected behavior +- [ ] Error handling has user messages and recovery +- [ ] Non-goals are explicit +- [ ] No placeholder text remains diff --git a/internal/assets/integrations/copilot-cli/skills/ctx-status/SKILL.md b/internal/assets/integrations/copilot-cli/skills/ctx-status/SKILL.md index 345e97f6d..14eca349e 100644 --- a/internal/assets/integrations/copilot-cli/skills/ctx-status/SKILL.md +++ b/internal/assets/integrations/copilot-cli/skills/ctx-status/SKILL.md @@ -1,6 +1,7 @@ --- name: ctx-status description: "Show context summary. Use at session start or when unclear about current project state." +tools: [bash] --- Show the current context status: files, token budget, tasks, @@ -9,65 +10,21 @@ and recent activity. ## When to Use - At session start to orient before doing work -- When confused about what is being worked on or what context - exists +- When confused about what's being worked on - To check token usage and context health - When the user asks "what's the state of the project?" ## When NOT to Use -- When you already loaded context via `/ctx-agent` in this - session (status is a subset of what agent provides) -- Repeatedly within the same session without changes in between - -## Usage Examples - -```text -/ctx-status -/ctx-status --verbose -/ctx-status --json -``` +- When you already loaded context via `ctx-agent` in this session +- Repeatedly within the same session without changes ## Flags -| Flag | Short | Default | Purpose | -|-------------|-------|---------|----------------------------------| -| `--json` | | false | Output as JSON (for scripting) | -| `--verbose` | `-v` | false | Include file content previews | - -## What It Shows - -The output has three sections: - -### 1. 
Overview - -- Context directory path -- Total file count -- Token estimate (sum across all `.md` files in the context directory) - -### 2. Files - -Each `.md` file in the context directory with: - -| Indicator | Meaning | -|-----------|-----------------------------------------| -| check | File has content (loaded) | -| circle | File exists but is empty | - -File-specific summaries: -- `CONSTITUTION.md`: number of invariants -- `TASKS.md`: active and completed task counts -- `DECISIONS.md`: number of decisions -- `GLOSSARY.md`: number of terms -- Others: "loaded" or "empty" - -With `--verbose`: adds token count, byte size, and a 3-line -content preview per file. - -### 3. Recent Activity - -The 3 most recently modified files with relative timestamps -(e.g., "5 minutes ago", "2 hours ago"). +| Flag | Default | Purpose | +|-------------|---------|--------------------------------| +| `--json` | false | Output as JSON (for scripting) | +| `--verbose` | false | Include file content previews | ## Execution @@ -75,25 +32,23 @@ The 3 most recently modified files with relative timestamps ctx status ``` -After running, summarize the key points for the user: +After running, summarize the key points: - How many active tasks remain -- Whether any context files are empty (might need populating) -- Token budget usage (is context lean or bloated?) 
-- What was recently modified (gives a sense of momentum) +- Whether any context files are empty +- Token budget usage +- What was recently modified ## Interpreting Results -| Observation | Suggestion | -|-------------------------|-------------------------------------------------------------| -| Many empty files | Context is sparse; populate core files (TASKS, CONVENTIONS) | -| High token count (>30k) | Consider `ctx compact` or archiving completed tasks | -| No recent activity | Context may be stale; check if files need updating | -| TASKS.md has 0 active | All work done, or tasks need to be added | +| Observation | Suggestion | +|-------------------------|-------------------------------------------------| +| Many empty files | Populate core files (TASKS, CONVENTIONS) | +| High token count (>30k) | Consider `ctx compact` or archiving tasks | +| No recent activity | Context may be stale; check if files need update | +| TASKS.md has 0 active | All work done, or tasks need to be added | ## Quality Checklist -After running status, verify: -- [ ] Summarized the output for the user (do not just dump - raw output without commentary) -- [ ] Flagged any empty core files that should be populated -- [ ] Noted token budget if it seems high or low +- [ ] Summarized the output (do not just dump raw output) +- [ ] Flagged empty core files +- [ ] Noted token budget if high or low diff --git a/internal/assets/integrations/copilot-cli/skills/ctx-verify/SKILL.md b/internal/assets/integrations/copilot-cli/skills/ctx-verify/SKILL.md new file mode 100644 index 000000000..c0101db64 --- /dev/null +++ b/internal/assets/integrations/copilot-cli/skills/ctx-verify/SKILL.md @@ -0,0 +1,52 @@ +--- +name: ctx-verify +description: "Verify before claiming completion. Use before saying work is done, tests pass, or builds succeed." +tools: [bash, read, glob, grep] +--- + +Run the relevant verification command before claiming a result. 
+ +## When to Use + +- Before saying "tests pass", "build succeeds", or "bug fixed" +- Before reporting completion of any task with a testable outcome +- When the user asks "does it work?" or "is it done?" + +## When NOT to Use + +- For documentation-only changes with no testable outcome +- When the user explicitly says "skip verification" +- For exploratory work with no pass/fail criterion + +## Workflow + +1. **Identify** what command proves the claim +2. **Think through** what passing looks like (and false positives) +3. **Run** the command (fresh, not a previous run) +4. **Read** full output; check exit code, count failures +5. **Report** actual results with evidence + +## Claim-to-Evidence Map + +| Claim | Required Evidence | +|-------------------|--------------------------------------------| +| Tests pass | Test command output showing 0 failures | +| Linter clean | `golangci-lint run` showing 0 errors | +| Build succeeds | `go build` exit 0 | +| Bug fixed | Original symptom no longer reproduces | +| All checks pass | `make audit` showing all steps pass | + +## Self-Audit Questions + +Before presenting any artifact as complete: +- What assumptions did I make? +- What did I NOT check? +- Where am I least confident? +- What would a reviewer question first? + +## Quality Checklist + +- [ ] Verification command was run fresh (not reused) +- [ ] Exit code was checked +- [ ] Claim matches evidence (build ≠ tests) +- [ ] If multiple claims, each has its own evidence diff --git a/internal/assets/integrations/copilot-cli/skills/ctx-worktree/SKILL.md b/internal/assets/integrations/copilot-cli/skills/ctx-worktree/SKILL.md new file mode 100644 index 000000000..5686becd4 --- /dev/null +++ b/internal/assets/integrations/copilot-cli/skills/ctx-worktree/SKILL.md @@ -0,0 +1,170 @@ +--- +name: ctx-worktree +description: "Manage git worktrees for parallel agent development. Use when splitting work across independent task tracks." 
+tools: [bash, read, glob]
+---
+
+Manage git worktrees to parallelize agent work across independent
+task tracks. Supports creating, listing, and tearing down worktrees
+with ctx-aware guardrails.
+
+## When to Use
+
+- User wants to parallelize a backlog across multiple agents
+- Multiple independent task tracks with non-overlapping files
+- User says "create worktree", "let's parallelize", "split the work"
+- 3+ independent tasks that can be worked concurrently
+
+## When NOT to Use
+
+- Single task or tightly coupled tasks
+- Tasks that touch overlapping files (high merge conflict risk)
+- Fewer than 3 independent tasks (overhead exceeds benefit)
+- Already inside a worktree (manage from the main checkout only)
+- User just wants concurrent sessions in the same tree
+
+## Operations
+
+### `create <name>`
+
+Create a new worktree as a sibling directory with a `work/` branch.
+
+**Process:**
+
+1. **Check count**: refuse if 4 worktrees already exist:
+   ```bash
+   git worktree list
+   ```
+   Count lines. If >= 5 (1 main + 4 worktrees), stop and explain
+   the limit.
+
+2. **Determine project name** from the current directory basename:
+   ```bash
+   basename "$(git rev-parse --show-toplevel)"
+   ```
+
+3. **Create the worktree** as a sibling directory:
+   ```bash
+   git worktree add "../<project>-<name>" -b "work/<name>"
+   ```
+
+4. **Verify** the worktree was created:
+   ```bash
+   ls "../<project>-<name>"
+   ```
+
+5. **Remind the user**:
+   > Do NOT run `ctx init` in the worktree. The context
+   > directory is already tracked in git and will be present.
+   > Launch a separate session there and work normally.
+
+### `list`
+
+Show all active worktrees:
+
+```bash
+git worktree list
+```
+
+### `teardown <name>`
+
+Merge a completed worktree back and clean up.
+
+**Process:**
+
+1. **Check for uncommitted changes** in the worktree:
+   ```bash
+   git -C "../<project>-<name>" status --porcelain
+   ```
+   If output is non-empty, warn and stop. The user must commit or
+   discard changes first.
+
+2. 
**Merge the work branch** into the current branch:
+   ```bash
+   git merge "work/<name>"
+   ```
+   If there are conflicts, stop and help the user resolve them.
+   TASKS.md conflicts are common: see guidance below.
+
+3. **Remove the worktree**:
+   ```bash
+   git worktree remove "../<project>-<name>"
+   ```
+
+4. **Delete the branch**:
+   ```bash
+   git branch -d "work/<name>"
+   ```
+
+5. **Verify** cleanup:
+   ```bash
+   git worktree list
+   git branch | grep "work/"
+   ```
+
+## Guardrails
+
+- **Max 4 worktrees**: more than 4 parallel tracks makes merge
+  complexity outweigh productivity gains
+- **Sibling directories only**: worktrees go in `../<project>-<name>`,
+  never inside the project tree
+- **`work/` branch prefix**: all worktree branches use `work/`
+  for easy identification and cleanup
+- **No `ctx init` in worktrees**: the context directory is tracked
+  in git; running init would overwrite shared context files
+- **Manage from main checkout only**: create and teardown worktrees
+  from the main working tree, not from inside a worktree
+- **TASKS.md conflict resolution**: when merging, TASKS.md will
+  often conflict because multiple agents marked different tasks as
+  complete. Resolution: accept all `[x]` completions from both sides.
+  No task should go from `[x]` back to `[ ]`.
+
+## What Works Differently in Worktrees
+
+The encryption key lives at `~/.ctx/.ctx.key` (user-level, outside
+the project). All worktrees on the same machine share this path, so
+**`ctx pad` and `ctx notify` work in worktrees automatically**.
+
+One thing to watch:
+
+- **Journal enrichment**: `ctx recall export` and `ctx journal enrich`
+  resolve paths relative to the current working directory. Files
+  created in a worktree stay in that worktree and are discarded on
+  teardown. Enrich journals on the main branch after merging: the
+  JSONL session logs are intact regardless.
+
+## Task Grouping Guidance
+
+Before creating worktrees, analyze the backlog to group tasks into
+non-overlapping tracks:
+
+1. 
**Read TASKS.md** and identify all pending tasks +2. **Estimate blast radius**: which files/directories does each + task touch? +3. **Group by non-overlapping directories**: tasks that touch the + same package or file must go in the same track +4. **Present the grouping** to the user before creating worktrees: + +```text +Proposed worktree groups: + + work/docs : recipe updates, blog post, getting started guide + (touches: docs/) + work/crypto : P3.1-P3.3 encrypted scratchpad infra + (touches: internal/crypto/, internal/config/) + work/pad-cli : P3.4-P3.9 pad CLI commands + (touches: internal/cli/pad/) +``` + +Let the user approve or adjust before proceeding. + +## Quality Checklist + +Before any operation, verify: +- [ ] Worktree count checked (max 4) +- [ ] Branch uses `work/` prefix +- [ ] Worktree is a sibling directory (`../`) +- [ ] User reminded not to run `ctx init` in worktree +- [ ] Uncommitted changes checked before teardown +- [ ] Merge completed before worktree removal +- [ ] Branch deleted after worktree removal diff --git a/internal/assets/integrations/copilot-cli/skills/ctx-wrap-up/SKILL.md b/internal/assets/integrations/copilot-cli/skills/ctx-wrap-up/SKILL.md new file mode 100644 index 000000000..ba764c867 --- /dev/null +++ b/internal/assets/integrations/copilot-cli/skills/ctx-wrap-up/SKILL.md @@ -0,0 +1,121 @@ +--- +name: ctx-wrap-up +description: "End-of-session context persistence ceremony. Use when wrapping up a session to capture learnings, decisions, conventions, and tasks." +tools: [bash, read, write, edit] +--- + +Guide end-of-session context persistence. Gather signal from the +session, propose candidates worth persisting, and persist approved +items via `ctx add`. 
+ +## When to Use + +- At the end of a session, before the user quits +- When the user says "let's wrap up", "save context", "end of + session" + +## When NOT to Use + +- Nothing meaningful happened (only read files, quick lookup) +- The user already persisted everything manually +- Mid-session: use `ctx-reflect` instead + +## Process + +### Phase 1: Gather signal + +Do this **silently**: + +1. Check what changed: + ```bash + git diff --stat + ``` +2. Check commits made this session: + ```bash + git log --oneline -5 + ``` +3. Scan the conversation for: + - Architectural choices or trade-offs + - Gotchas or unexpected behavior + - Patterns established or conventions agreed + - Follow-up work identified + - Tasks completed or progressed + +### Phase 2: Propose candidates + +Think step-by-step about what is worth persisting. For each +candidate ask: +- Is this project-specific or general knowledge? +- Would a future session benefit from knowing this? +- Is this already captured in context files? + +Present candidates grouped by type. Skip empty categories. + +``` +## Session Wrap-Up + +### Learnings (N candidates) +1. **Title** — Context, Lesson, Application + +### Decisions (N candidates) +1. **Title** — Context, Rationale, Consequence + +### Conventions (N candidates) +1. **Convention description** + +### Tasks (N candidates) +1. **Task description** (new | completed | updated) + +Persist all? Or select which to keep? +``` + +### Phase 3: Persist approved candidates + +Wait for user approval. For each approved item: + +| Type | Command | +|-------------|----------------------------------------------------------------------| +| Learning | `ctx add learning "Title" --context "..." --lesson "..." --application "..."` | +| Decision | `ctx add decision "Title" --context "..." --rationale "..." 
--consequence "..."` | +| Convention | `ctx add convention "Description"` | +| Task (new) | `ctx add task "Description"` | +| Task (done) | Edit TASKS.md to mark complete | + +### Phase 4: Commit (optional) + +After persisting, check for uncommitted changes: + +```bash +git status --short +``` + +If there are uncommitted changes, offer to commit with +`ctx-commit`. + +## Candidate Quality Guide + +### Good candidates + +- Specific gotchas with actionable lessons +- Real trade-offs with rationale +- Patterns codified for consistency + +### Weak candidates (do not propose) + +- General programming knowledge +- Obvious facts from the diff +- Things already in context files + +## Quality Checklist + +Before presenting: +- [ ] Signal was gathered (git diff, git log, conversation scan) +- [ ] Every candidate has complete fields +- [ ] Candidates are project-specific +- [ ] No duplicates with existing context +- [ ] Empty categories are omitted +- [ ] User is asked before persisting + +After persisting: +- [ ] Each `ctx add` command succeeded +- [ ] Uncommitted changes were surfaced diff --git a/specs/copilot-feature-parity-kit.md b/specs/copilot-feature-parity-kit.md new file mode 100644 index 000000000..fa07d7217 --- /dev/null +++ b/specs/copilot-feature-parity-kit.md @@ -0,0 +1,373 @@ +# Copilot Feature Parity Kit + +## Problem + +Claude Code integration has 41 skills, 20+ hooks, a governance/ceremony +system, and deep code intelligence (GitNexus). Copilot CLI has 5 skills +and 4 lifecycle hooks. Copilot VS Code has 45+ slash commands but most +delegate to CLI without agent-level intelligence (workflow orchestration, +proactive nudges, code-aware operations). + +This creates a two-tier experience: Claude users get a full context-aware +AI partner; Copilot users get a context reader. The gap is not in data +access (MCP tools are shared) but in **agent behavior** — the skills, +hooks, and governance that turn raw context into workflow. 
+ +## Approach + +A phased spec kit that brings Copilot CLI and VS Code to parity with +Claude Code. Each phase is independently shippable. Work is organized +into three layers: + +1. **Skills** — Copilot CLI `.github/skills/` and VS Code slash commands +2. **Hooks** — Copilot CLI hook scripts and VS Code event handlers +3. **Governance** — Proactive nudges, ceremony checks, session health + +Architecture principle: **single source of truth**. Skills are authored +as markdown SKILL.md files in `internal/assets/integrations/copilot-cli/skills/` +and deployed by `ctx setup copilot-cli --write`. VS Code commands call +`ctx` CLI or MCP tools — the extension does not duplicate skill logic. + +### Cross-references + +- `specs/copilot-cli-integration.md` — existing feature matrix (context injection, hooks, MCP, recall) +- `specs/vscode-feature-parity.md` — existing layer-by-layer VS Code mapping +- This spec supersedes neither; it fills the **skill + governance gap** they identify. + +--- + +## Phase 1 — Core Workflow Skills (Copilot CLI + VS Code) + +Port the skills that encode the work cycle: pick → implement → commit → reflect. 
### 1.1 Skills to Port

| # | Claude Skill | Copilot CLI Skill | VS Code Command | Priority |
|---|-------------|-------------------|-----------------|----------|
| 1 | `ctx-next` | `ctx-next/SKILL.md` ✅ exists | `/next` | P0 — already done |
| 2 | `ctx-implement` | `ctx-implement/SKILL.md` | `/implement` | P0 |
| 3 | `ctx-commit` | `ctx-commit/SKILL.md` | `/commit` | P0 |
| 4 | `ctx-reflect` | `ctx-reflect/SKILL.md` | `/reflect` | P0 |
| 5 | `ctx-remember` | `ctx-remember/SKILL.md` | `/remember` | P0 |
| 6 | `ctx-wrap-up` | `ctx-wrap-up/SKILL.md` | `/wrapup` | P0 |
| 7 | `ctx-code-review` | `ctx-code-review/SKILL.md` | `/review` | P1 |
| 8 | `ctx-refactor` | `ctx-refactor/SKILL.md` | `/refactor` | P1 |
| 9 | `ctx-explain` | `ctx-explain/SKILL.md` | `/explain` | P1 |
| 10 | `ctx-brainstorm` | `ctx-brainstorm/SKILL.md` | `/brainstorm` | P1 |
| 11 | `ctx-spec` | `ctx-spec/SKILL.md` | `/spec` | P1 |

### 1.2 Skill File Format (Copilot CLI)

Copilot CLI skills live in `.github/skills/<skill-name>/SKILL.md`. Format:

```markdown
---
name: ctx-implement
description: Execute implementation plan step-by-step with verification
tools: [bash, read, write, edit, glob, grep]
---

# ctx-implement

## When to Use
- User says "implement this", "build it", "start coding"
- A task from TASKS.md is selected for implementation

## When NOT to Use
- No spec or plan exists (use ctx-spec first)
- Task is ambiguous (use ctx-brainstorm first)

## Workflow
1. Read the referenced spec from `specs/`
2. Read CONVENTIONS.md for code patterns
3. Break work into chunks, commit after each
4. Run `make lint && make test` after each chunk
5. Mark task done in TASKS.md when complete
6. 
Offer to record learnings/decisions discovered + +## Quality Gates +- [ ] Spec exists and was read +- [ ] Tests pass after each chunk +- [ ] Lint passes +- [ ] TASKS.md updated +``` + +### 1.3 VS Code Slash Command Wiring + +For each new skill, add to `editors/vscode/package.json` contributes +and handle in `extension.ts`: + +```typescript +case '/implement': + return runCtxCommand(stream, 'implement', request.prompt); +``` + +VS Code commands delegate to `ctx` CLI. The extension provides UI +(progress, follow-ups, markdown rendering) but not logic. + +### 1.4 Files to Create/Modify + +| File | Change | +|------|--------| +| `internal/assets/integrations/copilot-cli/skills/ctx-implement/SKILL.md` | New skill | +| `internal/assets/integrations/copilot-cli/skills/ctx-commit/SKILL.md` | New skill | +| `internal/assets/integrations/copilot-cli/skills/ctx-reflect/SKILL.md` | New skill | +| `internal/assets/integrations/copilot-cli/skills/ctx-remember/SKILL.md` | New skill | +| `internal/assets/integrations/copilot-cli/skills/ctx-wrap-up/SKILL.md` | New skill | +| `internal/assets/integrations/copilot-cli/skills/ctx-code-review/SKILL.md` | New skill | +| `internal/assets/integrations/copilot-cli/skills/ctx-refactor/SKILL.md` | New skill | +| `internal/assets/integrations/copilot-cli/skills/ctx-explain/SKILL.md` | New skill | +| `internal/assets/integrations/copilot-cli/skills/ctx-brainstorm/SKILL.md` | New skill | +| `internal/assets/integrations/copilot-cli/skills/ctx-spec/SKILL.md` | New skill | +| `internal/cli/setup/core/copilot_cli/copilot_cli.go` | Deploy new skills | +| `editors/vscode/package.json` | Add slash commands | +| `editors/vscode/src/extension.ts` | Handle new commands | + +--- + +## Phase 2 — Architecture & Design Skills + +### 2.1 Skills to Port + +| # | Claude Skill | Copilot CLI Skill | VS Code Command | Priority | +|---|-------------|-------------------|-----------------|----------| +| 1 | `ctx-architecture` | `ctx-architecture/SKILL.md` | 
`/architecture` | P1 | +| 2 | `ctx-architecture-enrich` | `ctx-architecture-enrich/SKILL.md` | — (CLI only) | P2 | +| 3 | `ctx-architecture-failure-analysis` | `ctx-architecture-failure-analysis/SKILL.md` | — (CLI only) | P2 | +| 4 | `ctx-doctor` | `ctx-doctor/SKILL.md` | `/system doctor` ✅ | P1 | + +### 2.2 GitNexus Dependency + +`ctx-architecture-enrich` and `ctx-architecture-failure-analysis` use +GitNexus MCP tools (`mcp__gitnexus__*`). For Copilot CLI: + +- GitNexus MCP server must be registered in `~/.copilot/mcp-config.json` +- Skills should gracefully degrade if GitNexus is unavailable +- Fallback: use `grep`/`go doc` for basic code intelligence + +### 2.3 Files to Create/Modify + +| File | Change | +|------|--------| +| `internal/assets/integrations/copilot-cli/skills/ctx-architecture/SKILL.md` | New skill | +| `internal/assets/integrations/copilot-cli/skills/ctx-architecture-enrich/SKILL.md` | New skill | +| `internal/assets/integrations/copilot-cli/skills/ctx-architecture-failure-analysis/SKILL.md` | New skill | + +--- + +## Phase 3 — Governance & Proactive Hooks + +The biggest behavioral gap. Claude's hook system fires on every tool +use and user prompt, surfacing nudges for persistence, ceremonies, +and health. Copilot has no equivalent. 
+ +### 3.1 Copilot CLI Hooks to Add + +Extend `.github/hooks/ctx-hooks.json` with richer behavior in existing +hook scripts: + +| Hook | Trigger | Behavior | Claude Equivalent | +|------|---------|----------|-------------------| +| `preToolUse` (enhanced) | Every tool call | Dangerous cmd block + context load gate | `block-non-path-ctx` + `context-load-gate` | +| `postToolUse` (enhanced) | After edit/write | Task completion check + learning nudge | `check-task-completion` + `post-commit` | +| `sessionStart` (enhanced) | Session begin | Load context + version check + reminder relay | `budget-agent` + `check-version` + `check-reminders` | +| `sessionEnd` (enhanced) | Session close | Persistence ceremony + journal capture | `check-ceremonies` + `check-persistence` | + +### 3.2 New Hook Scripts + +``` +.github/hooks/scripts/ +├── ctx-preToolUse.sh # Enhanced: dangerous cmd + context gate +├── ctx-preToolUse.ps1 # PowerShell mirror +├── ctx-postToolUse.sh # Enhanced: task check + learning nudge +├── ctx-postToolUse.ps1 # PowerShell mirror +├── ctx-sessionStart.sh # Enhanced: bootstrap + reminders +├── ctx-sessionStart.ps1 # PowerShell mirror +├── ctx-sessionEnd.sh # Enhanced: ceremony + journal +└── ctx-sessionEnd.ps1 # PowerShell mirror +``` + +### 3.3 Governance Messages + +Port the hook message registry. 
Each message has: +- **Condition**: when to fire (e.g., "uncompleted tasks > 5") +- **Message**: what to show (e.g., "⚠ 5 tasks pending — run `/next`") +- **Cooldown**: don't repeat within N minutes + +| Message ID | Condition | Copilot Surface | +|------------|-----------|-----------------| +| `ceremony-remember` | Session > 30 min, no recall done | `sessionStart` script output | +| `ceremony-wrapup` | Session > 2 hours, no persist | `sessionEnd` script output | +| `persistence-nudge` | Decision made but not recorded | `postToolUse` script output | +| `task-completion` | File edited matching task description | `postToolUse` script output | +| `version-drift` | `ctx --version` != expected | `sessionStart` script output | +| `reminder-relay` | Pending reminders exist | `sessionStart` script output | + +### 3.4 VS Code Governance + +Map governance to VS Code extension events: + +| Governance | VS Code Mechanism | +|------------|-------------------| +| Ceremony check | `vscode.window.onDidChangeWindowState` (focus loss → prompt) | +| Persistence nudge | `vscode.workspace.onDidSaveTextDocument` (`.context/` watch) | +| Task completion | `onDidSaveTextDocument` → `ctx system check-task-completion` | +| Reminder relay | Status bar item + 5-min timer (already partial) | +| Version check | `ensureCtxAvailable()` version comparison | +| Session ceremony | Extension `deactivate()` → wrap-up prompt | + +### 3.5 Files to Create/Modify + +| File | Change | +|------|--------| +| `internal/assets/integrations/copilot-cli/scripts/ctx-preToolUse.sh` | Enhance with context gate | +| `internal/assets/integrations/copilot-cli/scripts/ctx-preToolUse.ps1` | PowerShell mirror | +| `internal/assets/integrations/copilot-cli/scripts/ctx-postToolUse.sh` | Enhance with task check + nudge | +| `internal/assets/integrations/copilot-cli/scripts/ctx-postToolUse.ps1` | PowerShell mirror | +| `internal/assets/integrations/copilot-cli/scripts/ctx-sessionStart.sh` | Enhance with bootstrap | +| 
`internal/assets/integrations/copilot-cli/scripts/ctx-sessionStart.ps1` | PowerShell mirror | +| `internal/assets/integrations/copilot-cli/scripts/ctx-sessionEnd.sh` | Enhance with ceremony | +| `internal/assets/integrations/copilot-cli/scripts/ctx-sessionEnd.ps1` | PowerShell mirror | +| `internal/assets/integrations/copilot-cli/ctx-hooks.json` | Updated hook config | +| `editors/vscode/src/extension.ts` | Add governance event handlers | + +--- + +## Phase 4 — Context Health & Maintenance Skills + +### 4.1 Skills to Port + +| # | Claude Skill | Copilot CLI Skill | VS Code Command | Priority | +|---|-------------|-------------------|-----------------|----------| +| 1 | `ctx-consolidate` | `ctx-consolidate/SKILL.md` | `/consolidate` | P2 | +| 2 | `ctx-permission-sanitize` | `ctx-permission-sanitize/SKILL.md` | — | P2 | +| 3 | `ctx-prompt-audit` | `ctx-prompt-audit/SKILL.md` | — | P3 | +| 4 | `ctx-skill-audit` | `ctx-skill-audit/SKILL.md` | — | P3 | +| 5 | `ctx-skill-create` | `ctx-skill-create/SKILL.md` | — | P3 | +| 6 | `ctx-link-check` | `ctx-link-check/SKILL.md` | `/check-links` ✅ | Done | +| 7 | `ctx-pad` | `ctx-pad/SKILL.md` | `/pad` ✅ | Done | + +### 4.2 Files to Create/Modify + +| File | Change | +|------|--------| +| `internal/assets/integrations/copilot-cli/skills/ctx-consolidate/SKILL.md` | New skill | +| `internal/assets/integrations/copilot-cli/skills/ctx-permission-sanitize/SKILL.md` | New skill | +| `internal/assets/integrations/copilot-cli/skills/ctx-prompt-audit/SKILL.md` | New skill | +| `internal/assets/integrations/copilot-cli/skills/ctx-skill-audit/SKILL.md` | New skill | +| `internal/assets/integrations/copilot-cli/skills/ctx-skill-create/SKILL.md` | New skill | + +--- + +## Phase 5 — Journal & Documentation Skills + +### 5.1 Skills to Port + +| # | Claude Skill | Copilot CLI Skill | VS Code Command | Priority | +|---|-------------|-------------------|-----------------|----------| +| 1 | `ctx-journal-enrich` | `ctx-journal-enrich/SKILL.md` | 
`/journal enrich` | P2 | +| 2 | `ctx-journal-enrich-all` | `ctx-journal-enrich-all/SKILL.md` | — | P3 | +| 3 | `ctx-blog` | `ctx-blog/SKILL.md` | `/blog` | P3 | +| 4 | `ctx-blog-changelog` | `ctx-blog-changelog/SKILL.md` | `/changelog` ✅ | Done | +| 5 | `ctx-plan-import` | `ctx-plan-import/SKILL.md` | — | P3 | + +### 5.2 Files to Create/Modify + +| File | Change | +|------|--------| +| `internal/assets/integrations/copilot-cli/skills/ctx-journal-enrich/SKILL.md` | New skill | +| `internal/assets/integrations/copilot-cli/skills/ctx-journal-enrich-all/SKILL.md` | New skill | +| `internal/assets/integrations/copilot-cli/skills/ctx-blog/SKILL.md` | New skill | +| `internal/assets/integrations/copilot-cli/skills/ctx-plan-import/SKILL.md` | New skill | + +--- + +## Phase 6 — Advanced / Infrastructure Skills + +### 6.1 Skills to Port + +| # | Claude Skill | Copilot CLI Skill | VS Code Command | Priority | +|---|-------------|-------------------|-----------------|----------| +| 1 | `ctx-loop` | `ctx-loop/SKILL.md` | — (N/A for chat UI) | P3 | +| 2 | `ctx-worktree` | `ctx-worktree/SKILL.md` | `/worktree` ✅ | Done | +| 3 | `ctx-pause` / `ctx-resume` | `ctx-pause/SKILL.md` | `/pause` | P3 | + +--- + +## Summary: Parity Scorecard + +### Current State + +| Surface | Skills | Hooks | Governance | Total Score | +|---------|--------|-------|------------|-------------| +| Claude Code | 41 | 20+ | Full | 100% | +| Copilot CLI | 5 | 4 | None | ~15% | +| Copilot VS Code | 45 cmds (CLI delegate) | 6 watchers | Partial | ~40% | + +### After This Spec Kit + +| Surface | Skills | Hooks | Governance | Total Score | +|---------|--------|-------|------------|-------------| +| Claude Code | 41 | 20+ | Full | 100% | +| Copilot CLI | 36 | 4 (enhanced) | Hook-based | ~85% | +| Copilot VS Code | 50+ cmds | 10+ handlers | Event-based | ~90% | + +### Remaining Gap (Intentional) + +These features are Claude Code specific and **not ported**: + +| Feature | Reason | +|---------|--------| +| 
`UserPromptSubmit` hooks (12 types) | Copilot CLI has no equivalent trigger point | +| `check-context-size` (token budget) | Copilot does not expose token counts | +| `check-knowledge` | Claude Code knowledge graph specific | +| `heartbeat` telemetry | Different telemetry model | +| Plugin system (`.claude-plugin/`) | Claude Code specific packaging | + +--- + +## Edge Cases + +| Case | Expected Behavior | +|------|-------------------| +| Skill references MCP tool not available | Graceful degradation: use CLI fallback | +| GitNexus not registered | Architecture-enrich falls back to grep/ast | +| Hook script timeout (>5s) | Script returns empty, no block | +| Concurrent skill invocation | Each invocation is independent | +| `ctx` binary not on PATH | `sessionStart` hook warns; VS Code auto-downloads | +| `.context/` doesn't exist | Skills prompt `ctx init` | +| Windows vs Unix line endings | Scripts use native endings per platform | + +## Validation Rules + +- Every skill SKILL.md must have: name, description, tools, workflow, quality gates +- Every hook script must have: bash + PowerShell variants +- Every VS Code command must: delegate to CLI, show progress, offer follow-ups +- Skill names must match between Claude and Copilot (e.g., `ctx-implement` in both) + +## Testing + +- **Unit**: Each skill SKILL.md passes `ctx skill audit` (format, completeness) +- **Integration**: `ctx setup copilot-cli --write` deploys all skills + hooks +- **E2E**: Run Copilot CLI session with skills, verify workflow cycle +- **VS Code**: Extension test suite covers new slash commands +- **Cross-platform**: Hook scripts tested on bash (Linux/macOS) and PowerShell (Windows) + +## Non-Goals + +- Replacing Copilot's built-in features (code completion, inline suggestions) +- Porting Claude Code's plugin packaging system +- Real-time token budget monitoring (Copilot doesn't expose this) +- Bidirectional memory sync (separate spec: Copilot memory bridge) +- ACP server mode (separate spec: 
`specs/copilot-cli-integration.md` Phase 4) + +## Open Questions + +1. **Copilot CLI skill discovery**: Does Copilot CLI auto-discover `.github/skills/` + or do skills need explicit registration? Need to verify with latest CLI docs. +2. **Hook script output rendering**: How does Copilot CLI render hook script + stdout? Markdown? Plain text? This affects governance message formatting. +3. **VS Code command registration limit**: Is there a practical limit on slash + commands in a chat participant? Current 45 → 55+ after this spec. +4. **Skill frontmatter schema**: Does Copilot CLI enforce a specific YAML + frontmatter schema for SKILL.md, or is it freeform markdown? From f4117b8793ff696881cb11b54f144a2ba89294d8 Mon Sep 17 00:00:00 2001 From: Jose Alekhinne Date: Tue, 14 Apr 2026 01:06:29 -0700 Subject: [PATCH 04/13] chore: namespace-cleanup docstring sweep + git push regex hardening MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Combines two in-flight workstreams into one commit. ## Docstring namespace sweep (parallel agent) Follow-up to the CLI namespace cleanup in 78fbdf7d. Updates doc comments, blog posts, recipes, and the regenerated site to reflect current command names (e.g. `ctx pause` → `ctx hook pause`, `ctx resume` → `ctx hook resume`, `ctx message` → `ctx hook message`). Also refreshes the Copilot CLI integration skills added in edaac817 (PR #63) to match the new namespace. Scope: ~450 files across docs/, site/, internal/cli, internal/write, internal/config, .github/, integration skill templates, and .claude/ skill SKILL.md files. ## Git push regex hardening (this session) The `block-dangerous-command` hook's `MidGitPush` regex only matched `git push` mid-command after `;`, `&&`, or `||`. This session accidentally bypassed it with `git -C push` — the permissions deny list `Bash(git push *)` only matches prefix `git push`, so `git -C push` also slipped through. 
Replace `MidGitPush` with a broader `GitPush` that covers: - Bare `git push` at command start - All separator and subshell entry points (`;`, `&&`, `||`, `|`, `&`, `(`, `$(`, backtick, newline) - Env-var and command-wrapper prefixes (`GIT_DIR=/x git push`, `time git push`, `nice git push`) - Any flag shape between `git` and `push` (`-C path`, `-c key=val`, `--git-dir=/path`, `--no-pager`, `--bare`, `-p`, `-P`) - Tail anchor that distinguishes subcommand from ref names (`push-to-remote`, `push_branch`) via `[^a-zA-Z0-9._/-]|$` Documented trade-offs: accepted false positives on `git log push` and `git commit -m push` (push as literal arg); known blind spots for `eval` / `sh -c` quoting and shell aliases. Adds `internal/config/regex/cmd_test.go` with 42 table-driven cases covering all entry points, flag shapes, prefixes, negative cases (other subcommands, ref-name continuations), and the accepted false-positive classes. Renames the Go symbol `MidGitPush` → `GitPush` to accurately reflect scope; keeps legacy variant string `mid-git-push` and text key `block.mid-git-push` (user-facing message is already generic: "git push requires explicit user approval"). 
Spec: specs/git-push-regex-hardening.md Spec: specs/cli-namespace-cleanup.md Signed-off-by: Jose Alekhinne --- .claude/skills/_ctx-backup/SKILL.md | 10 +- .context/CONVENTIONS.md | 6 + .context/DECISIONS.md | 45 + .context/LEARNINGS.md | 55 + .context/TASKS.md | 10 + .ctxrc.dev | 4 +- .github/copilot-instructions.md | 2 +- Makefile | 8 + .../blog/2026-01-27-building-ctx-using-ctx.md | 14 +- ...2-01-ctx-v0.2.0-the-archaeology-release.md | 16 +- .../2026-02-01-refactoring-with-intent.md | 4 +- docs/blog/2026-02-03-the-attention-budget.md | 4 +- ...26-02-04-skills-that-fight-the-platform.md | 10 +- .../2026-02-05-you-cant-import-expertise.md | 8 +- ...02-07-the-anatomy-of-a-skill-that-works.md | 12 +- .../2026-02-08-not-everything-is-a-skill.md | 8 +- ...-09-defense-in-depth-securing-ai-agents.md | 6 +- docs/blog/2026-02-12-how-deep-is-too-deep.md | 4 +- docs/blog/2026-02-14-irc-as-context.md | 4 +- ...26-02-14-parallel-agents-with-worktrees.md | 4 +- ...02-15-ctx-v0.3.0-the-discipline-release.md | 2 +- docs/blog/2026-02-15-why-zensical.md | 4 +- ...026-02-17-code-is-cheap-judgment-is-not.md | 6 +- .../2026-02-17-context-as-infrastructure.md | 4 +- ...debt-and-the-myth-of-overnight-progress.md | 12 +- docs/blog/2026-02-17-the-3-1-ratio.md | 6 +- ...-when-a-system-starts-explaining-itself.md | 4 +- docs/blog/2026-02-25-the-homework-problem.md | 4 +- ...26-03-04-agent-memory-is-infrastructure.md | 4 +- docs/blog/2026-03-23-we-broke-the-3-1-rule.md | 2 +- ...02-code-structure-as-an-agent-interface.md | 2 +- docs/cli/config.md | 2 +- docs/cli/connect.md | 12 +- docs/cli/connection.md | 10 +- docs/cli/context.md | 2 +- docs/cli/hub.md | 8 +- docs/cli/index.md | 47 +- docs/cli/journal.md | 2 +- docs/cli/serve.md | 4 +- docs/cli/setup.md | 2 +- docs/cli/steering.md | 10 +- docs/cli/system.md | 7 +- docs/cli/trace.md | 2 +- docs/cli/trigger.md | 12 +- docs/home/common-workflows.md | 4 +- docs/home/configuration.md | 2 +- docs/home/contributing.md | 10 +- docs/home/faq.md | 16 
+- docs/home/first-session.md | 4 +- docs/home/getting-started.md | 2 +- docs/home/hub.md | 14 +- docs/home/index.md | 86 +- docs/home/joining-a-project.md | 2 +- docs/home/keeping-ai-honest.md | 4 +- docs/home/prompting-guide.md | 34 +- docs/home/repeated-mistakes.md | 6 +- docs/home/steering.md | 16 +- docs/home/triggers.md | 12 +- docs/index.md | 18 +- docs/operations/autonomous-loop.md | 4 +- docs/operations/hub-failure-modes.md | 38 +- docs/operations/hub.md | 16 +- docs/operations/index.md | 44 +- docs/operations/integrations.md | 6 +- docs/operations/migration.md | 4 +- docs/operations/release.md | 8 +- docs/operations/upgrading.md | 2 +- docs/recipes/architecture-deep-dive.md | 8 +- docs/recipes/autonomous-loops.md | 2 +- docs/recipes/claude-code-permissions.md | 6 +- docs/recipes/configuration-profiles.md | 14 +- docs/recipes/context-health.md | 4 +- docs/recipes/customizing-hook-messages.md | 4 +- docs/recipes/design-before-coding.md | 2 +- docs/recipes/hook-sequence-diagrams.md | 42 +- docs/recipes/hub-cluster.md | 22 +- docs/recipes/hub-getting-started.md | 24 +- docs/recipes/hub-multi-machine.md | 18 +- docs/recipes/hub-overview.md | 20 +- docs/recipes/hub-personal.md | 26 +- docs/recipes/hub-team.md | 40 +- docs/recipes/index.md | 43 +- docs/recipes/knowledge-capture.md | 6 +- docs/recipes/memory-bridge.md | 2 +- docs/recipes/multi-tool-setup.md | 4 +- docs/recipes/multilingual-sessions.md | 8 +- docs/recipes/parallel-worktrees.md | 2 +- docs/recipes/publishing.md | 2 +- docs/recipes/scratchpad-sync.md | 2 +- docs/recipes/session-archaeology.md | 6 +- docs/recipes/session-ceremonies.md | 6 +- docs/recipes/session-changes.md | 8 +- docs/recipes/session-lifecycle.md | 2 +- docs/recipes/steering.md | 28 +- docs/recipes/task-management.md | 2 +- docs/recipes/triggers.md | 24 +- docs/recipes/troubleshooting.md | 10 +- docs/recipes/when-to-use-agent-teams.md | 8 +- docs/reference/audit-conventions.md | 8 +- docs/reference/comparison.md | 4 +- 
docs/reference/design-invariants.md | 2 +- docs/reference/session-journal.md | 4 +- docs/security/agent-security.md | 4 +- docs/security/hub.md | 26 +- docs/security/reporting.md | 2 +- docs/thesis/index.md | 4 +- examples/demo/README.md | 2 +- hack/title-case-headings.py | 259 ++++ .../assets/claude/skills/ctx-pause/SKILL.md | 2 +- .../assets/claude/skills/ctx-resume/SKILL.md | 2 +- .../claude/skills/ctx-worktree/SKILL.md | 2 +- internal/assets/commands/examples.yaml | 4 +- internal/assets/commands/text/mcp.yaml | 2 +- internal/assets/commands/text/ui.yaml | 2 +- internal/assets/integrations/agents.md | 2 +- .../integrations/copilot-cli/INSTRUCTIONS.md | 2 +- .../integrations/copilot-cli/ctx-hooks.json | 1 + .../copilot-cli/skills/ctx-agent/SKILL.md | 48 +- .../skills/ctx-architecture/SKILL.md | 958 ++++++++++++- .../copilot-cli/skills/ctx-archive/SKILL.md | 40 +- .../skills/ctx-blog-changelog/SKILL.md | 18 +- .../copilot-cli/skills/ctx-blog/SKILL.md | 159 ++- .../skills/ctx-brainstorm/SKILL.md | 180 ++- .../copilot-cli/skills/ctx-commit/SKILL.md | 191 ++- .../skills/ctx-consolidate/SKILL.md | 204 ++- .../copilot-cli/skills/ctx-doctor/SKILL.md | 63 +- .../copilot-cli/skills/ctx-drift/SKILL.md | 236 +++- .../copilot-cli/skills/ctx-implement/SKILL.md | 102 +- .../skills/ctx-journal-enrich-all/SKILL.md | 235 +++- .../skills/ctx-journal-enrich/SKILL.md | 162 ++- .../copilot-cli/skills/ctx-loop/SKILL.md | 124 +- .../copilot-cli/skills/ctx-next/SKILL.md | 44 +- .../copilot-cli/skills/ctx-pad/SKILL.md | 25 +- .../copilot-cli/skills/ctx-pause/SKILL.md | 57 +- .../skills/ctx-prompt-audit/SKILL.md | 164 ++- .../copilot-cli/skills/ctx-recall/SKILL.md | 30 +- .../copilot-cli/skills/ctx-reflect/SKILL.md | 100 +- .../copilot-cli/skills/ctx-remember/SKILL.md | 116 +- .../copilot-cli/skills/ctx-remind/SKILL.md | 76 +- .../copilot-cli/skills/ctx-resume/SKILL.md | 37 +- .../skills/ctx-skill-audit/SKILL.md | 257 +++- .../copilot-cli/skills/ctx-spec/SKILL.md | 106 +- 
.../copilot-cli/skills/ctx-status/SKILL.md | 86 +- .../copilot-cli/skills/ctx-worktree/SKILL.md | 9 +- .../copilot-cli/skills/ctx-wrap-up/SKILL.md | 140 +- .../copilot/copilot-instructions.md | 2 +- internal/assets/read/agent/agent.go | 2 - internal/assets/read/claude/claude.go | 2 - internal/assets/read/lookup/doc.go | 72 +- internal/assets/tpl/doc.go | 76 +- internal/assets/tpl/tpl_hub_entry.go | 4 +- internal/bootstrap/doc.go | 79 +- internal/cli/add/cmd/root/doc.go | 50 +- internal/cli/add/core/insert/doc.go | 58 +- internal/cli/agent/cmd/root/doc.go | 56 +- internal/cli/agent/core/budget/doc.go | 96 +- internal/cli/agent/core/score/doc.go | 69 +- internal/cli/change/core/detect/doc.go | 61 +- internal/cli/config/core/profile/doc.go | 59 +- internal/cli/connection/core/render/doc.go | 56 +- internal/cli/connection/core/render/format.go | 2 +- internal/cli/decision/cmd/reindex/cmd.go | 1 - internal/cli/decision/decision.go | 1 - internal/cli/doctor/core/check/doc.go | 76 +- internal/cli/doctor/doc.go | 57 +- internal/cli/doctor/doctor.go | 2 - internal/cli/drift/core/fix/doc.go | 58 +- internal/cli/drift/core/out/doc.go | 50 +- internal/cli/event/cmd.go | 2 +- internal/cli/event/doc.go | 2 +- internal/cli/hub/core/server/doc.go | 59 +- .../cli/initialize/core/claude_check/doc.go | 54 +- internal/cli/initialize/core/merge/doc.go | 75 +- internal/cli/initialize/core/plugin/doc.go | 59 +- internal/cli/initialize/core/project/doc.go | 62 +- internal/cli/initialize/doc.go | 89 +- internal/cli/journal/cmd/importer/doc.go | 63 +- internal/cli/journal/cmd/obsidian/doc.go | 50 +- internal/cli/journal/cmd/site/doc.go | 49 +- internal/cli/journal/core/collapse/doc.go | 60 +- internal/cli/journal/core/frontmatter/doc.go | 60 +- internal/cli/journal/core/generate/doc.go | 64 +- internal/cli/journal/core/index/doc.go | 59 +- internal/cli/journal/core/lock/doc.go | 71 +- internal/cli/journal/core/moc/doc.go | 71 +- internal/cli/journal/core/normalize/doc.go | 76 +- 
internal/cli/journal/core/obsidian/doc.go | 49 +- internal/cli/journal/core/parse/doc.go | 51 +- internal/cli/journal/core/reduce/doc.go | 52 +- internal/cli/journal/core/schema/doc.go | 60 +- internal/cli/journal/core/section/doc.go | 67 +- internal/cli/journal/core/slug/doc.go | 57 +- internal/cli/journal/core/source/doc.go | 50 +- .../cli/journal/core/source/format/doc.go | 56 +- internal/cli/journal/core/turn/doc.go | 43 +- internal/cli/journal/core/wikilink/doc.go | 53 +- internal/cli/learning/cmd/reindex/cmd.go | 1 - internal/cli/learning/doc.go | 50 +- internal/cli/mcp/mcp.go | 1 - internal/cli/memory/cmd/status/cmd.go | 1 - internal/cli/memory/cmd/sync/cmd.go | 1 - internal/cli/message/cmd.go | 2 +- internal/cli/message/cmd/edit/cmd.go | 2 +- internal/cli/message/cmd/edit/doc.go | 2 +- internal/cli/message/cmd/list/cmd.go | 2 +- internal/cli/message/cmd/list/doc.go | 2 +- internal/cli/message/cmd/reset/cmd.go | 2 +- internal/cli/message/cmd/reset/doc.go | 2 +- internal/cli/message/cmd/root/cmd.go | 2 +- internal/cli/message/cmd/root/doc.go | 2 +- internal/cli/message/cmd/show/cmd.go | 2 +- internal/cli/message/cmd/show/doc.go | 2 +- internal/cli/message/doc.go | 2 +- internal/cli/notify/cmd/setup/cmd.go | 2 +- internal/cli/notify/cmd/setup/doc.go | 2 +- internal/cli/notify/cmd/test/cmd.go | 2 +- internal/cli/notify/cmd/test/doc.go | 2 +- internal/cli/notify/doc.go | 41 +- internal/cli/notify/notify.go | 2 +- internal/cli/pad/cmd/edit/doc.go | 33 +- internal/cli/pad/core/parse/doc.go | 38 +- internal/cli/pad/core/store/doc.go | 54 +- internal/cli/pad/core/tag/doc.go | 44 +- internal/cli/pause/cmd/root/cmd.go | 2 +- internal/cli/pause/cmd/root/doc.go | 2 +- internal/cli/pause/pause.go | 2 +- internal/cli/permission/doc.go | 64 +- internal/cli/remind/doc.go | 52 +- internal/cli/resume/cmd/root/cmd.go | 2 +- internal/cli/resume/cmd/root/doc.go | 2 +- internal/cli/resume/resume.go | 2 +- internal/cli/setup/core/cline/cline.go | 1 - 
internal/cli/setup/core/copilot/doc.go | 58 +- internal/cli/setup/core/copilot_cli/doc.go | 62 +- internal/cli/setup/core/cursor/cursor.go | 1 - internal/cli/setup/core/kiro/kiro.go | 1 - internal/cli/site/cmd/feed/cmd.go | 1 - internal/cli/site/cmd/feed/doc.go | 41 +- internal/cli/site/core/scan/doc.go | 47 +- internal/cli/sync/core/doc.go | 40 +- internal/cli/sync/core/validate/doc.go | 49 +- internal/cli/sync/doc.go | 69 +- .../system/cmd/block_dangerous_command/run.go | 2 +- .../cli/system/cmd/check_context_size/doc.go | 50 +- .../cli/system/cmd/check_persistence/doc.go | 48 +- internal/cli/system/core/archive/doc.go | 59 +- internal/cli/system/core/drift/doc.go | 55 +- internal/cli/system/core/health/doc.go | 52 +- internal/cli/system/core/journal/doc.go | 52 +- internal/cli/system/core/knowledge/doc.go | 49 +- internal/cli/system/core/message/doc.go | 65 +- internal/cli/system/core/nudge/doc.go | 72 +- internal/cli/system/core/nudge/pause.go | 4 +- internal/cli/system/core/session/doc.go | 60 +- internal/cli/system/core/stats/doc.go | 46 +- internal/cli/task/cmd/complete/cmd.go | 2 +- internal/cli/task/doc.go | 66 +- internal/cli/task/task.go | 9 - internal/cli/watch/core/apply/doc.go | 57 +- internal/cli/watch/core/doc.go | 47 +- internal/config/copilot/copilot.go | 6 - internal/config/embed/cmd/doc.go | 63 +- internal/config/embed/cmd/system.go | 14 +- internal/config/embed/flag/doc.go | 58 +- internal/config/embed/text/doc.go | 66 +- internal/config/event/log.go | 2 +- internal/config/hook/doc.go | 55 +- internal/config/http/http.go | 5 - internal/config/marker/doc.go | 59 +- internal/config/regex/cmd.go | 44 +- internal/config/regex/cmd_test.go | 93 ++ internal/config/regex/doc.go | 68 +- internal/config/stats/doc.go | 38 +- internal/config/sysinfo/sysinfo.go | 2 - internal/config/token/doc.go | 60 +- internal/config/vscode/vscode.go | 4 +- internal/config/warn/warn.go | 6 - internal/config/why/why.go | 6 - internal/context/load/doc.go | 65 +- 
internal/context/summary/doc.go | 51 +- internal/crypto/crypto.go | 5 - internal/crypto/doc.go | 72 +- internal/drift/detector.go | 1 - internal/drift/doc.go | 111 +- internal/entity/doc.go | 120 +- internal/entry/doc.go | 79 +- internal/err/backup/doc.go | 45 +- internal/err/crypto/doc.go | 48 +- internal/err/fs/doc.go | 47 +- internal/err/hub/hub.go | 1 - internal/err/journal/doc.go | 42 +- internal/err/memory/doc.go | 51 +- internal/err/skill/doc.go | 50 +- internal/err/steering/doc.go | 66 +- internal/err/trigger/doc.go | 70 +- internal/format/doc.go | 67 +- internal/hub/doc.go | 122 +- internal/index/doc.go | 74 +- internal/index/index.go | 1 - internal/journal/parser/doc.go | 112 +- internal/journal/parser/types.go | 6 - internal/journal/schema/doc.go | 85 +- internal/journal/state/doc.go | 71 +- internal/journal/state/state.go | 5 - internal/log/event/doc.go | 73 +- internal/mcp/handler/doc.go | 113 +- internal/mcp/proto/doc.go | 64 +- internal/mcp/server/def/tool/doc.go | 54 +- internal/mcp/server/dispatch/poll/doc.go | 52 +- internal/mcp/server/doc.go | 96 +- internal/mcp/server/route/prompt/doc.go | 45 +- internal/mcp/server/route/tool/doc.go | 59 +- internal/memory/doc.go | 90 +- internal/notify/doc.go | 86 +- internal/notify/notify.go | 5 - internal/parse/doc.go | 57 +- internal/rc/doc.go | 131 +- internal/rc/rc.go | 3 +- internal/skill/doc.go | 81 +- internal/steering/doc.go | 135 +- internal/sysinfo/doc.go | 73 +- internal/task/doc.go | 70 +- internal/task/task.go | 4 - internal/tidy/doc.go | 82 +- internal/trace/doc.go | 138 +- internal/trigger/doc.go | 154 +- internal/validate/doc.go | 60 +- internal/wrap/doc.go | 65 +- internal/write/complete/doc.go | 2 +- internal/write/drift/doc.go | 67 +- internal/write/initialize/doc.go | 59 +- internal/write/message/doc.go | 52 +- internal/write/notify/doc.go | 2 +- internal/write/prune/doc.go | 2 +- internal/write/resource/doc.go | 49 +- internal/write/session/doc.go | 3 +- internal/write/steering/doc.go | 52 
+- internal/write/vscode/info.go | 1 - site/404.html | 78 +- .../index.html | 98 +- .../index.html | 98 +- .../index.html | 86 +- .../index.html | 86 +- .../index.html | 90 +- .../index.html | 86 +- .../index.html | 94 +- .../index.html | 90 +- .../index.html | 90 +- .../index.html | 82 +- .../blog/2026-02-14-irc-as-context/index.html | 84 +- .../index.html | 82 +- .../index.html | 80 +- .../index.html | 78 +- site/blog/2026-02-15-why-zensical/index.html | 82 +- .../index.html | 78 +- .../index.html | 88 +- .../index.html | 82 +- .../index.html | 94 +- site/blog/2026-02-17-the-3-1-ratio/index.html | 86 +- .../index.html | 82 +- .../index.html | 82 +- .../2026-02-28-the-last-question/index.html | 78 +- .../index.html | 84 +- .../index.html | 78 +- .../index.html | 80 +- .../index.html | 82 +- .../index.html | 78 +- site/blog/index.html | 78 +- site/cli/backup/index.html | 78 +- site/cli/bootstrap/index.html | 78 +- site/cli/change/index.html | 78 +- site/cli/completion/index.html | 78 +- site/cli/config/index.html | 78 +- site/cli/connect/index.html | 97 +- site/cli/connection/index.html | 102 +- site/cli/context/index.html | 78 +- site/cli/doctor/index.html | 78 +- site/cli/event/index.html | 78 +- site/cli/guide/index.html | 78 +- site/cli/hook/index.html | 78 +- site/cli/hub/index.html | 98 +- site/cli/index.html | 226 ++- site/cli/init-status/index.html | 78 +- site/cli/journal/index.html | 78 +- site/cli/loop/index.html | 78 +- site/cli/mcp/index.html | 78 +- site/cli/memory/index.html | 78 +- site/cli/message/index.html | 78 +- site/cli/notify/index.html | 78 +- site/cli/pad/index.html | 78 +- site/cli/pause/index.html | 78 +- site/cli/prune/index.html | 78 +- site/cli/remind/index.html | 78 +- site/cli/resume/index.html | 78 +- site/cli/serve/index.html | 86 +- site/cli/setup/index.html | 80 +- site/cli/site/index.html | 78 +- site/cli/skill/index.html | 78 +- site/cli/steering/index.html | 104 +- site/cli/sysinfo/index.html | 78 +- site/cli/system/index.html 
| 96 +- site/cli/trace/index.html | 78 +- site/cli/trigger/index.html | 110 +- site/cli/usage/index.html | 78 +- site/cli/watch/index.html | 78 +- site/cli/why/index.html | 78 +- site/home/about/index.html | 568 ++++---- site/home/common-workflows/index.html | 563 ++++---- site/home/community/index.html | 735 +++++----- site/home/configuration/index.html | 567 ++++---- site/home/context-files/index.html | 559 ++++---- site/home/contributing/index.html | 1253 ++++++++--------- site/home/faq/index.html | 850 ++++++----- site/home/first-session/index.html | 559 ++++---- site/home/getting-started/index.html | 567 ++++---- site/home/hub/index.html | 751 +++++----- site/home/index.html | 847 ++++++----- site/home/is-ctx-right/index.html | 574 ++++---- site/home/joining-a-project/index.html | 557 ++++---- site/home/keeping-ai-honest/index.html | 579 ++++---- site/home/prompting-guide/index.html | 655 +++++---- site/home/repeated-mistakes/index.html | 559 ++++---- site/home/steering/index.html | 647 ++++----- site/home/triggers/index.html | 631 ++++----- site/index.html | 104 +- site/operations/autonomous-loop/index.html | 218 ++- site/operations/hub-failure-modes/index.html | 326 ++--- site/operations/hub/index.html | 256 ++-- site/operations/index.html | 186 ++- site/operations/integrations/index.html | 222 ++- site/operations/migration/index.html | 226 ++- site/operations/release/index.html | 124 +- site/operations/upgrading/index.html | 214 ++- .../recipes/architecture-deep-dive/index.html | 90 +- site/recipes/autonomous-loops/index.html | 111 +- site/recipes/building-skills/index.html | 105 +- .../claude-code-permissions/index.html | 123 +- .../recipes/configuration-profiles/index.html | 143 +- site/recipes/context-health/index.html | 111 +- .../customizing-hook-messages/index.html | 117 +- site/recipes/design-before-coding/index.html | 111 +- site/recipes/external-context/index.html | 105 +- site/recipes/guide-your-agent/index.html | 111 +- 
site/recipes/hook-output-patterns/index.html | 105 +- .../recipes/hook-sequence-diagrams/index.html | 160 +-- site/recipes/hub-cluster/index.html | 175 ++- site/recipes/hub-getting-started/index.html | 185 ++- site/recipes/hub-multi-machine/index.html | 171 +-- site/recipes/hub-overview/index.html | 177 ++- site/recipes/hub-personal/index.html | 195 ++- site/recipes/hub-team/index.html | 203 ++- site/recipes/import-plans/index.html | 105 +- site/recipes/index.html | 775 ++++++++-- site/recipes/knowledge-capture/index.html | 115 +- site/recipes/memory-bridge/index.html | 107 +- site/recipes/multi-tool-setup/index.html | 107 +- site/recipes/multilingual-sessions/index.html | 125 +- site/recipes/parallel-worktrees/index.html | 105 +- site/recipes/permission-snapshots/index.html | 105 +- site/recipes/publishing/index.html | 107 +- site/recipes/scratchpad-sync/index.html | 107 +- .../recipes/scratchpad-with-claude/index.html | 105 +- site/recipes/session-archaeology/index.html | 111 +- site/recipes/session-ceremonies/index.html | 111 +- site/recipes/session-changes/index.html | 90 +- site/recipes/session-lifecycle/index.html | 107 +- site/recipes/session-pause/index.html | 105 +- site/recipes/session-reminders/index.html | 105 +- site/recipes/state-maintenance/index.html | 78 +- site/recipes/steering/index.html | 181 ++- site/recipes/system-hooks-audit/index.html | 105 +- site/recipes/task-management/index.html | 107 +- site/recipes/triggers/index.html | 177 ++- site/recipes/troubleshooting/index.html | 135 +- site/recipes/webhook-notifications/index.html | 105 +- .../when-to-use-agent-teams/index.html | 129 +- site/reference/audit-conventions/index.html | 96 +- site/reference/comparison/index.html | 90 +- site/reference/design-invariants/index.html | 84 +- site/reference/index.html | 78 +- site/reference/scratchpad/index.html | 78 +- site/reference/session-journal/index.html | 82 +- site/reference/skills/index.html | 78 +- site/reference/versions/index.html | 78 +- 
site/search.json | 2 +- site/security/agent-security/index.html | 86 +- site/security/hub/index.html | 148 +- site/security/index.html | 78 +- site/security/reporting/index.html | 80 +- site/sitemap.xml | 33 +- site/thesis/index.html | 94 +- specs/git-push-regex-hardening.md | 48 + zensical.toml | 63 +- 493 files changed, 23827 insertions(+), 16958 deletions(-) create mode 100755 hack/title-case-headings.py create mode 100644 internal/config/regex/cmd_test.go create mode 100644 specs/git-push-regex-hardening.md diff --git a/.claude/skills/_ctx-backup/SKILL.md b/.claude/skills/_ctx-backup/SKILL.md index f29f3da4b..6a2983c5f 100644 --- a/.claude/skills/_ctx-backup/SKILL.md +++ b/.claude/skills/_ctx-backup/SKILL.md @@ -1,7 +1,7 @@ --- name: _ctx-backup description: "Backup project context and global Claude data to SMB share. Use before risky operations, at end of session, or on request." -allowed-tools: Bash(ctx system backup*), Bash(ls /tmp/ctx-backup*) +allowed-tools: Bash(ctx backup*), Bash(ls /tmp/ctx-backup*) --- Backup `.context/`, `.claude/`, `ideas/`, and `~/.claude/` to @@ -44,19 +44,19 @@ Based on the argument, run the appropriate command: ```bash # For "project" -ctx system backup --scope project +ctx backup --scope project # For "global" -ctx system backup --scope global +ctx backup --scope global # For "all" or no argument -ctx system backup --scope all +ctx backup --scope all ``` ## Process 1. Parse the argument (default to `all` if none provided) -2. Run the appropriate `ctx system backup` command +2. Run the appropriate `ctx backup` command 3. Report the archive path and size from the output 4. 
Confirm success to the user diff --git a/.context/CONVENTIONS.md b/.context/CONVENTIONS.md index a86800509..786d12bed 100644 --- a/.context/CONVENTIONS.md +++ b/.context/CONVENTIONS.md @@ -256,3 +256,9 @@ DO NOT UPDATE FOR: - Warn format strings centralized in config/warn/ — use warn.Close, warn.Write, warn.Remove, warn.Mkdir, warn.Rename, warn.Walk, warn.Getwd, warn.Readdir, warn.Marshal instead of inline format strings in log.Warn calls + +- Nav frontmatter title: fields must not contain ctx — frontmatter does not support backticks, so the brand stays out of nav titles entirely (Hub, not The ctx Hub). Body headings can use `ctx` since markdown supports backticks. + +- CLI flags and slash-commands inside headings or admonition titles must be backticked: `--keep-frontmatter=false`, `/ctx-reflect`. The title-case engine in hack/title-case-headings.py protects these patterns automatically, but authors should still backtick at write time for clarity. + +- File extensions inside headings must be backticked when title-case capitalization would otherwise apply: write `CONSTITUTION.md`, not CONSTITUTION.Md. The title-case engine refuses to capitalize lowercase tokens following a literal . dot, but explicit backticks remain the clearest signal. 
diff --git a/.context/DECISIONS.md b/.context/DECISIONS.md index b9fbe3457..f8d6423c8 100644 --- a/.context/DECISIONS.md +++ b/.context/DECISIONS.md @@ -3,6 +3,9 @@ | Date | Decision | |----|--------| +| 2026-04-14 | doc.go quality floor: behavior-grounded, ~25-100 body lines, related-packages section required | +| 2026-04-14 | Bootstrap stays under ctx system bootstrap (reverted experimental top-level promotion) | +| 2026-04-14 | Title Case style for docs is AP-leaning with explicit ambiguity carve-outs | | 2026-04-13 | Walk boundary uses git as a hint, not a requirement | | 2026-04-11 | Journal stays local; LEARNINGS.md is the shareable layer | | 2026-04-11 | `Entry.Author` is server-authoritative, not client-authoritative | @@ -122,6 +125,48 @@ For significant decisions: --> +## [2026-04-14-010205] doc.go quality floor: behavior-grounded, ~25-100 body lines, related-packages section required + +**Status**: Accepted + +**Context**: About 140 doc.go files were rewritten this session. User flagged the original 5-line Key exports + See source files + Part of subsystem pattern as lazy minimum effort. + +**Decision**: doc.go quality floor: behavior-grounded, ~25-100 body lines, related-packages section required + +**Rationale**: Behavior-grounded rewrites (read source first, then write) are the only acceptable form for any non-trivial package. The lazy template communicates nothing a future reader cannot grep for; it satisfies tooling without adding signal. + +**Consequence**: Every non-trivial package's doc.go now leads with the package's actual purpose, names key behaviors, calls out non-obvious design choices (Raft-lite, two-step indirection, idempotency contracts), and lists related packages with paths. New packages should follow the same shape. 
+ +--- + +## [2026-04-14-010205] Bootstrap stays under ctx system bootstrap (reverted experimental top-level promotion) + +**Status**: Accepted + +**Context**: Mid-session promoted ctx bootstrap to top-level to make a stale CLAUDE.md instruction work. User reverted it and reaffirmed the original design. + +**Decision**: Bootstrap stays under ctx system bootstrap (reverted experimental top-level promotion) + +**Rationale**: The ctx system namespace is for agent and hook plumbing the user does not type by hand. Bootstrap is invoked by AI agents at session start; surfacing it at top-level pollutes ctx --help for humans without benefit. + +**Consequence**: internal/bootstrap/group.go reverted; internal/config/embed/cmd/system.go header now correctly states bootstrap is intentionally not promoted. The CLAUDE.md template across the repo (and the workspace copy) updated to reference ctx system bootstrap as canonical. + +--- + +## [2026-04-14-010205] Title Case style for docs is AP-leaning with explicit ambiguity carve-outs + +**Status**: Accepted + +**Context**: Needed a deterministic Title Case engine for headings and admonition titles across docs/. User precedent (Working with AI lowercase with) ruled out strict Chicago. + +**Decision**: Title Case style for docs is AP-leaning with explicit ambiguity carve-outs + +**Rationale**: AP lowercase prepositions regardless of length matches user-approved titles. But strict AP would lowercase ambiguous prep/conj/adv words like before, after, since, until, past, near, down, up, off, hurting common cases. Carve-outs leave them at default-cap and let the engine reach a sensible result for ~95 percent of headings without manual review. + +**Consequence**: hack/title-case-headings.py ships an AP-leaning with ambiguity carve-outs PREPOSITIONS set. Future style changes must touch that set explicitly with reasoning. New brand or acronym additions go through the same audited pattern. 
+ +--- + ## [2026-04-13-153617] Walk boundary uses git as a hint, not a requirement **Status**: Accepted diff --git a/.context/LEARNINGS.md b/.context/LEARNINGS.md index d8df325bb..07653013e 100644 --- a/.context/LEARNINGS.md +++ b/.context/LEARNINGS.md @@ -17,6 +17,11 @@ DO NOT UPDATE FOR: | Date | Learning | |----|--------| +| 2026-04-14 | Constitution forbids context window as a deferral excuse | +| 2026-04-14 | docs/cli/system.md and embed/cmd/system.go diverged on bootstrap promotion intent | +| 2026-04-14 | Raft-lite trade-off is the load-bearing choice in internal/hub | +| 2026-04-14 | AST stutter test only checks FuncDecl, not GenDecl | +| 2026-04-14 | Brand-name handling in title-case engines must cover possessives | | 2026-04-13 | GPG signing from non-TTY contexts requires pinentry-mac (or equivalent) | | 2026-04-13 | Load average measures a queue, not CPU utilization | | 2026-04-13 | rc.ContextDir() is the single source of truth — fix the resolver, not callers | @@ -115,6 +120,56 @@ DO NOT UPDATE FOR: --- +## [2026-04-14-010134] Constitution forbids context window as a deferral excuse + +**Context**: Mid-session, agent proposed pacing through doc.go rewrites with the reasoning that context budget was tight. + +**Lesson**: The CONSTITUTION explicitly lists 'We are running out of context window' as a forbidden deferral phrase under No Excuse Generation. The rule is real and applies to agent self-pacing, not just user-facing answers. + +**Application**: When tempted to scope down because context is tight, re-read the constitution. The right move is to do the work end-to-end, not to ask the user which slice to skip. + +--- + +## [2026-04-14-010134] docs/cli/system.md and embed/cmd/system.go diverged on bootstrap promotion intent + +**Context**: Header comment in internal/config/embed/cmd/system.go claimed bootstrap was promoted to top-level; the bootstrap.go registration never actually promoted it. Two contradictory sources of truth coexisted silently. 
+ +**Lesson**: Header-comment claims about command-tree structure are unaudited; they can drift from registrations without any test failing. Trust the code, not the comment. + +**Application**: When evaluating any "package_name namespace cleanup"-type claim about command structure, verify against the actual cobra registration in internal/bootstrap/group.go before acting. + +--- + +## [2026-04-14-010134] Raft-lite trade-off is the load-bearing choice in internal/hub + +**Context**: Discovered while writing thorough doc.go for internal/hub. The package embeds HashiCorp Raft for leader election only; data replication is sequence-based gRPC sync over the append-only JSONL store. + +**Lesson**: A leader crash window between accept and replicate can lose the most recent write. Append-only storage plus idempotent clients make this acceptable; full Raft log replication would not be needed and would not be simpler. + +**Application**: Any future make hub stronger proposal must engage with this trade-off explicitly. Do not abandon Raft-lite accidentally by introducing log-replicated state; that would invalidate the simplicity argument. + +--- + +## [2026-04-14-010134] AST stutter test only checks FuncDecl, not GenDecl + +**Context**: tpl.TplEntryMarkdown stuttered for a long time because TestNoStutteryFunctions in internal/audit walks *ast.FuncDecl only; the constant slipped through. + +**Lesson**: The audit suite has a real coverage gap for *ast.GenDecl (consts, vars, types). Stuttery type/const names will not be caught until the audit is extended to walk those node kinds. + +**Application**: When a stuttery identifier is reported by a human, check both the offending file and whether the audit can catch it; if not, file an audit-extension task.
+ +--- + +## [2026-04-14-010105] Brand-name handling in title-case engines must cover possessives + +**Context**: First pass of hack/title-case-headings.py produced 'Ctx's' from 'ctx's' because the brand check matched the bare token only. + +**Lesson**: A brand allowlist needs to recognize 's, s, and short apostrophe-suffixed variants. Single-word matching misses contractions and possessives. + +**Application**: When adding a new always-lowercase brand to hack/title-case-headings.py, extend the suffix-aware loop in title_case_word, not just the BRAND_LOWER set. + +--- + ## [2026-04-13-153618] GPG signing from non-TTY contexts requires pinentry-mac (or equivalent) + +**Context**: git commit failed from Claude Code's shell with 'gpg: signing diff --git a/.context/TASKS.md b/.context/TASKS.md index 1a112ca92..ed05b7ce8 100644 --- a/.context/TASKS.md +++ b/.context/TASKS.md @@ -2076,3 +2076,13 @@ disambiguates. `make test` (0 failures including the audit exempt-list update and the `gofmt` round-trip on `serve/cmd/root/cmd.go`). #added:2026-04-11 #pr:60 #done:2026-04-11 + +### Later + +- [ ] Optional follow-up doc.go pass: a handful of tiny per-subcommand wrappers under internal/cli/*/cmd/* still have ~5-line bodies. Most are accurate-but-brief; expand only if the brief form proves insufficient in review. #session:4b37e2f6 #branch:feat/copilot-cli-skill-parity-rebased #commit:edaac81786c9379333b352dae0d55df0ae0f72bb #added:2026-04-14-010311 + +- [ ] Extend internal/audit/stuttery_functions_test.go to cover *ast.GenDecl (consts, vars, types). Current implementation walks *ast.FuncDecl only and missed tpl.TplEntryMarkdown (since renamed to HubEntryMarkdown).
#session:4b37e2f6 #branch:feat/copilot-cli-skill-parity-rebased #commit:edaac81786c9379333b352dae0d55df0ae0f72bb #added:2026-04-14-010311 + +- [ ] Decide whether to delete docs/cli/connect.md — verified dead duplicate of docs/cli/connection.md (uses old ctx connect command name; zero inbound references; not in zensical.toml). Awaiting explicit user OK before git rm. #session:4b37e2f6 #branch:feat/copilot-cli-skill-parity-rebased #commit:edaac81786c9379333b352dae0d55df0ae0f72bb #added:2026-04-14-010311 + +- [-] PROMPT.md design — belongs in another project; skipped here. #session:4b37e2f6 #added:2026-04-14-010311 #skipped:2026-04-14 diff --git a/.ctxrc.dev b/.ctxrc.dev index 3a0c029e9..dc64d9748 100644 --- a/.ctxrc.dev +++ b/.ctxrc.dev @@ -53,7 +53,7 @@ key_rotation_days: 90 # Days before encryption key rotation nudge # --- Webhook notifications --- # Notifications are opt-in: nothing fires unless events are listed. -# Run `ctx notify setup` to configure the encrypted webhook URL first. +# Run `ctx hook notify setup` to configure the encrypted webhook URL first. # notify: events: @@ -73,5 +73,5 @@ notify: # qa-reminder — QA gate reminder emitted # block-non-path-ctx — blocked non-PATH ctx invocation # -# Note: `ctx notify test` always bypasses the event filter — no need to +# Note: `ctx hook notify test` always bypasses the event filter — no need to # list "test" here. It warns if filtered but sends anyway. 
diff --git a/.github/copilot-instructions.md b/.github/copilot-instructions.md index 96172842d..3cf453f91 100644 --- a/.github/copilot-instructions.md +++ b/.github/copilot-instructions.md @@ -100,7 +100,7 @@ If `ctx` is installed, use these commands: ctx status # Context summary and health check ctx agent # AI-ready context packet ctx drift # Check for stale context -ctx recall list # Recent session history +ctx journal source --limit 5 # Recent session history ``` diff --git a/Makefile b/Makefile index 2f6e229a5..599038d0a 100644 --- a/Makefile +++ b/Makefile @@ -306,6 +306,14 @@ check-why: @diff -q docs/reference/design-invariants.md internal/assets/why/design-invariants.md || (echo "FAIL: design-invariants.md is stale — run 'make sync-why'" && exit 1) @echo "Why docs are in sync." +## title-case-check: Dry-run title-case checker on docs (or TARGET=path) +title-case-check: + @python3 hack/title-case-headings.py $${TARGET:-docs} + +## title-case-fix: Apply title-case fixes to headings + admonition titles (TARGET=path defaults to docs) +title-case-fix: + @python3 hack/title-case-headings.py --apply $${TARGET:-docs} + ## help: Show this help help: @echo "Context CLI - Available targets:" diff --git a/docs/blog/2026-01-27-building-ctx-using-ctx.md b/docs/blog/2026-01-27-building-ctx-using-ctx.md index 074d5aa81..74f531bc9 100644 --- a/docs/blog/2026-01-27-building-ctx-using-ctx.md +++ b/docs/blog/2026-01-27-building-ctx-using-ctx.md @@ -33,7 +33,7 @@ This is the story of `ctx`, how it evolved from a hasty "*YOLO mode*" experiment to a disciplined system for **persistent AI context**, and what I have learned along the way. -!!! info "Context is a Record" +!!! info "Context Is a Record" **Context** *is a* **persistent record**. 
By "*context*", I **don't** mean model memory or stored thoughts: @@ -165,7 +165,7 @@ The `git` history tells the story: 4f0e195 feat: separate orchestrator directive from agent tasks ``` -## YOLO Mode: Fast, But Dangerous +## YOLO Mode: Fast, but Dangerous The *Ralph Loop* made feature development *incredibly fast*. @@ -277,12 +277,12 @@ conventions...*) should go in to `CONVENTIONS.md`. Here's how `ctx` explained why the distinction was important: -!!! tip "Decision record, 2026-01-25" +!!! tip "Decision Record, 2026-01-25" Overly strict constitution creates friction and gets ignored. Conventions can be bent; constitution **cannot**. -## Hooks: Harder Than They Look +## Hooks: Harder than They Look Claude Code hooks seemed simple: Run a script before/after certain events. @@ -339,7 +339,7 @@ By the time of this writing this project's `ctx` sessions They are not part of the source code due to security, privacy, and size concerns. -!!! tip "Middle Ground: the Scratchpad" +!!! tip "Middle Ground: The Scratchpad" For sensitive notes that *do* need to travel with the project, `ctx pad` stores encrypted one-liners in git, and `ctx pad add "label" --file PATH` can ingest small files. @@ -470,7 +470,7 @@ The archive from January 23rd shows 13 phases of work: That's an impressive ^^173 commits** across **8 days** of development. -## What I Learned About AI-Assisted Development +## What I Learned about AI-Assisted Development **1. Memory changes everything** @@ -551,7 +551,7 @@ If you are reading this, chances are that you already have heard about `ctx`. [github.com/ActiveMemory/ctx](https://github.com/ActiveMemory/ctx), * and the documentation lives at [ctx.ist](https://ctx.ist). -!!! note "Session Records are a Gold Mine" +!!! note "Session Records Are a Gold Mine" By the time of this writing, I have **more than 70 megabytes** of **text-only** session capture, spread across >100 Markdown and `JSONL` files. 
diff --git a/docs/blog/2026-02-01-ctx-v0.2.0-the-archaeology-release.md b/docs/blog/2026-02-01-ctx-v0.2.0-the-archaeology-release.md index e369fde4f..8ecbfd9f2 100644 --- a/docs/blog/2026-02-01-ctx-v0.2.0-the-archaeology-release.md +++ b/docs/blog/2026-02-01-ctx-v0.2.0-the-archaeology-release.md @@ -22,11 +22,11 @@ topics: ![ctx](../images/ctx-banner.png) -## Digging Through the Past to Build the Future +## Digging through the Past to Build the Future *Jose Alekhinne / 2026-02-01* -!!! question "What if Your AI Could Remember Everything?" +!!! question "What If Your AI Could Remember Everything?" Not just the current session, but **every** session: * **Every** decision made, @@ -74,7 +74,7 @@ I found myself grepping through files to answer questions like: * "*What was the session where we fixed the hook regex?*" * "*How did the `embed.go` split actually happen?*" -!!! note "Fate is Whimsical" +!!! note "Fate Is Whimsical" The irony was **painful**: I built a tool to prevent AI amnesia, but I was suffering from @@ -114,7 +114,7 @@ them in a human-readable format: Slugs are auto-generated from session IDs (*memorable names instead of UUIDs*). The goal (*as the name implies*) is **recall**, not archival accuracy. -!!! note "2,121 lines of new code" +!!! note "2,121 Lines of New Code" The `ctx recall` feature was the largest single addition: parser library, CLI commands, test suite, and slash command. @@ -214,7 +214,7 @@ ctx add learning "CGO breaks ARM64 builds" \ --application "Added to Makefile and CI config" ``` -!!! quote "Structured entries are prompts to the AI" +!!! quote "Structured Entries Are Prompts to the AI" When the AI reads a decision with full context, rationale, and consequences, it understands the **why**, *not* just the **what**. @@ -250,7 +250,7 @@ always loaded first. The same structure serves two very different readers. -!!! tip "Reindex after manual edits" +!!! 
tip "Reindex After Manual Edits" If you edit entries by hand, rebuild the index with: ```bash @@ -301,7 +301,7 @@ a human can **reason** about. ### 2. Enforcement > Documentation -!!! quote "The Prompt is a Guideline" +!!! quote "The Prompt Is a Guideline" The code is more what you'd call '*guidelines*' than actual rules. -**Hector Barbossa** @@ -324,7 +324,7 @@ The journal system started as a way to understand `ctx` itself. It immediately became useful for everything else. -## v0.2.0 in The Numbers +## v0.2.0 in the Numbers This was a heavy release. The numbers reflect that: diff --git a/docs/blog/2026-02-01-refactoring-with-intent.md b/docs/blog/2026-02-01-refactoring-with-intent.md index 6c0cfc74f..f8ff81b70 100644 --- a/docs/blog/2026-02-01-refactoring-with-intent.md +++ b/docs/blog/2026-02-01-refactoring-with-intent.md @@ -196,7 +196,7 @@ This is where **v0.2.0** becomes more than a refactor. The biggest feature of this change window wasn't a refactor; it was the **journal system**. -!!! note "45 files changed, 1680 insertions" +!!! note "45 Files Changed, 1680 Insertions" This commit added the infrastructure for synthesizing AI session history into human-readable content. @@ -245,7 +245,7 @@ The work also introduced **thread safety** in the **recall parser** and centralized shared validation logic; removing duplication that had quietly spread during YOLO mode. -## I (Re)learned My Lessons +## I (Re)Learned My Lessons Similar to what I've learned in [the former human-assisted refactoring post][first-post], this diff --git a/docs/blog/2026-02-03-the-attention-budget.md b/docs/blog/2026-02-03-the-attention-budget.md index 724c33b20..0751ee251 100644 --- a/docs/blog/2026-02-03-the-attention-budget.md +++ b/docs/blog/2026-02-03-the-attention-budget.md @@ -166,7 +166,7 @@ Also noted: ~100 tokens (title-only summaries for overflow) The constraint is the feature: It enforces ruthless prioritization. 
-### Primitive 3: Indexes Over Full Content +### Primitive 3: Indexes over Full Content `DECISIONS.md` and `LEARNINGS.md` both include index sections: @@ -243,7 +243,7 @@ cat .context/sessions/... # Deep dive when needed Summaries **first**. Details: **on demand**. -## Quality Over Quantity +## Quality over Quantity Here is the counterintuitive part: **more context can make AI worse**. diff --git a/docs/blog/2026-02-04-skills-that-fight-the-platform.md b/docs/blog/2026-02-04-skills-that-fight-the-platform.md index f083accc1..eeca176f6 100644 --- a/docs/blog/2026-02-04-skills-that-fight-the-platform.md +++ b/docs/blog/2026-02-04-skills-that-fight-the-platform.md @@ -15,11 +15,11 @@ topics: ![ctx](../images/ctx-banner.png) -## When Your Custom Prompts Work Against You +## When Your Custom Prompts Work against You *Jose Alekhinne / 2026-02-04* -!!! question "Have You Ever Written a Skill that Made Your AI Worse?" +!!! question "Have You Ever Written a Skill That Made Your AI Worse?" You craft detailed instructions. You add examples. You build elaborate guardrails... @@ -62,7 +62,7 @@ Here is a partial overview of what's built in: **Skills should complement this, not compete with it.** -!!! tip "You are the Guest, not the Host" +!!! tip "You Are the Guest, Not the Host" Treat the system prompt like a kernel scheduler. You don't re-implement it in user space: @@ -119,7 +119,7 @@ Once judgment is suppressed, every other safeguard becomes **optional**. | "*Just this once*" | No exceptions | ``` -!!! danger "Judgment Suppression is Dangerous" +!!! danger "Judgment Suppression Is Dangerous" The **attack vector** structurally identical to **prompt injection**. It teaches the AI that its own judgment is wrong. @@ -238,7 +238,7 @@ Skills designed to activate on every interaction regardless of relevance. Universal triggers override the platform's **relevance matching**: The AI spends tokens on process overhead instead of the actual task. -!!! 
tip "`ctx` preserves relevance" +!!! tip "`ctx` Preserves Relevance" This is exactly the failure mode `ctx` exists to mitigate: Wasting attention budget on irrelevant process instead of diff --git a/docs/blog/2026-02-05-you-cant-import-expertise.md b/docs/blog/2026-02-05-you-cant-import-expertise.md index 9a723786d..879c6ef50 100644 --- a/docs/blog/2026-02-05-you-cant-import-expertise.md +++ b/docs/blog/2026-02-05-you-cant-import-expertise.md @@ -19,7 +19,7 @@ topics: *Jose Alekhinne / 2026-02-05* -!!! question "Have You Ever Dropped a Well-Crafted Template Into a Project and Had It Do... Nothing Useful?" +!!! question "Have You Ever Dropped a Well-Crafted Template into a Project and Had It Do... Nothing Useful?" * The template was **thorough**, * The structure was **sound**, * The advice was **correct**... @@ -40,7 +40,7 @@ Then I stopped, and applied `ctx`'s own evaluation framework: This post is about **why**. -!!! tip "It Is About Encoding Templates" +!!! tip "It Is about Encoding Templates" **Templates describe categories of problems.** **Expertise encodes which problems actually happen, and how often.** @@ -138,7 +138,7 @@ that competed with the AI's built-in capabilities. ## What the Skill Didn't Know -!!! tip "AI Without Context is Just a Corpus" +!!! tip "AI without Context Is Just a Corpus" * LLMs are optimized on insanely large **corpora**. * And then they are passed through several layers of human-assisted **refinement**. @@ -305,7 +305,7 @@ that **actually** drift in this project. --- -!!! quote "**If You Remember One Thing From This Post...**" +!!! quote "**If You Remember One Thing from This Post...**" **Frameworks travel. Expertise doesn't.** You can import structures, matrices, and workflows. 
diff --git a/docs/blog/2026-02-07-the-anatomy-of-a-skill-that-works.md b/docs/blog/2026-02-07-the-anatomy-of-a-skill-that-works.md index c11e6c32c..b3f31f06a 100644 --- a/docs/blog/2026-02-07-the-anatomy-of-a-skill-that-works.md +++ b/docs/blog/2026-02-07-the-anatomy-of-a-skill-that-works.md @@ -20,11 +20,11 @@ topics: ![ctx](../images/ctx-banner.png) -## What 20 Skill Rewrites Taught Me About Guiding AI +## What 20 Skill Rewrites Taught Me about Guiding AI *Jose Alekhinne / 2026-02-07* -!!! question "Why do some skills produce great results while others get ignored or produce garbage?" +!!! question "Why Do Some Skills Produce Great Results While Others Get Ignored or Produce Garbage?" I had 20 skills. Most were well-intentioned stubs: a description, a command to run, and a wish for the best. @@ -151,7 +151,7 @@ These are not just nice-to-have. They are **load-bearing**. Without them, the agent will trigger the skill at the *wrong* time, produce *unwanted* output, and **erode the user's trust** in the skill system. -## Lesson 3: Examples Set Boundaries Better Than Rules +## Lesson 3: Examples Set Boundaries Better than Rules The most common failure mode of thin skills was not wrong behavior but *vague* behavior. The agent would do roughly the right thing, @@ -201,7 +201,7 @@ The agent does not have a notes app. It does not browse the web to find one. This instruction, clearly written for a human audience, was *dead weight* in a skill consumed by an AI. -!!! tip "Skills are for the Agents" +!!! tip "Skills Are for the Agents" **Every sentence in a skill should be actionable by the agent**. If the guidance requires human judgment or human tools, it belongs in @@ -322,7 +322,7 @@ checklists is a **tool**: the difference is not the content; it is **whether the agent can reliably execute it without human intervention**. -!!! tip "Skills are Interfaces" +!!! tip "Skills Are Interfaces" **Good skills are not instructions.
They are contracts.**: * They **specify** preconditions, postconditions, and boundaries. @@ -331,7 +331,7 @@ intervention**. --- -!!! quote "**If You Remember One Thing From This Post...**" +!!! quote "**If You Remember One Thing from This Post...**" **Skills that work have bones, not just flesh.** Quality gates, negative triggers, examples, and checklists diff --git a/docs/blog/2026-02-08-not-everything-is-a-skill.md b/docs/blog/2026-02-08-not-everything-is-a-skill.md index e8a6502e5..db9db7780 100644 --- a/docs/blog/2026-02-08-not-everything-is-a-skill.md +++ b/docs/blog/2026-02-08-not-everything-is-a-skill.md @@ -20,11 +20,11 @@ topics: ![ctx](../images/ctx-banner.png) -## What a Codebase Audit Taught Me About Restraint +## What a Codebase Audit Taught Me about Restraint *Jose Alekhinne / 2026-02-08* -!!! question "When You Find a Useful Prompt, What Do You Do With It?" +!!! question "When You Find a Useful Prompt, What Do You Do with It?" My instinct was to make it a *skill*. I had just spent **three posts** explaining how to build skills that work. @@ -334,7 +334,7 @@ time: If yes, it is a skill. If no, it is a recipe. If you are not sure, it is a recipe until proven otherwise. -## This Mindset In the Context of `ctx` +## This Mindset in the Context of `ctx` `ctx` is a **tool** that gives AI agents persistent memory. Its purpose is **automation**: reducing the **friction** of context loading, session @@ -356,7 +356,7 @@ the right things and to make the rest easy to find when you need it. --- -!!! quote "**If You Remember One Thing From This Post...**" +!!! quote "**If You Remember One Thing from This Post...**" **The best automation decision is sometimes not to automate.** A **runbook** in a Markdown file costs nothing until you use it. 
diff --git a/docs/blog/2026-02-09-defense-in-depth-securing-ai-agents.md b/docs/blog/2026-02-09-defense-in-depth-securing-ai-agents.md index ebcafe4b0..0855e5bfe 100644 --- a/docs/blog/2026-02-09-defense-in-depth-securing-ai-agents.md +++ b/docs/blog/2026-02-09-defense-in-depth-securing-ai-agents.md @@ -73,7 +73,7 @@ iteration `N`. **The agent can rewrite its own guardrails.** -## Five Layers, Each With a Hole +## Five Layers, Each with a Hole That's five nested layers of swiss cheese. Alone, each of them has large holes. Together, they create a **boundary**. @@ -102,7 +102,7 @@ The agent *usually* follows them. **Verdict**: Necessary. Not sufficient. Good for the common case. Never trust it as a security boundary. -### Layer 2: Application Controls (*Deterministic at Runtime, Mutable Across Iterations*) +### Layer 2: Application Controls (*Deterministic at Runtime, Mutable across Iterations*) Permission allowlists in `.claude/settings.local.json`: @@ -367,7 +367,7 @@ overnight. --- -!!! quote "**If You Remember One Thing From This Post...**" +!!! quote "**If You Remember One Thing from This Post...**" **Markdown is not a security boundary.** `CONSTITUTION.md` is a nudge. An allowlist is a gate. diff --git a/docs/blog/2026-02-12-how-deep-is-too-deep.md b/docs/blog/2026-02-12-how-deep-is-too-deep.md index f87543e3a..e7f157ade 100644 --- a/docs/blog/2026-02-12-how-deep-is-too-deep.md +++ b/docs/blog/2026-02-12-how-deep-is-too-deep.md @@ -19,7 +19,7 @@ topics: *Jose Alekhinne / 2026-02-12* -!!! question "Have You Ever Felt Like You Should Understand More of the Stack Beneath You?" +!!! question "Have You Ever Felt like You Should Understand More of the Stack beneath You?" You can talk about transformers at a whiteboard. You can explain attention to a colleague. @@ -316,7 +316,7 @@ loss function. --- -!!! quote "**If You Remember One Thing From This Post...**" +!!! quote "**If You Remember One Thing from This Post...**" Go deep enough to diagnose your failures. 
Stop before you are solving problems that do not propagate to your layer. diff --git a/docs/blog/2026-02-14-irc-as-context.md b/docs/blog/2026-02-14-irc-as-context.md index 4e5dd7718..ff0b07adb 100644 --- a/docs/blog/2026-02-14-irc-as-context.md +++ b/docs/blog/2026-02-14-irc-as-context.md @@ -89,7 +89,7 @@ Client sessions become **ephemeral**. Presence becomes **infrastructural**. -!!! tip "ZNC is tmux for IRC" +!!! tip "ZNC Is Tmux for IRC" * Close your laptop. * ZNC remains. @@ -100,7 +100,7 @@ This is **not** convenience; this is **continuity**. --- -## Presence Without Flapping +## Presence without Flapping With a bouncer: diff --git a/docs/blog/2026-02-14-parallel-agents-with-worktrees.md b/docs/blog/2026-02-14-parallel-agents-with-worktrees.md index 2e88f38a9..c940ee5dc 100644 --- a/docs/blog/2026-02-14-parallel-agents-with-worktrees.md +++ b/docs/blog/2026-02-14-parallel-agents-with-worktrees.md @@ -19,7 +19,7 @@ topics: *Jose Alekhinne / 2026-02-14* -!!! question "What Do You Do With 30 Open Tasks?" +!!! question "What Do You Do with 30 Open Tasks?" You could work through them one at a time. One agent, one branch, one commit stream. @@ -274,7 +274,7 @@ The hard part is not the `git` commands; it is the **discipline**: --- -!!! quote "**If You Remember One Thing From This Post...**" +!!! quote "**If You Remember One Thing from This Post...**" **Partition by blast radius, not by priority.** Two tasks that touch the same files belong in the same track, diff --git a/docs/blog/2026-02-15-ctx-v0.3.0-the-discipline-release.md b/docs/blog/2026-02-15-ctx-v0.3.0-the-discipline-release.md index f7853149f..db0eb5735 100644 --- a/docs/blog/2026-02-15-ctx-v0.3.0-the-discipline-release.md +++ b/docs/blog/2026-02-15-ctx-v0.3.0-the-discipline-release.md @@ -19,7 +19,7 @@ topics: *Jose Alekhinne / February 15, 2026* -!!! question "What Does a Release Look Like When Most of the Work Is Invisible?" +!!! 
question "What Does a Release Look like When Most of the Work Is Invisible?" No new headline feature. No architectural pivot. No rewrite. Just **35+ documentation and quality commits** against **~15 feature diff --git a/docs/blog/2026-02-15-why-zensical.md b/docs/blog/2026-02-15-why-zensical.md index 5caa9e32b..b52852fde 100644 --- a/docs/blog/2026-02-15-why-zensical.md +++ b/docs/blog/2026-02-15-why-zensical.md @@ -15,7 +15,7 @@ topics: ![ctx](../images/ctx-banner.png) -## Why ctx's Journal Site Runs on a v0.0.21 Tool +## Why `ctx`'s Journal Site Runs on a v0.0.21 Tool *Jose Alekhinne / 2026-02-15* @@ -329,7 +329,7 @@ This is the same kind of decision that shows up throughout `ctx`: --- -!!! quote "**If You Remember One Thing From This Post...**" +!!! quote "**If You Remember One Thing from This Post...**" **Version numbers measure where a project has been.** **The team and the architecture tell you where it's going.** diff --git a/docs/blog/2026-02-17-code-is-cheap-judgment-is-not.md b/docs/blog/2026-02-17-code-is-cheap-judgment-is-not.md index 677844094..b356ad609 100644 --- a/docs/blog/2026-02-17-code-is-cheap-judgment-is-not.md +++ b/docs/blog/2026-02-17-code-is-cheap-judgment-is-not.md @@ -25,7 +25,7 @@ topics: *Jose Alekhinne / February 17, 2026* -!!! question "Are You Worried About AI Taking Your Job?" +!!! question "Are You Worried about AI Taking Your Job?" You might be confusing the thing that's *cheap* with the thing that's **valuable**. @@ -116,7 +116,7 @@ accountable. --- -## The Evidence From Building ctx +## The Evidence from Building ctx I did **not** arrive at this conclusion theoretically. @@ -306,7 +306,7 @@ the *replacement*: --- -!!! quote "**If You Remember One Thing From This Post...**" +!!! quote "**If You Remember One Thing from This Post...**" **Code is cheap. Judgment is not.** AI replaces unstructured effort, not directed expertise. 
The diff --git a/docs/blog/2026-02-17-context-as-infrastructure.md b/docs/blog/2026-02-17-context-as-infrastructure.md index ec0e625d9..c0c20ac43 100644 --- a/docs/blog/2026-02-17-context-as-infrastructure.md +++ b/docs/blog/2026-02-17-context-as-infrastructure.md @@ -25,7 +25,7 @@ topics: *Jose Alekhinne / February 17, 2026* -!!! question "Where does your AI's knowledge live between sessions?" +!!! question "Where Does Your AI's Knowledge Live between Sessions?" If the answer is "in a prompt I paste at the start," you are treating context as a **consumable**. Something assembled, used, and discarded. @@ -392,7 +392,7 @@ The tool is a convenience: **The principles are what matter**. --- -!!! quote "**If You Remember One Thing From This Post...**" +!!! quote "**If You Remember One Thing from This Post...**" **Prompts are conversations. Infrastructure persists.** Your AI does not need a better prompt. It needs a filesystem: diff --git a/docs/blog/2026-02-17-parallel-agents-merge-debt-and-the-myth-of-overnight-progress.md b/docs/blog/2026-02-17-parallel-agents-merge-debt-and-the-myth-of-overnight-progress.md index 6f18a5be5..42ec905eb 100644 --- a/docs/blog/2026-02-17-parallel-agents-merge-debt-and-the-myth-of-overnight-progress.md +++ b/docs/blog/2026-02-17-parallel-agents-merge-debt-and-the-myth-of-overnight-progress.md @@ -15,11 +15,11 @@ topics: ![ctx](../images/ctx-banner.png) -## When the Screen Looks Like Progress +## When the Screen Looks like Progress *Jose Alekhinne / 2026-02-17* -!!! question "How Many Terminals Are too Many?" +!!! question "How Many Terminals Are Too Many?" You discover agents can run in parallel. So you open ten... @@ -135,7 +135,7 @@ Real parallelism comes from **task topology**, not from tooling. * Four agents editing the same implementation surface -!!! tip "Context is the Boundary" +!!! tip "Context Is the Boundary" * The goal is **not** to keep agents busy. * The goal is to keep **contexts isolated**. 
@@ -343,7 +343,7 @@ A well-written task description that takes 50 tokens outperforms a rambling one that takes 200: **Not** just because it is cheaper, but because it leaves more **headroom** for the model to actually **think**. -!!! tip "Literature is NOT Overrated" +!!! tip "Literature Is NOT Overrated" * Attention is a **finite** budget. * **Language** determines how fast you spend it. @@ -382,7 +382,7 @@ Every post has arrived (*and made me converge*) at the same answer so far: --- -## `ctx` Was Never About Spawning More Minds +## `ctx` Was Never about Spawning More Minds `ctx` is about: @@ -426,7 +426,7 @@ This is *slower* to watch. **Faster** to ship. --- -!!! quote "**If You Remember One Thing From This Post...**" +!!! quote "**If You Remember One Thing from This Post...**" **Progress is not what the machine produces while you sleep.** **Progress is what survives contact with the main branch.** diff --git a/docs/blog/2026-02-17-the-3-1-ratio.md b/docs/blog/2026-02-17-the-3-1-ratio.md index d6f855b7b..65f365b7a 100644 --- a/docs/blog/2026-02-17-the-3-1-ratio.md +++ b/docs/blog/2026-02-17-the-3-1-ratio.md @@ -25,7 +25,7 @@ topics: *Jose Alekhinne / February 17, 2026* -!!! question "How often should you stop building and start cleaning?" +!!! question "How Often Should You Stop Building and Start Cleaning?" Every developer knows technical debt exists. Every developer postpones dealing with it. @@ -334,7 +334,7 @@ already paid the compounding cost. --- -!!! quote "**If You Remember One Thing From This Post...**" +!!! quote "**If You Remember One Thing from This Post...**" **Three sessions of building. One session of cleaning.** **Not** because the code is dirty, **but** because drift compounds @@ -345,7 +345,7 @@ already paid the compounding cost. --- -## The Arc So Far +## The Arc so Far This post sits at a crossroads in the `ctx` story. 
Looking back: diff --git a/docs/blog/2026-02-17-when-a-system-starts-explaining-itself.md b/docs/blog/2026-02-17-when-a-system-starts-explaining-itself.md index 8a661e2bc..4030e9709 100644 --- a/docs/blog/2026-02-17-when-a-system-starts-explaining-itself.md +++ b/docs/blog/2026-02-17-when-a-system-starts-explaining-itself.md @@ -94,7 +94,7 @@ The question is correct. The category is wrong. * Skills live in people. * Infrastructure lives in the environment. -!!! tip "`ctx` Is not a Skill: It is a Form of Relief" +!!! tip "`ctx` Is Not a Skill: It Is a Form of Relief" What early adopters experience is **not** an ability. **It is the removal of a cognitive constraint**. @@ -303,7 +303,7 @@ a **shared infrastructure for thought**. --- -!!! quote "**If You Remember One Thing From This Post...**" +!!! quote "**If You Remember One Thing from This Post...**" You do not know a substrate is real when people praise it. You know it is real when: diff --git a/docs/blog/2026-02-25-the-homework-problem.md b/docs/blog/2026-02-25-the-homework-problem.md index 4370af9c4..1c27f4636 100644 --- a/docs/blog/2026-02-25-the-homework-problem.md +++ b/docs/blog/2026-02-25-the-homework-problem.md @@ -233,7 +233,7 @@ does the heavy lifting here: Without the relay requirement, the agent would silently rationalize skipping. With it, skipping becomes a **visible, auditable decision** that the user can override. -### The Compliance Canary +### The Compliance Canary Here's the design insight that only became clear after watching it work across multiple sessions: **the relay block is a compliance canary**. @@ -640,7 +640,7 @@ context at session start**. The [Prompting Guide](../home/prompting-guide.md) applies this insight directly: Scope constraints, verification commands, and the reliability checklist are all **one-hop**, moment-of-action patterns. -### 2. Delegation chains decay +### 2. 
Delegation Chains Decay Every hop in an instruction chain loses authority: diff --git a/docs/blog/2026-03-04-agent-memory-is-infrastructure.md b/docs/blog/2026-03-04-agent-memory-is-infrastructure.md index 46bea17f2..d40daa54b 100644 --- a/docs/blog/2026-03-04-agent-memory-is-infrastructure.md +++ b/docs/blog/2026-03-04-agent-memory-is-infrastructure.md @@ -244,7 +244,7 @@ That's the test. That's the difference. --- -## What Gets Lost Without Infrastructure Memory +## What Gets Lost without Infrastructure Memory Consider the knowledge that accumulates around a non-trivial project: @@ -410,7 +410,7 @@ The project's memory is an infrastructure problem. And **infrastructure belongs in the repository**. -!!! quote "If You Remember One Thing From This Post..." +!!! quote "If You Remember One Thing from This Post..." **Prompts are conversations: Infrastructure persists.** Your AI doesn't need a better notepad. It needs a filesystem: diff --git a/docs/blog/2026-03-23-we-broke-the-3-1-rule.md b/docs/blog/2026-03-23-we-broke-the-3-1-rule.md index ba7f84004..7363848ca 100644 --- a/docs/blog/2026-03-23-we-broke-the-3-1-rule.md +++ b/docs/blog/2026-03-23-we-broke-the-3-1-rule.md @@ -216,7 +216,7 @@ was the same; the experience was worse. felt productive. But the codebase on March 5 was harder to modify than the codebase on February 16, despite having more features. -!!! tip "Speed Without Structure" +!!! tip "Speed without Structure" Speed without structure is negative progress. 
**Agents amplify both building and debt**: The same AI that can diff --git a/docs/blog/2026-04-02-code-structure-as-an-agent-interface.md b/docs/blog/2026-04-02-code-structure-as-an-agent-interface.md index 994fe41b7..d5ba41776 100644 --- a/docs/blog/2026-04-02-code-structure-as-an-agent-interface.md +++ b/docs/blog/2026-04-02-code-structure-as-an-agent-interface.md @@ -20,7 +20,7 @@ topics: # Code Structure as an Agent Interface -## What 19 AST Tests Taught Us About Agent-Readable Code +## What 19 AST Tests Taught Us about Agent-Readable Code ![ctx](../images/ctx-banner.png) diff --git a/docs/cli/config.md b/docs/cli/config.md index 0f5326590..b60479add 100644 --- a/docs/cli/config.md +++ b/docs/cli/config.md @@ -8,8 +8,8 @@ title: Config icon: lucide/settings-2 --- -![ctx](../images/ctx-banner.png) +![ctx](../images/ctx-banner.png) ### `ctx config` diff --git a/docs/cli/connect.md b/docs/cli/connect.md index 5ffe3a6fa..4cdc41850 100644 --- a/docs/cli/connect.md +++ b/docs/cli/connect.md @@ -9,6 +9,8 @@ title: Connect icon: lucide/link --- +![ctx](../images/ctx-banner.png) + ## `ctx connect` Connect a project to a `ctx` Hub for cross-project @@ -16,7 +18,7 @@ knowledge sharing. Projects publish decisions, learnings, conventions, and tasks to a hub; other subscribed projects receive them alongside local context. -!!! tip "New to the hub?" +!!! tip "New to the Hub?" Start with the [`ctx` Hub overview](../recipes/hub-overview.md) for the mental model (what the hub is, who it's for, what it is @@ -92,7 +94,7 @@ Show hub connection state and entry statistics. ctx connect status ``` -## Automatic sharing +## Automatic Sharing Use `--share` on `ctx add` to write locally AND publish to the hub: @@ -107,13 +109,13 @@ If the hub is unreachable, the local write succeeds and a warning is printed. The `--share` flag is best-effort — it never blocks local context updates. 
-## Auto-sync +## Auto-Sync Once registered, the `check-hub-sync` hook automatically syncs new entries from the hub at the start of each session (daily throttled). No manual `ctx connect sync` needed. -## Shared files +## Shared Files Entries from the hub are stored in `.context/hub/`: @@ -128,7 +130,7 @@ Entries from the hub are stored in `.context/hub/`: These files are read-only (managed by sync/listen) and never mixed with local context files. -## Agent integration +## Agent Integration Include shared knowledge in agent context packets: diff --git a/docs/cli/connection.md b/docs/cli/connection.md index 1d9dd7af0..41093382e 100644 --- a/docs/cli/connection.md +++ b/docs/cli/connection.md @@ -8,8 +8,8 @@ title: Connect icon: lucide/link --- -![ctx](../images/ctx-banner.png) +![ctx](../images/ctx-banner.png) ## `ctx connect` @@ -107,7 +107,7 @@ Show ctx Hub connection state and entry statistics. ctx connection status ``` -## Automatic sharing +## Automatic Sharing Use `--share` on `ctx add` to write locally AND publish to the ctx Hub: @@ -122,13 +122,13 @@ If the hub is unreachable, the local write succeeds and a warning is printed. The `--share` flag is best-effort — it never blocks local context updates. -## Auto-sync +## Auto-Sync Once registered, the `check-hub-sync` hook automatically syncs new entries from the ctx Hub at the start of each session (daily throttled). No manual `ctx connection sync` needed. -## Shared files +## Shared Files Entries from the ctx Hub are stored in `.context/hub/`: @@ -143,7 +143,7 @@ Entries from the ctx Hub are stored in `.context/hub/`: These files are read-only (managed by sync/listen) and never mixed with local context files. 
-## Agent integration +## Agent Integration Include shared knowledge in agent context packets: diff --git a/docs/cli/context.md b/docs/cli/context.md index ad0315d10..0288858e2 100644 --- a/docs/cli/context.md +++ b/docs/cli/context.md @@ -8,8 +8,8 @@ title: Context Management icon: lucide/layers --- -![ctx](../images/ctx-banner.png) +![ctx](../images/ctx-banner.png) ### `ctx add` diff --git a/docs/cli/hub.md b/docs/cli/hub.md index 85662360f..cf200eab3 100644 --- a/docs/cli/hub.md +++ b/docs/cli/hub.md @@ -19,7 +19,7 @@ projects. Use `ctx hub` to start and stop the server, inspect cluster state, add or remove peers at runtime, and hand off leadership before maintenance. -!!! tip "Who needs this page" +!!! tip "Who Needs This Page" You only need `ctx hub` if you are **running** a hub server or cluster. For client-side operations (register, subscribe, sync, publish, listen), see @@ -47,7 +47,7 @@ client projects. Subsequent runs reuse the stored token from **Default data directory**: `~/.ctx/hub-data/` -#### Daemon mode +#### Daemon Mode Run the hub as a detached background process: @@ -59,7 +59,7 @@ ctx hub stop # Graceful shutdown The daemon writes a PID file to `/hub.pid`. Stop the daemon with `ctx hub stop` (see below). -#### Cluster mode +#### Cluster Mode For high availability, run multiple hubs with Raft-based leader election: @@ -146,7 +146,7 @@ maintenance. 
ctx hub stepdown ``` -### See also +### See Also - [`ctx connect`](connection.md) — client-side commands (register, subscribe, sync, publish, listen) diff --git a/docs/cli/index.md b/docs/cli/index.md index dc10e8a8e..89507128f 100644 --- a/docs/cli/index.md +++ b/docs/cli/index.md @@ -45,37 +45,37 @@ Commands that work before initialization: `ctx init`, `ctx setup`, | [`ctx init`](init-status.md#ctx-init) | Initialize `.context/` directory with templates | | [`ctx status`](init-status.md#ctx-status) | Show context summary (files, tokens, drift) | | [`ctx guide`](guide.md#ctx-guide) | Quick-reference cheat sheet | +| [`ctx why`](why.md#ctx-why) | Read the philosophy behind `ctx` | ## Context | Command | Description | |-----------------------------------------------|----------------------------------------------------------| | [`ctx add`](context.md#ctx-add) | Add a task, decision, learning, or convention | -| [`ctx load`](init-status.md#ctx-load) | Output assembled context in read order | -| [`ctx agent`](init-status.md#ctx-agent) | Print token-budgeted context packet for AI consumption | -| [`ctx skill`](skill.md#ctx-skill) | Manage reusable instruction bundles | +| [`ctx load`](context.md#ctx-load) | Output assembled context in read order | +| [`ctx agent`](context.md#ctx-agent) | Print token-budgeted context packet for AI consumption | | [`ctx sync`](context.md#ctx-sync) | Reconcile context with codebase state | | [`ctx drift`](context.md#ctx-drift) | Detect stale paths, secrets, missing files | | [`ctx compact`](context.md#ctx-compact) | Archive completed tasks, clean up files | | [`ctx fmt`](context.md#ctx-fmt) | Format context files to 80-char line width | - -## Artifacts - -| Command | Description | -|-----------------------------------------------|----------------------------------------------------------| | [`ctx decision`](context.md#ctx-decision) | Manage `DECISIONS.md` (reindex) | | [`ctx learning`](context.md#ctx-learning) | Manage `LEARNINGS.md` 
(reindex) | | [`ctx task`](context.md#ctx-task) | Task completion, archival, and snapshots | | [`ctx reindex`](context.md#ctx-reindex) | Regenerate indices for `DECISIONS.md` and `LEARNINGS.md` | +| [`ctx permission`](context.md#ctx-permission) | Permission snapshots (golden image) | +| [`ctx change`](change.md#ctx-change) | Show what changed since last session | +| [`ctx memory`](memory.md#ctx-memory) | Bridge Claude Code auto memory into `.context/` | +| [`ctx watch`](watch.md#ctx-watch) | Auto-apply context updates from AI output | ## Sessions | Command | Description | |-----------------------------------------------|----------------------------------------------------------| | [`ctx journal`](journal.md#ctx-journal) | Browse, import, enrich, and lock session history | -| [`ctx memory`](memory.md#ctx-memory) | Bridge Claude Code auto memory into `.context/` | -| [`ctx remind`](remind.md#ctx-remind) | Session-scoped reminders that surface at session start | | [`ctx pad`](pad.md#ctx-pad) | Encrypted scratchpad for sensitive one-liners | +| [`ctx remind`](remind.md#ctx-remind) | Session-scoped reminders that surface at session start | +| [`ctx hook pause`](pause.md) | Pause context hooks for the current session | +| [`ctx hook resume`](resume.md) | Resume paused context hooks | ## Integrations @@ -84,22 +84,22 @@ Commands that work before initialization: `ctx init`, `ctx setup`, | [`ctx setup`](setup.md#ctx-setup) | Generate AI tool integration configs | | [`ctx steering`](steering.md#ctx-steering) | Manage steering files (behavioral rules for AI tools) | | [`ctx trigger`](trigger.md#ctx-trigger) | Manage lifecycle triggers (scripts for automation) | -| [`ctx serve`](serve.md#ctx-serve) | Serve a static site locally via zensical | -| [`ctx hub`](hub.md#ctx-hub) | Operate a ctx Hub server or cluster | -| [`ctx connection`](connection.md#ctx-connection) | Connect to a ctx Hub | -| [`ctx mcp`](mcp.md#ctx-mcp) | MCP server for AI tool integration (stdin/stdout) | -| 
[`ctx watch`](watch.md#ctx-watch) | Auto-apply context updates from AI output | +| [`ctx skill`](skill.md#ctx-skill) | Manage reusable instruction bundles | +| [`ctx mcp`](mcp.md#ctx-mcp) | MCP server for AI tool integration (stdin/stdout) | +| [`ctx hook notify`](notify.md) | Webhook notifications (setup, test, send) | | [`ctx loop`](loop.md#ctx-loop) | Generate autonomous loop script | +| [`ctx connection`](connection.md#ctx-connection) | Client-side commands for connecting to a `ctx` Hub | +| [`ctx hub`](hub.md#ctx-hub) | Operate a `ctx` Hub server or cluster | +| [`ctx serve`](serve.md#ctx-serve) | Serve a static site locally via zensical | +| [`ctx site`](site.md#ctx-site) | Site management (feed generation) | ## Diagnostics | Command | Description | |-----------------------------------------------|----------------------------------------------------------| | [`ctx doctor`](doctor.md#ctx-doctor) | Structural health check (hooks, drift, config) | -| [`ctx change`](change.md#ctx-change) | Show what changed since last session | -| [`ctx why`](why.md#ctx-why) | Read the philosophy behind `ctx` | | [`ctx trace`](trace.md#ctx-trace) | Show context behind git commits | -| [`ctx sysinfo`](sysinfo.md#ctx-sysinfo) | Show system resource usage (memory, swap, disk, load) | +| [`ctx sysinfo`](sysinfo.md#ctx-sysinfo) | Show system resource usage (memory, swap, disk, load) | | [`ctx usage`](usage.md#ctx-usage) | Show session token usage stats | ## Runtime @@ -107,10 +107,10 @@ Commands that work before initialization: `ctx init`, `ctx setup`, | Command | Description | |-----------------------------------------------|----------------------------------------------------------| | [`ctx config`](config.md#ctx-config) | Manage runtime configuration profiles | -| [`ctx permission`](context.md#ctx-permission) | Permission snapshots (golden image) | -| [`ctx hook`](hook.md#ctx-hook) | Hook message, notification, and lifecycle controls | | [`ctx backup`](backup.md#ctx-backup) | Back 
up context and Claude data to tar.gz / SMB | | [`ctx prune`](prune.md#ctx-prune) | Clean stale per-session state files | +| [`ctx hook`](hook.md#ctx-hook) | Hook message, notification, and lifecycle controls | +| [`ctx system`](system.md#ctx-system) | Hook plumbing and agent-only commands (not user-facing) | ## Shell @@ -118,13 +118,6 @@ Commands that work before initialization: `ctx init`, `ctx setup`, |-----------------------------------------------|----------------------------------------------------------| | [`ctx completion`](completion.md#ctx-completion) | Generate shell autocompletion scripts | -## Hidden - -| Command | Description | -|-----------------------------------------------|----------------------------------------------------------| -| [`ctx site`](site.md#ctx-site) | Site management (feed generation) | -| [`ctx system`](system.md#ctx-system) | Hook plumbing and agent-only commands (not user-facing) | - --- ## Exit Codes diff --git a/docs/cli/journal.md b/docs/cli/journal.md index dc20b9c24..28f8b3f31 100644 --- a/docs/cli/journal.md +++ b/docs/cli/journal.md @@ -8,8 +8,8 @@ title: Journal icon: lucide/history --- -![ctx](../images/ctx-banner.png) +![ctx](../images/ctx-banner.png) ### `ctx journal` diff --git a/docs/cli/serve.md b/docs/cli/serve.md index 29760da36..3625c6148 100644 --- a/docs/cli/serve.md +++ b/docs/cli/serve.md @@ -25,7 +25,7 @@ ctx serve ./my-site # Serve a specific directory ctx serve ./docs # Serve any zensical site ``` -!!! info "This command does NOT start a hub" +!!! info "This Command Does NOT Start a Hub" `ctx serve` is purely for static-site serving. To run a `ctx` Hub for cross-project knowledge sharing, use [`ctx hub start`](hub.md). That command lives in its @@ -55,7 +55,7 @@ ctx serve ./my-site # Serve a specific directory ctx serve ./docs # Serve any zensical site ``` -### See also +### See Also - [`ctx journal`](journal.md) — generate the journal site that `ctx serve` displays. 
diff --git a/docs/cli/setup.md b/docs/cli/setup.md index daaedfac2..6a7c291f4 100644 --- a/docs/cli/setup.md +++ b/docs/cli/setup.md @@ -37,7 +37,7 @@ ctx setup [flags] | `copilot` | GitHub Copilot | | `windsurf` | Windsurf IDE | -!!! note "Claude Code uses the plugin system" +!!! note "Claude Code Uses the Plugin System" Claude Code integration is now provided via the `ctx` plugin. Running `ctx setup claude-code` prints plugin install instructions. diff --git a/docs/cli/steering.md b/docs/cli/steering.md index 6f4734592..38e6eda99 100644 --- a/docs/cli/steering.md +++ b/docs/cli/steering.md @@ -27,7 +27,7 @@ prompt, and syncs them out to each AI tool's native format ctx steering ``` -!!! tip "Steering vs decisions vs conventions" +!!! tip "Steering vs Decisions vs Conventions" The three look similar on disk but serve different purposes: - **Decisions** record *what* was chosen and *why*. @@ -42,7 +42,7 @@ ctx steering If you find yourself writing "the AI should always do X" — that belongs in steering, not decisions. -### Anatomy of a steering file +### Anatomy of a Steering File ```yaml --- @@ -165,7 +165,7 @@ each matching steering file to the appropriate directory with tool-specific frontmatter transforms. Unchanged files are skipped (idempotent). -### How Claude Code and Codex consume steering +### How Claude Code and Codex Consume Steering Claude Code has no native "steering files" primitive, so `ctx steering sync` skips it entirely. Instead, steering @@ -237,7 +237,7 @@ file. covers Claude Code. For rules you need to fire automatically on both, use `inclusion: always`. -### `ctx agent` integration +### `ctx agent` Integration When `ctx agent` builds a context packet, steering files are loaded as Tier 6 of the budget-aware assembly (see @@ -246,7 +246,7 @@ loaded as Tier 6 of the budget-aware assembly (see scored against the current prompt and included in priority order until the tier budget is exhausted. 
-### See also +### See Also - [`ctx setup`](setup.md) — configure which tools receive steering syncs diff --git a/docs/cli/system.md b/docs/cli/system.md index eb17b0292..819df2e11 100644 --- a/docs/cli/system.md +++ b/docs/cli/system.md @@ -11,7 +11,6 @@ icon: lucide/settings ![ctx](../images/ctx-banner.png) - ### `ctx system` Hidden parent command that hosts Claude Code hook plumbing and a small @@ -23,7 +22,7 @@ integrations. The parent is registered without a visible group in ctx system ``` -!!! note "Commands previously under `ctx system`" +!!! note "Commands Previously under `ctx system`" Several user-facing maintenance commands used to live under `ctx system` and were promoted to top-level: @@ -38,7 +37,7 @@ ctx system agent-only command. Update any scripts or personal docs that reference the old paths. -## Plumbing subcommands +## Plumbing Subcommands These are not hook handlers — they're called by skills and editor integrations during the session lifecycle. Safe to run manually. @@ -104,7 +103,7 @@ ctx system session-event --type start --caller vscode ctx system session-event --type end --caller vscode ``` -## Hook subcommands +## Hook Subcommands Hidden Claude Code hook handlers implementing the hook contract: read JSON from stdin, perform logic, emit output on stdout, exit 0. Block diff --git a/docs/cli/trace.md b/docs/cli/trace.md index 4bfe93005..f37a69c75 100644 --- a/docs/cli/trace.md +++ b/docs/cli/trace.md @@ -8,8 +8,8 @@ title: Commit Context Tracing icon: lucide/git-commit-horizontal --- -![ctx](../images/ctx-banner.png) +![ctx](../images/ctx-banner.png) ### `ctx trace` diff --git a/docs/cli/trigger.md b/docs/cli/trigger.md index 024a13fcd..a826b3582 100644 --- a/docs/cli/trigger.md +++ b/docs/cli/trigger.md @@ -23,7 +23,7 @@ events. ctx trigger ``` -!!! warning "Triggers execute arbitrary scripts" +!!! warning "Triggers Execute Arbitrary Scripts" A trigger is a shell script with the executable bit set. 
It runs with the same privileges as your AI tool and receives JSON input on stdin. Treat triggers like @@ -31,7 +31,7 @@ ctx trigger understand. A malicious or buggy trigger can block tool calls, corrupt context files, or exfiltrate data. -### Where triggers live +### Where Triggers Live Triggers live in `.context/hooks//` as executable scripts. The on-disk directory name is still `hooks/` for @@ -53,7 +53,7 @@ Each script: └── record-edit.sh ``` -### Trigger types +### Trigger Types | Type | Fires when | |-----------------|--------------------------------------| @@ -64,7 +64,7 @@ Each script: | `file-save` | When a file is saved | | `context-add` | When a context entry is added | -### Input and output contract +### Input and Output Contract Each trigger receives a JSON object on stdin with the event details. Minimal contract (fields vary by trigger type): @@ -189,7 +189,7 @@ ctx trigger disable inject-context # Disabled .context/hooks/session-start/inject-context.sh ``` -### Three hooking concepts in ctx — don't confuse them +### Three Hooking Concepts in ctx — Don't Confuse Them This is a common source of confusion. `ctx` has three distinct hook-like layers, and they serve different purposes: @@ -207,7 +207,7 @@ be portable across tools. `ctx system` hooks are not something you author — they're the internal nudge machinery that ships with ctx. -### See also +### See Also - [`ctx steering`](steering.md) — persistent AI behavioral rules (a different concept; rules vs scripts) diff --git a/docs/home/common-workflows.md b/docs/home/common-workflows.md index a09ac82bd..1fa7e51c4 100644 --- a/docs/home/common-workflows.md +++ b/docs/home/common-workflows.md @@ -24,7 +24,7 @@ For deeper, step-by-step guides, see [Recipes](../recipes/index.md). ## Track Context -!!! tip "Prefer skills over raw commands" +!!! 
tip "Prefer Skills over Raw Commands" When working with an AI agent, use `/ctx-task-add`, `/ctx-decision-add`, or `/ctx-learning-add` instead of raw `ctx add` commands. The agent automatically picks up session ID, @@ -141,7 +141,7 @@ Open [http://localhost:8000](http://localhost:8000) to browse. To update after new sessions, run the same two commands again. -### Safe By Default +### Safe by Default `ctx journal import --all` is **safe by default**: diff --git a/docs/home/configuration.md b/docs/home/configuration.md index 528951b68..58518c04b 100644 --- a/docs/home/configuration.md +++ b/docs/home/configuration.md @@ -53,7 +53,7 @@ per-project. via `ctx config switch dev` / `ctx config switch base`. See [Contributing: Configuration Profiles](contributing.md#configuration-profiles). -!!! tip "Using a Different .context Directory" +!!! tip "Using a Different .Context Directory" The default `.context/` directory can be changed per-project via the `context_dir` key in `.ctxrc`, the `CTX_DIR` environment variable, or the `--context-dir` CLI flag. diff --git a/docs/home/contributing.md b/docs/home/contributing.md index 33e7d44a7..c61094b18 100644 --- a/docs/home/contributing.md +++ b/docs/home/contributing.md @@ -153,7 +153,7 @@ and are now available to all ctx users: `/ctx-brainstorm`, `/ctx-link-check`, ---- -## How To Add Things +## How to Add Things ### Adding a New CLI Command @@ -214,7 +214,7 @@ internal/err/config/config.go # errors for configuration internal/err/cli/cli.go # errors for CLI argument validation ``` -#### Config constants: `internal/config/` +#### Config Constants: `internal/config/` Pure-constant leaf packages with zero internal dependencies (stdlib only). Over 60 sub-packages, organized by domain. See @@ -230,7 +230,7 @@ only). Over 60 sub-packages, organized by domain. 
See | User-facing text YAML keys | `config/embed/text/.go` | | Time durations, thresholds | `config//` | -#### The assets pipeline +#### The Assets Pipeline User-facing text flows through a three-level chain: @@ -258,7 +258,7 @@ new AI tool (e.g. Aider, Cursor): Pattern to follow: the Claude Code JSONL parser in `internal/journal/parser/`. -!!! note "Multilingual session headers" +!!! note "Multilingual Session Headers" The Markdown parser recognizes session header prefixes configured via `session_prefixes` in `.ctxrc` (default: `Session:`). To support a new language, users add a prefix to their `.ctxrc` - no code change needed. @@ -319,7 +319,7 @@ make plugin-reload # nukes ~/.claude/plugins/cache/activememory-ctx/ The plugin will be re-installed from your local marketplace on startup. No version bump is needed during development. -!!! tip "Version bumps are for releases, not iteration" +!!! tip "Version Bumps Are for Releases, Not Iteration" Only bump `VERSION`, `plugin.json`, and `marketplace.json` when cutting a release. During development, `make plugin-reload` is all you need. diff --git a/docs/home/faq.md b/docs/home/faq.md index aa472d143..e0c98e7a6 100644 --- a/docs/home/faq.md +++ b/docs/home/faq.md @@ -19,7 +19,7 @@ terminal, a browser, or a code review. There's no schema to learn, no binary format to decode, no vendor lock-in. You can inspect your context with `cat`, diff it with `git diff`, and review it in a PR. -## Does ctx work offline? +## Does `ctx` Work Offline? Yes. ctx is completely local. It reads and writes files on disk, generates context packets from local state, and requires no network @@ -27,7 +27,7 @@ access. The only feature that touches the network is the optional [webhook notifications](../recipes/webhook-notifications.md) hook, which you have to explicitly configure. -## What gets committed to git? +## What Gets Committed to Git? The `.context/` directory: yes, commit it. That's the whole point. 
Team members and AI agents read the same context files. @@ -42,7 +42,7 @@ What **not** to commit: commit if you want shared scratchpad state. See [Scratchpad](../reference/scratchpad.md) for details. -## How big should my token budget be? +## How Big Should My Token Budget Be? The default is 8000 tokens, which works well for most projects. Configure it via `.ctxrc` or the `CTX_TOKEN_BUDGET` environment @@ -65,7 +65,7 @@ content first, so CONSTITUTION and TASKS always make the cut. See [Configuration](configuration.md) for all available settings. -## Why not a database? +## Why Not a Database? Files are inspectable, diffable, and reviewable in pull requests. You can `grep` them, `cat` them, pipe them through `jq` or `awk`. @@ -75,7 +75,7 @@ A database would add a dependency, require migrations, and make context opaque. The design bet is that context should be as visible and portable as the code it describes. -## Does it work with tools other than Claude Code? +## Does It Work with Tools Other than Claude Code? Yes. `ctx agent` outputs a context packet that any AI tool can consume: paste it into ChatGPT, Cursor, Copilot, Aider, or anything @@ -89,7 +89,7 @@ instruction files or manual pasting. See [Integrations](../operations/integrations.md) for tool-specific setup, including the [multi-tool recipe](../recipes/multi-tool-setup.md). -## Can I use ctx on an existing project? +## Can I Use `ctx` on an Existing Project? Yes. Run `ctx init` in any repo and it creates `.context/` with template files. Start recording decisions, tasks, and conventions as @@ -100,7 +100,7 @@ See [Getting Started](getting-started.md) for the full setup flow, or [Joining a ctx Project](joining-a-project.md) if someone else already initialized it. -## What happens when context files get too big? +## What Happens When Context Files Get Too Big? Token budgeting handles this automatically. 
`ctx agent` prioritizes content by file priority (CONSTITUTION first, GLOSSARY last) and @@ -113,7 +113,7 @@ old entries, keeping active context lean. You can also run The goal is to keep context files focused on **current** state. Historical entries belong in git history or the archive. -## Is .context/ meant to be shared? +## Is `.context/` Meant to Be Shared? Yes. Commit it to your repo. Every team member and every AI agent reads the same files. That's the mechanism for shared memory: diff --git a/docs/home/first-session.md b/docs/home/first-session.md index e7c0472d7..975724342 100644 --- a/docs/home/first-session.md +++ b/docs/home/first-session.md @@ -125,7 +125,7 @@ This loads your context and presents a structured confirm the agent knows what is going on. Context also loads automatically via hooks, but the explicit ceremony gives you a **readback** to verify. -!!! tip "Steering files fire automatically" +!!! tip "Steering Files Fire Automatically" If you edited the four foundation files scaffolded by `ctx init` (`.context/steering/product.md`, `tech.md`, `structure.md`, `workflow.md`), their `inclusion: @@ -262,7 +262,7 @@ You should **`.gitignore`** the generated and sensitive paths: .claude/settings.local.json ``` -!!! tip "`ctx init` Patches Your .gitignore for You" +!!! tip "`ctx init` Patches Your .Gitignore for You" `ctx init` automatically adds these entries to your `.gitignore`. Review the additions with `cat .gitignore` after init. diff --git a/docs/home/getting-started.md b/docs/home/getting-started.md index 954527cd6..4fc6de7d1 100644 --- a/docs/home/getting-started.md +++ b/docs/home/getting-started.md @@ -253,7 +253,7 @@ For other tools, paste the output of: ctx agent --budget 8000 ``` -### 3b. Set Up for Your AI Tool +### 3B. 
Set Up for Your AI Tool If you use an MCP-compatible tool, generate the integration config with `ctx setup`: diff --git a/docs/home/hub.md b/docs/home/hub.md index f9d3fe16c..3dba9811f 100644 --- a/docs/home/hub.md +++ b/docs/home/hub.md @@ -5,13 +5,13 @@ # \ Copyright 2026-present Context contributors. # SPDX-License-Identifier: Apache-2.0 -title: The ctx Hub +title: Hub icon: lucide/network --- ![ctx](../images/ctx-banner.png) -## The `ctx` Hub +## Sharing Is Caring `ctx` projects are normally **independent**: each project has its own `.context/` directory, its own decisions, its own learnings, @@ -26,7 +26,7 @@ A is the same gotcha waiting for you in service B. The **`ctx` Hub** is the feature that makes those specific entries travel, without replicating everything else. -## What the Hub actually is +## What the Hub Actually Is In one paragraph: the `ctx` Hub is a **fan-out channel** for four specific kinds of structured entries — `decision`, @@ -52,11 +52,11 @@ If you want "my agent in project B sees everything my agent did in project A," that's not the Hub. Local session density stays local. -## Who it's for +## Who It's For Two shapes, same mechanics, different trust models. -### Personal cross-project brain +### Personal Cross-Project Brain **One developer, many projects.** You want a learning from project A to show up when you open project B a week later. You @@ -64,7 +64,7 @@ want a convention you codified in your dotfiles project to be visible everywhere else on your workstation. Run a Hub on localhost, register each project, done. -### Small trusted team +### Small Trusted Team **A few teammates on a LAN or a hub.ctx-like self-hosted server.** You want team conventions to propagate without a @@ -77,7 +77,7 @@ The Hub is **not** a multi-tenant public service. It assumes everyone holding a client token is friendly. Don't stand up `hub.example.com` for untrusted participants. 
-## Going further +## Going Further - **First-time setup:** [Hub: Getting Started](../recipes/hub-getting-started.md) — a five-minute walkthrough on localhost. diff --git a/docs/home/index.md b/docs/home/index.md index 441db5467..7b7f83119 100644 --- a/docs/home/index.md +++ b/docs/home/index.md @@ -12,63 +12,53 @@ icon: lucide/home *Deterministic. Git-native. Human-readable. Local-first*. -**Start here**. +**Start here**. Learn what `ctx` does, set it up, and run your first session. !!! warning "Pre-1.0: Moving Fast" `ctx` is under active development. This website tracks the - **development branch**, not the latest release: + **development branch**, not the latest release: - Some features described here may not exist in the binary + Some features described here may not exist in the binary you have installed. - Expect rough edges. + Expect rough edges. If something is missing or broken, [open an issue](https://github.com/ActiveMemory/ctx/issues). --- -### [About ctx](about.md) +## Introduction + +### [About](about.md) What `ctx` is, how it works, and why **persistent context changes** how you work with AI. ---- - ### [Is It Right for Me?](is-ctx-right.md) Good fit, not-so-good fit, and a **5-minute trial** to find out for yourself. ---- - -### [Community](community.md) +### [FAQ](faq.md) -We are the builders who care about **durable** context.
-Join the community. Hang out in IRC. Star `ctx` on GitHub. +Quick answers to the questions newcomers ask most about +**`ctx`**, files, tooling, and trade-offs. --- -### [Contributing](contributing.md) - -**Development setup**, project layout, and pull request process. - ---- +## Get Started ### [Getting Started](getting-started.md) Install the **binary**, set up the **plugin**, and **verify** it works. ---- - ### [Your First Session](first-session.md) **Step-by-step** walkthrough from `ctx init` to verified recall. ---- - ### [Common Workflows](common-workflows.md) Day-to-day commands for **tracking** context, **checking** health, @@ -76,19 +66,69 @@ and browsing **history**. --- +## Concepts + ### [Context Files](context-files.md) What each `.context/` file does. What's their **purpose**. How do we best **leverage** them. ---- - ### [Configuration](configuration.md) Flexible **configuration**: `.ctxrc`, environment variables, and CLI flags. +### [Hub](hub.md) + +A **fan-out channel** for decisions, learnings, conventions, and +tasks that need to cross **project boundaries** — without replicating +everything else. + --- +## Working with AI + ### [Prompting Guide](prompting-guide.md) **Effective prompts** for AI sessions with `ctx`. + +### [Keeping AI Honest](keeping-ai-honest.md) + +AI agents **confabulate**: they invent history, claim familiarity +with decisions never made, and sometimes declare tasks complete +when they aren't. Tools and habits to push back. + +### [My AI Keeps Making the Same Mistakes](repeated-mistakes.md) + +Stop **rediscovering** the same bugs and dead-ends across sessions. + +### [Joining a Project](joining-a-project.md) + +You inherited a `.context/` directory. Get **oriented fast**: +priority order, what to read first, how to ramp up. + +--- + +## Customization + +### [Steering Files](steering.md) + +Tell the assistant **how to behave** when a specific kind +of prompt arrives. 
+ +### [Lifecycle Triggers](triggers.md) + +Make things **happen** at session boundaries: block dangerous +tool calls, inject standup notes, log file saves. + +--- + +## Community + +### [#ctx](community.md) + +We are the builders who care about **durable** context.
+Join the community. Hang out in IRC. Star `ctx` on GitHub. + +### [Contributing](contributing.md) + +**Development setup**, project layout, and pull request process. diff --git a/docs/home/joining-a-project.md b/docs/home/joining-a-project.md index 8655ed43a..2b44f1ba7 100644 --- a/docs/home/joining-a-project.md +++ b/docs/home/joining-a-project.md @@ -5,7 +5,7 @@ # \ Copyright 2026-present Context contributors. # SPDX-License-Identifier: Apache-2.0 -title: Joining a ctx Project +title: Joining a Project icon: lucide/user-plus --- diff --git a/docs/home/keeping-ai-honest.md b/docs/home/keeping-ai-honest.md index 5b16fb0cd..52b8158dd 100644 --- a/docs/home/keeping-ai-honest.md +++ b/docs/home/keeping-ai-honest.md @@ -51,7 +51,7 @@ does not exist, the AI is hallucinating - and you know immediately. This is **grounded memory**: claims that trace back to artifacts you control and can audit. -## CONSTITUTION.md: Hard Guardrails +## `CONSTITUTION.md`: Hard Guardrails CONSTITUTION.md defines rules the AI must treat as inviolable. These are not suggestions or best practices - they are constraints that @@ -123,7 +123,7 @@ verification step confirms that context still matches reality. When it does not, you fix it - and the next session starts from truth, not from drift. -## Trust Through Structure +## Trust through Structure The common thread across all of these mechanisms is **structure over prose**. Timestamps make claims verifiable. Constitutional rules make diff --git a/docs/home/prompting-guide.md b/docs/home/prompting-guide.md index 5d80ffe6b..8f4631a19 100644 --- a/docs/home/prompting-guide.md +++ b/docs/home/prompting-guide.md @@ -64,7 +64,7 @@ This guide documents prompts that **reliably** produce **good results**. 
## Session Start -### "*Do you remember?*" +### "*do you remember?*" Triggers the AI to silently read `TASKS.md`, `DECISIONS.md`, `LEARNINGS.md`, and check recent history via `ctx journal` before @@ -95,7 +95,7 @@ protocol (*including the failure modes, the timing problem, and the hook design that solved it*) see [The Dog Ate My Homework](../blog/2026-02-25-the-homework-problem.md). -### "*What's the current state?*" +### "*What's the Current State?*" Prompts reading of `TASKS.md`, recent sessions, and status overview. @@ -111,7 +111,7 @@ Use this when **resuming work** after a break. ## During Work -### "*Why doesn't X work?*" +### "*Why Doesn't X Work?*" This triggers **root cause analysis** rather than surface-level fixes. @@ -126,7 +126,7 @@ through code, check configurations, and identify the actual cause. This was a fix that benefited all users of `ctx`. -### "*Is this consistent with our decisions?*" +### "*Is This Consistent with Our Decisions?*" This prompts checking `DECISIONS.md` before implementing. @@ -137,7 +137,7 @@ Use this before making architectural choices. * "*Check if we've decided on this before*" * "*Does this align with our conventions?*" -### "*What would break if we...*" +### "*What Would Break If We...*" This triggers **defensive thinking** and **impact analysis**. @@ -147,7 +147,7 @@ Use this before making significant changes. What would break if we change the Settings struct? ``` -### "*Before you start, read X*" +### "*Before You Start, Read X*" This ensures specific context is loaded before work begins. @@ -218,7 +218,7 @@ Use them **in the moment** when you see the behavior. ## Reflection and Persistence -### "*What did we learn?*" +### "*What Did We Learn?*" This prompts **reflection** on the session and often triggers adding learnings to `LEARNINGS.md`. @@ -228,7 +228,7 @@ Use this after completing a task or debugging session. This is an **explicit reflection prompt**. The AI will summarize insights and often offer to persist them. 
-### "*Add this as a learning/decision*" +### "*Add This as a Learning/decision*" This is an **explicit persistence request**. @@ -242,7 +242,7 @@ Add this as a learning. # and let the AI autonomously infer and summarize. ``` -### "*Save context before we end*" +### "*Save Context Before We End*" This triggers **context persistence** before the session closes. @@ -260,7 +260,7 @@ Use it at the end of the session or before switching topics. ## Exploration and Research -### "Explore the codebase for X" +### "Explore the Codebase for X" This triggers thorough codebase search rather than guessing. @@ -269,7 +269,7 @@ Use this when you need to understand how something works. This works because "**Explore**" signals that **investigation is needed**, not immediate action. -### "*How does X work in this codebase?*" +### "*How Does X Work in This Codebase?*" This prompts reading actual code rather than explaining general concepts. @@ -279,7 +279,7 @@ Use this to understand the existing implementation. How does session saving work in this codebase? ``` -### "*Find all places where X*" +### "*Find All Places Where X*" This triggers a **comprehensive search** across the codebase. @@ -289,14 +289,14 @@ Use this before refactoring or understanding the impact. ## Meta and Process -### "*What should we document from this?*" +### "*What Should We Document from This?*" This prompts identifying learnings, decisions, and conventions worth persisting. Use this after complex discussions or implementations. -### "*Is this the right approach?*" +### "*Is This the Right Approach?*" This invites the AI to challenge the current direction. @@ -311,7 +311,7 @@ AIs often default to agreeing; this prompt signals you want an This sets the tone for the entire session: The AI will flag questionable choices proactively instead of waiting to be asked. 
-### "*What am I missing?*" +### "*What Am I Missing?*" This prompts thinking about **edge cases**, overlooked requirements, or **unconsidered approaches**. @@ -378,7 +378,7 @@ Use `ctx` skills by name: Skills combine a prompt, tool permissions, and domain knowledge into a single invocation. -!!! info "Skills Beyond Claude Code" +!!! info "Skills beyond Claude Code" The `/slash-command` syntax above is Claude Code native, but the underlying `SKILL.md` files are a standard markdown format that any agent can consume. If you use a different coding agent, consult its @@ -427,7 +427,7 @@ evidence. ## Safety Invariants -!!! warning "These are **Invariants**: Not Suggestions" +!!! warning "These Are **Invariants**: Not Suggestions" A prompting guide earns its trust by **being honest about risk**. These four rules mentioned below don't change with model versions, agent diff --git a/docs/home/repeated-mistakes.md b/docs/home/repeated-mistakes.md index 0a72400ce..d4fbcb6dc 100644 --- a/docs/home/repeated-mistakes.md +++ b/docs/home/repeated-mistakes.md @@ -30,7 +30,7 @@ persistent context, every session starts with amnesia. `ctx` gives your AI three files that directly prevent repeated mistakes, each targeting a different failure mode. -### DECISIONS.md: Stop Relitigating Settled Choices +### `DECISIONS.md`: Stop Relitigating Settled Choices When you make an architectural decision, record it with rationale and rejected alternatives. The AI reads this at session start and treats @@ -56,7 +56,7 @@ Next session, when the AI considers auth, it reads this entry and builds on the decision instead of re-debating it. If someone asks "why not sessions?", the rationale is already there. -### LEARNINGS.md: Capture Gotchas Once +### `LEARNINGS.md`: Capture Gotchas Once Learnings are the bugs, quirks, and non-obvious behaviors that cost you time the first time around. Write them down so they cost you zero time @@ -81,7 +81,7 @@ for SQLite builds. Never set CGO_ENABLED=0. 
Without this entry, the next session that touches the Dockerfile will hit the same wall. With it, the AI knows before it starts. -### CONSTITUTION.md: Draw Hard Lines +### `CONSTITUTION.md`: Draw Hard Lines Some mistakes are not about forgetting - they are about boundaries the AI should never cross. CONSTITUTION.md sets inviolable rules. diff --git a/docs/home/steering.md b/docs/home/steering.md index c8bc69a82..572df0b85 100644 --- a/docs/home/steering.md +++ b/docs/home/steering.md @@ -5,13 +5,13 @@ # \ Copyright 2026-present Context contributors. # SPDX-License-Identifier: Apache-2.0 -title: Steering files +title: Steering Files icon: lucide/compass --- ![ctx](../images/ctx-banner.png) -## Steering files +## Steering Files `ctx` projects talk to AI assistants through several layers — context files, decisions, conventions, the agent context @@ -27,7 +27,7 @@ and syncs them out to each AI tool's native config (Claude Code, Cursor, Kiro, Cline) so the rules actually land in the prompt pipeline. -## Not the same as decisions or conventions +## Not the Same as Decisions or Conventions The three look similar on disk but serve different purposes: @@ -40,7 +40,7 @@ The three look similar on disk but serve different purposes: If you find yourself writing "the AI should always do X when asked about Y," that belongs in steering, not decisions. -## Your first steering files +## Your First Steering Files **`ctx init` scaffolds four foundation steering files** in `.context/steering/` so you start with something to edit @@ -72,7 +72,7 @@ Re-running `ctx init` is safe: existing files are left alone, so your edits survive. Use `ctx init --no-steering-init` to opt out of the scaffold entirely. -## Inclusion modes +## Inclusion Modes Each steering file declares an inclusion mode in its frontmatter: @@ -120,7 +120,7 @@ pick `always`, even if it's overkill for your Cursor setup. The context budget cost is small; the alternative (silently not firing) is worse. 
-## Two families of AI tools, two delivery paths +## Two Families of AI Tools, Two Delivery Paths Not every AI tool consumes steering the same way. ctx handles two tool families differently, and it's worth @@ -165,7 +165,7 @@ works for Claude Code. - Using both? → Run `sync` for the native-rules tools; the hook+MCP pipeline covers Claude Code automatically. -## Two shapes of automation: rules and scripts +## Two Shapes of Automation: Rules and Scripts Steering is one of **two** hook-like layers ctx provides for customizing AI behavior. They're complementary: @@ -180,7 +180,7 @@ Pick steering when you want "always remind the AI of X." Pick triggers when you want "do Y when event Z happens." They can coexist — many projects use both. -## Where to go next +## Where to Go Next - **[Writing Steering Files](../recipes/steering.md)** — a six-step walkthrough: scaffold, write the rule, preview diff --git a/docs/home/triggers.md b/docs/home/triggers.md index a9e71880e..0d91808b5 100644 --- a/docs/home/triggers.md +++ b/docs/home/triggers.md @@ -5,7 +5,7 @@ # \ Copyright 2026-present Context contributors. # SPDX-License-Identifier: Apache-2.0 -title: Lifecycle triggers +title: Lifecycle Triggers icon: lucide/zap --- @@ -38,9 +38,9 @@ type. | `file-save` | A file is saved | Lint on save, update indices | | `context-add` | A new entry is added to `.context/` | Cross-link, notify, enrich | -## Triggers are arbitrary code — treat them like pre-commit hooks +## Triggers Are Arbitrary Code — Treat Them like Pre-Commit Hooks -!!! warning "Only enable scripts you've read and understand" +!!! warning "Only Enable Scripts You've Read and Understand" A trigger is a shell script with the executable bit set. It runs with the same privileges as your AI tool and receives JSON input on stdin. A malicious or buggy @@ -52,7 +52,7 @@ type. `ctx trigger enable ` after reviewing the contents. That's not a suggestion — it's the security model. 
-## Three hook-like layers in ctx +## Three Hook-like Layers in ctx Triggers are one of **three** distinct hook-like concepts in ctx. The names are similar but the owners and use cases are @@ -67,7 +67,7 @@ not: This page is about the first category. The other two run automatically and are invisible to you. -## Triggers vs steering — same problem, different shape +## Triggers vs Steering — Same Problem, Different Shape Triggers are the imperative counterpart to [**steering files**](steering.md). Steering expresses @@ -80,7 +80,7 @@ complementary, not competing: Most projects use both. -## Where to go next +## Where to Go Next - **[Authoring Lifecycle Triggers](../recipes/triggers.md)** — walkthrough with security guidance: scaffold, test, diff --git a/docs/index.md b/docs/index.md index 31fab1939..1b922530b 100644 --- a/docs/index.md +++ b/docs/index.md @@ -5,13 +5,13 @@ # \ Copyright 2026-present Context contributors. # SPDX-License-Identifier: Apache-2.0 -title: The ctx Manifesto +title: Manifesto icon: lucide/flame --- ![ctx](images/ctx-banner.png) -# `ctx` Manifesto +# The `ctx` Manifesto **Creation, not code**. @@ -101,7 +101,7 @@ Vision, goals, and direction are **human responsibilities**. **Nothing** critical should depend on recall. -!!! danger "Oral Tradition Does not Scale" +!!! danger "Oral Tradition Does Not Scale" If intent cannot be inspected, it cannot be enforced. --- @@ -189,7 +189,7 @@ Memory heuristics **drift**. ## Verified Reality Is the Scoreboard -!!! danger "Activity is a False Proxy" +!!! danger "Activity Is a False Proxy" Output volume correlates *poorly* with impact. * *Code* is **not** progress. @@ -238,7 +238,7 @@ We build to: ## Failures Are Assets -!!! important "Failure Without Capture is Waste" +!!! important "Failure without Capture Is Waste" **Pain** that does not teach is pure *loss*. **Failures** are *not* erased: They are **preserved**. @@ -280,9 +280,9 @@ A repeated mistake is a missing `ctx` artifact. 
--- -## Encode Intent Into the Environment +## Encode Intent into the Environment -!!! danger "Goodwill Does not Belong to the Table" +!!! danger "Goodwill Does Not Belong to the Table" *Alignment* that depends on memory will **drift**. *Alignment* **cannot depend on** *memory* or *goodwill*. @@ -364,7 +364,7 @@ Transparent `ctx` **compounds** understanding. ## Continuously Verify the System -!!! warning "Stability is Temporary" +!!! warning "Stability Is Temporary" Every assumption has a half-life: * Models drift. @@ -387,7 +387,7 @@ Transparent `ctx` **compounds** understanding. ## `ctx` Is Leverage -!!! note "Humans are Decision Engines" +!!! note "Humans Are Decision Engines" *Execution* should **not** consume *judgment*. Humans **must not be** typists. diff --git a/docs/operations/autonomous-loop.md b/docs/operations/autonomous-loop.md index 8336ca862..ae67a7b15 100644 --- a/docs/operations/autonomous-loop.md +++ b/docs/operations/autonomous-loop.md @@ -129,7 +129,7 @@ Claude Code has built-in loop support: This is convenient for quick iterations, but be aware of important caveats: -!!! warning "This Loop Is not Pure" +!!! warning "This Loop Is Not Pure" Claude Code's `/loop` runs all iterations **within the same session**. This means: @@ -332,7 +332,7 @@ my-project/ └── src/ # Your code ``` -### Sample TASKS.md for Autonomous Loops +### Sample `TASKS.md` for Autonomous Loops ```markdown # Tasks diff --git a/docs/operations/hub-failure-modes.md b/docs/operations/hub-failure-modes.md index f28a3f267..d587eac10 100644 --- a/docs/operations/hub-failure-modes.md +++ b/docs/operations/hub-failure-modes.md @@ -11,13 +11,13 @@ icon: lucide/alert-triangle ![ctx](../images/ctx-banner.png) -# `ctx` Hub: Failure modes +# `ctx` Hub: Failure Modes What can go wrong, what the system does about it, and what you should do. Complementary to [`ctx` Hub Operations](hub.md). -!!! info "Design posture" +!!! 
info "Design Posture" The hub is **best-effort knowledge sharing**, not a durable ledger. Local `.context/` files are the source of truth for each project; the hub is a fan-out channel. This framing @@ -25,7 +25,7 @@ should do. Complementary to ## Network -### Client loses connection mid-stream +### Client Loses Connection Mid-Stream **What happens:** `ctx connection listen` detects the EOF, waits with exponential backoff, and reconnects. On reconnect it passes @@ -34,7 +34,7 @@ its last-seen sequence; the hub replays everything newer. **What you should do:** nothing. If reconnects are looping, check firewall state on the hub and `ctx hub status` output. -### Partition — majority side reachable +### Partition — Majority Side Reachable **What happens:** clients routed to the majority side continue to publish and listen. The minority nodes step down to followers @@ -43,7 +43,7 @@ that cannot accept writes (Raft quorum lost). **What you should do:** let it heal. When the partition closes, followers catch up via sequence-based sync automatically. -### Partition — split brain (no quorum) +### Partition — Split Brain (No Quorum) **What happens:** no node holds a majority, so no leader is elected. All nodes become read-only. `ctx connection publish` and @@ -54,7 +54,7 @@ still succeed. permanent (e.g., a data center is gone), bootstrap a new cluster from the survivors with `ctx hub peer remove` for the dead nodes. -### Hub unreachable during `ctx add --share` +### Hub Unreachable during `ctx add --share` **What happens:** the local write succeeds; the share step prints a warning and exits non-zero on the share leg only. `--share` is @@ -66,7 +66,7 @@ The hub deduplicates by entry ID. ## Storage -### Disk full on the leader +### Disk Full on the Leader **What happens:** `entries.jsonl` append fails. The hub rejects writes with an error and stays up for read traffic. Clients @@ -88,7 +88,7 @@ earlier line is malformed, the hub refuses to start. line. 
Move the bad region to a `.quarantine` file, then start. Nothing is ever silently dropped. -### `meta.json` / `entries.jsonl` sequence mismatch +### `meta.json` / `entries.jsonl` Sequence Mismatch **What happens:** the hub refuses to start. This usually means someone copied one file without the other. @@ -99,13 +99,13 @@ or accept the higher sequence by regenerating `meta.json` from ## Cluster -### Leader crash, clean shutdown +### Leader Crash, Clean Shutdown **What happens:** `ctx hub stop` triggers `stepdown` first, so a new leader is elected before the old one exits. In-flight writes drain. Clients reconnect to the new leader transparently. -### Leader crash, hard fail (kill -9, power loss) +### Leader Crash, Hard Fail (Kill -9, Power Loss) **What happens:** Raft detects the missing heartbeat and elects a new leader within a few seconds. Writes the old leader accepted @@ -116,7 +116,7 @@ warning in [the cluster recipe](../recipes/hub-cluster.md). `ctx connection listen` on a dedicated "collector" project that persists entries locally as a write-ahead backup. -### Split-brain after rejoin +### Split-Brain After Rejoin **What happens:** Raft reconciles: the minority side's uncommitted writes are discarded, and the majority's log is authoritative. @@ -126,9 +126,9 @@ minority had important writes, grep for them in `/entries.jsonl.rejected` (written by the reconciliation pass) and replay them with `ctx connection publish`. -## Auth and tokens +## Auth and Tokens -### Lost admin token +### Lost Admin Token **What happens:** you cannot register new projects. @@ -137,7 +137,7 @@ pass) and replay them with `ctx connection publish`. and regenerate — note that **all existing client tokens keep working**; only new registrations need the admin token. -### Compromised admin token +### Compromised Admin Token **What happens:** anyone with the token can register new projects and publish. 
They cannot read existing entries without @@ -148,7 +148,7 @@ a client token for a project that subscribes. suspicious client registrations via `clients.json`, and audit `entries.jsonl` for unexpected origins. -### Compromised client token +### Compromised Client Token **What happens:** the attacker can publish as that project and read anything that project is subscribed to. Because `Origin` @@ -164,7 +164,7 @@ published after the compromise timestamp and quarantine any that look suspicious — remember that `Origin` on those entries proves nothing. -### Compromised hub host +### Compromised Hub Host **What happens:** `/clients.json` stores client tokens **verbatim** (not hashed). Anyone with read access to @@ -178,7 +178,7 @@ See [Security model](../security/hub.md#hub-side-token-storage) for the mitigations that reduce the blast radius while the hashing follow-up is pending. -## Clock skew +## Clock Skew Hub entries carry a timestamp assigned **by the publishing client**. The hub does not rewrite timestamps. Clients with @@ -189,7 +189,7 @@ order in the shared feed. see entries dated in the future or far past, the publisher's clock is the culprit. -## The short list +## The Short List | Symptom | First thing to check | |-----------------------------------|-----------------------------------| @@ -200,7 +200,7 @@ clock is the culprit. | Duplicate entries in shared feed | Client replayed after restore — safe, dedup by ID | | Followers lagging | Disk or network on the follower, not the leader | -## See also +## See Also - [`ctx` Hub Operations](hub.md) - [`ctx` Hub security model](../security/hub.md) diff --git a/docs/operations/hub.md b/docs/operations/hub.md index 79ea3b09f..0e9d512db 100644 --- a/docs/operations/hub.md +++ b/docs/operations/hub.md @@ -24,7 +24,7 @@ explains what the hub is, the two user stories it supports it does **not** do. A client-side tour is in [Getting Started](../recipes/hub-getting-started.md). -!!! info "Operator cheat sheet" +!!! 
info "Operator Cheat Sheet" - The hub fans out four entry types only: `decision`, `learning`, `convention`, `task`. Journals, scratchpad, and other local state are out of scope. @@ -34,7 +34,7 @@ it does **not** do. A client-side tour is in - The data model is an **append-only JSONL log** plus two small JSON sidecar files. Nothing is rewritten in place. -## Data directory layout +## Data Directory Layout The hub stores everything under a single data directory (default `~/.ctx/hub-data/`, override with `--data-dir`). @@ -63,7 +63,7 @@ The hub stores everything under a single data directory * `clients.json` holds hashed client tokens; losing it invalidates all client registrations. -## Starting and stopping +## Starting and Stopping === "Foreground" @@ -85,7 +85,7 @@ in-flight RPCs to drain, then exits. If the daemon is wedged, remove `hub.pid` and send `SIGKILL` manually — `entries.jsonl` is crash-safe, so you will not lose accepted writes. -## Systemd unit +## Systemd Unit For production single-node deployments, run the hub as a systemd service instead of `--daemon`: @@ -120,7 +120,7 @@ sudo systemctl enable --now ctx-hub sudo journalctl -u ctx-hub -f ``` -## Backup and restore +## Backup and Restore Because `entries.jsonl` is append-only, backups are trivial: @@ -150,7 +150,7 @@ will re-publish on the next `listen` reconnect, because the hub now reports a lower sequence than what clients have on disk. This is safe — the store deduplicates by entry ID. -## Log rotation +## Log Rotation `entries.jsonl` grows unbounded. For long-lived hubs, rotate it offline: @@ -191,13 +191,13 @@ For cluster deployments, watch for: ## Upgrading -The JSONL format is versioned in `meta.json`. Ctx refuses to start +The JSONL format is versioned in `meta.json`. `ctx` refuses to start against a newer store version than it understands; older store versions are upgraded in place at first start after an upgrade. 
**Always back up `/` before upgrading.** -## See also +## See Also - [`ctx` Hub failure modes](hub-failure-modes.md) - [`ctx` Hub security model](../security/hub.md) diff --git a/docs/operations/index.md b/docs/operations/index.md index 965299a68..1adf96e59 100644 --- a/docs/operations/index.md +++ b/docs/operations/index.md @@ -10,28 +10,7 @@ Guides for **installing**, **upgrading**, **integrating**, and --- -## Hub - -Operator guides for running a `ctx` Hub — the gRPC server that -fans out structured entries across projects. If you're a client -connecting to a Hub someone else runs, see -[`ctx connect`](../cli/connection.md) and the -[Hub recipes](../recipes/hub-overview.md) instead. - -### [Hub Operations](hub.md) - -Data directory layout, daemon management, systemd unit, -backup and restore, log rotation, monitoring, and upgrades. - -### [Hub Failure Modes](hub-failure-modes.md) - -What can go wrong in network, storage, cluster, auth, and -clock layers — and what you should do about each one. Includes -the short-list table oncall engineers will want bookmarked. - ---- - -## Operating `ctx` +## Day-to-Day Everyday operation guides for anyone running `ctx` in a project or adopting it in a team. @@ -58,6 +37,27 @@ with `ctx` providing persistent memory between iterations. --- +## Hub + +Operator guides for running a `ctx` Hub — the gRPC server that +fans out structured entries across projects. If you're a client +connecting to a Hub someone else runs, see +[`ctx connect`](../cli/connection.md) and the +[Hub recipes](../recipes/hub-overview.md) instead. + +### [Hub Operations](hub.md) + +Data directory layout, daemon management, systemd unit, +backup and restore, log rotation, monitoring, and upgrades. + +### [Hub Failure Modes](hub-failure-modes.md) + +What can go wrong in network, storage, cluster, auth, and +clock layers — and what you should do about each one. Includes +the short-list table oncall engineers will want bookmarked. 
+ +--- + ## Maintainers Runbooks for people shipping `ctx` itself. diff --git a/docs/operations/integrations.md b/docs/operations/integrations.md index c002c08b2..6d6abc533 100644 --- a/docs/operations/integrations.md +++ b/docs/operations/integrations.md @@ -289,7 +289,7 @@ These are invoked in Claude Code with `/skill-name`. #### Blogging Skills -!!! tip "Blogging is a Better Way of Creating Release Notes" +!!! tip "Blogging Is a Better Way of Creating Release Notes" The blogging workflow can also double as generating release notes: AI reads your git commit history and creates a "*narrative*", @@ -669,7 +669,7 @@ The `ctx watch` command parses update commands from AI output. Use this format: | `convention` | CONVENTIONS.md | None | | `complete` | TASKS.md | None | -### Simple Format (tasks, conventions, complete) +### Simple Format (Tasks, Conventions, Complete) ```xml Implement rate limiting @@ -677,7 +677,7 @@ The `ctx watch` command parses update commands from AI output. Use this format: rate limiting ``` -### Structured Format (*learnings, decisions*) +### Structured Format (*Learnings, Decisions*) Learnings and decisions support structured attributes for better documentation: diff --git a/docs/operations/migration.md b/docs/operations/migration.md index e7077956a..4a975e645 100644 --- a/docs/operations/migration.md +++ b/docs/operations/migration.md @@ -204,7 +204,7 @@ ctx setup copilot # Generate Copilot tips ctx setup windsurf # Generate Windsurf config ``` -### Migrating Content Into `.context/` +### Migrating Content into `.context/` If you have project knowledge scattered across `.cursorrules` or custom prompt files, consider migrating it: @@ -261,7 +261,7 @@ git push Teammates pull and immediately have context. No per-developer setup needed. -### What About `.claude/`? +### What about `.claude/`? The `.claude/` directory contains permissions that `ctx init` seeds. Hooks and skills are provided by the `ctx` plugin (*not per-project files*). 
diff --git a/docs/operations/release.md b/docs/operations/release.md index 84c9c3ff9..1f3daace3 100644 --- a/docs/operations/release.md +++ b/docs/operations/release.md @@ -140,7 +140,7 @@ from the VERSION file. No source file needs editing. ## Troubleshooting -### "Release notes not found" +### "Release Notes Not Found" ``` ERROR: dist/RELEASE_NOTES.md not found. @@ -149,7 +149,7 @@ ERROR: dist/RELEASE_NOTES.md not found. Run `/_ctx-release-notes` in Claude Code first, or write `dist/RELEASE_NOTES.md` manually. -### "Working tree is not clean" +### "Working Tree Is Not Clean" ``` ERROR: Working tree is not clean. @@ -157,7 +157,7 @@ ERROR: Working tree is not clean. Commit or stash all changes before running `make release`. -### "Tag already exists" +### "Tag Already Exists" ``` ERROR: Tag v0.9.0 already exists. @@ -172,7 +172,7 @@ git tag -d v0.9.0 git push origin :refs/tags/v0.9.0 ``` -### CI build fails after tag push +### CI Build Fails After Tag Push The tag is already published. Fix the issue, bump to a patch version (e.g. `0.9.1`), and release again. Do not force-push diff --git a/docs/operations/upgrading.md b/docs/operations/upgrading.md index 607a79068..69dbb0415 100644 --- a/docs/operations/upgrading.md +++ b/docs/operations/upgrading.md @@ -39,7 +39,7 @@ ctx init --force --merge # /plugin → select ctx → Update now (if using Claude Code) ``` -## What Changes Between Versions +## What Changes between Versions `ctx init` generates two categories of files: diff --git a/docs/recipes/architecture-deep-dive.md b/docs/recipes/architecture-deep-dive.md index 258869892..63cde7ae1 100644 --- a/docs/recipes/architecture-deep-dive.md +++ b/docs/recipes/architecture-deep-dive.md @@ -8,8 +8,8 @@ title: Architecture Deep Dive icon: lucide/layers --- -![ctx](../images/ctx-banner.png) +![ctx](../images/ctx-banner.png) ## The Problem @@ -47,7 +47,7 @@ artifacts and extends them. 
## The Workflow -### Pass 1: Map what exists +### Pass 1: Map What Exists ```text /ctx-architecture @@ -77,7 +77,7 @@ dependencies). /ctx-architecture principal ``` -### Pass 2: Enrich with code intelligence +### Pass 2: Enrich with Code Intelligence ```text /ctx-architecture-enrich @@ -101,7 +101,7 @@ confidence for refactoring decisions or risk assessment. **Requires**: GitNexus MCP server connected. -### Pass 3: Hunt for failure modes +### Pass 3: Hunt for Failure Modes ```text /ctx-architecture-failure-analysis diff --git a/docs/recipes/autonomous-loops.md b/docs/recipes/autonomous-loops.md index 78281dc99..d3dd5d8d8 100644 --- a/docs/recipes/autonomous-loops.md +++ b/docs/recipes/autonomous-loops.md @@ -375,7 +375,7 @@ The self-check becomes the trigger: after meaningful work, the agent evaluates whether the context files reflect reality and updates them immediately if they do not. -### What the Agent Does Proactively Between Iterations +### What the Agent Does Proactively between Iterations At milestones within an iteration, the agent persists without waiting for instructions: diff --git a/docs/recipes/claude-code-permissions.md b/docs/recipes/claude-code-permissions.md index 5634e5d46..ce9e727b2 100644 --- a/docs/recipes/claude-code-permissions.md +++ b/docs/recipes/claude-code-permissions.md @@ -214,7 +214,7 @@ Project-local hooks (not part of the plugin) catch regex edge cases: ## The Maintenance Workflow -### After busy sessions +### After Busy Sessions Permissions accumulate fastest during debugging and exploration sessions. After a session where you clicked "*Allow*" many times: @@ -245,14 +245,14 @@ Run `/ctx-permission-sanitize` to catch security issues: * Overly broad permissions * Injection vectors -### When adding new skills +### When Adding New Skills If you create a custom `ctx-*` skill, add its `Skill()` entry to the allowlist manually. `ctx init` only populates the default permissions: It won't pick up custom skills. 
-### Golden image snapshots +### Golden Image Snapshots If manual cleanup is too tedious, use a **golden image** to automate it: diff --git a/docs/recipes/configuration-profiles.md b/docs/recipes/configuration-profiles.md index e885a69ad..bd90137bf 100644 --- a/docs/recipes/configuration-profiles.md +++ b/docs/recipes/configuration-profiles.md @@ -22,7 +22,7 @@ normal sessions. --- -## How it works +## How It Works The `ctx` repo ships two source profiles committed to git: @@ -37,7 +37,7 @@ is always a clean snapshot of one of the two sources. --- -## Switching profiles +## Switching Profiles ```bash # Switch to dev (verbose logging, notifications) @@ -58,7 +58,7 @@ in `.ctxrc`: present means dev, absent means base. --- -## Checking the active profile +## Checking the Active Profile ```bash ctx config status @@ -74,7 +74,7 @@ active: none (.ctxrc does not exist) --- -## Typical workflow +## Typical Workflow 1. **Start of a debugging session**: switch to dev for verbose logging and webhook notifications so you can trace hook @@ -95,7 +95,7 @@ active: none (.ctxrc does not exist) --- -## Customizing profiles +## Customizing Profiles Edit the source files directly: @@ -108,14 +108,14 @@ Edit the source files directly: After editing a source file, re-run `ctx config switch ` to apply the changes to the working copy. -!!! tip "Commit your profiles" +!!! tip "Commit Your Profiles" Both `.ctxrc.base` and `.ctxrc.dev` should be committed to git so team members share the same profile definitions. The working copy `.ctxrc` stays gitignored. --- -## Using the skill +## Using the Skill In a Claude Code session, say any of: diff --git a/docs/recipes/context-health.md b/docs/recipes/context-health.md index c29c0c01b..5b9f2ba38 100644 --- a/docs/recipes/context-health.md +++ b/docs/recipes/context-health.md @@ -143,7 +143,7 @@ How's the context looking? This turns maintenance from a scheduled chore into a conversation that happens **when** it matters. 
-### Step 3: Real-Time Detection During Work +### Step 3: Real-Time Detection during Work Agents *can* notice drift while working: When a mismatch is directly in the path of their current task. If an agent reads `ARCHITECTURE.md` to find where @@ -219,7 +219,7 @@ ctx doctor # everything in one pass ctx doctor --json # machine-readable for scripting ``` -!!! tip "Use `/ctx-doctor` Too" +!!! tip "Use `/ctx-doctor` Too" For agent-driven diagnosis that adds semantic analysis on top of the structural checks, use `/ctx-doctor`. diff --git a/docs/recipes/customizing-hook-messages.md b/docs/recipes/customizing-hook-messages.md index 2bcced119..7109bc418 100644 --- a/docs/recipes/customizing-hook-messages.md +++ b/docs/recipes/customizing-hook-messages.md @@ -176,7 +176,7 @@ manually. Not all messages are equal. The `list` command shows each message's category: -### Customizable (17 messages) +### Customizable (17 Messages) Messages that are **opinions**: project-specific wording that benefits from customization. These are the primary targets for override. @@ -200,7 +200,7 @@ from customization. These are the primary targets for override. | post-commit | nudge | Post-commit context capture | | qa-reminder | gate | Pre-commit QA gate | -### ctx-specific (10 messages) +### ctx-Specific (10 Messages) Messages specific to ctx's own development workflow. You *can* customize them, but `edit` will warn you first. diff --git a/docs/recipes/design-before-coding.md b/docs/recipes/design-before-coding.md index ec73940df..9826a348d 100644 --- a/docs/recipes/design-before-coding.md +++ b/docs/recipes/design-before-coding.md @@ -109,7 +109,7 @@ and won't accept "none" without a challenge. Sections that don't apply can be skipped. The result is a complete spec at `specs/{feature-name}.md`. 
-### Step 3: Break Into Tasks +### Step 3: Break into Tasks After the spec is written, the skill offers to create tasks: diff --git a/docs/recipes/hook-sequence-diagrams.md b/docs/recipes/hook-sequence-diagrams.md index 2ca1b7b6f..50c1d3f06 100644 --- a/docs/recipes/hook-sequence-diagrams.md +++ b/docs/recipes/hook-sequence-diagrams.md @@ -17,7 +17,7 @@ This page documents the **ctx system hooks** — the built-in `.claude/hooks.json` at lifecycle events. These are owned by ctx itself, not authored by users. -!!! info "Not to be confused with `ctx trigger`" +!!! info "Not to Be Confused with `ctx trigger`" `ctx` has **three distinct hook-like layers**: - **`ctx system` hooks** (this page) — built-in, owned @@ -49,7 +49,7 @@ on stdout. These fire **before** a tool executes. They can block, gate, or inject context. -### context-load-gate +### Context-Load-Gate Matcher: `.*` (all tools) @@ -96,7 +96,7 @@ sequenceDiagram Hook->>State: Write oversize flag if tokens > threshold ``` -### block-non-path-ctx +### Block-Non-Path-ctx Matcher: `Bash` @@ -126,7 +126,7 @@ sequenceDiagram Hook->>Hook: NudgeAndRelay(message) ``` -### qa-reminder +### Qa-Reminder Matcher: `Bash` @@ -153,7 +153,7 @@ sequenceDiagram Hook->>Hook: Relay(message) ``` -### specs-nudge +### Specs-Nudge Matcher: `EnterPlanMode` @@ -185,7 +185,7 @@ sequenceDiagram These fire **after** a tool completes. They observe, nudge, and track state. -### post-commit +### Post-Commit Matcher: `Bash` @@ -218,7 +218,7 @@ sequenceDiagram Hook->>Hook: CheckVersionDrift() ``` -### check-task-completion +### Check-Task-Completion Matcher: `Edit`, `Write` @@ -263,7 +263,7 @@ sequenceDiagram These fire **on every user prompt**, before any tools run. They perform health checks, track state, and nudge for housekeeping. -### check-context-size +### Check-Context-Size Adaptive context window monitoring. Fires checkpoints, window warnings, and billing alerts based on prompt count and token usage. 
@@ -325,7 +325,7 @@ sequenceDiagram Hook->>State: Write session stats ``` -### check-ceremonies +### Check-Ceremonies Daily check for `/ctx-remember` and `/ctx-wrap-up` usage in recent journal entries. @@ -362,7 +362,7 @@ sequenceDiagram Hook->>State: Touch throttle marker ``` -### check-freshness +### Check-Freshness Daily check for technology-dependent constants that may need review. @@ -393,7 +393,7 @@ sequenceDiagram Hook->>State: Touch throttle marker ``` -### check-journal +### Check-Journal Daily check for unimported sessions and unenriched journal entries. @@ -433,7 +433,7 @@ sequenceDiagram Hook->>State: Touch throttle marker ``` -### check-knowledge +### Check-Knowledge Daily check for knowledge file entry/line counts exceeding configured thresholds. @@ -473,7 +473,7 @@ sequenceDiagram Hook->>State: Touch throttle marker ``` -### check-map-staleness +### Check-Map-Staleness Daily check for architecture map age and relevant code changes. @@ -513,7 +513,7 @@ sequenceDiagram Hook->>State: Touch throttle marker ``` -### check-memory-drift +### Check-Memory-Drift Per-session check for MEMORY.md changes since last sync. @@ -548,7 +548,7 @@ sequenceDiagram Hook->>State: Touch session tombstone ``` -### check-persistence +### Check-Persistence Tracks context file modification and nudges when edits happen without persisting context. Adaptive threshold based on prompt count. @@ -589,7 +589,7 @@ sequenceDiagram Hook->>State: Update LastNudge = Count, write state ``` -### check-reminders +### Check-Reminders Per-prompt check for due reminders. No throttle. @@ -618,7 +618,7 @@ sequenceDiagram Hook->>Hook: NudgeAndRelay(message) ``` -### check-resources +### Check-Resources Checks system resources (memory, swap, disk, load). Fires on every prompt. No initialization required. @@ -647,7 +647,7 @@ sequenceDiagram Hook->>Hook: NudgeAndRelay(message) ``` -### check-version +### Check-Version Daily binary-vs-plugin version comparison with piggybacked key rotation check. 
@@ -691,7 +691,7 @@ sequenceDiagram Hook->>Hook: CheckKeyAge() (piggybacked) ``` -### heartbeat +### Heartbeat Silent per-prompt pulse. Tracks prompt count, context modification, and token usage. The agent never sees this hook's output. @@ -727,7 +727,7 @@ sequenceDiagram These hooks are configured in `settings.local.json` and are **not** shipped with ctx. They are specific to individual developer setups. -### block-dangerous-commands +### Block-Dangerous-Commands Lifecycle: PreToolUse. Matcher: `Bash` @@ -770,7 +770,7 @@ sequenceDiagram Hook->>Hook: NudgeAndRelay(message) ``` -### check-backup-age +### Check-Backup-Age Lifecycle: UserPromptSubmit. diff --git a/docs/recipes/hub-cluster.md b/docs/recipes/hub-cluster.md index 9f4c41b90..301c231b1 100644 --- a/docs/recipes/hub-cluster.md +++ b/docs/recipes/hub-cluster.md @@ -5,13 +5,13 @@ # \ Copyright 2026-present Context contributors. # SPDX-License-Identifier: Apache-2.0 -title: HA cluster +title: HA Cluster icon: lucide/layers --- ![ctx](../images/ctx-banner.png) -# `ctx` Hub: High-availability cluster +# `ctx` Hub: High-Availability Cluster Run **multiple** hub nodes with Raft-based leader election for redundancy. Any follower can take over if the leader dies. @@ -23,7 +23,7 @@ sense in the "small trusted team" story — a personal cross-project brain on one workstation does not need three Raft peers. -!!! warning "Raft-lite" +!!! warning "Raft-Lite" ctx uses Raft **only for leader election**, not for data consensus. Entry replication happens via sequence-based gRPC sync on the append-only JSONL store. This is simpler than full @@ -56,7 +56,7 @@ it doubles failure probability without providing quorum. gRPC (data sync) ``` -## Step 1 — Bootstrap the first node +## Step 1 — Bootstrap the First Node ```bash ctx hub start --daemon \ @@ -66,7 +66,7 @@ ctx hub start --daemon \ The node starts a Raft election as soon as it sees its peers. 
-## Step 2 — Start the other nodes +## Step 2 — Start the Other Nodes On `hub-b.lan`: @@ -87,7 +87,7 @@ ctx hub start --daemon \ After a few seconds, one node wins the election and becomes the **leader**. The other two are followers. -## Step 3 — Verify cluster state +## Step 3 — Verify Cluster State From any node: @@ -106,7 +106,7 @@ entries: 1248 uptime: 3h42m ``` -## Step 4 — Register clients with failover peers +## Step 4 — Register Clients with Failover Peers When registering a client, give it the **full peer list**: @@ -120,7 +120,7 @@ If the leader becomes unreachable, the client reconnects to the next peer. Followers redirect to the current leader, so writes always land on the right node. -## Runtime membership changes +## Runtime Membership Changes Add a new peer without downtime: @@ -134,7 +134,7 @@ Remove a decommissioned peer: ctx hub peer remove hub-c.lan:9900 ``` -## Planned maintenance +## Planned Maintenance Before taking a leader offline, hand off leadership: @@ -146,7 +146,7 @@ ssh hub-a.lan 'ctx hub stepdown' before the leader goes offline. In-flight clients briefly pause, then reconnect to the new leader. -## Failure modes at a glance +## Failure Modes at a Glance | Event | What happens | |-----------------------------|----------------------------------------------| @@ -159,7 +159,7 @@ then reconnect to the new leader. For the full list, see [Hub failure modes](../operations/hub-failure-modes.md). -## See also +## See Also - [Multi-machine recipe](hub-multi-machine.md) — single-node deployment diff --git a/docs/recipes/hub-getting-started.md b/docs/recipes/hub-getting-started.md index 7e53dac45..52b084b67 100644 --- a/docs/recipes/hub-getting-started.md +++ b/docs/recipes/hub-getting-started.md @@ -17,14 +17,14 @@ Stand up a **single-node** `ctx` Hub on localhost, register two projects, publish a decision from one, and see it appear in the other — all in under five minutes. -!!! tip "Read this first" +!!! 
tip "Read This First" If you haven't already, skim the [`ctx` Hub overview](hub-overview.md). It explains the mental model, names the two user stories (personal vs small team), and — importantly — lists what the hub **does not do**. This recipe assumes you already know you want the feature. -## What you'll get out of this recipe +## What You'll Get out of This Recipe By the end, you will have: @@ -39,7 +39,7 @@ project becomes visible to your agent the next time you open another project — **without** touching local files in the second project or opening another editor window. -## What this recipe does *not* cover +## What This Recipe Does *Not* Cover - Sharing `.context/journal/`, `.context/pad`, or any other local state. The hub only fans out `decision`, `learning`, @@ -56,7 +56,7 @@ project or opening another editor window. - Two project directories, each already initialized with `ctx init` -## Step 1 — Start the hub +## Step 1 — Start the Hub In a dedicated terminal: @@ -76,7 +76,7 @@ data dir: ~/.ctx/hub-data/ The admin token is written to `~/.ctx/hub-data/admin.token` so you can recover it later. Treat it like a password. -## Step 2 — Register the first project +## Step 2 — Register the First Project ```bash cd ~/projects/alpha @@ -88,7 +88,7 @@ This stores an **encrypted** connection config in per-project client token; the admin token itself is never persisted in the project. -## Step 3 — Choose what to receive +## Step 3 — Choose What to Receive ```bash ctx connection subscribe decision learning convention @@ -97,7 +97,7 @@ ctx connection subscribe decision learning convention Only the entry types you subscribe to will be delivered by `sync` and `listen`. 
-## Step 4 — Publish a decision +## Step 4 — Publish a Decision Either use `ctx add --share` to write locally *and* push to the ctx Hub: @@ -114,7 +114,7 @@ Or publish an existing entry directly: ctx connection publish decision "Use UTC timestamps everywhere" ``` -## Step 5 — Register a second project and sync +## Step 5 — Register a Second Project and Sync ```bash cd ~/projects/beta @@ -127,7 +127,7 @@ The decision from `alpha` now appears in `~/projects/beta/.context/hub/decisions.md` with an origin tag and timestamp. -## Step 6 — Watch entries arrive live +## Step 6 — Watch Entries Arrive Live Instead of re-running `sync`, stream new entries as they land: @@ -138,7 +138,7 @@ ctx connection listen Leave this running in a terminal; every `--share` publish from any registered project will appear in `.context/hub/` immediately. -## Step 7 — Feed shared knowledge into the agent +## Step 7 — Feed Shared Knowledge into the Agent Once entries exist in `.context/hub/`, include them in the agent context packet: @@ -150,13 +150,13 @@ ctx agent --include-hub Shared entries are added as a dedicated tier in the budget-aware assembly, scored by recency and type relevance. -## Auto-sync on session start +## Auto-Sync on Session Start After `register`, the `check-hub-sync` hook pulls new entries at the start of each session (daily throttled). Most users never need to call `ctx connection sync` manually. -## Where to go next +## Where to Go Next - **[Multi-machine hub](hub-multi-machine.md)**: run the hub on a LAN host and connect from other workstations. diff --git a/docs/recipes/hub-multi-machine.md b/docs/recipes/hub-multi-machine.md index 6b0ed4a10..78d2df720 100644 --- a/docs/recipes/hub-multi-machine.md +++ b/docs/recipes/hub-multi-machine.md @@ -5,13 +5,13 @@ # \ Copyright 2026-present Context contributors. 
# SPDX-License-Identifier: Apache-2.0 -title: Multi-machine +title: Multi-Machine icon: lucide/network --- ![ctx](../images/ctx-banner.png) -# `ctx` Hub: Multi-machine +# `ctx` Hub: Multi-Machine Run the hub on a **LAN host** and connect from project directories on other workstations. This recipe is the **Story 2 ("small trusted @@ -43,7 +43,7 @@ tasks — **not** journals, scratchpad, or raw context files). +-------------------+ ``` -## Step 1 — Start the daemon on the LAN host +## Step 1 — Start the Daemon on the LAN Host On the machine that will hold the hub (call it `nexus`): @@ -58,7 +58,7 @@ later with: ctx hub stop ``` -## Step 2 — Firewall and port +## Step 2 — Firewall and Port Open port `9900/tcp` on `nexus` to the LAN only — **never** expose the hub to the public internet without a reverse proxy and TLS in @@ -87,7 +87,7 @@ Typical LAN allowlist rules: tcp dport 9900 accept ``` -## Step 3 — Retrieve the admin token +## Step 3 — Retrieve the Admin Token The daemon prints the admin token to stdout on first run. Running as a daemon, that output goes to the log instead: @@ -99,7 +99,7 @@ cat ~/.ctx/hub-data/admin.token Copy the token over a trusted channel (SSH, password manager, or an encrypted note). **Do not email it or put it in chat.** -## Step 4 — Register projects from each workstation +## Step 4 — Register Projects from Each Workstation On workstation `A`: @@ -133,7 +133,7 @@ ctx connection status You should see the ctx Hub address, role (`leader` for single-node), subscription filters, and the sequence number you're synced to. -## TLS (recommended) +## TLS (Recommended) For anything beyond a trusted home LAN, terminate TLS in front of the hub. The hub speaks gRPC, so the reverse proxy must speak @@ -155,14 +155,14 @@ server { Point `ctx connection register` at the public hostname and port 443. -## Handling daemon restarts +## Handling Daemon Restarts The hub is **append-only JSONL** — restarts are safe. 
Clients keep their last-seen sequence in `.context/hub/.sync-state.json` and pick up exactly where they left off on the next `sync` or `listen` reconnect. -## See also +## See Also - [HA cluster recipe](hub-cluster.md) — for redundancy - [Hub operations](../operations/hub.md) — backup, rotation diff --git a/docs/recipes/hub-overview.md b/docs/recipes/hub-overview.md index 1a23643f0..ebba91acc 100644 --- a/docs/recipes/hub-overview.md +++ b/docs/recipes/hub-overview.md @@ -17,7 +17,7 @@ Start here before the other hub recipes. This page answers *what* the hub is, *who* it's for, *why* you'd run one, and — equally important — *what it is not*. -## Mental model in one paragraph +## Mental Model in One Paragraph The hub is a **fan-out channel for structured knowledge entries across projects**. When you publish a decision, learning, @@ -32,7 +32,7 @@ knowledge bus** for a small, curated set of entry types. It is **not** a shared memory, a shared journal, or a multi-user database. -## What flows through the hub +## What Flows through the Hub Only four entry types: @@ -48,7 +48,7 @@ publishing project's name as `Origin`, a timestamp, and a hub-assigned sequence number. Once published, entries are never rewritten. -## What does *not* flow through the hub +## What Does *Not* Flow through the Hub This is the part new users get wrong most often: @@ -68,13 +68,13 @@ If you were expecting "now my agent in project B can see everything my agent did in project A," that's not this feature. Local session density still lives on the local machine. -## Two user stories +## Two User Stories The hub makes sense in two different shapes. Pick the one that matches your situation — the mechanics are identical but the trust model and threat surface are very different. 
-### Story 1: Personal cross-project brain +### Story 1: Personal Cross-Project Brain **One developer, many projects, one hub — usually on localhost.** @@ -103,7 +103,7 @@ LAN, use the default single-node setup, don't worry about TLS. setup, then [Personal cross-project brain](hub-personal.md) for the day-to-day workflow. -### Story 2: Small trusted team +### Story 2: Small Trusted Team **A few teammates, projects they each own, one hub on a LAN host they all trust.** @@ -143,7 +143,7 @@ deployment, [Team knowledge bus](hub-team.md) for the day-to-day team workflow, then [HA cluster](hub-cluster.md) if you need redundancy. -## Identity: projects, not users +## Identity: Projects, Not Users The hub has **no concept of users.** Its unit of identity is the *project*. `ctx connection register` binds a hub token to a project @@ -159,7 +159,7 @@ project share either: Either works; neither gives you per-human attribution. If you need "who wrote this," the hub is the wrong tool. -## When *not* to use it +## When *Not* to Use It - **Solo, single-project work.** Local `.context/` files are enough. The hub adds operational surface for no payoff. @@ -176,7 +176,7 @@ need "who wrote this," the hub is the wrong tool. above. If that's what you want, this feature won't provide it — talk to us in the issue tracker about what *would*. -## How entries reach your agent +## How Entries Reach Your Agent Once a project is registered and subscribed, entries arrive by three mechanisms: @@ -193,7 +193,7 @@ Once entries exist in `.context/hub/`, `ctx agent context packet, scored by recency and type relevance. That's the end of the pipeline. 
-## Where to go next +## Where to Go Next | If you're… | Read | |---------------------------------------------------|--------------------------------------------------| diff --git a/docs/recipes/hub-personal.md b/docs/recipes/hub-personal.md index 8cd0863af..deb24d330 100644 --- a/docs/recipes/hub-personal.md +++ b/docs/recipes/hub-personal.md @@ -5,13 +5,13 @@ # \ Copyright 2026-present Context contributors. # SPDX-License-Identifier: Apache-2.0 -title: Personal cross-project brain +title: Personal Cross-Project Brain icon: lucide/brain --- ![ctx](../images/ctx-banner.png) -# Personal cross-project brain +# Personal Cross-Project Brain This recipe shows **how one developer uses a `ctx` Hub across their own projects day-to-day** — the "Story 1" @@ -25,7 +25,7 @@ surface when you open project B next Thursday. ~5-minute setup). This recipe assumes the hub is already running and you've registered at least two projects. -## The core loop +## The Core Loop Every day, the same three verbs matter: @@ -41,7 +41,7 @@ Every day, the same three verbs matter: That's the whole workflow. The rest of this recipe fills in the concrete moments where each verb matters. -## A realistic day +## A Realistic Day You have three projects on your workstation: @@ -58,7 +58,7 @@ user unit — see [Hub operations](../operations/hub.md)). All three subscribe to `decision`, `learning`, and `convention`. -### 09:00 — Start work on `api` +### 09:00 — Start Work on `api` You `cd ~/projects/api` and start a Claude Code session. Behind the scenes, the plugin's `PreToolUse` hook calls @@ -73,7 +73,7 @@ So the "use UTC timestamps everywhere" decision you recorded in `dotfiles` last week is already in Claude's context for this session, without any manual `sync`. 
-### 10:30 — You discover a gotcha +### 10:30 — You Discover a Gotcha While debugging, you find that the API's retry loop silently drops the last error when the transport times @@ -101,7 +101,7 @@ Within seconds, `cli/.context/hub/learnings.md` and of this learning (the `ctx connection listen` daemon picks it up from the ctx Hub's Listen stream). -### 12:00 — You switch to `cli` +### 12:00 — You Switch to `cli` `cd ~/projects/cli`, open a new session. The agent packet for `cli` now includes **the learning you just @@ -112,7 +112,7 @@ recorded in `api`**, because `cli` is subscribed to You don't have to re-explain the retry-loop gotcha. Claude already sees it. -### 14:00 — You codify a convention +### 14:00 — You Codify a Convention You've been writing error messages in `api` and decided you want a consistent pattern: lowercase start, no @@ -133,14 +133,14 @@ Claude Code session in either project gets the convention injected into the steering-adjacent slot of the agent packet. -### 16:30 — End of day +### 16:30 — End of Day You didn't run `ctx connection sync` once. You didn't `git push` anything between projects. You didn't remember to tell your agent about the retry-loop gotcha in the new project. The hub did all of it for you. -## What the workflow actually looks like +## What the Workflow Actually Looks Like Stripped of prose, the day's commands were: @@ -161,7 +161,7 @@ The hub is passive infrastructure. You never talk **to** it directly — you talk **through** it by using `--share` on commands you were already running. -## Tips for solo use +## Tips for Solo Use **Pick a "standards" project.** One of your projects should play the role of "canonical source for rules you @@ -212,7 +212,7 @@ TLS setup from the relevant when the hub is on a LAN host serving multiple workstations, not when it's a personal daemon. 
-## What this recipe is *not* +## What This Recipe Is *Not* **Not a setup guide.** For the one-time hub install and project registration, use @@ -228,7 +228,7 @@ rotation, failure recovery, and HA, see [Hub operations](../operations/hub.md) and [Hub failure modes](../operations/hub-failure-modes.md). -## See also +## See Also - [Hub overview](hub-overview.md) — when to use the Hub and when not to. diff --git a/docs/recipes/hub-team.md b/docs/recipes/hub-team.md index 224a6235c..37562cca3 100644 --- a/docs/recipes/hub-team.md +++ b/docs/recipes/hub-team.md @@ -5,13 +5,13 @@ # \ Copyright 2026-present Context contributors. # SPDX-License-Identifier: Apache-2.0 -title: Team knowledge bus +title: Team Knowledge Bus icon: lucide/users --- ![ctx](../images/ctx-banner.png) -# Team knowledge bus +# Team Knowledge Bus This recipe shows **how a small trusted team uses a `ctx` Hub as a shared knowledge bus** — the "Story 2" shape @@ -32,7 +32,7 @@ without ceremony. `ctx connection register`-ed their working projects with the hub. -## Trust model — read this first +## Trust Model — Read This First The hub assumes **everyone holding a client token is friendly**. There's no per-user attribution you can rely @@ -61,7 +61,7 @@ If your team is: not** support today. Use a wiki or a dedicated knowledge platform instead. -## The team's three verbs +## The Team's Three Verbs Everyone on the team does three things, same as in the [personal recipe](hub-personal.md), but with different @@ -80,7 +80,7 @@ different is the *culture* around publishing: when do you `--share`, and what belongs on the hub vs. in your local `.context/`. -## What goes on the hub (team rules of thumb) +## What Goes on the Hub (Team Rules of Thumb) **Share it if it's true for more than one person.** The central question: "would the next teammate who hits this @@ -120,7 +120,7 @@ problem save time if they already knew this?" 
If yes, to `task` unless the team has a specific reason (e.g., a cross-cutting migration you want visible everywhere). -## A realistic week +## A Realistic Week **Monday — 3 AM incident, shared learning** @@ -134,8 +134,12 @@ records the learning: ```bash ctx add learning --share \ --context "Payment service 3 AM incident, 2026-04-03" \ - --lesson "grpc-go v1.62+ changes DialContext behavior under high concurrency: connections from a single channel can deadlock if the server emits GOAWAY mid-stream. Symptom: 500 errors cluster in 30s bursts, no error in grpc client logs." \ - --application "Any service on grpc-go. Pin to v1.61 or patch with keepalive: https://github.com/grpc/grpc-go/issues/..." + --lesson "grpc-go v1.62+ changes DialContext behavior under high \ + concurrency: connections from a single channel can deadlock if the \ + server emits GOAWAY mid-stream. Symptom: 500 errors cluster in \ + 30s bursts, no error in grpc client logs." \ + --application "Any service on grpc-go. Pin to v1.61 or patch with \ + keepalive: https://github.com/grpc/grpc-go/issues/..." ``` By Tuesday morning, every other engineer's agent @@ -152,9 +156,13 @@ records the decision: ```bash ctx add decision --share \ - --context "Need consistent API versioning across all 6 services. Current URL-based /v1/ isn't working for gradual rollouts." \ - --rationale "Header-based versioning lets us route by header at the edge, which makes canary rollouts trivial. URL-based versioning forces clients to update their paths." \ - --consequence "All new endpoints use X-API-Version header. Existing /v1/ endpoints stay. Deprecation schedule in q3." \ + --context "Need consistent API versioning across all 6 services. \ + Current URL-based /v1/ isn't working for gradual rollouts." \ + --rationale "Header-based versioning lets us route by header at the \ + edge, which makes canary rollouts trivial. URL-based versioning \ + forces clients to update their paths." 
\ + --consequence "All new endpoints use X-API-Version header. \ + Existing /v1/ endpoints stay. Deprecation schedule in q3." \ "Use header-based API versioning for new endpoints" ``` @@ -181,7 +189,7 @@ Lowercase start, no trailing period, single sentence. He fixes the PR. No lookup on the wiki, no question in chat, no context-switch penalty. -## Workflow tips for teams +## Workflow Tips for Teams **Designate a "champion" for decisions.** The team lead or platform engineer should be the person who explicitly @@ -216,7 +224,7 @@ need to share knowledge, they should share a hub. Splitting hubs by team creates silos — which is often exactly the thing you were trying to solve. -## Operational concerns +## Operational Concerns The team recipe assumes someone owns the hub host. That person (or a small group) is responsible for: @@ -239,7 +247,7 @@ so the hub survives individual node failures. See [HA cluster](hub-cluster.md). For teams under 10 people, a single-node hub with daily backups is usually fine. -## Token management +## Token Management Every team member has a client token stored in their `.context/.connect.enc`. Rules of thumb: @@ -256,7 +264,7 @@ Every team member has a client token stored in their encrypted with the local machine key, but don't push it to shared repos — it's per-workstation. -## What this recipe is *not* +## What This Recipe Is *Not* **Not a wiki replacement.** The hub is for structured entries, not prose. Put your architecture overviews, @@ -276,7 +284,7 @@ existing tracker doesn't capture well. internal team infrastructure. Do not expose the hub to customers, partners, or the open internet. -## See also +## See Also - [Hub overview](hub-overview.md) — when to use the hub and when not to. diff --git a/docs/recipes/index.md b/docs/recipes/index.md index a926f7e42..0c6b42d25 100644 --- a/docs/recipes/index.md +++ b/docs/recipes/index.md @@ -19,7 +19,7 @@ Train your agent to be proactive through **ask, guide, reinforce**. 
--- -### [Setup Across AI Tools](multi-tool-setup.md) +### [Setup across AI Tools](multi-tool-setup.md) Initialize `ctx` and configure hooks for Claude Code, Cursor, Aider, Copilot, or Windsurf. Includes **shell completion**, @@ -30,6 +30,17 @@ Aider, Copilot, or Windsurf. Includes **shell completion**, --- +### [Multilingual Session Parsing](multilingual-sessions.md) + +Parse session journal entries written in **other languages**. +Configure recognized session-header prefixes so the journal +pipeline works for Turkish, Japanese, and any other locale. + +**Uses**: `ctx journal source`, `ctx journal import`, +`session_prefixes` in `.ctxrc` + +--- + ### [Keeping Context in a Separate Repo](external-context.md) Store context files **outside** the project tree: in a private repo, @@ -110,7 +121,7 @@ hooks still fire. --- -## Knowledge & Tasks +## Knowledge and Tasks ### [Persisting Decisions, Learnings, and Conventions](knowledge-capture.md) @@ -125,7 +136,7 @@ survive across sessions and team members. --- -### [Tracking Work Across Sessions](task-management.md) +### [Tracking Work across Sessions](task-management.md) **Add**, **prioritize**, **complete**, **snapshot**, and **archive** tasks. Keep `TASKS.md` focused as your project evolves across dozens of @@ -146,7 +157,7 @@ storage out. --- -### [Syncing Scratchpad Notes Across Machines](scratchpad-sync.md) +### [Syncing Scratchpad Notes across Machines](scratchpad-sync.md) Distribute your **scratchpad** encryption key, push and pull encrypted notes via git, and resolve merge conflicts when two machines edit @@ -167,7 +178,7 @@ entries into structured context files with heuristic classification. --- -## Hooks & Notifications +## Hooks and Notifications ### [Hook Output Patterns](hook-output-patterns.md) @@ -323,7 +334,7 @@ Each step produces an artifact that feeds the next. 
--- -## Agents & Automation +## Agents and Automation ### [Building Project Skills](building-skills.md) @@ -411,7 +422,17 @@ disabled, test with mock input, enable only after review. --- -## `ctx` Hub +## Hub + +### [Hub Overview](hub-overview.md) + +Mental model and three user stories for the `ctx` Hub. What flows, +what doesn't, and when not to use it. Read this before any of the +other Hub recipes. + +**Uses**: `ctx hub`, `ctx connection`, `ctx add --share` + +--- ### [`ctx` Hub: Getting Started](hub-getting-started.md) @@ -425,7 +446,7 @@ End-to-end in under five minutes. --- -### [Personal cross-project brain](hub-personal.md) +### [Personal Cross-Project Brain](hub-personal.md) **Story 1** day-to-day workflow: one developer, many projects, one hub on localhost. Records a learning in @@ -439,7 +460,7 @@ ceremony. --- -### [Team knowledge bus](hub-team.md) +### [Team Knowledge Bus](hub-team.md) **Story 2** day-to-day workflow: a small trusted team sharing decisions, learnings, and conventions via a hub on @@ -453,7 +474,7 @@ stay signal-rich. --- -### [`ctx` Hub: Multi-machine](hub-multi-machine.md) +### [`ctx` Hub: Multi-Machine](hub-multi-machine.md) Run the hub on a **LAN host** as a daemon and connect from project directories on other workstations. Firewall guidance, TLS via a @@ -464,7 +485,7 @@ reverse proxy, and safe daemon restart semantics. --- -### [`ctx` Hub: HA cluster](hub-cluster.md) +### [`ctx` Hub: HA Cluster](hub-cluster.md) Raft-based leader election across three or more nodes for redundancy. Covers bootstrap, runtime peer management, graceful diff --git a/docs/recipes/knowledge-capture.md b/docs/recipes/knowledge-capture.md index 0012ef72a..69dd63496 100644 --- a/docs/recipes/knowledge-capture.md +++ b/docs/recipes/knowledge-capture.md @@ -19,7 +19,7 @@ rejected. **How do you make sure important context survives across sessions?** -!!! tip "Prefer skills over raw commands" +!!! 
tip "Prefer Skills over Raw Commands" Use `/ctx-decision-add` and `/ctx-learning-add` instead of raw `ctx add` commands. The agent automatically picks up session ID, branch, and commit hash from its context — no manual flags needed. @@ -208,7 +208,7 @@ individually with `ctx decision reindex` or `ctx learning reindex`. Run reindex after any manual edit. The index lets AI tools scan all entries without reading the full file, which matters when token budgets are tight. -### Step 6: Use /ctx-reflect to Surface What to Capture +### Step 6: Use `/ctx-reflect` to Surface What to Capture !!! tip "Keep It Conversational" `/ctx-reflect` is not the only way to trigger reflection. @@ -308,7 +308,7 @@ rather than waiting for explicit instructions. ## Putting It All Together -### Command-line Approach (*Scripting and Automation*) +### Command-Line Approach (*Scripting and Automation*) ```bash # Decision: record the trade-off diff --git a/docs/recipes/memory-bridge.md b/docs/recipes/memory-bridge.md index 8fbd8f8b2..245237828 100644 --- a/docs/recipes/memory-bridge.md +++ b/docs/recipes/memory-bridge.md @@ -182,7 +182,7 @@ ctx memory import --dry-run # 2. Preview what would be imported ctx memory import # 3. Promote entries to .context/ files ``` -## Publishing Context to MEMORY.md +## Publishing Context to `MEMORY.md` Push curated `.context/` content back into MEMORY.md so Claude Code sees structured project context on session start - without needing hooks. diff --git a/docs/recipes/multi-tool-setup.md b/docs/recipes/multi-tool-setup.md index 3e064955a..867799bef 100644 --- a/docs/recipes/multi-tool-setup.md +++ b/docs/recipes/multi-tool-setup.md @@ -134,13 +134,13 @@ tool. No action needed. Just install `ctx` from the Marketplace as `ActiveMemory/ctx`. -!!! tip "Claude Code is a First-Class Citizen" +!!! tip "Claude Code Is a First-Class Citizen" With the `ctx` plugin installed, Claude Code gets hooks and skills automatically. 
The `PreToolUse` hook runs `ctx agent --budget 4000` on every tool call (*with a 10-minute cooldown so it only fires once per window*). -#### Cursor +#### Cursor Add the system prompt snippet to `.cursor/settings.json`: diff --git a/docs/recipes/multilingual-sessions.md b/docs/recipes/multilingual-sessions.md index ab7e6060b..934f8f1da 100644 --- a/docs/recipes/multilingual-sessions.md +++ b/docs/recipes/multilingual-sessions.md @@ -55,7 +55,7 @@ regardless of prefix configuration. ## Configuration -### Adding a language +### Adding a Language Add the prefix with a trailing colon to your `.ctxrc`: @@ -65,18 +65,18 @@ session_prefixes: - "Sesión:" # Spanish ``` -!!! warning "Include Session: explicitly" +!!! warning "Include Session: Explicitly" When you override `session_prefixes`, **the default is replaced**, not extended. If you still want English headers recognized, include `"Session:"` in your list. -### Team setup +### Team Setup Commit `.ctxrc` to the repo so all team members share the same prefix list. This ensures `ctx journal import` and journal generation pick up sessions from all team members regardless of language. -### Common prefixes +### Common Prefixes | Language | Prefix | |------------|------------| diff --git a/docs/recipes/parallel-worktrees.md b/docs/recipes/parallel-worktrees.md index 7f1aa7811..2678aced2 100644 --- a/docs/recipes/parallel-worktrees.md +++ b/docs/recipes/parallel-worktrees.md @@ -107,7 +107,7 @@ claude Each agent sees the full project, including `.context/`, and can work independently. -!!! warning "Do Not Initialize Context in Worktrees" +!!! warning "Do Not Initialize Context in Worktrees" **Do not** run `ctx init` in worktrees: The `.context` directory is already tracked in `git`. diff --git a/docs/recipes/publishing.md b/docs/recipes/publishing.md index 76ab865cf..6fa08f4b1 100644 --- a/docs/recipes/publishing.md +++ b/docs/recipes/publishing.md @@ -195,7 +195,7 @@ it to `docs/blog/YYYY-MM-DD-slug.md`. 
Posts are written in first person with code snippets, commit references, and an honest discussion of what went wrong. -!!! info "The Output is `zensical`-Flavored Markdown" +!!! info "The Output Is `zensical`-Flavored Markdown" The blog skills produce Markdown tuned for a [zensical](https://pypi.org/project/zensical/) site: `topics:` frontmatter (zensical's tag field), a `docs/blog/` output path, diff --git a/docs/recipes/scratchpad-sync.md b/docs/recipes/scratchpad-sync.md index a9dfbd8f9..26e3fdb91 100644 --- a/docs/recipes/scratchpad-sync.md +++ b/docs/recipes/scratchpad-sync.md @@ -30,7 +30,7 @@ chmod 600 ~/.ctx/.ctx.key # 3. secure it !!! tip "Finding Your Key File" The key is always at `~/.ctx/.ctx.key` - one key, one machine. -!!! danger "Treat the Key Like a Password" +!!! danger "Treat the Key like a Password" The scratchpad key is the only thing protecting your **encrypted** entries. Store a backup in a secure enclave such as a password manager, and treat diff --git a/docs/recipes/session-archaeology.md b/docs/recipes/session-archaeology.md index d89ce7fc3..ec1137262 100644 --- a/docs/recipes/session-archaeology.md +++ b/docs/recipes/session-archaeology.md @@ -172,7 +172,7 @@ ctx journal import gleaming-wobbling-sutherland ctx journal import --all --all-projects ``` -!!! warning "--keep-frontmatter=false Discards Enrichments" +!!! warning "`--keep-frontmatter=false` Discards Enrichments" `--keep-frontmatter=false` discards enriched YAML frontmatter during regeneration. @@ -190,7 +190,7 @@ To re-import existing files (*e.g., after a format improvement*), use YAML frontmatter you or the **enrichment** skill has added. You'll be prompted before any files are overwritten. -!!! danger "--regenerate Replaces the Markdown Body" +!!! danger "`--regenerate` Replaces the Markdown Body" `--regenerate` preserves YAML frontmatter but **replaces the entire Markdown body** with freshly generated content from the source JSONL. 
@@ -389,7 +389,7 @@ Claude Code exposes a `cleanupPeriodDays` setting in its configuration | `60`, `90`, etc. | Extends the retention window | | `0` | **Disables writing new transcripts entirely** - not "keep forever" | -!!! warning "Setting `cleanupPeriodDays` to 0" +!!! warning "Setting `cleanupPeriodDays` to 0" Setting this to `0` does **not** mean "never delete." It disables transcript creation altogether. No new JSONL files are written, which means `ctx journal` sees nothing new. This is rarely what you want. diff --git a/docs/recipes/session-ceremonies.md b/docs/recipes/session-ceremonies.md index 80b4c47df..eb5e6207d 100644 --- a/docs/recipes/session-ceremonies.md +++ b/docs/recipes/session-ceremonies.md @@ -71,7 +71,7 @@ braces. | `ctx journal source` | CLI | List recent sessions | | `ctx add` | CLI | Persist learnings, decisions, conventions, tasks | -## Session Start: /ctx-remember +## Session Start: `/ctx-remember` Invoke at the beginning of every session: @@ -96,7 +96,7 @@ The readback should feel like recall, not a file system tour. If the agent says "Let me check if there are files..." instead of a confident summary, the skill is not working correctly. -!!! note "What About '*do you remember?*'" +!!! note "What about '*do you remember?*'" The conversational trigger still works. But `/ctx-remember` guarantees the full ceremony runs: @@ -107,7 +107,7 @@ summary, the skill is not working correctly. The conversational version *may* cut corners. -## Session End: /ctx-wrap-up +## Session End: `/ctx-wrap-up` Invoke before ending a session where meaningful work happened: diff --git a/docs/recipes/session-changes.md b/docs/recipes/session-changes.md index dcaea1a68..4bf574843 100644 --- a/docs/recipes/session-changes.md +++ b/docs/recipes/session-changes.md @@ -7,8 +7,8 @@ title: Reviewing Session Changes --- -![ctx](../images/ctx-banner.png) +![ctx](../images/ctx-banner.png) ## What Changed While You Were Away? @@ -48,7 +48,7 @@ everything after that. 
## What It Reports -### Context file changes +### Context File Changes Any `.md` file in `.context/` modified after the reference time: @@ -58,7 +58,7 @@ Any `.md` file in `.context/` modified after the reference time: - `DECISIONS.md` - modified 2026-03-11 09:15 ``` -### Code changes +### Code Changes Git activity since the reference time: @@ -70,7 +70,7 @@ Git activity since the reference time: - **Authors**: jose, claude ``` -## Integrating Into Session Start +## Integrating into Session Start Pair `ctx change` with the `/ctx-remember` ceremony for a complete session-start picture: diff --git a/docs/recipes/session-lifecycle.md b/docs/recipes/session-lifecycle.md index 9245350fc..b471db09f 100644 --- a/docs/recipes/session-lifecycle.md +++ b/docs/recipes/session-lifecycle.md @@ -29,7 +29,7 @@ persisting context before you close it, so you can see how each piece connects. Read on for the full walkthrough with examples. -!!! note "What is a Readback?" +!!! note "What Is a Readback?" A **readback** is a **structured summary** where the agent plays back what it knows: diff --git a/docs/recipes/steering.md b/docs/recipes/steering.md index 7ab90744c..828faf3de 100644 --- a/docs/recipes/steering.md +++ b/docs/recipes/steering.md @@ -19,7 +19,7 @@ walks through writing a steering file from scratch, validating which prompts will trigger it, and syncing it out to your configured AI tools. -!!! tip "Before you start" +!!! tip "Before You Start" If you're unsure whether a rule belongs in `steering/`, `DECISIONS.md`, or `CONVENTIONS.md`, read the "Steering vs decisions vs conventions" admonition on the @@ -28,7 +28,7 @@ configured AI tools. when asked about Y," that's steering. Otherwise it's probably a decision or convention. 
-## Start here — customize the foundation files +## Start Here — Customize the Foundation Files **`ctx init` scaffolds four foundation steering files** for you the first time you initialize a project: @@ -92,7 +92,7 @@ pick up the new rules automatically via the plugin's (Cursor, Cline, Kiro) need `ctx steering sync` to export into their native format. -!!! note "Prefer a bare `.context/steering/` directory?" +!!! note "Prefer a Bare `.context/steering/` Directory?" Re-run `ctx init --no-steering-init` and delete the scaffolded files. `ctx init` leaves existing files alone, so the flag is only needed if you want to opt @@ -110,7 +110,7 @@ before touching the database. You want the AI to flag this concern automatically whenever it's asked to write an HTTP handler, without you having to remind it every session. -!!! warning "Claude Code users: pick `always`, not `auto`" +!!! warning "Claude Code Users: Pick `always`, Not `auto`" This walkthrough uses `inclusion: auto` because the scenario is a scoped rule that matches a specific kind of prompt. That works natively on **Cursor, Cline, and @@ -135,7 +135,7 @@ handler, without you having to remind it every session. "Prefer `inclusion: always` for Claude Code" section for the full trade-off. -## Step 1 — scaffold the file +## Step 1 — Scaffold the File ```bash ctx steering add api-validation @@ -158,7 +158,7 @@ The defaults are deliberately conservative: `inclusion: manual` means the file won't be applied until you opt in, which keeps the rules out of the prompt until you've reviewed them. -## Step 2 — fill in the rule +## Step 2 — Fill in the Rule Open the file and write the rule body plus a focused description. The description is what `inclusion: auto` matches @@ -197,7 +197,7 @@ Notes on the choices: validation and request parsing" — the `auto` matcher scores prompts against these words. 
-## Step 3 — preview which prompts match +## Step 3 — Preview Which Prompts Match Before committing the file, validate your description catches the prompts you care about: @@ -224,7 +224,7 @@ If `api-validation` incorrectly fires for unrelated prompts, tighten the description. If it misses prompts it should catch, add more keywords. -## Step 4 — list to confirm metadata +## Step 4 — List to Confirm Metadata ```bash ctx steering list @@ -234,13 +234,13 @@ Should show `api-validation` alongside any other files, with its inclusion mode and priority. If the list is wrong, check the frontmatter for typos. -## Step 5 — get the rules in front of the AI +## Step 5 — Get the Rules in Front of the AI **Steering files are authored once in `.context/steering/`, but how they reach the AI depends on which tool you use.** There are two delivery mechanisms: -### Path A — native-rules tools (Cursor, Cline, Kiro) +### Path A — Native-Rules Tools (Cursor, Cline, Kiro) These tools read a specific directory for rules. `ctx steering sync` exports your files into that directory with @@ -261,7 +261,7 @@ Depending on the active tool in `.ctxrc` or `--tool`: The sync is idempotent — unchanged files are skipped. Run it whenever you edit a steering file. -### Path B — Claude Code and Codex (hook + MCP) +### Path B — Claude Code and Codex (Hook + MCP) Claude Code and Codex have **no native rules primitive**, so `ctx steering sync` is a **no-op** for them — it @@ -299,7 +299,7 @@ up on the next tool call — no sync step needed. to keep Cursor up to date; the Claude pipeline takes care of itself via the hook. -## Step 6 — verify the AI sees it +## Step 6 — Verify the AI Sees It Open your AI tool and ask it something the rule should fire on: @@ -340,7 +340,7 @@ If nothing happens, the fix depends on which path you're on: has access but isn't pulling it into the initial context packet — tighten the description keywords. 
-## Common mistakes +## Common Mistakes **Too-generic descriptions.** `description: general coding` will match almost every prompt and flood the context window. @@ -362,7 +362,7 @@ permanently. Only use `always` for true invariants (security, safety, licensing). Everything else should be `auto` or `manual`. -## See also +## See Also - [`ctx steering` reference](../cli/steering.md) — full command, flag, and frontmatter reference. diff --git a/docs/recipes/task-management.md b/docs/recipes/task-management.md index 5dc23a7fb..9d8fc2abf 100644 --- a/docs/recipes/task-management.md +++ b/docs/recipes/task-management.md @@ -16,7 +16,7 @@ grows cluttered with completed checkboxes that obscure the remaining work. How do you manage work items that span multiple sessions without losing context? -!!! tip "Prefer skills over raw commands" +!!! tip "Prefer Skills over Raw Commands" When working with an AI agent, use `/ctx-task-add` instead of raw `ctx add task`. The agent automatically picks up session ID, branch, and commit hash from its context — no manual flags needed. diff --git a/docs/recipes/triggers.md b/docs/recipes/triggers.md index 66587a427..e374734d9 100644 --- a/docs/recipes/triggers.md +++ b/docs/recipes/triggers.md @@ -19,7 +19,7 @@ specific events during an AI session. They're how you express this path, check Y first." This recipe walks through writing your first trigger, testing it, and enabling it safely. -!!! danger "Triggers execute arbitrary code" +!!! danger "Triggers Execute Arbitrary Code" A trigger is a shell script with the executable bit set. It runs with the same privileges as your AI tool and receives JSON input on stdin. **Treat triggers like @@ -44,7 +44,7 @@ editing anything in `internal/crypto/` without explicit confirmation. Cryptographic code is sensitive, and accidental edits have caused outages before — you want a hard gate. 
-## Step 1 — scaffold the script +## Step 1 — Scaffold the Script ```bash ctx trigger add pre-tool-use protect-crypto @@ -75,7 +75,7 @@ on-disk layout still uses `hooks/` even though the command is `ctx trigger`. If you `ls .context/hooks/`, that's where your triggers live. -## Step 2 — write the logic +## Step 2 — Write the Logic Open the file and replace the template body: @@ -123,7 +123,7 @@ A few things to note: string concatenation when the message may contain special characters. -## Step 3 — test with a mock payload +## Step 3 — Test with a Mock Payload Before enabling the trigger, test it with a realistic mock input using `ctx trigger test`. This runs the script against @@ -155,7 +155,7 @@ If any of these cases misbehave, **fix the trigger before enabling it.** The trigger is disabled at this point, so misbehavior doesn't affect real AI sessions. -## Step 4 — enable it +## Step 4 — Enable It Once the test cases pass, enable the trigger: @@ -175,7 +175,7 @@ ctx trigger list Should show `protect-crypto` under `pre-tool-use` with an enabled indicator. -## Step 5 — iterate safely +## Step 5 — Iterate Safely If you discover a bug after enabling, **disable first, fix second**: @@ -191,9 +191,9 @@ Disabling simply clears the executable bit — the script stays on disk, and `ctx trigger enable` re-enables it without rewriting anything. -## Patterns worth copying +## Patterns Worth Copying -### Logging, not blocking +### Logging, Not Blocking For auditing or analytics, return `{"action":"allow"}` always and append to a log as a side effect: @@ -206,20 +206,20 @@ echo "$payload" >> .context/logs/tool-use.jsonl echo '{"action":"allow"}' ``` -### Context injection at session start +### Context Injection at Session Start A `session-start` trigger can prepend text to the agent's initial prompt by emitting `{"action":"inject", "content": "..."}` — useful for injecting daily standup notes, open PRs, or rotating TODOs without storing them in a steering file. 
-### Chaining triggers of the same type +### Chaining Triggers of the Same Type Multiple scripts in the same type directory all run. If any returns `action: block`, the block wins. Keep individual triggers single-purpose and rely on composition. -## Common mistakes +## Common Mistakes **Forgetting the shebang.** Without `#!/usr/bin/env bash`, the trigger won't execute even with the executable bit set. @@ -242,7 +242,7 @@ strings. The feature was renamed; the directory name lags behind. Don't let this confuse you — they refer to the same thing. -## See also +## See Also - [`ctx trigger` reference](../cli/trigger.md) — full command, flag, and event-type reference. diff --git a/docs/recipes/troubleshooting.md b/docs/recipes/troubleshooting.md index c8d267caf..c299f9943 100644 --- a/docs/recipes/troubleshooting.md +++ b/docs/recipes/troubleshooting.md @@ -137,7 +137,7 @@ QA reminder events from that specific session. ## Common Problems -### "ctx: not initialized" +### "ctx: not initialized" **Symptoms**: Any `ctx` command fails with `ctx: not initialized - run "ctx init" first`. @@ -156,7 +156,7 @@ ctx init --minimal # or just the essentials (CONSTITUTION, TASKS, DECISIONS) **Commands that work without initialization**: `ctx init`, `ctx setup`, `ctx doctor`, and help-only grouping commands (`ctx`, `ctx system`). -### "My hook isn't firing" +### "My Hook Isn't Firing" **Symptoms**: No nudges appearing, webhook silent, event log shows no entries for the expected hook. @@ -186,7 +186,7 @@ ctx hook event --hook check-persistence * **Hook silenced**: a custom message override may be an empty file: check `ctx hook message list` for overrides -### "*Too many nudges*" +### "*Too Many Nudges*" **Symptoms**: The agent is overwhelmed with hook output. Context checkpoints, persistence reminders, and QA gates fire constantly. 
@@ -217,7 +217,7 @@ ctx hook event --json | jq -r '.detail.hook // "unknown"' \ * **`ctx` version mismatch**: Build (*or download*) and install the latest `ctx` vesion. -### "*Context seems stale*" +### "*Context Seems Stale*" **Symptoms**: The agent references outdated information, paths that don't exist, or decisions that were reversed. @@ -247,7 +247,7 @@ ctx status --verbose being used, context doesn't get refreshed. See [Session Ceremonies](session-ceremonies.md). -### "*The agent isn't following instructions*" +### "*The Agent Isn't Following Instructions*" **Symptoms**: The agent ignores conventions, forgets decisions, or acts contrary to `CONSTITUTION.md` rules. diff --git a/docs/recipes/when-to-use-agent-teams.md b/docs/recipes/when-to-use-agent-teams.md index 261c67748..f12297fef 100644 --- a/docs/recipes/when-to-use-agent-teams.md +++ b/docs/recipes/when-to-use-agent-teams.md @@ -170,7 +170,7 @@ before anyone does any work. For small tasks, that overhead dominates. Four practical team compositions for common workflows. -### Feature Development (3 agents) +### Feature Development (3 Agents) | Role | Responsibility | |-------------|-----------------------------------------------------------| @@ -184,7 +184,7 @@ implementer starts. Reviewer runs after each implementer commit. **Anti-pattern**: All three agents editing the same file simultaneously. Sequence the work so only one agent touches a file at a time. -### Consolidation Sprint (3-4 agents) +### Consolidation Sprint (3-4 Agents) | Role | Responsibility | |------------|----------------------------------------------------------| @@ -199,7 +199,7 @@ Each agent claims a subset of issues by adding `#in-progress` labels. **Anti-pattern**: Fixer and doc writer both editing ARCHITECTURE.md. Assign file ownership explicitly. 
-### Release Prep (2 agents) +### Release Prep (2 Agents) | Role | Responsibility | |---------------|--------------------------------------------------------| @@ -212,7 +212,7 @@ notes agent works from `git log`; validation agent works from `make audit`. **Anti-pattern**: Release notes agent running tests "to verify." Each agent stays in its lane. -### Documentation Sprint (3 agents) +### Documentation Sprint (3 Agents) | Role | Responsibility | |---------------|------------------------------------------------------------| diff --git a/docs/reference/audit-conventions.md b/docs/reference/audit-conventions.md index 4ab3a42f3..32acce572 100644 --- a/docs/reference/audit-conventions.md +++ b/docs/reference/audit-conventions.md @@ -8,8 +8,8 @@ title: Code Conventions icon: lucide/scroll-text --- -![ctx](../images/ctx-banner.png) +![ctx](../images/ctx-banner.png) # Code Conventions: Common Patterns and Fixes @@ -22,7 +22,7 @@ code under `internal/`. --- -## Variable Shadowing (bare `err :=` reuse) +## Variable Shadowing (Bare `err :=` Reuse) **Test:** `TestNoVariableShadowing` @@ -715,7 +715,7 @@ func Journal(cmd *cobra.Command, ...) { --- -## Predicate Naming (no `Is`/`Has`/`Can` prefix) +## Predicate Naming (No `Is`/`Has`/`Can` Prefix) **Test:** None (manual review convention) @@ -778,7 +778,7 @@ parse.go --- -## Stray err.go Files +## Stray Err.Go Files **Test:** `TestNoStrayErrFiles` diff --git a/docs/reference/comparison.md b/docs/reference/comparison.md index 6837b80bd..56b5ff6e9 100644 --- a/docs/reference/comparison.md +++ b/docs/reference/comparison.md @@ -259,7 +259,7 @@ The two are complementary. `ctx` can absorb auto-memory as an input source (importing what the model remembered into structured context files) while providing the durable, inspectable layer that auto-memory lacks. 
-### .cursorrules / .claude/rules +### .Cursorrules / .Claude/rules Static rule files (`.cursorrules`, `.claude/rules/`) declare conventions: coding style, forbidden patterns, preferred libraries. They are effective @@ -287,7 +287,7 @@ ordering across file types, and no structured format for decisions or learnings. `ctx` provides the full lifecycle: load, accumulate, persist, and budget. -### Copilot @workspace +### Copilot @Workspace GitHub Copilot's `@workspace` performs workspace-wide code search. It answers **"what code exists?"** - finding function definitions, usages, diff --git a/docs/reference/design-invariants.md b/docs/reference/design-invariants.md index c8a2d4cd4..9c23691aa 100644 --- a/docs/reference/design-invariants.md +++ b/docs/reference/design-invariants.md @@ -162,7 +162,7 @@ permanent assets. --- -## 11. Policies Are Encoded, not Remembered +## 11. Policies Are Encoded, Not Remembered Alignment **must not** depend on recall or goodwill. diff --git a/docs/reference/session-journal.md b/docs/reference/session-journal.md index 2fb347517..a697cadf5 100644 --- a/docs/reference/session-journal.md +++ b/docs/reference/session-journal.md @@ -375,7 +375,7 @@ To use: open the output directory in Obsidian ("Open folder as vault"). ctx journal obsidian --output ~/vaults/ctx-journal ``` -!!! tip "Static site vs Obsidian Vault" +!!! tip "Static Site vs Obsidian Vault" Use `ctx journal site` when you want a **web-browsable** archive with search and dark mode. Use `ctx journal obsidian` when you want **graph view**, **backlinks**, and **tag-based navigation** inside Obsidian. Both use the @@ -397,7 +397,7 @@ import → enrich → rebuild | **Rebuild** | `ctx journal site --build` | Generates static HTML site | -- | | **Obsidian** | `ctx journal obsidian` | Generates Obsidian vault with wikilinks | -- | -!!! tip "One-command pipeline" +!!! 
tip "One-Command Pipeline" `/ctx-journal-enrich-all` handles import automatically - it detects unimported sessions and imports them before enriching. You only need to run `ctx journal site --build` afterward. diff --git a/docs/security/agent-security.md b/docs/security/agent-security.md index 69d013706..4fb86b1f4 100644 --- a/docs/security/agent-security.md +++ b/docs/security/agent-security.md @@ -78,7 +78,7 @@ rules stated early. Edge cases where instructions are ambiguous. **Verdict**: Necessary but not sufficient. Good for the common case. **Do not** rely on it for security boundaries. -### Layer 2: Application Controls (*Deterministic at Runtime, Mutable Across Iterations*) +### Layer 2: Application Controls (*Deterministic at Runtime, Mutable across Iterations*) AI tool runtimes (*Claude Code, Cursor, etc.*) provide permission systems: tool allowlists, command restrictions, confirmation prompts. @@ -204,7 +204,7 @@ docker run --rm \ ./loop.sh ``` -!!! danger "Docker Socket is sudo Access" +!!! danger "Docker Socket Is Sudo Access" Critical: **never mount the Docker socket** (`/var/run/docker.sock`). An agent with socket access can spawn sibling containers with full host diff --git a/docs/security/hub.md b/docs/security/hub.md index 15e0bf6cb..e3e5ee25e 100644 --- a/docs/security/hub.md +++ b/docs/security/hub.md @@ -11,12 +11,12 @@ icon: lucide/shield ![ctx](../images/ctx-banner.png) -# `ctx` Hub: Security model +# `ctx` Hub: Security Model What the hub defends against, what it **does not** defend against, and the concrete mechanisms in play. -## Threat model +## Threat Model The hub is designed for **trusted cross-project knowledge sharing** within a team or homelab. It assumes: @@ -39,7 +39,7 @@ use a dedicated tool and keep the hub for knowledge sharing. ## Mechanisms -### Bearer tokens +### Bearer Tokens All RPCs except `Register` require a bearer token in gRPC metadata. 
Two kinds of tokens exist: @@ -54,7 +54,7 @@ prevent timing oracles, and looked up via an `O(1)` hash map so the comparison cost does not depend on the total number of registered clients. -### Client-side encryption at rest +### Client-Side Encryption at Rest `.context/.connect.enc` stores the client token and hub address, encrypted with **AES-256-GCM** using the same scheme the @@ -65,9 +65,9 @@ An attacker with read access to the project directory cannot learn the client token without also breaking ctx's local keyring. -### Hub-side token storage +### Hub-Side Token Storage -!!! warning "Tokens are stored in plaintext on the hub host" +!!! warning "Tokens Are Stored in Plaintext on the Hub Host" `/clients.json` currently stores client tokens **verbatim**, not hashed. Anyone with read access to the hub's data directory sees every registered client's token @@ -92,7 +92,7 @@ keyring. that lands, assume a hub host compromise equals total hub compromise. -### Input validation +### Input Validation Every published entry is validated before it touches the log: @@ -105,14 +105,14 @@ Every published entry is validated before it touches the log: replays an old `Register` call gets an error, not a second token. -### No script execution +### No Script Execution The hub never interprets entry content. There is no expression language, no template evaluation, no markdown rendering at ingest. Content is stored as bytes and fanned out to clients verbatim. -### Audit trail +### Audit Trail `entries.jsonl` is append-only. Every accepted publish is recorded with the publishing project's origin tag and sequence @@ -120,7 +120,7 @@ number. Nothing is ever deleted by the hub; retention is managed manually by the operator (see [log rotation](../operations/hub.md#log-rotation)). -## What the hub does **not** defend against +## What the Hub Does **Not** Defend Against - **Untrusted entry senders.** A client with a valid token can publish anything (within the 1 MB cap). 
There is no content @@ -137,7 +137,7 @@ manually by the operator (see decision containing an API key. Sanitize content before publishing. -## Operational hardening checklist +## Operational Hardening Checklist - [ ] Run the hub as an **unprivileged user** with `NoNewPrivileges=true` and `ProtectSystem=strict` (see @@ -155,12 +155,12 @@ manually by the operator (see - [ ] Run NTP on all clients to prevent entry-timestamp skew. - [ ] Do not publish from machines you do not trust. -## Responsible disclosure +## Responsible Disclosure Security issues in the hub follow the same process as the rest of ctx — see [Reporting](reporting.md). -## See also +## See Also - [`ctx` Hub Operations](../operations/hub.md) - [`ctx` Hub failure modes](../operations/hub-failure-modes.md) diff --git a/docs/security/reporting.md b/docs/security/reporting.md index 04cc63645..1b29e144c 100644 --- a/docs/security/reporting.md +++ b/docs/security/reporting.md @@ -65,7 +65,7 @@ who report valid vulnerabilities (*unless they prefer to remain anonymous*). ### Response Timeline -!!! note "Open source, Best-Effort Timelines" +!!! note "Open Source, Best-Effort Timelines" `ctx` is a volunteer-maintained open source project. The timelines below are **guidelines**, not guarantees, and depend on diff --git a/docs/thesis/index.md b/docs/thesis/index.md index 89d96c974..e6f068262 100644 --- a/docs/thesis/index.md +++ b/docs/thesis/index.md @@ -282,7 +282,7 @@ or time-dependent scoring) destroys the ability to reproduce a context window and therefore to diagnose why a model produced a given output. Determinism in the assembly path is what makes the persistence layer auditable. -### Invariant 4: Human Authority Over Persistent State +### Invariant 4: Human Authority over Persistent State The agent may propose changes to context files but must not unilaterally modify them. All persistent changes go through human-reviewable git commits. @@ -471,7 +471,7 @@ automate it away. --- -## 6. 
Worked Example: Architectural Decision Under Two Models +## 6. Worked Example: Architectural Decision under Two Models We now instantiate the three-tier model in a concrete system (`ctx`) and illustrate the difference between prompt-time retrieval and cognitive state diff --git a/examples/demo/README.md b/examples/demo/README.md index d5a020a62..0a0bee16a 100644 --- a/examples/demo/README.md +++ b/examples/demo/README.md @@ -17,7 +17,7 @@ ctx agent ctx add task "Implement feature X" # Mark a task complete -ctx tasks complete "feature X" +ctx task complete "feature X" # Check for stale context ctx drift diff --git a/hack/title-case-headings.py b/hack/title-case-headings.py new file mode 100755 index 000000000..c58ef181d --- /dev/null +++ b/hack/title-case-headings.py @@ -0,0 +1,259 @@ +#!/usr/bin/env python3 +# / ctx: https://ctx.ist +# ,'`./ do you remember? +# `.,'\ +# \ Copyright 2026-present Context contributors. +# SPDX-License-Identifier: Apache-2.0 +"""Title-case headings (H1-H6) and admonition titles in Markdown files. + +Style: AP-leaning. Lowercase ALL articles, prepositions (any length), and +coordinating conjunctions when they appear MID-phrase. First word, last word, +and the first word after a colon are always capitalized. Subordinating +conjunctions (when, while, after, before, etc.) are capitalized. + +Protected verbatim: + - Backticked code spans `...` + - Markdown link URLs `(...)` immediately following `]` + - Markdown reference-style link labels `][label]` + - All-uppercase tokens of length >= 2 (acronyms) + - Mixed-case tokens like macOS, GitHub, JavaScript, JSONL + - Single uppercase letter labels (A, B, ... 
in 'Appendix A:') + - Brand 'ctx' always lowercase, including possessive 'ctx's' + - Version-number tokens (v0, v0.8.0) + +Skipped contexts: + - YAML frontmatter at file head + - Fenced code blocks ```...``` + +Usage: + hack/title-case-headings.py # dry-run, prints diffs + hack/title-case-headings.py --apply # write changes in place + + may be a single Markdown file or a directory (recursively scanned for +*.md). Exits non-zero if any changes are needed (in dry-run mode), so it's +safe to wire into CI. +""" +import re +import sys +import pathlib + +ARTICLES = {'a', 'an', 'the'} +PREPOSITIONS = { + # AP-strict; ambiguous words (after/before/since/until/past/near/down/up/off) + # excluded so they cap when they're conjunctions or adj/adv. + 'about','above','across','against','along','among','around','as','at', + 'behind','below','beneath','beside','between','beyond','by','despite', + 'during','except','for','from','in','inside','into','like','of','on', + 'onto','out','outside','over','per','plus','regarding','than','through', + 'throughout','till','to','toward','under','underneath','unto','upon', + 'versus','via','vs','with','within','without', +} +COORD_CONJ = {'and','or','but','nor','so','yet','for'} + +LOWER_MID = ARTICLES | PREPOSITIONS | COORD_CONJ +BRAND_LOWER = {'ctx'} + +WORD_RE = re.compile(r"[A-Za-z][A-Za-z0-9'/]*") + +def title_case_word(word, force_cap=False): + if not word: + return word + # Hyphenated: each segment treated as own word; segments after the first + # are always capitalized (Chicago hyphen rule). 
+ if '-' in word: + segs = word.split('-') + new = [title_case_word(segs[0], force_cap=force_cap)] + for s in segs[1:]: + new.append(title_case_word(s, force_cap=True)) + return '-'.join(new) + lw = word.lower() + # Brand and brand-with-suffix (ctx, ctx's, ctxs) + for brand in BRAND_LOWER: + if lw == brand: + return brand + if lw.startswith(brand) and len(lw) > len(brand): + tail = lw[len(brand):] + # Allow possessive or short plural-like suffix + if tail in ("'s", 's', "'") or (tail.startswith("'") and tail[1:].isalpha() and len(tail) <= 3): + return brand + word[len(brand):] + # Single uppercase letter label (e.g., 'A' in 'Appendix A') + if len(word) == 1 and word.isupper(): + return word + # Acronym already all-caps + if len(word) >= 2 and word.isupper(): + return word + # Version-number token: lowercase 'v' followed by digits (v0, v1, v0.8 etc.) + if re.match(r'^v\d', word): + return word + # Mixed-case (interior caps): preserve (macOS, GitHub, JavaScript) + if any(c.isupper() for c in word[1:]): + return word + # Mid-phrase function word + if not force_cap and lw in LOWER_MID: + return lw + # Default: cap first letter + return word[0].upper() + word[1:] + +PROTECT_RE = re.compile( + r'(`[^`]*`)' # backtick code span + r'|(\][ ]*\([^)]*\))' # markdown inline link URL incl. ']' + r'|(\][ ]*\[[^\]]+\])' # markdown reference-style link label + # Brand tagline — italic lowercase, with or without quotes/punctuation. 
+ r'|(\*do you remember\??\*)' + # CLI long-flag tokens (--keep-frontmatter, --keep-frontmatter=false) + r'|(--[a-z][a-z0-9_-]*(?:=\S+)?)' + # Slash-prefixed commands (/ctx-remember, /ctx-decision-add) + r'|(/[a-z][a-z0-9_-]*)' +) + +def split_protected(text): + pieces = [] + last = 0 + for m in PROTECT_RE.finditer(text): + if m.start() > last: + pieces.append(('plain', text[last:m.start()])) + pieces.append(('protected', m.group(0))) + last = m.end() + if last < len(text): + pieces.append(('plain', text[last:])) + return pieces + +def title_case_text(text): + pieces = split_protected(text) + total = 0 + for kind, t in pieces: + if kind == 'protected': + total += 1 + else: + total += len(WORD_RE.findall(t)) + if total == 0: + return text + overall = 0 + after_colon = False + out = [] + for kind, t in pieces: + if kind == 'protected': + overall += 1 + out.append(t) + continue + last = 0 + buf = [] + for m in WORD_RE.finditer(t): + literal = t[last:m.start()] + if re.search(r':\s*$', literal): + after_colon = True + buf.append(literal) + overall += 1 + is_first = (overall == 1) + is_last = (overall == total) + # Contraction tail: when the preceding literal ends with an + # apostrophe (e.g. after a backtick brand span like `ctx`'s), + # the word is a contraction suffix (s, ll, ve, t, re, d, m). + # Preserve as lowercase, never capitalize. + is_contraction_tail = literal.endswith("'") and len(m.group(0)) <= 3 + # Filename extension: when the preceding literal ends with '.' + # and the word is a short lowercase token (md, sh, py, json, + # yaml, txt, ...), preserve as lowercase. 
+ word_text = m.group(0) + is_ext_tail = ( + literal.endswith('.') + and len(word_text) <= 5 + and word_text.islower() + ) + force = (is_first or is_last or after_colon) and not is_contraction_tail and not is_ext_tail + if (is_contraction_tail or is_ext_tail) and not (is_first or is_last): + buf.append(word_text.lower()) + else: + buf.append(title_case_word(word_text, force_cap=force)) + after_colon = False + last = m.end() + trailing = t[last:] + if re.search(r':\s*$', trailing): + after_colon = True + buf.append(trailing) + out.append(''.join(buf)) + return ''.join(out) + +def process_file(path): + src = path.read_text(encoding='utf-8') + lines = src.split('\n') + in_fence = False + in_fm = False + out_lines = [] + changes = [] + for i, line in enumerate(lines): + if i == 0 and line.strip() == '---': + in_fm = True + out_lines.append(line); continue + if in_fm and line.strip() == '---': + in_fm = False + out_lines.append(line); continue + if in_fm: + out_lines.append(line); continue + if line.startswith('```'): + in_fence = not in_fence + out_lines.append(line); continue + if in_fence: + out_lines.append(line); continue + m = re.match(r'^(#{1,6})\s+(.*)$', line) + if m: + hashes, text = m.groups() + new_text = title_case_text(text.rstrip()) + new_line = f"{hashes} {new_text}" + if new_line != line: + changes.append((i + 1, line, new_line)) + out_lines.append(new_line); continue + am = re.match(r'^(\s*)([!?]{3})(\s+\w[\w-]*\s+)"([^"]+)"\s*$', line) + if am: + indent, marker, mid, title = am.groups() + new_title = title_case_text(title) + new_line = f'{indent}{marker}{mid}"{new_title}"' + if new_line != line: + changes.append((i + 1, line, new_line)) + out_lines.append(new_line); continue + out_lines.append(line) + return '\n'.join(out_lines), changes + +def iter_md_files(target: pathlib.Path): + if target.is_file(): + if target.suffix.lower() == '.md': + yield target + return + if target.is_dir(): + yield from sorted(target.rglob('*.md')) + +def main(): + args = 
sys.argv[1:] + if '-h' in args or '--help' in args or not args: + print(__doc__) + sys.exit(0 if args else 2) + apply = False + if '--apply' in args: + apply = True + args.remove('--apply') + if len(args) != 1: + print("error: expected exactly one argument", file=sys.stderr) + sys.exit(2) + target = pathlib.Path(args[0]) + if not target.exists(): + print(f"error: path not found: {target}", file=sys.stderr) + sys.exit(2) + total = 0 + files = 0 + for md in iter_md_files(target): + new_src, changes = process_file(md) + if changes: + files += 1 + total += len(changes) + for ln, old, new in changes: + print(f"{md}:{ln}") + print(f" - {old}") + print(f" + {new}") + if apply: + md.write_text(new_src, encoding='utf-8') + mode = 'APPLIED' if apply else 'dry-run' + print(f"\n=== {total} changes across {files} files ({mode}) ===") + sys.exit(0 if (apply or total == 0) else 1) + +if __name__ == '__main__': + main() diff --git a/internal/assets/claude/skills/ctx-pause/SKILL.md b/internal/assets/claude/skills/ctx-pause/SKILL.md index 277af1dd3..ca5ef1297 100644 --- a/internal/assets/claude/skills/ctx-pause/SKILL.md +++ b/internal/assets/claude/skills/ctx-pause/SKILL.md @@ -26,7 +26,7 @@ Security hooks (dangerous command blocking) still fire. Run the pause command: ```bash -ctx pause +ctx hook pause ``` Then confirm to the user: diff --git a/internal/assets/claude/skills/ctx-resume/SKILL.md b/internal/assets/claude/skills/ctx-resume/SKILL.md index 39ad9a7ca..d96ccdb6b 100644 --- a/internal/assets/claude/skills/ctx-resume/SKILL.md +++ b/internal/assets/claude/skills/ctx-resume/SKILL.md @@ -24,7 +24,7 @@ reminder, and ceremony behavior. 
Run the resume command: ```bash -ctx resume +ctx hook resume ``` Then confirm to the user: diff --git a/internal/assets/claude/skills/ctx-worktree/SKILL.md b/internal/assets/claude/skills/ctx-worktree/SKILL.md index 56da68389..6c8cb7c83 100644 --- a/internal/assets/claude/skills/ctx-worktree/SKILL.md +++ b/internal/assets/claude/skills/ctx-worktree/SKILL.md @@ -124,7 +124,7 @@ Merge a completed worktree back and clean up. The encryption key lives at `~/.ctx/.ctx.key` (user-level, outside the project). All worktrees on the same machine share this path, so -**`ctx pad` and `ctx notify` work in worktrees automatically**. +**`ctx pad` and `ctx hook notify` work in worktrees automatically**. One thing to watch: diff --git a/internal/assets/commands/examples.yaml b/internal/assets/commands/examples.yaml index babbac921..2a3a282ea 100644 --- a/internal/assets/commands/examples.yaml +++ b/internal/assets/commands/examples.yaml @@ -58,8 +58,8 @@ compact: complete: short: |2- - ctx complete 3 - ctx complete "auth" + ctx task complete 3 + ctx task complete "auth" config: short: |2- diff --git a/internal/assets/commands/text/mcp.yaml b/internal/assets/commands/text/mcp.yaml index 7df4e460b..ad40d9911 100644 --- a/internal/assets/commands/text/mcp.yaml +++ b/internal/assets/commands/text/mcp.yaml @@ -134,7 +134,7 @@ mcp.all-tasks-complete: mcp.check-task-format: short: 'Did this complete task #%d: "%s"?' mcp.check-task-hint: - short: 'If yes, run: ctx complete %d' + short: 'If yes, run: ctx task complete %d' mcp.compact-clean: short: Nothing to compact - context is already clean. 
mcp.format-compacted: diff --git a/internal/assets/commands/text/ui.yaml b/internal/assets/commands/text/ui.yaml index ab1b9185f..322a8b807 100644 --- a/internal/assets/commands/text/ui.yaml +++ b/internal/assets/commands/text/ui.yaml @@ -205,7 +205,7 @@ guide.default: TRACKING DECISIONS & KNOWLEDGE ctx add -t TYPE Add a decision, learning, convention, or task - ctx complete Mark a task as done in TASKS.md + ctx task complete Mark a task as done in TASKS.md ctx decision reindex Rebuild the DECISIONS.md index table BROWSING HISTORY diff --git a/internal/assets/integrations/agents.md b/internal/assets/integrations/agents.md index 69013e9dc..313e22247 100644 --- a/internal/assets/integrations/agents.md +++ b/internal/assets/integrations/agents.md @@ -118,7 +118,7 @@ If `ctx` is installed, use these commands: ctx status # Context summary and health check ctx agent # AI-ready context packet ctx drift # Check for stale context -ctx recall list # Recent session history +ctx journal source --limit 5 # Recent session history ``` diff --git a/internal/assets/integrations/copilot-cli/INSTRUCTIONS.md b/internal/assets/integrations/copilot-cli/INSTRUCTIONS.md index 9419a3dc7..786373ccb 100644 --- a/internal/assets/integrations/copilot-cli/INSTRUCTIONS.md +++ b/internal/assets/integrations/copilot-cli/INSTRUCTIONS.md @@ -24,7 +24,7 @@ memory-related question: **Do this FIRST (silently):** - Read TASKS.md, DECISIONS.md, and LEARNINGS.md from the context directory -- Run `ctx recall list --limit 5` for recent session history +- Run `ctx journal source --limit 5` for recent session history **Then respond with a structured readback:** diff --git a/internal/assets/integrations/copilot-cli/ctx-hooks.json b/internal/assets/integrations/copilot-cli/ctx-hooks.json index b4ae75012..694ef31a7 100644 --- a/internal/assets/integrations/copilot-cli/ctx-hooks.json +++ b/internal/assets/integrations/copilot-cli/ctx-hooks.json @@ -1,4 +1,5 @@ { + "version": 1, "hooks": { "sessionStart": [ { diff 
--git a/internal/assets/integrations/copilot-cli/skills/ctx-agent/SKILL.md b/internal/assets/integrations/copilot-cli/skills/ctx-agent/SKILL.md index 2475bc6ba..182b423f1 100644 --- a/internal/assets/integrations/copilot-cli/skills/ctx-agent/SKILL.md +++ b/internal/assets/integrations/copilot-cli/skills/ctx-agent/SKILL.md @@ -14,30 +14,52 @@ Load the full context packet for AI consumption. ## When NOT to Use -- The session start hook already runs `ctx agent` automatically: +- The PreToolUse hook already runs `ctx agent` automatically with a cooldown: you rarely need to invoke this manually -- Don't run it just to "refresh" if you already have context loaded +- Don't run it just to "refresh" if you already have the context loaded in + this session ## After Loading -**Read the files listed in "Read These Files (in order)"**: the -packet is a summary, not a substitute. In particular, read -CONVENTIONS.md before writing any code. +**Read the files listed in "Read These Files (in order)"**: the packet is a +summary, not a substitute. In particular, read CONVENTIONS.md before writing +any code. -Confirm to the user: "I have read the required context files and -I'm following project conventions." +Confirm to the user: "I have read the required context files and I'm +following project conventions." Read and confirm before beginning +implementation. 
## Flags -| Flag | Default | Description | -|--------------|---------|-----------------------------------------------| -| `--budget` | 8000 | Token budget for context packet | -| `--format` | md | Output format: `md` or `json` | -| `--cooldown` | 10m | Suppress repeated output within this duration | -| `--session` | (none) | Session ID for cooldown isolation | +| Flag | Default | Description | +|--------------|---------|---------------------------------------------------| +| `--budget` | 8000 | Token budget for context packet | +| `--format` | md | Output format: `md` or `json` | +| `--cooldown` | 10m | Suppress repeated output within this duration | +| `--session` | (none) | Session ID for cooldown isolation (e.g., `$PPID`) | ## Execution +```bash +ctx agent $ARGUMENTS +``` + +**Example: default load:** +```bash +ctx agent +``` + +**Example: smaller packet for limited contexts:** ```bash ctx agent --budget 4000 ``` + +**Example: with cooldown (how the PreToolUse hook invokes it):** +```bash +ctx agent --budget 4000 --session $PPID +``` + +**Example: JSON for programmatic use:** +```bash +ctx agent --format json --budget 8000 +``` diff --git a/internal/assets/integrations/copilot-cli/skills/ctx-architecture/SKILL.md b/internal/assets/integrations/copilot-cli/skills/ctx-architecture/SKILL.md index 0667d8c02..ca9acca5c 100644 --- a/internal/assets/integrations/copilot-cli/skills/ctx-architecture/SKILL.md +++ b/internal/assets/integrations/copilot-cli/skills/ctx-architecture/SKILL.md @@ -1,69 +1,947 @@ --- name: ctx-architecture -description: "Build and maintain ARCHITECTURE.md and DETAILED_DESIGN.md. Use when working on structure, adding packages, or tracing flow." +description: "Build and maintain architecture maps. Use to create or refresh ARCHITECTURE.md and DETAILED_DESIGN.md. Supports principal mode for deeper analysis: vision, future direction, bottlenecks, implementation alternatives, gaps, upstream proposals, and intervention points." 
tools: [bash, read, write, glob, grep] --- -Build and maintain architecture documentation with incremental -coverage tracking. +Build and maintain two architecture documents incrementally: +**ARCHITECTURE.md** (succinct project map, loaded at session start) +and **DETAILED_DESIGN.md** (deep per-module reference, consulted +on-demand). Coverage is tracked in `map-tracking.json` so each run +extends the map rather than re-analyzing everything. + +## Execution Priority + +When time or context budget runs short, execute in this order. +Never skip a tier to do a lower one: + +1. **Authoritative truth first**: ARCHITECTURE.md + DETAILED_DESIGN.md + must be accurate and honest. Incomplete is fine; wrong is not. +2. **Surface uncertainty honestly**: partial coverage with correct + confidence scores beats inflated scores. Mark what you don't know. +3. **Offer judgment only where grounded**: danger zones, extension + points, improvement ideas only for modules you actually analyzed. +4. **Prefer fewer sharp insights over many shallow sections**: a + CHEAT-SHEETS.md with one excellent cheat sheet beats five thin ones. + An ARCHITECTURE-PRINCIPAL.md with three concrete risks beats ten + vague ones. 
+ +## Mode Detection + +Read the invocation for a mode keyword: + +- **No keyword** (or `default`) → run **Default mode** (Phases 0-5 below) +- `principal` → run **Principal mode** (Phases 0-5 + Principal phases P1-P3) + +Examples: +```text +/ctx-architecture +/ctx-architecture principal +/ctx-architecture (principal) +``` + +--- ## When to Use -- Working on system structure or adding packages -- Tracing data flow across the codebase -- Onboarding to understand the system -- After significant structural changes +- First time setting up architecture documentation for a project +- Periodically to refresh stale module coverage after significant + changes +- After major refactors, new package additions, or dependency changes +- When the agent nudges that the map is stale (>30 days, commits + detected) +- When you need deep understanding of a module before working on it +- When you want strategic analysis of the architecture (principal mode) ## When NOT to Use -- For code-level changes within a single package -- When ARCHITECTURE.md is already up-to-date -- For documentation-only projects +- For minor code changes that don't affect module boundaries or + data flow +- When ARCHITECTURE.md just needs a quick path fix (use `/ctx-drift` + instead) +- Repeatedly in the same session without intervening code changes +- When the user has opted out (`opted_out: true` in + map-tracking.json) + +--- + +## Default Mode (Phases 0-5) + +### Phase 0: Check Opt-Out + +Read `.context/map-tracking.json`. If it exists and +`opted_out: true`, say: + +> Architecture mapping is opted out for this project. Delete +> `.context/map-tracking.json` to re-enable. + +Then stop. + +### Phase 0.25: Companion Tool Check + +Check if **Gemini Search** MCP is available by attempting a +simple query. Gemini is used for upstream documentation, design +rationale, KEPs, peer-project patterns - anything outside the +local codebase that helps understand *why* the code is shaped +the way it is. 
+ +**If available**: note it silently. Use Gemini throughout the +analysis for upstream lookups. Prefer it over built-in web search. + +**If not available**: ask the user once: + +``` +Gemini Search is not connected. It helps me look up upstream +design docs, KEPs, and peer-project patterns during analysis. + +Want to set it up now, or proceed without it? +``` + +Respect the answer and continue either way. + +**Important**: Gemini is for *upstream* and *external* context +only. Do not use it to understand the local codebase - read the +code directly. The depth of analysis comes from forced reading, +not from search shortcuts. + +### Phase 0.5: Quick Structure Scan + Focus Areas -## Process +Before any deep analysis, do a lightweight structural survey to +discover what the project actually contains. This takes seconds +and makes the focus-area question concrete instead of open-ended. -### 1. Scan the codebase +**Scan steps** (no file reads - structure only): ```bash -ctx status +# Detect ecosystem +ls go.mod package.json Cargo.toml pyproject.toml 2>/dev/null + +# List top-level source directories / packages +# Go: +go list ./... 2>/dev/null | sed 's|.*/||' | sort -u | head -40 +# or: ls internal/ cmd/ pkg/ 2>/dev/null + +# Node/other: ls src/ lib/ packages/ 2>/dev/null + +# Large monorepo guard: if >100 packages, limit to top 2 levels only +find . -mindepth 1 -maxdepth 2 -type d \ + ! -path './.git/*' ! -path './vendor/*' ! -path './node_modules/*' \ + | sort | head -60 +``` + +**Then ask** (present the discovered package/module names): + +``` +I found these top-level packages/modules: + [list from scan] + +Any specific areas you'd like me to go deep on? You can name +packages from the list above, describe subsystems (e.g. "the +reconciler loop", "auth handling"), or say "all" for a uniform +pass. + +Skip or press enter to do a standard uniform pass. 
```bash
git log --oneline --since="<last_analyzed>" \
  -- <module-path>/
```
## <module-path>

**Purpose**: One-line description.

**Key types**: List main structs/interfaces.

**Exported API**:
- `FuncName()`: what it does
- `Type.Method()`: what it does

**Data flow**: Entry → Processing → Output

Include an ASCII sequence diagram when there are 3+ actors or
non-obvious ordering:

```
Caller           Scheduler         Worker
  |--schedule()-->|                  |
  |               |--dispatch()----->|
  |               |<--result---------|
  |<--done--------|                  |
```

Include an ASCII state diagram when the module manages lifecycle
or status transitions:

```
[Init] --configure()--> [Ready] --start()--> [Running]
   |                                             |
   +--error()--> [Failed]                        +--stop()--> [Stopped]

[Stopped] --reset()--> [Ready]
```
"assumes <1000 + items", "single-threaded reconcile loop") +- What breaks first under load + +**Danger zones** (top 3 riskiest modification points): +1. `` - why it's dangerous (hidden coupling, + ordering assumption, shared mutable state, etc.) +2. ... +3. ... + +**Control loop & ownership** (if the module participates in +reconciliation or state management): +- What owns the reconciliation for this module's resources? +- What is source of truth vs. derived/cached state? +- What triggers re-reconciliation? + +**Extension points** (where features would naturally attach): +- `` - what kind of extension fits here + +**Improvement ideas** (1-3 concrete suggestions, not generic): +- `` - what it fixes and why it's feasible + +**Dependencies**: list of internal packages used +``` + +**Splitting DETAILED_DESIGN.md when it grows large:** + +When DETAILED_DESIGN.md exceeds ~600 lines or covers 3+ natural +domains, split into domain files and keep a shallow index: + +- `DETAILED_DESIGN.md` - index only (domain name, file pointer, + module list, one-line domain purpose) +- `DETAILED_DESIGN-.md` - full module sections for that + domain + +Domains are natural groupings, not arbitrary splits. Examples: +- storage, auth, api, reconciler, cli, observability +- If no natural grouping exists, split by: core vs. peripheral + +Index format: +```markdown +# Detailed Design Index + +| Domain | File | Modules | Summary | +|---------|----------------------------|----------------------|-------------------| +| storage | DETAILED_DESIGN-storage.md | pkg/store, pkg/cache | Persistence layer | +| auth | DETAILED_DESIGN-auth.md | pkg/authn, pkg/authz | Identity + policy | + +> See individual files for module-level detail. +``` + +Update `map-tracking.json` to record which domain file each module +lives in: +```json +"pkg/store": { + "domain_file": "DETAILED_DESIGN-storage.md", + ... +} +``` + +Each section is self-contained. 
## <Flow Name>

Steps:
1. <step>
2. <step>
3. ...

Key invariants:
- <invariant>

Common failure modes:
- <failure mode>

Flow (ASCII - include when sequence or state is non-obvious):

  [Trigger] --> [Step A] --> [Step B] --> [Done]
                     |
                  [Error] --> [Retry] --> [Dead Letter]
```json
{
  "version": 1,
  "opted_out": false,
  "opted_out_at": null,
  "last_run": "<ISO-8601 timestamp>",
  "coverage": {
    "<module-path>": {
      "last_analyzed": "<ISO-8601 timestamp>",
      "confidence": <0.0-1.0>,
      "files_seen": ["file1.go", "file2.go"],
      "notes": "Brief summary of understanding"
    }
  }
}
```
"Auth layer: 2/3 modules converged (avg 0.72)" + +### Overall + +- Total modules: N +- Converged (≥ 0.9): N ✅ +- Solid (0.7-0.89): N 🟡 +- Shallow (0.4-0.69): N 🔶 +- Stubbed (< 0.4): N 🔴 + +### What Would Help Next + +For each non-converged module, print a specific suggestion: + +🔶 pkg/bar (0.6) - Shallow + → Read the test files to understand expected behavior under + edge cases: `pkg/bar/*_test.go` + → Trace the internal flow through + → Ask: "walk me through what happens when X" + +🔴 pkg/baz (0.2) - Not analyzed + → Run /ctx-architecture with focus area: pkg/baz + → Or: open pkg/baz/README.md if present + +### Convergence Verdict + +One of: +- ✅ CONVERGED - all modules ≥ 0.9, frontier empty. Further runs + without code changes won't improve coverage. +- 🟡 MOSTLY CONVERGED - core modules ≥ 0.9, peripheral modules + shallow. Diminishing returns on full re-run; use focus areas. +- 🔶 PARTIAL - significant modules below 0.7. Re-run with focus + areas or read tests. +- 🔴 INCOMPLETE - substantial portions unanalyzed. Run again. +``` + +**Convergence thresholds:** +- Module is **converged** at confidence ≥ 0.9 +- Project is **converged** when all non-peripheral modules ≥ 0.9 +- Peripheral = no other modules depend on it AND it has no + exported API surface (pure internal helpers, generated code, + vendor) + +**Blocker vocabulary** (use these consistently in the table): +- `Internal flow unclear` - exports known, internals not traced +- `Not analyzed` - directory listed only +- `Tests not read` - implementation known, behavior under edge + cases unknown +- `Design rationale unknown` - code understood, "why" is unclear +- `Converged` - nothing left to learn from static reading + +--- + +After printing the convergence verdict, append a **Search Prompts** +section. The skill has just read the codebase and knows its jargon - +this is the most useful thing it can hand back to someone who is +not blocked by intelligence but by not knowing the right words. 
### Fill the gaps (ranked by how much they'd help)

For modules/areas still below 0.9:

🔶 pkg/bar - Internal flow unclear
   Try searching:
   - "<type name> design" or "<type name> internals"
   - "<project name> <concept>"
   - "why does <project> use <pattern>" (ADR or design doc)

🔴 pkg/baz - Not analyzed
   Try searching:
   - "<module concept> explained"
   - "<module name> behavior"

### Concepts worth understanding deeply

List 3-5 technical concepts the codebase clearly depends on but
that can't be learned from the code alone. Give the exact search
phrase, not a topic:

- "<specific concept> explained" - e.g. "etcd watch semantics
  explained", "CRDT merge strategies", "OIDC token refresh flow"
- "<pattern> tradeoffs" - e.g. "saga pattern vs 2PC tradeoffs"

### Architecture decision records (if relevant)

If the code shows signs of a deliberate non-obvious choice
(e.g. custom retry logic instead of a library, unusual data
structure), suggest:
  - "<project> <choice> ADR"
  - "<project> <choice> RFC"
  - "why doesn't <project> use <obvious alternative>"
+``` + +**Rules for this section:** +- Always generate search prompts, even for converged modules - + there's always design rationale that code can't express +- Phrases must be concrete and use actual names/types from the + codebase - no generic "learn more about X" fluff +- Rank by usefulness: gaps in shallow modules first, concepts + second, ADRs third +- Maximum ~10 phrases total; fewer sharp ones beat many vague ones +- Default: do NOT run the searches yourself +- Exception: if Gemini Search is available, you MAY run upstream + searches for KEPs, design docs, peer-project patterns, and ADRs + - but only for concepts the codebase shows clear dependency on. + Note what you searched and what you found. This applies in any + mode, not just principal mode. +- If Gemini is not available and the user requested principal-mode + depth, you may fall back to built-in web search for the same + purpose + +--- + +## Principal Mode (Phases 0-5 + P1-P3) + +Run all default mode phases first (0-5), then continue below. +Principal mode is for strategic thinking - beyond "what is" to +"what could be" and "what should concern us." + +### Phase P1: Extended Context Gathering + +In addition to the default phase sources, read: + +- `.context/TASKS.md` - outstanding work, future plans +- `CHANGELOG.md` or `docs/changelog.md` - trajectory of decisions +- `docs/` - any design rationale in user-facing docs +- Recent git log: `git log --oneline -30` + +### Phase P2: Gather Strategic Context + +Two-tier behavior - do not stall: + +**If answers are available** (user provided them in the prompt, +or they exist in `.context/TASKS.md` / `DECISIONS.md`): use them. +Do not ask for what you already have. + +**If answers are not available**: do NOT stop. Generate a +provisional principal analysis with assumptions explicitly labeled +(see Principal Mode Fallback below). Include a "Questions That +Would Sharpen This" section at the end of ARCHITECTURE-PRINCIPAL.md. 
# Architecture - Principal Analysis
_Generated <date>. Strategic analysis only; see ARCHITECTURE.md
for the authoritative architecture reference._
## Intervention Points
Top 5 highest-leverage places to implement new features or
improvements, ranked by impact/effort:
1. `<package/interface/function>` - what kind of change fits here
   and why
2. ...
What does the +system assume about its environment that may not hold?] + +## Onboarding Friction +[Practical, not theoretical - this is what a new engineer actually +hits in week one:] +- What makes this system hard to understand quickly? +- Which modules require tribal knowledge to use safely? +- Where would a new engineer get stuck first, and why? +- What isn't written down anywhere? +``` + +**Boundary hygiene** - ARCHITECTURE-PRINCIPAL.md is for synthesis, +leverage, risk, direction, and judgment. Do NOT restate module +details that already exist in DETAILED_DESIGN.md. Reference module +paths only where needed to ground an argument. If you find yourself +summarizing what a module does, stop - link to it instead. + +**Principal mode fallback** - if Phase P2 answers were not provided, +label speculative sections clearly and add at the end: + +```markdown +## Questions That Would Sharpen This Analysis + +Answering any of these would move speculative sections to grounded ones: + +1. **Vision** - What is this project trying to become in 12-24 months? +2. **Future direction** - Any architectural pivots being considered? +3. **Known bottlenecks** - Where does the current design hurt? +4. **Assumptions marked** - These sections are labeled [inferred]: + [list them] ``` -Read the existing ARCHITECTURE.md if it exists. -Scan the directory tree to identify: -- Top-level packages and their responsibilities -- Data flow between components -- External dependencies +**Autonomous inferences** - principal mode must also answer the +following from the codebase alone, without waiting for user input. +These are things the code is silently deciding. Surface them: -### 2. Build or update ARCHITECTURE.md +- Where are abstraction boundaries likely to calcify under growth? +- Which current APIs are accidentally becoming public contracts? +- What will become expensive when team size or data volume doubles? 
### Compared to <peer project>/<component>

- What <this project> does differently
- What <peer> does better
- What could be unified or learned from <peer>
## Summary

| Module | Zone | Risk | Why |
|--------|------|------|-----|
| <module> | <zone> | HIGH/MEDIUM/LOW | one-line reason |

## By Module

### <module-path>

1. **<zone name>** - <risk level>
   - Hidden coupling / ordering assumption / shared mutable state
   - Modification advice: <how to change this safely>

2. ...
1. Set `opted_out: true` and `opted_out_at: "<timestamp>"` in
   map-tracking.json
2. Confirm: "Noted: won't ask again. Delete
   `.context/map-tracking.json` to re-enable."
3. On future invocations, exit immediately with brief message

## Nudge Behavior

The agent MAY suggest `/ctx-architecture` during session start when:

- **No tracking file**: "This project doesn't have an architecture
  map yet. Want me to run `/ctx-architecture`?"
- **Stale (>30 days)**: "The architecture map hasn't been updated
  since <date> and there are <N> commits touching <M> modules.
  Want me to refresh?"
- **Opted out**: say nothing
+ ## Quality Checklist -- [ ] Every top-level package mentioned -- [ ] Data flow is traceable end-to-end -- [ ] External dependencies listed with rationale -- [ ] Coverage percentage reported -- [ ] No stale references to removed packages +After running, verify: +- [ ] ARCHITECTURE.md is under 4000 tokens (~16KB) +- [ ] ARCHITECTURE.md has all required sections (Overview, Dependency + Graph, Component Map, Data Flow, Key Patterns, File Layout) +- [ ] DETAILED_DESIGN.md uses consistent per-module format +- [ ] Each module section has Purpose, Key types, Exported API, + Data flow, Edge cases, Performance considerations, Control + loop & ownership (if applicable), Danger zones, Extension + points, Improvement ideas, Dependencies +- [ ] ASCII sequence diagram included when 3+ actors or + non-obvious ordering +- [ ] ASCII state diagram included when module manages lifecycle + or status transitions +- [ ] No mermaid in DETAILED_DESIGN.md (ASCII only) +- [ ] If DETAILED_DESIGN.md > ~600 lines or 3+ domains: split + into domain files with shallow index +- [ ] map-tracking.json records domain_file for each module + when split +- [ ] map-tracking.json is valid JSON with version, coverage entries +- [ ] Confidence levels are honest (not inflated) +- [ ] Stale modules were re-analyzed, not just marked current +- [ ] ARCHITECTURE.md was only updated for boundary/flow/dependency + changes, not internal implementation details +- [ ] Convergence report printed with per-module table +- [ ] Domain groupings shown if natural groupings exist +- [ ] Each non-converged module has a specific "what would help" + suggestion (not generic advice) +- [ ] Overall convergence verdict stated (CONVERGED / MOSTLY / + PARTIAL / INCOMPLETE) +- [ ] Blocker column uses consistent vocabulary +- [ ] Search Prompts section printed after convergence verdict +- [ ] Search phrases use actual type/function/pattern names from + the codebase (not generic topics) +- [ ] Phrases ranked: shallow-module gaps 
first, concepts second, + ADRs third +- [ ] No more than ~10 phrases total +- [ ] Skill did NOT run local-code searches itself (upstream + searches via Gemini are allowed) +- [ ] CONVERGENCE-REPORT.md written to .context/ (not just printed) +- [ ] Phase 0.25 Gemini check completed (available or user declined) +- [ ] Phase 0.5 structure scan was run before any deep analysis +- [ ] Focus areas question was asked with actual package names (not + open-ended) +- [ ] If focus areas given: deep analysis concentrated there; other + packages stubbed at 0.2 unless direct dependencies +- [ ] Principal mode: P2 answers used if available; if not, + provisional analysis written with [inferred] labels +- [ ] Principal mode: "Questions That Would Sharpen This" section + present if P2 answers were not provided +- [ ] Principal mode: output written to `ARCHITECTURE-PRINCIPAL.md`, + not overwriting `ARCHITECTURE.md` +- [ ] Principal mode: "Silent Choices" section present (autonomous + inferences from code - abstraction calcification, accidental + contracts, scale costs, strategic bets) +- [ ] Principal mode: ARCHITECTURE-PRINCIPAL.md does not restate + DETAILED_DESIGN.md content - links to module paths instead +- [ ] CHEAT-SHEETS.md written with at least one lifecycle flow +- [ ] Each cheat sheet fits ~one screen; long flows are split +- [ ] Danger zones section present in each DETAILED_DESIGN module + (top 3, with reasoning - not just "this is complex") +- [ ] Extension points section present in each module +- [ ] Principal mode: Failure-First Analysis section written +- [ ] Principal mode: Onboarding Friction section present (practical, + week-one concerns - not generic "hard to understand") +- [ ] Principal mode: Upstream Proposals cross abstraction boundaries + (not internal refactors) +- [ ] Principal mode: Intervention Points section present (concrete + locations, not vague labels) +- [ ] Principal mode: Upstream Proposals section present (2-3 items + with what/why/where/risk) 
+- [ ] Principal mode: Productization Gaps section present +- [ ] Principal mode: opinion floor met (≥3 risks, ≥3 improvements, + ≥2 upstream opportunities - specific, not generic) +- [ ] Principal mode: cross-project comparisons included where + meaningful peers exist (not forced) +- [ ] Principal mode: DANGER-ZONES.md written with consolidated + danger zones from all analyzed modules (skip if none found) +- [ ] Principal mode: DANGER-ZONES.md includes summary table and + per-module breakdown with risk levels and modification advice +- [ ] GLOSSARY.md: new terms added alphabetically (max 10, project- + specific only, skipped if file doesn't exist) +- [ ] Convergence report includes "Glossary additions" line if + terms were added diff --git a/internal/assets/integrations/copilot-cli/skills/ctx-archive/SKILL.md b/internal/assets/integrations/copilot-cli/skills/ctx-archive/SKILL.md index 90920e161..366dcd53f 100644 --- a/internal/assets/integrations/copilot-cli/skills/ctx-archive/SKILL.md +++ b/internal/assets/integrations/copilot-cli/skills/ctx-archive/SKILL.md @@ -8,10 +8,12 @@ Move completed tasks from TASKS.md to the archive. ## Before Archiving +Two questions: if any answer is "no", don't archive: + 1. **"Are the completed tasks cluttering the view?"** → If TASKS.md is still easy to scan, there's no urgency -2. **"Are all `[x]` items truly done?"** → Verify nothing was checked - off prematurely +2. **"Are all `[x]` items truly done?"** → Verify nothing was checked off + prematurely ## When to Use @@ -21,26 +23,36 @@ Move completed tasks from TASKS.md to the archive. 
## When NOT to Use -- When there are only a few completed tasks +- When there are only a few completed tasks (not worth the noise) - When you're unsure if tasks are truly complete (verify first) - **Never delete tasks**: only archive (CONSTITUTION invariant) +## Constitution Rules + +These are inviolable: + +- **Archival is allowed, deletion is not**: never delete context history +- **Archive preserves structure**: Phase headers are kept for traceability +- **Never move tasks**: tasks stay in their Phase section; archiving is + the only sanctioned "move" and it's to the archive directory + ## Execution ```bash -# Preview first (recommended) -ctx tasks archive --dry-run +ctx task archive $ARGUMENTS +``` -# Archive after confirming the preview -ctx tasks archive +**Example: preview first (recommended):** +```bash +ctx task archive --dry-run ``` -Archived tasks go to `archive/tasks-YYYY-MM-DD.md` in the context -directory, preserving Phase headers for traceability. +**Example: archive after confirming the preview:** +```bash +ctx task archive +``` -## Quality Checklist +Archived tasks go to `archive/tasks-YYYY-MM-DD.md` in the context directory, +preserving Phase headers for traceability. -- [ ] Previewed with `--dry-run` before archiving -- [ ] All archived items are truly complete -- [ ] No tasks were deleted (only archived) -- [ ] Reported how many tasks were archived +Report how many tasks were archived and where the archive file was written. 
diff --git a/internal/assets/integrations/copilot-cli/skills/ctx-blog-changelog/SKILL.md b/internal/assets/integrations/copilot-cli/skills/ctx-blog-changelog/SKILL.md index 3366a7625..6130c183a 100644 --- a/internal/assets/integrations/copilot-cli/skills/ctx-blog-changelog/SKILL.md +++ b/internal/assets/integrations/copilot-cli/skills/ctx-blog-changelog/SKILL.md @@ -37,22 +37,30 @@ Required: Optional: - **Reference post**: An existing post to match the style +## Usage Examples + +```text +/ctx-blog-changelog 040ce99 "human-assisted refactoring" +/ctx-blog-changelog HEAD~30 "building the journal system" +/ctx-blog-changelog v0.1.0 "what's new in v0.2.0" +``` + ## Process 1. **Analyze the commit range**: ```bash -git --no-pager log --oneline ..HEAD -git --no-pager diff --stat ..HEAD -git --no-pager log --format="%s" ..HEAD | head -50 +git log --oneline ..HEAD +git diff --stat ..HEAD +git log --format="%s" ..HEAD | head -50 ``` 2. **Gather supporting context**: ```bash # Files most changed -git --no-pager diff --stat ..HEAD | sort -t'|' -k2 -rn | head -20 +git diff --stat ..HEAD | sort -t'|' -k2 -rn | head -20 # Journal entries from this period -ctx recall list +ctx journal source ``` 3. **Draft the narrative** following the theme diff --git a/internal/assets/integrations/copilot-cli/skills/ctx-blog/SKILL.md b/internal/assets/integrations/copilot-cli/skills/ctx-blog/SKILL.md index bae7e5adf..b5968e6e8 100644 --- a/internal/assets/integrations/copilot-cli/skills/ctx-blog/SKILL.md +++ b/internal/assets/integrations/copilot-cli/skills/ctx-blog/SKILL.md @@ -1,57 +1,146 @@ --- name: ctx-blog -description: "Generate blog post drafts from project activity. Use to communicate progress, decisions, or technical insights." +description: "Generate blog post draft. Use when documenting project progress, sharing learnings, or writing about development experience." tools: [bash, read, write] --- -Generate blog post drafts from project activity. 
+ + +Generate a blog post draft from recent project activity. + +## Before Writing + +Two questions: if any answer is "no", reconsider: + +1. **"Is there a narrative arc?"** → A blog post needs a story (problem → + approach → outcome), not just a list of changes +2. **"Would someone outside the project learn something?"** → If the + insight is only useful internally, use LEARNINGS.md instead ## When to Use -- After completing a significant feature -- When a decision or learning is worth sharing publicly -- For project updates and changelogs -- When the user says "write a blog post about..." +- When documenting significant project progress +- When sharing learnings publicly +- When the user wants to write about the development experience ## When NOT to Use -- For internal context (use learnings/decisions instead) -- When there's nothing noteworthy to share +- For internal-only notes (use session saves or LEARNINGS.md) +- When the work is still in progress with no clear insight yet +- For changelogs (use `/ctx-blog-changelog` instead) -## Process +## Input + +The user may specify: +- A time range: `last week`, `since Monday`, `January` +- A topic focus: `the refactoring`, `new features`, `lessons learned` +- Or just run it to analyze recent activity + +## Sources to Analyze + +Gather context from multiple sources: + +```bash +# Recent commits +git log --oneline -30 + +# Recent decisions +ctx status --verbose # or read DECISIONS.md directly + +# Recent learnings +ctx status --verbose # or read LEARNINGS.md directly + +# Recent tasks completed +ctx status # shows active and completed task counts + +# Journal entries (if available) +ctx journal source --limit 10 +``` + +## Blog Post Structure + +### Frontmatter -### 1. 
Gather material +```yaml +--- +title: "Descriptive Title: What This Post Is About" +date: YYYY-MM-DD +author: [Ask user] +topics: + - topic-one + - topic-two + - topic-three +--- +``` + +### Body + +```markdown +# Title -- Recent commits: `git log --oneline -20` -- Recent decisions from DECISIONS.md -- Recent learnings from LEARNINGS.md -- Completed tasks from TASKS.md +![ctx](../images/ctx-banner.png) -### 2. Identify the narrative +> Opening hook or question -What's the story? Options: -- Feature announcement -- Technical deep-dive -- Lessons learned -- Project update / changelog +[Introduction: Set the scene, why this matters] -### 3. Draft the post +## Section 1: The Context/Problem +[What situation led to this work] -Structure: -- **Title**: clear and engaging -- **Introduction**: what and why (2-3 sentences) -- **Body**: the story with technical details -- **Conclusion**: what's next +## Section 2: What We Did +[Narrative of the work, with code examples] -### 4. Write to blog directory +## Section 3: What We Learned +[Key insights, gotchas, patterns discovered] -Target: `site/blog/{date}-{slug}/index.html` or -`docs/blog/{date}-{slug}.md` per project convention. +## Section 4: What's Next +[Future work, open questions] +``` + +## Style Guidelines + +- **Personal voice**: Use "I", "we", share the journey +- **Show don't tell**: Include actual code, commits, quotes +- **Tables for comparisons**: Before/after, patterns found +- **Honest about failures**: Include what went wrong and why +- **Concrete examples**: Reference specific files, commits, decisions +- **No em-dashes**: Use `:`, `;`, or restructure the sentence instead +- **Straight quotes only**: Use "dumb quotes" (`"`, `'`), never + typographic/curly quotes +- **80-character line width**: Wrap prose at ~80 characters; exceptions + for tables, code blocks, and URLs + +## Process + +1. Gather sources (git, decisions, learnings, journals) +2. Identify the narrative arc (what's the story?) +3. 
Draft outline for user approval +4. Write full draft +5. Ask for revisions +6. Save to `docs/blog/YYYY-MM-DD-slug.md` +7. **Update `docs/blog/index.md`**: add entry at the top following the + existing pattern: + +```markdown +### [Post Title](YYYY-MM-DD-slug.md) + +*Author / Date* + +2-3 sentence blurb. + +**Topics**: topic-one, topic-two, topic-three + +--- +``` -## Quality Checklist +## Example Invocations -- [ ] Title is clear and engaging -- [ ] Technical accuracy verified -- [ ] No sensitive information exposed -- [ ] Proper frontmatter/metadata -- [ ] Links to relevant specs/docs where appropriate +``` +/ctx-blog about the cooldown feature we just built +/ctx-blog last week's refactoring work +/ctx-blog lessons learned from hook design +``` diff --git a/internal/assets/integrations/copilot-cli/skills/ctx-brainstorm/SKILL.md b/internal/assets/integrations/copilot-cli/skills/ctx-brainstorm/SKILL.md index e2b5ba45e..b019def25 100644 --- a/internal/assets/integrations/copilot-cli/skills/ctx-brainstorm/SKILL.md +++ b/internal/assets/integrations/copilot-cli/skills/ctx-brainstorm/SKILL.md @@ -1,6 +1,6 @@ --- name: ctx-brainstorm -description: "Design before implementation. Use before any creative or constructive work to transform vague ideas into validated designs." +description: "Design before implementation. Use before any creative or constructive work (features, architecture, behavior changes) to transform vague ideas into validated designs." tools: [bash, read, write] --- @@ -9,11 +9,13 @@ structured dialogue **before any implementation begins**. ## Before Brainstorming -1. **Check if design is needed**: is the change complex enough? -2. **Review prior art**: check `.context/DECISIONS.md` for related - past decisions -3. **Identify what exists**: read relevant code before asking - questions the codebase already answers +1. **Check if design is needed**: is the change complex enough + to warrant a design phase, or is the solution already clear? +2. 
**Review prior art**: check `.context/DECISIONS.md` for + related past decisions; do not re-litigate settled choices +3. **Identify what exists**: read relevant code and docs before + asking questions; do not ask the user things the codebase + already answers ## When to Use @@ -26,29 +28,50 @@ structured dialogue **before any implementation begins**. - Bug fixes with clear solutions - Routine maintenance tasks -- Well-defined requirements -- Small, isolated changes -- When the user explicitly wants to jump to code +- When requirements are already well-defined +- Small, isolated changes (just do them) +- When the user explicitly wants to jump straight to code + +## Usage Examples + +```text +/ctx-brainstorm +/ctx-brainstorm (new caching layer for the API) +/ctx-brainstorm (should we split the monolith?) +``` ## Operating Mode -Design facilitator, not builder. No implementation while -brainstorming. +Design facilitator, not builder. + +- No implementation while brainstorming +- No speculative features +- No silent assumptions +- No skipping ahead + +**Slow down just enough to get it right.** ## The Process ### 1. Understand Current Context Before asking questions: -- Review project state, docs, prior decisions + +- Review project state: files, docs, prior decisions +- Check `.context/DECISIONS.md` for related past decisions - Identify what exists vs what is proposed - Note implicit constraints +**Do not design yet.** + ### 2. Clarify the Idea +Goal: **shared clarity**, not speed. + Rules: - Ask **one question per message** - Prefer **multiple-choice** when possible +- Split complex topics into multiple questions Focus on: - Purpose: why does this need to exist? @@ -59,49 +82,89 @@ Focus on: ### 3. 
Non-Functional Requirements -Clarify or propose assumptions for: -- Performance, scale, security, reliability, maintenance +Explicitly clarify or propose assumptions for: + +- Performance expectations +- Scale (users, data, traffic) +- Security/privacy constraints +- Reliability needs +- Maintenance expectations + +If the user is unsure, propose reasonable defaults and mark +them as **assumptions**. ### 4. Understanding Lock (Gate) -Before proposing any design, provide: +Before proposing any design, pause and provide: **Understanding Summary** (5-7 bullets): -- What is being built, why, for whom, constraints, non-goals +- What is being built +- Why it exists +- Who it is for +- Key constraints +- Explicit non-goals **Assumptions**: list all explicitly. **Open Questions**: list unresolved items. -> "Does this accurately reflect your intent? Confirm or correct -> before we move to design." +Then ask: +> "Does this accurately reflect your intent? Confirm or +> correct before we move to design." **Do NOT proceed until confirmed.** ### 5. Explore Design Approaches +Once understanding is confirmed: + - Propose **2-3 viable approaches** -- Lead with recommended option -- Explain trade-offs +- Lead with your **recommended option** +- Explain trade-offs: complexity, extensibility, risk, + maintenance +- Apply YAGNI ruthlessly ### 6. Stress-Test the Chosen Approach -After the user picks an approach: -- Surface assumptions and dependencies -- Identify failure modes -- Steel-man an alternative +After the user picks an approach, pause for adversarial review +before moving to detailed design. -> "These are the risks I see. Do they change your preference?" +**Surface assumptions**: +- List assumptions the chosen approach depends on +- Identify implicit dependencies (libraries, infra, team knowledge) + +**Identify failure modes**: +- What would make this approach fail? 
(edge cases, scale limits, + integration risks, operational complexity) +- What's the worst-case recovery if it does fail? + +**Steel-man an alternative**: +- Name the strongest argument for a different approach +- Be specific: "Approach B avoids X risk" not "there are other options" + +**Gate question**: +> "These are the risks I see. Do they change your preference, or +> should we proceed with the chosen approach?" + +Only move to detailed design after the user confirms. This is +one gate, not a debate: if the user says "proceed," proceed. ### 7. Present the Design -Break into digestible sections. Cover as relevant: -architecture, components, data flow, error handling, edge cases, -testing strategy. +Break into digestible sections. After each, ask: +> "Does this look right so far?" + +Cover as relevant: +- Architecture +- Components +- Data flow +- Error handling +- Edge cases +- Testing strategy ### 8. Decision Log -Maintain a running log: +Maintain a running log throughout: | Decision | Alternatives | Rationale | |----------|--------------|-----------| @@ -111,8 +174,13 @@ Maintain a running log: ### Persist to Context +Once validated, persist outputs: + ```bash -ctx add decision "..." --context "..." --rationale "..." +# Record key decisions +ctx add decision "..." \ + --session-id ID --branch BR --commit HASH \ + --context "..." --rationale "..." --consequence "..." ``` ### Implementation Handoff @@ -120,13 +188,57 @@ ctx add decision "..." --context "..." --rationale "..." Only after documentation, ask: > "Ready to begin implementation?" 
+If yes: +- Create explicit implementation plan +- Break into incremental steps +- Proceed one step at a time + +## Good Example + +> **Understanding Summary**: +> - Building a cooldown mechanism for `ctx agent` hooks +> - Prevents repetitive context injection on every tool use +> - For Claude Code users running ctx in PreToolUse hooks +> - Must be session-isolated (two sessions share no state) +> - Non-goal: per-tool granularity (cooldown is global) +> +> **Assumptions**: 10-minute default cooldown is reasonable. +> +> **Open Questions**: none remaining. +> +> Does this accurately reflect your intent? + +## Bad Examples + +- Jumping to architecture diagrams before asking what the + feature is for +- Asking 5 questions in one message (ask one at a time) +- Proposing a design without the Understanding Lock step +- "Let me implement this real quick" (no implementation + during brainstorm) + ## Quality Checklist -Exit brainstorming **only when**: +Exit brainstorming mode **only when**: + - [ ] Understanding Lock confirmed by the user - [ ] At least one design approach accepted -- [ ] Stress-test completed -- [ ] Major assumptions documented +- [ ] Stress-test completed (assumptions, failure modes, alternatives) +- [ ] Major assumptions documented explicitly - [ ] Key risks acknowledged - [ ] Decision Log complete -- [ ] Decisions persisted to context +- [ ] Decisions persisted to `.context/DECISIONS.md` + +If any criterion is unmet, continue refinement. 
+
+## Principles
+
+- **Think step-by-step** before proposing anything: reason
+  through the problem space before jumping to solutions
+- One question at a time
+- Assumptions must be explicit
+- Explore alternatives before committing
+- Validate incrementally
+- Clarity over cleverness
+- Be willing to go back
+- **YAGNI ruthlessly**
diff --git a/internal/assets/integrations/copilot-cli/skills/ctx-commit/SKILL.md b/internal/assets/integrations/copilot-cli/skills/ctx-commit/SKILL.md
index aa63466a4..a4ae1159f 100644
--- a/internal/assets/integrations/copilot-cli/skills/ctx-commit/SKILL.md
+++ b/internal/assets/integrations/copilot-cli/skills/ctx-commit/SKILL.md
@@ -10,37 +10,90 @@ recording the context behind it.
 
 ## When to Use
 
-- When committing after meaningful work (feature, bugfix,
-  refactor)
-- When the commit involves a design choice or trade-off
-- When the user says "commit" or "commit this"
+- For ALL commits. This is the only way to commit in this project.
+  Raw `git commit` bypasses spec enforcement and violates CONSTITUTION.
+- When the user says "commit", "commit this", "ship it", "let's commit":
+  always use this skill, never raw git commit.
 
 ## When NOT to Use
 
-- For trivial commits (typo, formatting): just commit normally
-- When the user explicitly says "just commit, no context"
-- When nothing has changed
+- When nothing has changed (no staged or unstaged modifications)
+
+## Usage Examples
+
+```text
+/ctx-commit
+/ctx-commit "implement session enrichment"
+/ctx-commit --skip-qa
+```
 
 ## Process
 
-### 1. Pre-commit checks
+### 1. Check CONSTITUTION for commit rules
+
+Read `.context/CONSTITUTION.md` (if it exists) for commit-specific
+rules. Common project rules to look for and enforce:
+
+- **Spec-per-commit**: Add a `Spec:` trailer, verify a spec file exists in
+  `specs/` before proceeding. If no spec exists, stop and offer to run
+  `/ctx-spec` to scaffold one.
+- **Sign-off**: if the project requires a `Signed-off-by:` trailer, include it.
+- **Other trailers**: Honor any project-specific trailer requirements. + +Read CONSTITUTION fully and apply all relevant rules before +proceeding to pre-commit checks. + +### 2. Pre-commit checks -Unless the user says "skip checks": +Unless the user says `--skip-qa` or "skip checks": - Run `git diff --name-only` to see what changed -- If Go files changed, run `go build ./cmd/ctx/...` -- If build fails, stop and report: do not commit broken code +- Run the project's build and lint commands to verify nothing is broken. + Check for a Makefile, package.json, or equivalent. If you cannot + identify the build/lint commands, ask the user before proceeding. +- If the build or lint fails, stop and report: do not commit broken code + +**Verify before claiming ready**: map each claim to evidence. +"Tests pass" requires test output with 0 failures. "Build succeeds" +requires exit 0. "Lint clean" requires linter output with 0 errors. +Run commands fresh — never reuse earlier output. Before proceeding +to stage, answer these self-audit questions: + +1. What assumptions did I make? +2. What did I NOT check? +3. Where am I least confident? +4. What would a reviewer question first? + +If any answer reveals a gap, address it before staging. + +### 3. Close matching tasks + +Every commit closes work. Before staging, check TASKS.md for +tasks that this commit completes: + +- Read `.context/TASKS.md` +- Identify the spec being committed (the `Spec:` trailer value) +- Find open tasks (`[ ]`) whose `Spec:` field matches +- If no spec match, search by keywords from the commit subject +- Mark matching tasks `[x]` +- If uncertain whether a task is fully done, ask the user +- Stage the updated TASKS.md alongside the code changes -### 2. Stage and commit +This is the closure point in the plan→spec→task→commit chain. +Skipping it causes task rot: completed work stays open, +future sessions waste time re-triaging stale items. + +### 4. 
Stage and commit - Review unstaged changes with `git status` - Stage relevant files (prefer specific files over `git add -A`) - Craft a concise commit message: - If the user provided a message, use it - - If not, draft one based on the changes -- Commit with trailers as required by project conventions + - If not, draft one based on the changes (1-2 sentences, + "why" not "what") +- Include the `Spec:` and `Signed-off-by:` trailers (see format below) -### 3. Context prompt +### 5. Context prompt After a successful commit, ask the user: @@ -48,43 +101,103 @@ After a successful commit, ask the user: > > - **Decision**: Did you make a design choice or trade-off? > - **Learning**: Did you hit a gotcha or discover something? -> - **Neither**: No context to capture. +> - **Neither**: No context to capture: we're done. -If they provide a decision or learning, record it: +Wait for the user's response. If they provide a decision or +learning, record it using the appropriate command: ```bash -ctx add decision "..." -ctx add learning --context "..." --lesson "..." --application "..." +ctx add decision "Use PostgreSQL" \ + --session-id abc12345 --branch main --commit 68fbc00a \ + --context "Need a reliable database" \ + --rationale "ACID compliance and JSON support" \ + --consequence "Team needs training" ``` -### 4. Doc drift check +```bash +ctx add learning "Go embed requires files in same package" \ + --session-id abc12345 --branch main --commit 68fbc00a \ + --context "..." --lesson "..." --application "..." +``` -If committed files include source code that could affect docs, -offer to check for doc drift. +### 6. Reflect -### 5. Reflect (optional) +After every commit, run `/ctx-reflect` to capture the bigger +picture before moving on. This is mandatory: Skipping reflection +is how context gets lost between sessions. -If the commit represents a significant milestone, suggest: +## Commit Message Format -> This looks like a good checkpoint. 
Want me to run a quick -> reflection to capture the bigger picture? +Follow the repository's existing commit style. Draft messages +that: +- Focus on **why**, not what (the diff shows what) +- Use lowercase, no period at the end +- Scale detail to match scope: a one-file fix gets 1-2 sentences; + a multi-package change gets a summary paragraph plus a bulleted + list of what changed and why +- Include any trailers required by CONSTITUTION (e.g., `Spec:`, + `Signed-off-by:`) + +Example: +``` +complete journal-recall merge wiring and cross-cutting cleanup + +Wire journal commands through journal/core packages instead of +recall/core. Move importer, lock, unlock, sync cmd packages from +recall/cmd to journal/cmd. + +Changes: +- journal/core/{plan,execute,query} are now canonical +- sourcefm/sourceformat renamed to source/frontmatter, source/format +- Magic numbers extracted to config/stats constants +- state.StateDir renamed to state.Dir across 26 callers +- splitLines moved to parse.ByteLines +- /ctx-commit skill generalized to be language-agnostic + +Spec: specs/journal-merge-completion.md +Signed-off-by: Jane Doe +``` -## Commit Message Style +## Commit Discipline -- Focus on **why**, not what (the diff shows what) -- Concise (1-2 sentences) -- Follow the repository's existing commit style -- Include required trailers (Spec:, Co-authored-by:, etc.) +- **Spec trailer is mandatory**: identify the spec that covers + this work and include `Spec:` in the commit message. If + CONSTITUTION also requires it, this is non-negotiable. 
+- **Confirm the message** with the user before committing (or use
+  their provided message)
+- **Always present the context prompt**: this is the whole point
+  of the skill
+- **Always reflect**: even a one-sentence reflection prevents
+  context loss
+- **Check for secrets** (`.env`, credentials, tokens) in the diff
+  before staging
 
 ## Quality Checklist
 
-Before committing:
-- [ ] Build passes (if Go files changed)
-- [ ] Commit message explains the why
-- [ ] No secrets in staged changes
+Before committing, verify:
+- [ ] Spec exists and is referenced in the commit message
+- [ ] Build and lint pass
+- [ ] Matching tasks marked `[x]` in TASKS.md
+- [ ] Commit message is concise and explains the why
+- [ ] `Spec:` and `Signed-off-by:` trailers are present
+- [ ] No secrets or sensitive files in the staged changes
 - [ ] Specific files staged (not blind `git add -A`)
 
-After committing:
-- [ ] Context prompt was presented
-- [ ] Any decisions/learnings were recorded
-- [ ] Doc drift check offered if source code changed
+After committing, verify:
+- [ ] Context prompt was presented to the user
+- [ ] Any decisions/learnings provided were recorded
+- [ ] Reflection was completed
+
+## Human Relay
+
+After every successful commit, relay a structured summary to the
+human verbatim:
+
+```
+┌─ Commit Summary ─────────────────────────
+│ Spec: specs/<spec-name>.md
+│ Tasks closed: <task titles or count>
+│ Files changed: <count>
+│ Message: <commit subject>
+└──────────────────────────────────────────
+```
diff --git a/internal/assets/integrations/copilot-cli/skills/ctx-consolidate/SKILL.md b/internal/assets/integrations/copilot-cli/skills/ctx-consolidate/SKILL.md
index 4a36f7280..7c715b16d 100644
--- a/internal/assets/integrations/copilot-cli/skills/ctx-consolidate/SKILL.md
+++ b/internal/assets/integrations/copilot-cli/skills/ctx-consolidate/SKILL.md
@@ -1,57 +1,195 @@
 ---
 name: ctx-consolidate
-description: "Consolidate redundant entries in LEARNINGS.md or DECISIONS.md. 
Use when context files grow large with overlapping entries." +description: "Consolidate redundant entries in LEARNINGS.md or DECISIONS.md. Use when ctx drift reports high entry counts or entries overlap." tools: [bash, read, write, edit] --- -Find and merge duplicate or overlapping entries in context files. +Analyze entries in LEARNINGS.md and/or DECISIONS.md, group overlapping +entries by topic, and (with user approval) merge groups into denser +consolidated entries. Originals are archived, not deleted. + +## Key Distinction + +**Consolidation != archival.** Archival moves old entries to +the archive directory. Consolidation *replaces* verbose entries with +tighter ones: the file stays useful, just denser. The originals move +to archive as a paper trail. ## When to Use -- Context files have grown large (50+ entries) -- Multiple entries cover the same topic from different sessions -- After a long project phase where many similar learnings accumulated -- When `ctx status` shows high token counts for context files +- When `ctx drift` reports entry counts above threshold + (default: 30 learnings, 20 decisions) +- When you notice 3+ entries about the same topic +- When the user asks "clean up learnings", "consolidate context", + "reduce noise in decisions" +- Before a release, to keep context lean ## When NOT to Use -- Files are small and manageable -- Entries are all distinct -- Just after a fresh `ctx init` +- When there are fewer than 10 entries (nothing meaningful to group) +- When the user wants to *delete* entries (offer archival instead) +- Automatically: always require user approval before modifying files +- Mid-task when the user is focused on shipping + +## Execution + +### Step 1: Parse Entries + +Read the target file(s): + +```bash +# Check entry counts first +ctx drift --json +``` + +Then read the files directly: +- LEARNINGS.md (in the context directory) +- DECISIONS.md (in the context directory) + +Parse entries by their `## [YYYY-MM-DD-HHMMSS] Title` 
headers. Each +entry extends from its header to the line before the next header or +end of file. + +### Step 2: Extract Keywords and Group + +For each entry, extract keywords from its title and body: + +1. Split text on whitespace and punctuation +2. Lowercase everything +3. Filter out stop words (the, and, for, with, from, are, was, etc.) + and words shorter than 3 characters +4. Deduplicate + +Build a keyword-to-entries map. Entries sharing **2 or more +non-trivial keywords** are candidates for the same group. + +**Grouping rules:** +- Minimum group size: 2 entries (nothing to consolidate with 1) +- Maximum group size: 8 entries (larger groups suggest the topic + needs splitting, not merging) +- An entry can only belong to one group (assign to the best match) + +### Step 3: Present Candidates + +Show the user what you found. Format: + +``` +Consolidation candidates for LEARNINGS.md: + +Group 1: "Hook behavior" (5 entries) + - [2026-01-15] Hook scripts can lose execute permission + - [2026-01-20] Two-tier hook output is sufficient + - [2026-02-03] Claude Code Hook Key Names + - [2026-02-09] Agent ignores repeated hook output + - [2026-02-16] Security docs vulnerable after migrations + -> Proposed: merge into 1 consolidated entry + +Group 2: "Path handling" (3 entries) + - [2026-01-10] Path construction uses stdlib + - [2026-02-05] G304 gosec false positives + - [2026-02-16] gosec G301/G306 permissions + -> Proposed: merge into 1 consolidated entry + +Ungrouped: 12 entries (no consolidation needed) +``` + +**Wait for the user to approve, modify, or reject each group.** +Do NOT proceed without explicit confirmation. 
+ +### Step 4: Generate Consolidated Entries + +For each approved group, write a consolidated entry that: + +- Uses today's timestamp in `YYYY-MM-DD-HHMMSS` format +- Appends "(consolidated)" to the title +- Lists the date range of originals in a `**Consolidated from**` line +- Distills each original into 1-2 lines +- **Preserves all unique information** (nothing is lost) + +**Format:** + +```markdown +## [YYYY-MM-DD-HHMMSS] Hook behavior (consolidated) + +**Consolidated from**: 5 entries (2026-01-15 to 2026-02-16) + +- Hook scripts can lose execute permission without warning; always + restore +x after sync operations +- Two-tier output (stdout for AI context, stderr+exit for blocks) + is sufficient; don't over-engineer severity levels +- Claude Code hook key names are case-sensitive: PreToolUse, not + pre_tool_use +- Agents develop repetition fatigue: vary hook output phrasing + across invocations +- After infrastructure migrations, audit security docs first: + stale paths in security guidance give false confidence +``` + +### Step 5: Execute Approved Merges + +For each approved group: + +1. **Add the consolidated entry** at the top of the file (below + the `# Learnings` or `# Decisions` header) +2. **Remove the original entries** from the source file +3. **Append originals to archive** at + `archive/learnings-consolidated-YYYY-MM-DD.md` in the context + directory (or `decisions-consolidated-YYYY-MM-DD.md`) +4. **Rebuild the index**: + +```bash +ctx learning reindex +# or +ctx decision reindex +``` + +### Step 6: Report Results -## Process +``` +Consolidated LEARNINGS.md: + - Group "Hook behavior": 5 entries -> 1 (originals archived) + - Group "Path handling": 3 entries -> 1 (originals archived) + Total: 8 entries consolidated into 2. File reduced from 47 to 41 entries. + Archive: archive/learnings-consolidated-2026-02-19.md (in context dir) +``` -### 1. 
Read the target file +## Archive Format -Read the full content of the file to consolidate -(LEARNINGS.md or DECISIONS.md). +The archive file uses the same Markdown format as the source file. +Each archived entry keeps its original timestamp and content, +preceded by a header noting which consolidated entry replaced it: -### 2. Identify clusters +```markdown +# Archived Learnings (consolidated 2026-02-19) -Group entries by topic. Look for: -- Same subject with different wording -- Entries that build on each other chronologically -- Contradictory entries (later one supersedes) +Originals replaced by consolidated entries in LEARNINGS.md. -### 3. Propose merges +## Group: Hook behavior -For each cluster, propose a consolidated entry: +## [2026-01-15-120000] Hook scripts can lose execute permission +(original content preserved verbatim) -> **Cluster: [topic]** (N entries → 1) -> - Entry A: "..." -> - Entry B: "..." -> - **Merged**: "..." -> -> Approve? +## [2026-01-20-093000] Two-tier hook output is sufficient +(original content preserved verbatim) +``` -### 4. Apply approved merges +## What This Skill Does NOT Do -Replace the cluster entries with the merged version. -Archive originals if requested. 
+- **Automatic consolidation**: always requires user approval +- **Cross-file consolidation**: learnings stay in LEARNINGS.md, + decisions stay in DECISIONS.md +- **Delete entries**: always archives originals as a paper trail +- **Semantic understanding via embeddings**: uses keyword matching, + which is sufficient for structured entries with consistent formatting +- **Consolidate TASKS.md or CONVENTIONS.md**: use `ctx task archive` + for tasks; conventions rarely need consolidation ## Quality Checklist -- [ ] No information lost in merges -- [ ] Contradictions resolved (latest wins) -- [ ] User approved each merge -- [ ] File is valid markdown after edits +Before reporting results: +- [ ] Presented all candidate groups before making changes +- [ ] Waited for explicit user approval per group +- [ ] Each consolidated entry preserves all unique information +- [ ] Original entries are archived, not deleted +- [ ] Ran `ctx reindex` after modifications +- [ ] Reported what changed and where archives were written diff --git a/internal/assets/integrations/copilot-cli/skills/ctx-doctor/SKILL.md b/internal/assets/integrations/copilot-cli/skills/ctx-doctor/SKILL.md index 710f38f31..48abe7220 100644 --- a/internal/assets/integrations/copilot-cli/skills/ctx-doctor/SKILL.md +++ b/internal/assets/integrations/copilot-cli/skills/ctx-doctor/SKILL.md @@ -12,17 +12,24 @@ event log analysis. - User says "doctor", "diagnose", "troubleshoot", "health check" - User asks "why didn't my hook fire?" - User says "hooks seem broken" or "context seems stale" +- User says "too many nudges" or "something seems off" +- User asks "what happened last session?" 
 ## When NOT to Use
 
-- User wants a quick status check (use `ctx-status`)
-- User wants to fix drift (use `ctx-drift`)
-- User wants to pause hooks (use `ctx-pause`)
+- User wants a quick status check (use `/ctx-status`)
+- User wants to fix drift (use `/ctx-drift`)
+- User wants to change hook messages (use `ctx hook message`)
+- User wants to pause hooks (use `/ctx-pause`)
 
 ## Diagnostic Playbook
 
+Follow this triage sequence:
+
 ### Phase 1: Structural Baseline
 
+Run `ctx doctor --json` to get the full structural health report.
+
 ```bash
 ctx doctor --json
 ```
@@ -31,28 +38,34 @@ Parse the JSON output. Note any warnings or errors.
 
 ### Phase 2: Event Log Analysis (if available)
 
+If the doctor report shows event logging is enabled, query recent events:
+
 ```bash
-ctx system events --json --last 100
+ctx hook event --json --last 100
 ```
 
-For specific hooks:
+If the user is asking about a specific hook:
+
 ```bash
-ctx system events --hook <hook-name> --json --last 20
+ctx hook event --hook <hook-name> --json --last 20
 ```
 
-If event logging is not enabled, note: "Enable `event_log: true`
-in `.ctxrc` for hook-level diagnostics."
+If event logging is not enabled, note: "Enable `event_log: true` in
+`.ctxrc` for hook-level diagnostics."
### Phase 3: Targeted Investigation -Based on findings, check: -- Hook config for hook registration -- Custom messages: `ctx system message list` -- RC config: read `.ctxrc` -- Reminders: `ctx remind list` +Based on findings, check additional sources: + +- **Hook config**: read `.claude/settings.local.json` to verify hook registration +- **Custom messages**: run `ctx hook message list` to check for silenced hooks +- **RC config**: read `.ctxrc` to check configuration +- **Reminders**: run `ctx remind list` for pending reminders ### Phase 4: Present Findings +Structure your report as: + ``` ## Doctor Report @@ -60,7 +73,8 @@ Based on findings, check: - Summarize ctx doctor results ### Event analysis (if available) -- Patterns, gaps, or anomalies +- Patterns, gaps, or anomalies in event data +- Specific hook behavior observations ### Suggested actions - [ ] Actionable items based on findings @@ -69,10 +83,21 @@ Based on findings, check: ### Phase 5: Suggest, Don't Fix Present actionable next steps but do NOT auto-fix anything. +The user decides what to act on. + +## Available Data Sources + +| Source | Command | What it reveals | +|----------------------|------------------------------------------|-----------------------| +| Structural health | `ctx doctor --json` | All mechanical checks | +| Event log | `ctx hook event --json --last 100` | Recent hook activity | +| Event log (filtered) | `ctx hook event --hook --json` | Specific hook | +| Reminders | `ctx remind list` | Pending reminders | +| Hook messages | `ctx hook message list` | Custom vs default | +| RC config | Read `.ctxrc` | Configuration | -## Quality Checklist +## Graceful Degradation -- [ ] Ran `ctx doctor` for structural checks -- [ ] Checked event log if available -- [ ] Presented findings in structured format -- [ ] Suggested actions without auto-applying +If event logging is not enabled, the skill still works with reduced +capability. 
Run `ctx doctor` for structural checks and note that +event-level diagnostics require `event_log: true` in `.ctxrc`. diff --git a/internal/assets/integrations/copilot-cli/skills/ctx-drift/SKILL.md b/internal/assets/integrations/copilot-cli/skills/ctx-drift/SKILL.md index c678e8b3d..ed16b031d 100644 --- a/internal/assets/integrations/copilot-cli/skills/ctx-drift/SKILL.md +++ b/internal/assets/integrations/copilot-cli/skills/ctx-drift/SKILL.md @@ -7,53 +7,245 @@ tools: [bash, read, write, edit, glob, grep] Detect context drift at two layers: **structural** (stale paths, missing files, constitution violations) via `ctx drift`, and **semantic** (outdated conventions, superseded decisions, -irrelevant learnings) via agent analysis. +irrelevant learnings) via agent analysis. The semantic layer is +where the real value is: the CLI cannot do it. ## When to Use - At session start to verify context health before working - After refactors, renames, or major structural changes -- When the user asks "is our context clean?" or "check for drift" -- Before a release or milestone +- When the user asks "is our context clean?", "anything + stale?", or "check for drift" +- Proactively when you notice a path in ARCHITECTURE.md or + CONVENTIONS.md that does not match the actual file tree +- Before a release or milestone to ensure context is accurate ## When NOT to Use -- When you just ran status and everything looked fine -- Repeatedly in the same session without changes -- Mid-flow when the user is focused on a task +- When you just ran `/ctx-status` and everything looked fine + (status already shows drift warnings) +- Repeatedly in the same session without changes in between +- When the user is mid-flow on a task; do not interrupt with + unsolicited maintenance + +## Usage Examples + +```text +/ctx-drift +/ctx-drift (after the refactor) +``` ## Execution +Drift detection has two layers: **structural** (programmatic) and +**semantic** (agent-driven). Always do both. 
+ ### Layer 1: Structural Checks +Run the CLI tool for fast, programmatic checks: + ```bash ctx drift ``` -Catches dead paths, missing files, staleness indicators. +This catches dead paths, missing files, staleness indicators, +and constitution violations. These are necessary but insufficient: +they only detect structural problems. ### Layer 2: Semantic Analysis -After structural check, read context files and compare to the -codebase. Check for: - -- **Outdated conventions**: patterns the code no longer follows -- **Superseded decisions**: entries overridden by later work -- **Stale architecture**: module descriptions that have changed -- **Irrelevant learnings**: entries about fixed bugs -- **Contradictions**: context files contradicting each other +After the structural check, read the context files yourself and +compare them to what you know about the codebase. This is where +you add real value: the CLI tool cannot do this. + +Check for: + +- **Outdated conventions**: Does CONVENTIONS.md describe patterns + the code no longer follows? Read a few source files in the + relevant area to verify. +- **Superseded decisions**: Does DECISIONS.md contain entries that + were implicitly overridden by later work? Look for decisions + whose rationale no longer applies. +- **Stale architecture descriptions**: Does ARCHITECTURE.md + describe module purposes that have changed? A path can still + exist while its description is wrong. +- **Irrelevant learnings**: Does LEARNINGS.md contain entries + about bugs that were since fixed or patterns that no longer + apply? +- **Contradictions**: Do any context files contradict each other + or contradict the actual code? ### Reporting -1. Summarize findings by severity -2. Explain each finding: what file, why it matters -3. Distinguish structural from semantic -4. Offer to auto-fix structural: `ctx drift --fix` -5. Propose specific edits for semantic issues +After both layers, do **not** dump raw output. Instead: + +1. 
**Summarize findings** by severity (structural warnings, + semantic issues) in plain language +2. **Explain each finding**: what file, what line, why it + matters +3. **Distinguish structural from semantic**: structural issues + can be auto-fixed; semantic issues need the user's judgment +4. **Offer to auto-fix** structural issues: + "I can run `ctx drift --fix` to clean up the dead path + references. Want me to?" +5. **Propose specific edits** for semantic issues: + "CONVENTIONS.md still says 'use fmt.Printf for output' but + we switched to cmd.Printf three weeks ago. Want me to + update it?" +6. **Suggest follow-up commands** when appropriate: + - Many stale paths after a refactor → suggest `ctx sync` + - Heavy task clutter → suggest `ctx compact --archive` + - Old files untouched for weeks → suggest reviewing content + +## Interpreting Results + +| Finding | What It Means | Suggested Action | +|-------------------------------|--------------------------------------------------|------------------------------------------------------| +| Path does not exist | Context references a deleted file/dir | Remove reference or update path | +| Directory is empty | Referenced dir exists but has no files | Remove reference or populate directory | +| Many completed tasks | TASKS.md is cluttered | Run `ctx compact --archive` | +| File not modified in 30+ days | Content may be outdated | Review and update or confirm current | +| Constitution violation | A hard rule may be broken | Fix immediately | +| Missing packages | An `internal/` package is not in ARCHITECTURE.md | Add it with `/ctx-architecture` or document manually | +| Required file missing | A core context file does not exist | Create it with `ctx init` or manually | + +## Auto-Fix + +When the user agrees to auto-fix: + +```bash +ctx drift --fix +``` + +After fixing, run `ctx drift` again to confirm remaining +issues need manual attention. Report what was fixed and what +still needs the user's judgment. 
+ +## Skill Template Drift + +After running `ctx drift`, check whether the project's +installed skills (`.claude/skills/`) match the canonical +templates shipped with `ctx`. + +### Procedure + +1. Create a temp directory and run `ctx init --force` inside + it to get the latest templates: + + ```bash + CTX_TPL_DIR=$(mktemp -d) + cd "$CTX_TPL_DIR" && ctx init --force 2>/dev/null + ``` + +2. Compare each skill in the project against the template: + + ```bash + diff -ru "$CTX_TPL_DIR/.claude/skills/" .claude/skills/ 2>/dev/null + ``` + +3. Clean up the temp directory: + + ```bash + rm -rf "$CTX_TPL_DIR" + ``` + +### Interpreting Skill Drift + +| Finding | Action | +|--------------------------------------|---------------------------------------------------| +| Skill missing from project | Offer to install: copy from template | +| Skill differs from template | Show the diff; offer to update to latest template | +| Project has extra skills (no match) | These are custom: leave them alone | +| No differences | Skills are up to date; report clean | + +When reporting skill drift, distinguish between: + +- **ctx-managed skills** (present in the template): these + should generally match; differences mean the user's copy + is outdated or was customized intentionally +- **Custom skills** (only in the project): these are user + additions and should not be flagged as drift + +If a skill was intentionally customized, note it and move on. +Offer to update only ctx-managed skills, and always show the +diff before overwriting. + +## Permission Drift + +After checking skills, verify that `.claude/settings.local.json` +has the expected ctx permissions. This file is gitignored, so it +drifts independently from the codebase. + +### Procedure + +1. Read `.claude/settings.local.json` and extract the allow list. + +2. Check for **missing ctx defaults**. Every entry in + `DefaultAllowPermissions()` (defined in + `internal/assets/permissions/allow.txt`) should be present. 
The current + expected set is: + + - `Bash(ctx:*)`: covers all ctx subcommands + - `Skill(ctx-*)`: one entry per ctx-shipped skill + + To get the authoritative list: + + ```bash + ctx init --force 2>/dev/null # in a temp dir + ``` + + Then compare permissions from the generated + `settings.local.json` against the project's copy. + +3. Check for **stale skill permissions**. If a `Skill(ctx-*)` + entry references a skill that no longer exists in + `.claude/skills/`, flag it. + +4. Check for **missing skill permissions**. If a `ctx-*` skill + exists in `.claude/skills/` but has no corresponding + `Skill(ctx-*)` in the allow list, flag it. + +### Interpreting Permission Drift + +| Finding | Action | +|----------------------------------|---------------------------------------------------------------------| +| Missing `Bash(ctx:*)` | Suggest adding: required for ctx to work | +| Missing `Skill(ctx-*)` entry | Suggest adding: skill will prompt every time | +| Stale `Skill(ctx-*)` entry | Suggest removing: dead reference | +| Granular `Bash(ctx :*)` | Suggest consolidating to `Bash(ctx:*)` | +| One-off / session debris entries | Note as hygiene issue (see `hack/runbooks/sanitize-permissions.md`) | + +### Important + +Do **not** edit `settings.local.json` directly. Report findings +and let the user make changes. This file controls agent +permissions: self-modification is a security concern. Refer +users to `hack/runbooks/sanitize-permissions.md` for the manual cleanup +procedure. + +## Proactive Use + +Run drift detection without being asked when: + +- You load context at session start and notice a path + reference that does not match the file tree +- The user just completed a refactor that renamed or moved + files +- TASKS.md has obviously heavy clutter (20+ completed items + visible when you read it) + +When running proactively, keep the report brief: + +> I ran a quick drift check after the refactor. Two stale +> path references in ARCHITECTURE.md. 
Want me to clean +> them up? ## Quality Checklist -- [ ] Summarized findings (did not dump raw output) +After running drift detection, verify: +- [ ] Summarized findings in plain language (did not just + paste raw CLI output) - [ ] Explained why each finding matters -- [ ] Offered auto-fix before running it +- [ ] Offered auto-fix for fixable issues before running it +- [ ] Suggested appropriate follow-up commands - [ ] Did not run `--fix` without user confirmation diff --git a/internal/assets/integrations/copilot-cli/skills/ctx-implement/SKILL.md b/internal/assets/integrations/copilot-cli/skills/ctx-implement/SKILL.md index 6e13828bb..79609f6f9 100644 --- a/internal/assets/integrations/copilot-cli/skills/ctx-implement/SKILL.md +++ b/internal/assets/integrations/copilot-cli/skills/ctx-implement/SKILL.md @@ -16,16 +16,24 @@ steps. execution - When the user wants checkpointed progress with verification at each step -- After `ctx-brainstorm` or plan mode produces an approved plan +- After `/ctx-brainstorm` or plan mode produces an approved plan ## When NOT to Use - For single-step tasks: just do them directly -- When the plan is vague or incomplete: use `ctx-brainstorm` +- When the plan is vague or incomplete: use `/ctx-brainstorm` first to refine it - When the user wants to explore or discuss, not execute - When changes are trivial (typo fix, config tweak) +## Usage Examples + +```text +/ctx-implement +/ctx-implement path/to/plan.md +/ctx-implement (the plan from our discussion above) +``` + ## Process ### 1. Load the plan @@ -60,17 +68,20 @@ Present the step list to the user for confirmation: For each step: 1. **Announce** what you're doing (one line) -2. **Think through** the change before writing code +2. **Think through** the change before writing code: what does + it touch, what could break, what's the simplest correct path? 3. **Implement** the change -4. 
**Verify** with the appropriate check: - - Go code changed → `go build ./cmd/ctx/...` - - Tests affected → `go test ./...` +3. **Verify** with the appropriate check: + - Go code changed → `CGO_ENABLED=0 go build -o /dev/null ./cmd/ctx` + - Tests affected → `CGO_ENABLED=0 go test ./...` - Config/template changed → build to verify embeds - Docs only → no verification needed -5. **Report** step result: pass or fail -6. **If failed**: stop, diagnose, fix, re-verify before +4. **Report** step result: pass or fail +5. **If failed**: stop, diagnose, fix, re-verify before moving to the next step +Verify after every individual step before proceeding to the next. + ### 4. Checkpoint progress After every 3-5 steps (or after a significant milestone): @@ -81,49 +92,86 @@ After every 3-5 steps (or after a significant milestone): ### 5. Wrap up After all steps complete: -- Run a final full verification +- Run a final full verification (`make check` or + `CGO_ENABLED=0 go build && go test ./...`) - Summarize what was implemented - Note any deviations from the original plan - Suggest context to persist (decisions, learnings, tasks) ## Step Verification Map -| Change type | Verification command | -|--------------------|--------------------------------| -| Go source code | `go build ./cmd/ctx/...` | -| Test files | `go test ./...` | -| Templates/embeds | `go build ./cmd/ctx/...` | -| Makefile | Run the changed target | -| Skill files | Build to verify embed | -| Docs/markdown only | None required | -| Shell scripts | `bash -n script.sh` | +| Change type | Verification command | +|--------------------|---------------------------------------------------| +| Go source code | `CGO_ENABLED=0 go build -o /dev/null ./cmd/ctx` | +| Test files | `CGO_ENABLED=0 go test ./...` | +| Templates/embeds | `CGO_ENABLED=0 go build -o /dev/null ./cmd/ctx` | +| Makefile | Run the new/changed target | +| Skill files | Build (to verify embed) + check live copy matches | +| Docs/markdown only | None 
required | +| Shell scripts | `bash -n script.sh` (syntax check) | ## Handling Failures When a step fails verification: -1. Read the error output carefully -2. Reason through the failure before attempting a fix -3. Fix the issue in the current step -4. Re-verify the fix -5. Only then move to the next step +1. **Don't panic**: read the error output carefully +2. **Reason through** the failure step-by-step before attempting + a fix; understand the cause, not just the symptom +3. **Fix** the issue in the current step +4. **Re-verify** the fix +5. **Only then** move to the next step +6. If the fix changes the plan, note the deviation If a step fails repeatedly (3+ attempts), stop and ask the -user for guidance. +user for guidance rather than thrashing. + +## Output Format + +Progress updates should be concise: + +``` +Step 1/6: Create ctx-next skill directory .......... OK +Step 2/6: Write SKILL.md template .................. OK +Step 3/6: Copy to live skill directory ............. OK +Step 4/6: Build to verify template embeds .......... OK +Step 5/6: Run tests ................................ OK +Step 6/6: Mark task in TASKS.md .................... OK + +All 6 steps complete. Build and tests pass. +``` + +## Examples + +### Good Implementation + +> **Step 3/8**: Add `check` target to Makefile +> Added `check: build audit` after the `audit` target. +> Verify: `make check` ... build OK, audit OK. +> **Result**: PASS + +### Bad Implementation + +> "I'll implement the whole plan now" +> *[makes all changes at once without verification]* +> "Done! Everything should work." + +(No step-by-step, no verification, no checkpoints: this +defeats the purpose of the skill.) 
## Quality Checklist -Before starting: +Before starting, verify: - [ ] Plan exists and is clear enough to execute - [ ] Steps are broken down and presented to the user - [ ] User confirmed readiness to proceed -During execution: +During execution, verify: - [ ] Each step is verified before moving on - [ ] Failures are fixed in place, not deferred - [ ] Checkpoints happen every 3-5 steps -After completion: +After completion, verify: - [ ] Final full verification passes - [ ] Deviations from plan are noted +- [ ] Summary of what was implemented is presented - [ ] Context persistence is suggested if warranted diff --git a/internal/assets/integrations/copilot-cli/skills/ctx-journal-enrich-all/SKILL.md b/internal/assets/integrations/copilot-cli/skills/ctx-journal-enrich-all/SKILL.md index cbf9396de..8ef2d87c0 100644 --- a/internal/assets/integrations/copilot-cli/skills/ctx-journal-enrich-all/SKILL.md +++ b/internal/assets/integrations/copilot-cli/skills/ctx-journal-enrich-all/SKILL.md @@ -1,59 +1,234 @@ --- name: ctx-journal-enrich-all -description: "Batch journal pipeline: export unexported sessions then enrich all unenriched entries." +description: "Full journal pipeline: import unimported sessions, then batch-enrich all unenriched entries. Use when the user says 'process the journal' or to catch up on the backlog." tools: [bash, read, write, edit] --- -Full journal pipeline: import sessions and batch-enrich entries. +Full journal pipeline: import if needed, then batch-enrich. 
## When to Use -- Backlog of unenriched journal entries -- After many sessions without journal maintenance -- When running periodic journal housekeeping +- When the user says "enrich everything" or "process the journal" +- When there is a backlog of unenriched or unimported sessions +- Periodically to catch up on recent sessions +- After the `check-journal` hook reports unimported or unenriched entries ## When NOT to Use -- No journal entries exist -- All entries are already enriched -- Single entry (use `ctx-journal-enrich` instead) +- For a single specific session (use `/ctx-journal-enrich` instead) ## Process -### 1. Import unexported sessions +### Step 0: Import If Needed + +Before enriching, check whether there are unimported sessions. If +the journal directory has no `.md` files at all, or if there are +`.jsonl` session files newer than the newest journal entry, import +them first. ```bash -ctx recall export --all +CTX_DIR=$(ctx system bootstrap -q) +JOURNAL_DIR="$CTX_DIR/journal" + +# Check if any .md files exist +md_count=$(ls "$JOURNAL_DIR"/*.md 2>/dev/null | wc -l) + +if [ "$md_count" -eq 0 ]; then + echo "No journal entries found: importing all sessions." + ctx journal import --all --yes +else + # Compare newest .md mtime against .jsonl files + newest_md=$(stat -c %Y $(ls -t "$JOURNAL_DIR"/*.md | head -1)) + unimported=$(find ~/.claude/projects -name "*.jsonl" -newermt @${newest_md} 2>/dev/null | wc -l) + if [ "$unimported" -gt 0 ]; then + echo "$unimported unimported session(s) found: importing first." + ctx journal import --all --yes + fi +fi ``` -### 2. List unenriched entries +Report how many sessions were imported (or "none needed") before +moving to enrichment. 
+ +### Step 1: Find Unenriched Entries + +List all journal entries that lack enrichment using the state file: ```bash -ctx journal list --unenriched +# List .md files in journal dir and check state +CTX_DIR=$(ctx system bootstrap -q) +for f in "$CTX_DIR/journal/"*.md; do + name=$(basename "$f") + ctx system mark-journal --check "$name" enriched || echo "$f" +done ``` -### 3. Batch enrich - -For each unenriched entry: -1. Read the entry content -2. Generate appropriate frontmatter -3. Write the enriched version -4. Report progress +Or read `.state.json` in the journal directory directly and list +entries without an `enriched` date set. + +### Fallback: Detect Enrichment from Frontmatter + +If `mark-journal --check` is unavailable (no state file, command +fails), fall back to frontmatter inspection. An entry is considered +**already enriched** if its YAML frontmatter contains **both** `type` +and `outcome` fields: these are set exclusively by enrichment, never +by import. + +Do NOT use `title` or `date` to detect enrichment: those are always +present from import. The enrichment-only fields are: + +| Field | Set by | +|----------------|----------------| +| `title` | Import | +| `date` | Import | +| `time` | Import | +| `model` | Import | +| `tokens_in` | Import | +| `tokens_out` | Import | +| `session_id` | Import | +| `project` | Import | +| `type` | **Enrichment** | +| `outcome` | **Enrichment** | +| `topics` | **Enrichment** | +| `technologies` | **Enrichment** | +| `summary` | **Enrichment** | + +If all entries already have enrichment recorded, report that and stop. + +### Step 2: Filter Out Noise + +Skip entries that are not worth enriching: + +- **Locked entries**: a file is locked if `.state.json` has a + `locked` date OR the frontmatter contains `locked: true`. Never + modify locked files: neither metadata nor body. Check via: + `ctx system mark-journal --check locked` + or look for `locked: true` in the YAML frontmatter. 
+- **Suggestion sessions**: files under ~20 lines or containing + only auto-complete fragments. Check with: + ```bash + wc -l + ``` +- **Multi-part continuations**: files ending in `-p2.md`, `-p3.md` + etc. Enrich only the first part; continuation parts inherit + the frontmatter topic. + +Report how many entries will be processed and how many were +filtered out. + +### Step 3: Process Each Entry + +For each entry, read the conversation and extract: + +1. **Title**: a short descriptive title for the session +2. **Type**: feature, bugfix, refactor, exploration, debugging, + or documentation +3. **Outcome**: completed, partial, abandoned, or blocked +4. **Topics**: 2-5 topic tags +5. **Technologies**: languages, frameworks, tools used +6. **Summary**: 2-3 sentences describing what was accomplished + +Apply YAML frontmatter to each file: + +```yaml +--- +title: "Session title" +date: 2026-01-27 +type: feature +outcome: completed +topics: + - authentication + - caching +technologies: + - go + - redis +--- +``` -For large backlogs (20+ entries), use heuristic enrichment: -derive metadata from filename patterns and entry headings -without reading full content. +### Step 4: Mark Enriched -### 4. Report +After writing frontmatter to each file, update the state file: +```bash +ctx system mark-journal enriched ``` -Enriched: 15/15 entries -Skipped: 3 (already enriched) -``` + +### Step 5: Report + +After processing, report: + +- How many sessions were imported (or "none needed") +- How many entries were enriched +- How many were skipped (already enriched, too short, etc.) +- Remind the user to rebuild: `ctx journal site --build` + +## Confirmation Mode + +**Interactive** (default when user is present): show a summary +of proposed enrichments before applying. Group by type/outcome +so the user can scan quickly rather than reviewing one by one. + +**Unattended** (when running in a loop or explicitly told +"just do it"): apply enrichments directly and report results. 
+ +## Large Backlogs (20+ entries) + +For large backlogs, use the heuristic enrichment script bundled +in `references/enrich-heuristic.py`. This script infers type, +outcome, topics, and technologies from the title and filename +patterns, then inserts frontmatter and marks state automatically. + +### How to use + +1. Build a file list of eligible entries (non-multipart, 20+ lines, + missing `type:` and `outcome:` fields): + ```bash + CTX_DIR=$(ctx system bootstrap -q) + for f in "$CTX_DIR"/journal/*.md; do + [ -f "$f" ] || continue + has_type=$(head -30 "$f" | grep -c '^type:' || true) + has_outcome=$(head -30 "$f" | grep -c '^outcome:' || true) + if [ "$has_type" -eq 0 ] || [ "$has_outcome" -eq 0 ]; then + name=$(basename "$f") + case "$name" in *-p[0-9].md|*-p[0-9][0-9].md) continue ;; esac + lines=$(wc -l < "$f") + [ "$lines" -ge 20 ] && echo "$f" + fi + done > /tmp/enrich-list.txt + ``` + +2. Run the heuristic enrichment script. The script path is relative + to this skill's directory: copy it to /tmp or reference it via + the full embedded path: + ```bash + python3 references/enrich-heuristic.py /tmp/enrich-list.txt + ``` + +3. The script handles everything: reads files, inserts frontmatter, + runs `ctx system mark-journal` for each, and reports counts. + +### When to use heuristic vs. per-file enrichment + +| Backlog size | Approach | +|--------------|---------------------------------------------------| +| 1-5 entries | Read each file, enrich manually with full context | +| 6-20 entries | Sequential processing in the main conversation | +| 20+ entries | Use `enrich-heuristic.py` for bulk processing | + +The heuristic script produces good-enough enrichment from titles +and filenames. For higher quality, follow up with manual review +of entries where the type or topics look wrong. + +Subagent parallelization is an alternative for 20+ entries, but +requires that subagents have Edit and Bash permissions granted. 
+If permissions are restricted, the heuristic script is faster +and more reliable. ## Quality Checklist -- [ ] All unexported sessions imported first -- [ ] Each enriched entry has valid frontmatter -- [ ] Progress reported during batch -- [ ] No entries corrupted or lost +- [ ] Unimported sessions detected and imported before enrichment +- [ ] Suggestion sessions and multi-part continuations filtered +- [ ] Each enriched entry has all required frontmatter fields +- [ ] Summary is specific to the session, not generic +- [ ] User was shown a summary before applying (unless unattended) +- [ ] State file updated for each enriched entry diff --git a/internal/assets/integrations/copilot-cli/skills/ctx-journal-enrich/SKILL.md b/internal/assets/integrations/copilot-cli/skills/ctx-journal-enrich/SKILL.md index d9a27f0f1..c6897679d 100644 --- a/internal/assets/integrations/copilot-cli/skills/ctx-journal-enrich/SKILL.md +++ b/internal/assets/integrations/copilot-cli/skills/ctx-journal-enrich/SKILL.md @@ -1,60 +1,162 @@ --- name: ctx-journal-enrich -description: "Enrich a journal entry with YAML frontmatter metadata. Use to add type, outcome, topics, and technologies to session records." +description: "Enrich journal entry with metadata. Use when journal entries lack frontmatter, tags, or summary for future reference." tools: [bash, read, write, edit] --- -Enrich individual journal entries with structured metadata. +Enrich a session journal entry with structured metadata. + +## Before Enriching + +1. **Check if locked**: a file is locked if `.state.json` has a + `locked` date OR the frontmatter contains `locked: true`. Locked + files must not be modified: skip them silently. Check via: + `ctx system mark-journal --check locked` + or look for `locked: true` in the YAML frontmatter. +2. 
**Check if already enriched**: check the state file via + `ctx system mark-journal --check enriched` or read + `.state.json` in the journal directory; confirm before overwriting ## When to Use -- After exporting a session to the journal -- When journal entries lack metadata for search/filter -- When `ctx journal` shows unenriched entries +- When journal entries lack metadata for future reference +- After importing sessions that need categorization +- When building a searchable session archive ## When NOT to Use -- Entry is already fully enriched -- No journal entries exist +- On entries that already have complete frontmatter (unless updating) +- Before normalizing entries with broken formatting +- On suggestion sessions (short auto-complete prompts; not worth enriching) -## Process +## Input -### 1. Identify the entry +The user specifies a journal entry by partial match: +- `twinkly-stirring-kettle` (slug) +- `twinkly` (partial slug) +- `2026-01-24` (date) +- `76fe2ab9` (short ID) + +Find matching files in the journal directory: +```bash +ls "$(ctx system bootstrap -q)/journal/"*.md | grep -i "" +``` -If not specified, find unenriched entries: +If multiple matches, show them and ask which one. +If no argument given, show recent unenriched entries by reading +`.state.json` in the journal directory and listing entries without +an `enriched` date: ```bash -ctx journal list --unenriched +# List unenriched entries using state file +CTX_DIR=$(ctx system bootstrap -q) +for f in "$CTX_DIR/journal/"*.md; do + name=$(basename "$f") + ctx system mark-journal --check "$name" enriched || echo "$f" +done | head -10 ``` -### 2. Read the entry +## Usage Examples -Read the full session content to understand what happened. +```text +/ctx-journal-enrich twinkly-stirring-kettle +/ctx-journal-enrich twinkly +/ctx-journal-enrich 2026-01-24 +/ctx-journal-enrich 76fe2ab9 +``` + +## Enrichment Tasks -### 3. 
Generate frontmatter +Read the journal entry and extract: -Add or update YAML frontmatter with: +### 1. Frontmatter (YAML at top of file) ```yaml --- -type: feature|bugfix|refactor|research|planning|review -outcome: completed|partial|blocked|abandoned -topics: [topic1, topic2] -technologies: [go, typescript, ...] -summary: "One-line summary of the session" +title: "Session title" +date: 2026-01-27 +model: claude-opus-4-6 # auto-populated at import +tokens_in: 234000 # auto-populated at import +tokens_out: 89000 # auto-populated at import +type: feature +outcome: completed +topics: + - authentication + - caching +technologies: + - go + - postgresql +libraries: + - cobra + - fatih/color +key_files: + - internal/auth/token.go + - internal/db/cache.go --- ``` -### 4. Write enriched entry +**Auto-populated fields** (set during `ctx journal import`, do NOT overwrite): +`date`, `time`, `project`, `session_id`, `model`, `tokens_in`, `tokens_out`, `branch` + +**Type values:** + +| Type | When to use | +|-----------------|---------------------------------------| +| `feature` | Building new functionality | +| `bugfix` | Fixing broken behavior | +| `refactor` | Restructuring without behavior change | +| `exploration` | Research, learning, experimentation | +| `debugging` | Investigating issues | +| `documentation` | Writing docs, comments, README | + +**Outcome values:** + +| Outcome | Meaning | +|-------------|------------------------------------| +| `completed` | Goal achieved | +| `partial` | Some progress, work continues | +| `abandoned` | Stopped pursuing this approach | +| `blocked` | Waiting on external dependency | + +### 2. Summary + +If `## Summary` says "[Add your summary...]", replace with 2-3 sentences +describing what was accomplished. + +### 3. 
Extracted Items + +Scan the conversation and extract: + +**Decisions made**: link to DECISIONS.md if persisted: +```markdown +## Decisions +- Used Redis for caching ([D12](../DECISIONS.md#d12)) +- Chose JWT over sessions (not yet persisted) +``` + +**Learnings discovered**: link to LEARNINGS.md if persisted: +```markdown +## Learnings +- Token refresh requires cache invalidation ([L8](../LEARNINGS.md#l8)) +- Go's defer runs LIFO (new insight) +``` -Update the file with the new frontmatter while preserving -the body content. +**Tasks completed/created**: +```markdown +## Tasks +- [x] Implement caching layer +- [ ] Add cache metrics (created this session) +``` -## Quality Checklist +## Process -- [ ] Frontmatter is valid YAML -- [ ] Type matches the actual work done -- [ ] Outcome is accurate -- [ ] Topics are specific, not generic -- [ ] Summary is one clear sentence -- [ ] Body content is preserved unchanged +1. Find and read the journal file +2. Analyze the conversation +3. Propose enrichment (type, topics, outcome) +4. Ask user for confirmation/adjustments +5. Show diff and write if approved +6. **Mark enriched** in the state file: + ```bash + ctx system mark-journal enriched + ``` +7. Remind user to rebuild: `ctx journal site --build` or `make journal` diff --git a/internal/assets/integrations/copilot-cli/skills/ctx-loop/SKILL.md b/internal/assets/integrations/copilot-cli/skills/ctx-loop/SKILL.md index 0d0edde19..3814cabb7 100644 --- a/internal/assets/integrations/copilot-cli/skills/ctx-loop/SKILL.md +++ b/internal/assets/integrations/copilot-cli/skills/ctx-loop/SKILL.md @@ -1,62 +1,108 @@ --- name: ctx-loop -description: "Generate autonomous iteration loop scripts for headless AI tool runs with configurable completion signals." +description: "Generate a shell script for running AI tools in autonomous iteration loops. Use when setting up unattended iteration, headless agent runs, or CI-driven AI workflows." 
tools: [bash, read, write] --- -Generate shell scripts for autonomous AI iteration loops. +Generate a ready-to-use autonomous loop shell script. + +## Before Generating + +1. **Check for existing loop script**: look for `loop.sh` in the + project root; confirm before overwriting +2. **Verify PROMPT.md exists**: the generated script defaults to + reading `PROMPT.md`; if missing, ask the user what prompt file + to use +3. **Verify the context directory exists**: the loop pattern depends + on persistent context; run `ctx init` first if needed ## When to Use -- Setting up CI-driven AI workflows -- When a task needs autonomous iteration with checks -- For batch processing with verification gates +- When setting up a project for autonomous iteration +- When the user wants to run unattended AI development +- When switching AI tools (e.g., Claude to Aider) and need a + new loop script +- When customizing loop parameters (max iterations, completion + signal, prompt file) ## When NOT to Use -- Interactive work (just do it in the session) -- Simple single-run tasks -- When safety checks aren't defined +- For interactive pair-programming sessions (just use the AI + tool directly) +- When the user already has a working loop script and has not + asked for changes +- When the project lacks a context directory and `PROMPT.md` (set + those up first with `ctx init --ralph`) + +## Usage Examples + +```text +/ctx-loop +/ctx-loop --tool aider +/ctx-loop --prompt TASKS.md --max-iterations 10 +/ctx-loop --completion SYSTEM_BLOCKED --output my-loop.sh +``` + +## Flags + +| Flag | Short | Default | Purpose | +|--------------------|-------|--------------------|---------------------------------| +| `--prompt` | `-p` | `PROMPT.md` | Prompt file the loop reads | +| `--tool` | `-t` | `claude` | AI tool: claude, aider, generic | +| `--max-iterations` | `-n` | `0` (unlimited) | Stop after N iterations | +| `--completion` | `-c` | `SYSTEM_CONVERGED` | Signal that ends the loop | +| `--output` | 
`-o` | `loop.sh` | Output script filename | -## Process +## Supported Tools -### 1. Define the loop +| Tool | Command generated | +|-----------|--------------------------------------| +| `claude` | `claude --print "$(cat <prompt-file>)"` | +| `aider` | `aider --message-file <prompt-file>` | +| `generic` | Template stub for custom AI CLI | -Gather: -- **Command**: what to run each iteration -- **Completion signal**: how to detect "done" (exit code, output pattern, file exists) -- **Max iterations**: safety limit (default: 10) -- **Checkpoint command**: what to run between iterations +## Completion Signals + +The loop watches AI output for these signals: + +| Signal | Meaning | +|----------------------|--------------------------------------| +| `SYSTEM_CONVERGED` | All tasks complete; loop exits | +| `SYSTEM_BLOCKED` | Needs human input; loop exits | +| `BOOTSTRAP_COMPLETE` | Initial scaffolding done; loop exits | + +## Execution + +```bash +ctx loop $ARGUMENTS +``` -### 2. Generate the script +The command writes a shell script (default `loop.sh`) and makes +it executable. Report the generated path and how to run it: ```bash -#!/bin/bash -set -euo pipefail -MAX_ITER=${1:-10} -for i in $(seq 1 "$MAX_ITER"); do - echo "=== Iteration $i/$MAX_ITER ===" - # Run the task - {command} - # Check completion - if {completion_check}; then - echo "✅ Complete after $i iterations" - exit 0 - fi - # Checkpoint - {checkpoint} -done -echo "❌ Max iterations reached" -exit 1 +chmod +x loop.sh # already done by ctx loop +./loop.sh ``` -### 3. Write and verify -Write to the requested location. Verify with `bash -n`. 
+- The generated script includes `set -e` and a 1-second sleep + between iterations to prevent runaway loops +- `--max-iterations` is strongly recommended for first runs; + suggest a reasonable default (e.g., 10) if the user omits it +- The script captures AI tool errors with `|| true` so one + failed iteration does not kill the loop +- Autonomous agents benefit from explicit reasoning prompts in + PROMPT.md: adding "think step-by-step before each change" + to the iteration prompt significantly improves accuracy and + reduces cascading mistakes in unattended runs ## Quality Checklist -- [ ] Max iterations has a sane default -- [ ] Completion signal is well-defined -- [ ] Script has `set -euo pipefail` -- [ ] Script passes `bash -n` syntax check +Before reporting success, verify: +- [ ] Generated script exists at the output path +- [ ] Script is executable +- [ ] Prompt file referenced in the script actually exists +- [ ] If `--max-iterations 0`, user is aware it runs until + a completion signal (warn them) diff --git a/internal/assets/integrations/copilot-cli/skills/ctx-next/SKILL.md b/internal/assets/integrations/copilot-cli/skills/ctx-next/SKILL.md index e17e48034..8b0e4ec4c 100644 --- a/internal/assets/integrations/copilot-cli/skills/ctx-next/SKILL.md +++ b/internal/assets/integrations/copilot-cli/skills/ctx-next/SKILL.md @@ -20,6 +20,13 @@ Analyze current tasks and recent session activity, then suggest - When actively mid-task (don't interrupt flow with suggestions) - When no context directory exists (nothing to analyze) +## Usage Examples + +```text +/ctx-next +/ctx-next (just finished the auth refactor) +``` + ## Process Do all of this **silently**: do not narrate the steps: @@ -29,7 +36,7 @@ Do all of this **silently**: do not narrate the steps: 2. **Check recent sessions** to understand what was just worked on and avoid suggesting already-completed work: ```bash - ctx recall list --limit 3 + ctx journal source --limit 3 ``` 3. 
**Read the most recent session file** (if any) to understand what was accomplished and what follow-up items were noted @@ -95,6 +102,41 @@ useful)* - If an in-progress task exists, it should almost always be recommendation #1 (don't abandon unfinished work) +## Examples + +### Good Output + +> ### Recommended Next +> +> **1. Fix `block-non-path-ctx` hook** `#priority:high` +> > Still open from yesterday's session. The hook is too +> > aggressive: it blocks `git -C path` commands that don't +> > invoke ctx. Quick fix, clears a blocker. +> +> **2. Add `Context.File(name)` method** `#priority:high` +> > Eliminates 10+ linear scan boilerplate instances across +> > 5 packages. High impact, low effort: good consolidation +> > target. +> +> **3. Topics system (T1.1)** `#priority:medium` +> > Journal site's most impactful remaining feature. Metadata +> > is already in place from the enrichment work. +> +> --- +> +> *Based on 24 pending tasks across 3 phases. Last session: +> doc-drift-cleanup (2026-02-11).* + +### Bad Output + +> "You have many tasks. Here are some options: +> - Do some stuff with hooks +> - Maybe work on tests +> - There's also some docs to write" + +(Too vague, no priorities, no rationale, no connection to +context.) + ## Quality Checklist Before presenting recommendations, verify: diff --git a/internal/assets/integrations/copilot-cli/skills/ctx-pad/SKILL.md b/internal/assets/integrations/copilot-cli/skills/ctx-pad/SKILL.md index 9f4e3ccab..13511e724 100644 --- a/internal/assets/integrations/copilot-cli/skills/ctx-pad/SKILL.md +++ b/internal/assets/integrations/copilot-cli/skills/ctx-pad/SKILL.md @@ -35,11 +35,15 @@ command. 
| "move entry 4 to the top" / "prioritize entry 4" | `ctx pad mv 4 1` | | "move entry 1 to the bottom" | `ctx pad mv 1 N` (where N = last position) | | "import my notes from notes.txt" | `ctx pad import notes.txt` | -| "import from stdin" / pipe into pad | `cmd | ctx pad import -` | +| "import from stdin" / pipe into pad | `cmd \| ctx pad import -` | | "export all blobs" / "extract blobs to DIR" | `ctx pad export [DIR]` | | "export blobs, overwrite existing" | `ctx pad export --force [DIR]` | | "merge entries from another pad" | `ctx pad merge FILE...` | | "merge with a different key" | `ctx pad merge --key /path/to/key FILE` | +| "show entries tagged later" / "filter by #later" | `ctx pad --tag later` | +| "show everything except #later" | `ctx pad --tag ~later` | +| "what tags do I have" / "list my tags" | `ctx pad tags` | +| "tag entry 5 as urgent" | `ctx pad edit 5 --tag urgent` | ## Execution @@ -112,6 +116,25 @@ ctx pad merge --key /path/to/other.key foreign.enc ctx pad merge --dry-run pad-a.enc pad-b.md ``` +**Filter by tag:** +```bash +ctx pad --tag later # entries with #later +ctx pad --tag ~later # entries WITHOUT #later +ctx pad --tag later --tag ci # entries with both (AND) +``` + +**List all tags:** +```bash +ctx pad tags +ctx pad tags --json +``` + +**Tag an entry:** +```bash +ctx pad edit 5 --tag urgent +ctx pad edit 5 --append "checked" --tag done # combine with other ops +``` + ## Interpreting User Intent When the user's intent is ambiguous: diff --git a/internal/assets/integrations/copilot-cli/skills/ctx-pause/SKILL.md b/internal/assets/integrations/copilot-cli/skills/ctx-pause/SKILL.md index a0c85ddf4..08ecd1ce3 100644 --- a/internal/assets/integrations/copilot-cli/skills/ctx-pause/SKILL.md +++ b/internal/assets/integrations/copilot-cli/skills/ctx-pause/SKILL.md @@ -1,47 +1,48 @@ --- name: ctx-pause -description: "Pause context nudge/reminder hooks for the session. Security hooks remain active." 
+description: "Pause context hooks for this session. Use when context nudges aren't needed for the current task." tools: [bash] --- -Temporarily pause context nudge and reminder hooks while keeping -security hooks active. +Pause all context nudge and reminder hooks for the current session. +Security hooks (dangerous command blocking) still fire. ## When to Use -- When hooks are too noisy during focused work -- When doing rapid iteration and nudges interrupt flow -- When the user says "pause hooks" or "too many reminders" +- User says "pause ctx", "pause context", "quiet mode" +- User says "stop the nudges", "too many reminders" +- Quick investigation or one-off task that doesn't need ceremonies +- User explicitly asks to reduce context overhead ## When NOT to Use -- At session start (hooks haven't fired yet) -- When the user wants to disable security hooks (not supported) +- User wants to silence a specific hook (use `ctx hook message edit` to + customize or silence individual hooks) +- User wants to permanently disable hooks (edit `.claude/settings.local.json`) +- Session involves real project work that benefits from persistence nudges -## Process +## Execution + +Run the pause command: ```bash -ctx system pause-hooks +ctx hook pause ``` -This suppresses: -- Ceremony checks (remember, wrap-up) -- Persistence nudges -- Task completion checks -- Journal reminders - -This does NOT suppress: -- Dangerous command blocking -- Context load gate -- Version checks - -## Resuming +Then confirm to the user: -Use `ctx-resume` to re-enable hooks, or they automatically -resume at next session start. +> Context hooks paused for this session. Nudges, reminders, and ceremony +> prompts are silenced. Security hooks still fire. +> +> Resume anytime with `/ctx-resume`. 
-## Quality Checklist +## Important Notes -- [ ] User confirmed they want to pause -- [ ] Security hooks remain active -- [ ] Informed user how to resume +- **Session-scoped**: only affects the current session, not other terminals +- **Hooks still fire silently**: they check the pause flag and no-op +- **Graduated reminder**: a minimal `ctx:paused` indicator appears in hook + output so the state is never invisible +- **Resume before wrap-up**: if the session evolves into real work, resume + hooks before wrapping up to capture learnings and decisions +- **Initial context load is unaffected**: the ~8k token startup injection + happens before any command runs: pause only affects subsequent hooks diff --git a/internal/assets/integrations/copilot-cli/skills/ctx-prompt-audit/SKILL.md b/internal/assets/integrations/copilot-cli/skills/ctx-prompt-audit/SKILL.md index c3547bb14..47850455e 100644 --- a/internal/assets/integrations/copilot-cli/skills/ctx-prompt-audit/SKILL.md +++ b/internal/assets/integrations/copilot-cli/skills/ctx-prompt-audit/SKILL.md @@ -1,54 +1,158 @@ --- name: ctx-prompt-audit -description: "Analyze session transcripts to identify vague prompts that caused unnecessary clarification cycles." +description: "Audit prompting patterns. Use periodically to help users improve prompt quality and reduce clarification cycles." tools: [bash, read, write] --- -Analyze session history to find prompts that led to wasted work -due to ambiguity, and suggest improvements. +Analyze recent session transcripts to identify prompts that led to +unnecessary clarification back-and-forth. + +## Before Auditing + +1. **Check for session data**: look in the journal directory for + transcripts to analyze +2. **Need at least 3 sessions**: fewer than that gives too small a + sample; tell the user to try again later +3. 
**Confirm scope**: if the user specifies sessions or a date + range, use that; otherwise default to the 5 most recent ## When to Use -- After a session with many back-and-forth clarifications -- When improving prompt discipline -- During periodic workflow reviews +- Periodically to help users improve their prompting +- When the user asks for feedback on their prompting style +- After noticing many clarification cycles in recent sessions +- After a session with unusually high back-and-forth ## When NOT to Use -- No session history exists -- Sessions were straightforward +- Immediately after a user's first session (not enough data) +- When the user is frustrated; coaching lands poorly when someone + is already annoyed +- Unsolicited; only run when the user invokes it or explicitly + asks for feedback + +## Usage Examples + +```text +/ctx-prompt-audit +/ctx-prompt-audit --sessions 10 +/ctx-prompt-audit 2026-01-24 +``` + +## Data Sources + +Session transcripts are stored in the journal: + +| Source | Format | +|-------------------------|------------------------------------| +| Journal directory | Exported session journals (richer) | + +Journal entries contain full turn-by-turn conversation and are +the best source for pattern detection. ## Process -### 1. Load recent sessions +1. **Gather transcripts**: read 3-5 recent sessions from the + journal +2. **Extract user prompts**: isolate the human turns +3. **Identify vague prompts**: flag those that caused clarifying + questions (see criteria below) +4. **Cross-reference patterns**: look for repeated habits across + sessions, not one-off mistakes +5. **Generate coaching report**: use the output format below +6. 
**Present and discuss**: share the report, ask if the user + wants to dig into any example + +## What Makes a Prompt "Vague" + +Look for prompts where the agent asked clarifying questions +instead of acting: + +- **Missing file context**: "fix the bug" without specifying + which file or error +- **Ambiguous scope**: "optimize it" without what to optimize + or success criteria +- **Undefined targets**: "update the component" when multiple + components exist +- **Missing error details**: "it's not working" without symptoms +- **Vague action words**: "make it better", "clean this up" + +## Important Nuance + +Not every short prompt is vague. Consider context: +- "fix the bug" after discussing a specific error: **not vague** +- "fix the bug" as the first message: **vague** +- "same:" after a pattern is established: **not vague** (the + user set a convention and is being efficient) +- Shorthand that references shared context is good prompting, + not lazy prompting + +## Output Format + +```markdown +## Prompt Audit Report + +**Sessions analyzed**: 5 +**User prompts reviewed**: 47 +**Vague prompts found**: 4 (8.5%) -```bash -ctx recall list --limit 5 -``` +--- + +### Example 1: Missing File Context + +**Your prompt**: "fix the bug" -### 2. Scan for patterns +**What happened**: I had to ask which file and what error. -Look for: -- Multiple clarifying questions before work began -- Misunderstood instructions leading to rework -- Vague requests like "fix it" or "make it better" -- Missing context that was discovered mid-task +**Better prompt**: "fix the authentication error in +src/auth/login.ts where JWT validation fails with 401" + +--- -### 3. 
Categorize findings +## Patterns to Watch -| Pattern | Example | Improvement | -|---------|---------|-------------| -| Vague scope | "Fix the tests" | "Fix TestFoo in internal/cli — it's failing on empty input" | -| Missing context | "Add a feature" | "Add JSON output to ctx status (see spec in specs/)" | -| Ambiguous reference | "Update that file" | "Update internal/config/mcp/tool/tool.go" | +Based on your sessions, you tend to: +1. Skip mentioning file paths (3 occurrences) +2. Use "it" without establishing what "it" refers to + (2 occurrences) -### 4. Present recommendations +## What You Do Well + +- You provide error output when debugging (4 of 5 sessions) +- You reference specific files by path in most prompts + +## Tips + +- Start prompts with the **file path** when discussing + specific code +- Include **error messages** when debugging +- Specify **success criteria** for optimization tasks +``` -Provide actionable suggestions for clearer prompts. +## Guidelines + +- **Constructive, not critical**: frame suggestions as + improvements, not corrections +- **Show actual prompts**: quote from their sessions so + examples are concrete, not hypothetical +- **Explain the consequence**: what happened because the prompt + was vague (extra round-trip, wrong file edited, etc.) 
+- **Provide rewrites**: show a concrete better alternative for + each example +- **Acknowledge strengths**: include a "What You Do Well" + section; people learn better when not purely criticized +- **Look for patterns**: one vague prompt is noise; three of the + same kind is a habit worth addressing +- **End with actionable tips**: 3-5 specific, memorable tips ## Quality Checklist -- [ ] At least 3 sessions analyzed -- [ ] Patterns categorized with examples -- [ ] Concrete improvements suggested -- [ ] No session data exposed inappropriately +Before presenting the report, verify: +- [ ] At least 3 sessions were analyzed (not a tiny sample) +- [ ] Every "vague" example includes the actual quoted prompt +- [ ] Every example has a concrete rewrite (not just "be more + specific") +- [ ] Context was considered (short != vague) +- [ ] Report includes positive observations, not just criticism +- [ ] Tips are specific to this user's patterns, not generic + advice diff --git a/internal/assets/integrations/copilot-cli/skills/ctx-recall/SKILL.md b/internal/assets/integrations/copilot-cli/skills/ctx-recall/SKILL.md index 2ec6e56b9..609bb20f9 100644 --- a/internal/assets/integrations/copilot-cli/skills/ctx-recall/SKILL.md +++ b/internal/assets/integrations/copilot-cli/skills/ctx-recall/SKILL.md @@ -4,53 +4,53 @@ description: "Browse session history. Use when referencing past discussions or f tools: [bash] --- -Browse, inspect, and export AI session history. +Browse, inspect, and import AI session history. ## When to Use - When the user asks "what did we do last time?" 
- When looking for context from previous work sessions -- When exporting sessions to the journal for enrichment +- When importing sessions to the journal for enrichment - When searching for a specific session by topic or date ## When NOT to Use - When the user just wants current context (use `ctx-status` or `ctx-agent` instead) -- For modifying session content (recall is read-only) +- For modifying session content (browsing is read-only) ## Subcommands -### `ctx recall list` +### `ctx journal source` ```bash -ctx recall list --limit 5 +ctx journal source --limit 5 ``` -### `ctx recall show` +### `ctx journal source --show` / `--latest` ```bash -ctx recall show -ctx recall show --latest +ctx journal source --show +ctx journal source --latest ``` -### `ctx recall export` +### `ctx journal import` ```bash -ctx recall export --all # Export new sessions only -ctx recall export --all --regenerate # Re-export all +ctx journal import --all # Import new sessions only +ctx journal import --all --regenerate # Re-import all ``` ## Typical Workflows **"What did we work on recently?"** ```bash -ctx recall list --limit 5 +ctx journal source --limit 5 ``` -**"Export everything to the journal"** +**"Import everything to the journal"** ```bash -ctx recall export --all +ctx journal import --all ``` Then suggest `ctx-journal-enrich-all` for enrichment. @@ -59,4 +59,4 @@ Then suggest `ctx-journal-enrich-all` for enrichment. 
- [ ] Used the right subcommand for user intent - [ ] Applied filters if user mentioned project, date, or topic -- [ ] For export, mentioned the normalize/enrich pipeline +- [ ] For import, mentioned the normalize/enrich pipeline diff --git a/internal/assets/integrations/copilot-cli/skills/ctx-reflect/SKILL.md b/internal/assets/integrations/copilot-cli/skills/ctx-reflect/SKILL.md index 5f83f3ef0..185cc3330 100644 --- a/internal/assets/integrations/copilot-cli/skills/ctx-reflect/SKILL.md +++ b/internal/assets/integrations/copilot-cli/skills/ctx-reflect/SKILL.md @@ -9,67 +9,117 @@ accomplished and identify context worth persisting. ## When to Use -- At natural breakpoints (feature complete, bug fixed, task done) +- At natural breakpoints (feature complete, bug fixed, task + done) - After unexpected behavior or a debugging detour - When shifting from one task to a different one -- When the session may end soon -- When the user explicitly asks to reflect +- When context is getting full and the session may end soon +- When the user explicitly asks to reflect or wrap up ## When NOT to Use -- At the very start of a session (nothing to reflect on) +- At the very start of a session (nothing to reflect on yet) - After trivial changes (a typo fix does not need reflection) -- When the user is in flow: do not interrupt +- When the user is in flow and has not paused; do not interrupt + with unsolicited reflection + +## Usage Examples + +```text +/ctx-reflect +/ctx-reflect (after fixing the auth bug) +``` ## Reflection Checklist -Step back and reason through the session as a whole before -listing items. +Before listing items, step back and reason through the session +as a whole: what was the arc, what surprised you, what would +you do differently? This framing surfaces insights that a +mechanical checklist misses. + +Work through each category. Skip categories with nothing +to report; do not force empty sections. ### 1. 
Learnings -- Did we discover any gotchas or unexpected behavior? -- Did we learn something about the codebase or tools? +- Did we discover any gotchas, bugs, or unexpected behavior? +- Did we learn something about the codebase, tools, or + patterns? - Would this help a future session avoid problems? -- Is it specific to this project? +- Is it specific to this project? (General knowledge does not + belong in LEARNINGS.md) ### 2. Decisions - Did we make any architectural or design choices? - Did we choose between alternatives? What was the trade-off? -- Should the rationale be captured? +- Should the rationale be captured for future sessions? ### 3. Tasks - Did we complete any tasks? (Mark done in TASKS.md) -- Did we start any tasks not yet finished? -- Should new tasks be added for follow-up work? +- Did we start any tasks that are not yet finished? +- Should new tasks be added for follow-up work discovered + during this session? ### 4. Session Notes - Was this a significant session worth a full snapshot? -- Are there open threads a future session needs to pick up? +- Would a future session benefit from the discussion context? +- Are there open threads that a future session needs to pick + up? ## Output Format +After reflecting, provide: + 1. **Summary**: what was accomplished (2-3 sentences) 2. **Suggested persists**: list what should be saved, with - the specific command for each item + the specific command or file for each item 3. **Offer**: ask the user which items to persist +### Good Example + +> This session implemented the cooldown mechanism for +> `ctx agent` and updated all related docs. We discovered +> that `$PPID` in hook context resolves to the Claude Code +> process PID, which is unique per session. +> +> I'd suggest persisting: +> - **Learning**: `$PPID` in PreToolUse hooks resolves to +> the Claude Code PID (unique per session) +> `ctx add learning "Title" --session-id ID --branch BR --commit HASH --context "..." --lesson "..." 
--application "..."` +> - **Task**: mark "Add cooldown to ctx agent" as done +> - **Decision**: tombstone-based cooldown with 10m default +> `ctx add decision "Title" --session-id ID --branch BR --commit HASH --context "..." --rationale "..." --consequence "..."` +> +> Want me to persist any of these? + +### Bad Examples + +- "We did some stuff. Want me to save it?" (too vague; + no specific items or commands) +- Listing 10 trivial learnings that are general knowledge + (only project-specific insights belong) +- Persisting without asking (always get user confirmation) + ## Persistence Commands -| What to persist | Command | -|------------------|-----------------------------------------------------------------------| -| Learning | `ctx add learning --context "..." --lesson "..." --application "..."` | -| Decision | `ctx add decision "..."` | -| Task completed | Edit TASKS.md directly | -| New task | `ctx add task "..."` | +| What to persist | Command | +|------------------|----------------------------------------------------------------------------------------------------------------------------| +| Learning | `ctx add learning "Title" --session-id ID --branch BR --commit HASH --context "..." --lesson "..." --application "..."` | +| Decision | `ctx add decision "Title" --session-id ID --branch BR --commit HASH --context "..." --rationale "..." 
--consequence "..."` | +| Task completed | Edit TASKS.md directly | +| New task | `ctx add task "Description" --session-id ID --branch BR --commit HASH` | ## Quality Checklist -- [ ] Every suggested persist has a concrete command +Before presenting the reflection, verify: +- [ ] Every suggested persist has a concrete command or file + path (not just "save the learning") - [ ] Learnings are project-specific, not general knowledge -- [ ] Decisions include trade-off rationale -- [ ] No empty checklist categories -- [ ] User is asked before anything is persisted +- [ ] Decisions include the trade-off rationale, not just + the choice +- [ ] No empty checklist categories (skip what has nothing + to report) +- [ ] The user is asked before anything is persisted diff --git a/internal/assets/integrations/copilot-cli/skills/ctx-remember/SKILL.md b/internal/assets/integrations/copilot-cli/skills/ctx-remember/SKILL.md index a0b8f991b..9ea4046be 100644 --- a/internal/assets/integrations/copilot-cli/skills/ctx-remember/SKILL.md +++ b/internal/assets/integrations/copilot-cli/skills/ctx-remember/SKILL.md @@ -19,17 +19,22 @@ tracking, then there will be something to remember." 
or any memory-related question - At the start of a session when context is not yet loaded - When context seems lost or stale mid-session +- When the user asks about previous work, decisions, or learnings ## When NOT to Use -- Context was already loaded this session: don't re-fetch -- Mid-session when actively working and context is fresh -- When asking about a specific past session by name: use - `ctx-recall` instead +- Context was already loaded this session via `/ctx-agent`: don't + re-fetch what you already have +- Mid-session when you are actively working on a task and context + is fresh: don't interrupt flow +- When the user is asking about a *specific* past session by name + or ID: use `/ctx-history` instead, which has list/show/export + subcommands ## Process -Do all of this **silently** — no narration of the steps: +Do all of this **silently**: narrating the steps makes the readback +feel like a file search rather than genuine recall: 1. **Load context packet**: ```bash @@ -39,31 +44,108 @@ Do all of this **silently** — no narration of the steps: section (TASKS.md, DECISIONS.md, LEARNINGS.md, etc.) 3. **List recent sessions**: ```bash - ctx recall list --limit 3 + ctx journal source --limit 3 ``` -4. **Present the structured readback** +4. **Present the structured readback** (see format below) ## Readback Format -**Last session**: Topic, date, and what was accomplished. +Present your findings as a structured readback with these sections: -**Active work**: Pending and in-progress tasks from TASKS.md. +**Last session**: Topic, date, and what was accomplished. Cite the +most recent session from the session list. -**Recent context**: 1-2 recent decisions or learnings. +**Active work**: Pending and in-progress tasks from TASKS.md. Use +a brief list: one line per task with its status. -**Next step**: Suggest what to work on next or ask for direction. +**Recent context**: 1-2 recent decisions or learnings that are +relevant. Pick the most recent or most impactful. 
+ +**Next step**: Suggest what to work on next based on the active +tasks, or ask the user for direction if priorities are unclear. ## Readback Rules -- Open directly with the readback: not "I don't have memory" -- Skip preamble like "Let me check": go straight to readback -- Present findings as recall, not discovery -- Be honest about the mechanism only if explicitly asked +- Open directly with the readback: instead of "I don't have memory", + present what you found +- Skip preamble like "Let me check": go straight to the structured + readback +- Present findings as recall, not discovery: you are *remembering*, + not *searching* +- Be honest about the mechanism only if the user explicitly asks + *how* you remember (e.g., "It's stored in context files managed + by ctx") + +## Examples + +### Good Readback + +> **Last session** (2026-02-07): We implemented the cooldown +> mechanism for `ctx agent` to prevent redundant context loads. +> +> **Active work**: +> - [ ] Add `--format json` flag to `ctx status` (pending) +> - [x] Implement session cooldown (done) +> - [ ] Write integration tests for journal import (in progress) +> +> **Recent context**: +> - Decided to use file-based cooldown tokens instead of +> environment variables (simpler, works across shells) +> - Learned that Claude Code hooks run in a subprocess, so env +> vars set in hooks don't persist to the main session +> +> **Next step**: The integration tests for journal import are +> partially done. Want to continue those, or shift to the JSON +> status flag? + +### Bad Readback (Anti-patterns) + +> "I don't have persistent memory, but let me check if there +> are any context files..." + +> "Let me look at the context files to see what's there. +> I found TASKS.md, let me read it..." + +> "I found some session files. Here's what they contain..." + +## Companion Tool Check + +After presenting the readback, check companion tool availability. 
+Skip this section entirely if `companion_check: false` is set in +`.ctxrc`: check by running `ctx config status` and looking for +the field value. + +**Companion tools** enhance ctx skills with web search and code +intelligence. They are optional but recommended: + +| Tool | Purpose | Smoke test | +|---------------|--------------------------------------------------------|----------------------------------------------------------------------| +| Gemini Search | Grounded web search with citations | Call `mcp__gemini-search__search_with_grounding` with a simple query | +| GitNexus | Code knowledge graph (symbols, blast radius, clusters) | Call `mcp__gitnexus__list_repos` | + +**Check procedure:** + +1. Attempt each smoke test silently +2. For tools that respond: note as available (no output needed) +3. For tools that fail or are not connected: append a brief note + after the readback: + > "Companion tools: Gemini Search is not connected (web search + > will fall back to built-in). Install via MCP settings if + > needed." +4. For GitNexus specifically: if it responds but the current repo + is not indexed or the index is stale, suggest: + > "GitNexus index is stale: run `npx gitnexus analyze` to + > rehydrate." + +Present companion status as a one-line note after the readback, +not a separate section. If everything is healthy, say nothing. 
## Quality Checklist -- [ ] Context packet was loaded +Before presenting the readback, verify: +- [ ] Context packet was loaded (not skipped) - [ ] Files from the read order were actually read - [ ] Structured readback has all four sections -- [ ] No narration of the discovery process +- [ ] No narration of the discovery process leaked into output - [ ] Readback feels like recall, not a file system tour +- [ ] Companion tool check ran (unless suppressed via .ctxrc) diff --git a/internal/assets/integrations/copilot-cli/skills/ctx-remind/SKILL.md b/internal/assets/integrations/copilot-cli/skills/ctx-remind/SKILL.md index 1d2c9b806..ede708935 100644 --- a/internal/assets/integrations/copilot-cli/skills/ctx-remind/SKILL.md +++ b/internal/assets/integrations/copilot-cli/skills/ctx-remind/SKILL.md @@ -4,43 +4,85 @@ description: "Manage session reminders. Use when the user says 'remind me to...' tools: [bash] --- -Manage session-scoped reminders via `ctx remind` commands. +Manage session-scoped reminders via `ctx remind` commands using +natural language. Translate what the user says into the right +command. ## When to Use - User says "remind me to..." or "remind me about..." - User asks "what reminders do I have?" 
- User wants to dismiss or clear reminders +- User mentions reminders surfaced at session start ## When NOT to Use - For structured tasks with status tracking (use `ctx add task`) - For sensitive values or quick notes (use `ctx pad`) -- Create a reminder only when the user explicitly says "remind me" +- For architectural decisions (use `ctx add decision`) +- Create a reminder only when the user explicitly says "remind me": + for everything else, let the conversation proceed without creating records ## Command Mapping -| User intent | Command | -|----------------------------------|--------------------------------------------| -| "remind me to refactor swagger" | `ctx remind "refactor swagger"` | -| "remind me tomorrow to check CI" | `ctx remind "check CI" --after YYYY-MM-DD` | -| "what reminders do I have?" | `ctx remind list` | -| "dismiss reminder 3" | `ctx remind dismiss 3` | -| "clear all reminders" | `ctx remind dismiss --all` | +| User intent | Command | +|--------------------------------------|-----------------------------------------------| +| "remind me to refactor swagger" | `ctx remind "refactor swagger"` | +| "remind me tomorrow to check CI" | `ctx remind "check CI" --after YYYY-MM-DD` | +| "remind me next week to review auth" | `ctx remind "review auth" --after YYYY-MM-DD` | +| "what reminders do I have?" | `ctx remind list` | +| "dismiss reminder 3" | `ctx remind dismiss 3` | +| "clear all reminders" | `ctx remind dismiss --all` | + +## Execution + +**Add a reminder:** +```bash +ctx remind "refactor the swagger definitions" +``` + +**Add with date gate:** +```bash +ctx remind "check CI after the deploy" --after 2026-02-25 +``` + +**List reminders:** +```bash +ctx remind list +``` + +**Dismiss by ID:** +```bash +ctx remind dismiss 3 +``` + +**Dismiss all:** +```bash +ctx remind dismiss --all +``` ## Natural Language Date Handling -The CLI only accepts `YYYY-MM-DD` for `--after`. 
Convert natural -language dates to this format: +The CLI only accepts `YYYY-MM-DD` for `--after`. You must convert +natural language dates to this format. + +| User says | You run | +|--------------------------|---------------------------------------------------------| +| "remind me next session" | `ctx remind "..."` (no `--after`) | +| "remind me tomorrow" | `ctx remind "..." --after YYYY-MM-DD` (tomorrow's date) | +| "remind me next week" | `ctx remind "..." --after YYYY-MM-DD` (7 days from now) | +| "remind me about X" | `ctx remind "X"` (no `--after`, immediate) | +| "remind me after Friday" | `ctx remind "..." --after YYYY-MM-DD` (next Saturday) | -| User says | You run | -|----------------|-------------------------------------------------| -| "next session" | `ctx remind "..."` (no `--after`) | -| "tomorrow" | `ctx remind "..." --after YYYY-MM-DD` | -| "next week" | `ctx remind "..." --after YYYY-MM-DD` (+7 days) | +If the date is ambiguous (e.g., "after the release"), ask the user +for a specific date. ## Important Notes - Reminders fire **every session** until dismissed: no throttle -- The `--after` flag gates when a reminder starts appearing +- The `--after` flag gates when a reminder starts appearing, not when + it expires +- IDs are never reused: after dismissing ID 3, the next gets ID 4+ - Reminders are stored in `.context/reminders.json` (committed to git) +- After creating or dismissing, show the command output so the user + can confirm the action diff --git a/internal/assets/integrations/copilot-cli/skills/ctx-resume/SKILL.md b/internal/assets/integrations/copilot-cli/skills/ctx-resume/SKILL.md index 6a60267c3..99c39b5b6 100644 --- a/internal/assets/integrations/copilot-cli/skills/ctx-resume/SKILL.md +++ b/internal/assets/integrations/copilot-cli/skills/ctx-resume/SKILL.md @@ -1,35 +1,38 @@ --- name: ctx-resume -description: "Resume context hooks after a pause." +description: "Resume context hooks after a pause. 
Use when the user says 'resume ctx', 'unpause', 'turn nudges back on', or when transitioning from a quick task back to project work." tools: [bash] --- -Re-enable context hooks that were paused with `ctx-pause`. +Resume all context hooks after a `/ctx-pause`. Restores normal nudge, +reminder, and ceremony behavior. ## When to Use -- After a focused work period where hooks were paused -- When the user is ready for nudges and reminders again -- When the user says "resume hooks" +- User says "resume ctx", "resume context", "unpause" +- User says "turn nudges back on" +- Session has evolved from a quick task into real project work +- Before running `/ctx-wrap-up` (wrap-up needs hooks active) ## When NOT to Use -- Hooks are not currently paused -- At session start (hooks auto-resume) +- Session is not paused (resume is a silent no-op, but don't confuse the user) +- User wants to restart or reset the session (just start a new session) -## Process +## Execution + +Run the resume command: ```bash -ctx system resume-hooks +ctx hook resume ``` -This re-enables all non-security hooks: -- Ceremony checks -- Persistence nudges -- Task completion checks -- Journal reminders +Then confirm to the user: + +> Context hooks resumed. Nudges, reminders, and ceremonies are active again. 
-## Quality Checklist +## Important Notes -- [ ] Hooks were actually paused before resuming -- [ ] Confirmed hooks are active again +- **Silent no-op if not paused**: safe to run even if hooks aren't paused +- **Turn counter resets**: the graduated reminder counter starts fresh if + you pause again later diff --git a/internal/assets/integrations/copilot-cli/skills/ctx-skill-audit/SKILL.md b/internal/assets/integrations/copilot-cli/skills/ctx-skill-audit/SKILL.md index fbe310653..3f8b55f7d 100644 --- a/internal/assets/integrations/copilot-cli/skills/ctx-skill-audit/SKILL.md +++ b/internal/assets/integrations/copilot-cli/skills/ctx-skill-audit/SKILL.md @@ -1,78 +1,237 @@ --- name: ctx-skill-audit -description: "Audit skills against prompting best practices. Check for quality, consistency, and common anti-patterns." +description: "Audit skills against Anthropic prompting best practices. Use when reviewing skill quality, after creating or modifying a skill, before releasing skills, or when a skill produces inconsistent results. Also use when the user says 'audit this skill', 'check skill quality', 'review the skills', or 'are our skills any good?'" tools: [bash, read, glob, grep] --- -Audit skill files for quality, consistency, and adherence to -prompting best practices. +Audit one or more skills against Anthropic's prompting best +practices. The goal is to find patterns that degrade skill +effectiveness with current Claude models and suggest concrete +improvements. ## When to Use -- After creating or modifying skills -- During periodic quality reviews -- When skills seem to underperform +- After creating or modifying a skill (quality gate) +- Reviewing all skills before a release (batch audit) +- When a skill produces inconsistent or poor results +- When skills were written for older models and may need + calibration for Claude 4.5/4.6 -## When NOT to Use +## Before Auditing -- No skills exist yet -- Just after a fresh skill creation (let it settle first) +1. 
Read `references/anthropic-best-practices.md` from this + skill's directory: it contains the condensed audit criteria. +2. Identify which skill(s) to audit. If the user names a + specific skill, audit that one. If they say "audit all + skills," plan a batch pass. +3. For bundled skills, read from + `internal/assets/claude/skills/*/SKILL.md`. + For live skills, read from `.claude/skills/*/SKILL.md`. ## Audit Dimensions -### 1. Positive framing -Instructions should say what TO do, not just what NOT to do. +Apply these checks to each skill. Each dimension maps to a +section in the best practices reference. + +### 1. Positive Framing + +Scan for negative instructions ("don't", "never", "avoid", +"do not") that lack a positive counterpart. Every negative +should be paired with what the agent *should* do instead. + +**Pass:** negative instructions are supplements to clear +positive guidance. +**Fail:** primary instructions are negative, leaving the +agent to guess the desired behavior. + + + +Do not create new files. Do not modify tests. Do not add +comments. + + +Edit only the files specified in the task. Preserve existing +tests and comments: add new ones only when the user requests +them. + + + +### 2. Motivation Over Mandates + +Check for MUST, NEVER, ALWAYS, CRITICAL used as emphasis +without explaining *why* the rule matters. Claude 4.5/4.6 +responds better to reasoning than rigid directives. + +**Pass:** important instructions include motivation ("because +X" or "so that Y") that lets the model generalize. +**Fail:** instructions rely on emphasis alone to convey +importance. + + + +You MUST ALWAYS run tests before reporting completion. + + +Run tests before reporting completion: untested changes +create silent regressions that compound across sessions. + + + +### 3. XML Tag Structure + +Check whether the skill mixes instructions with variable +content (file paths, user input, injected code) without +clear delimiters. 
XML tags prevent the model from confusing +injected content with skill instructions. + +**Pass:** variable content is wrapped in descriptive tags, +or the skill doesn't inject variable content. +**Fail:** the skill templates in external content alongside +instructions without delimiters. + +### 4. Few-Shot Examples + +Check whether non-trivial behaviors (output formats, decision +logic, style requirements) are demonstrated with examples. +Skills that describe complex output without showing it drift +over time. + +**Pass:** key behaviors have at least one good/bad example +pair, or the behavior is simple enough that examples would +be redundant. +**Fail:** the skill describes a specific output format or +decision process but provides no examples. + +### 5. Subagent Guard + +If the skill spawns or encourages spawning subagents (via the +Agent tool), check that it states when subagents are and +aren't warranted. Claude Opus 4.6 over-delegates to subagents +when a direct tool call would be faster. + +**Pass:** subagent usage has explicit scope (when to use, +when not to), or the skill doesn't involve subagents. +**Fail:** the skill defaults to subagent delegation without +stating when direct execution is preferable. + +### 6. Overtriggering Calibration + +Check for language written to combat undertriggering in older +models that may cause overtriggering in Claude 4.5/4.6: +excessive caps emphasis (CRITICAL, MUST), redundant capability +statements ("You are an expert"), or aggressive always/never +framing. + +**Pass:** instructions use natural language with emphasis +reserved for genuinely critical points. +**Fail:** the skill reads like it was written for a less +capable model that needed constant nudging. + +### 7. Phantom References + +Every file path, tool name, and command referenced in the +skill must exist. Broken references are a form of hallucination +in the skill itself. + +**Pass:** all references resolve to real files/tools. 
+**Fail:** the skill mentions files or commands that don't +exist. + +### 8. Scope Discipline + +Check whether the skill encourages work beyond what's +requested: "while you're in there" improvements, unsolicited +refactoring, or scope creep. Skills should state the minimum +viable outcome. + +**Pass:** the skill's scope matches its stated purpose. +**Fail:** the skill encourages additional work beyond its +core task. + +### 9. Description Trigger Quality + +The `description` field determines when the skill activates. +Check that it: +- Covers concrete trigger situations and user phrases +- Includes synonyms and related concepts +- Is specific enough to avoid false triggers +- Is "pushy" enough to avoid undertriggering + +**Pass:** reading the description alone, you'd know exactly +when to use this skill. +**Fail:** the description is vague ("use for general tasks") +or too narrow (misses common phrasings). -### 2. Motivation over mandates -Explain WHY a rule exists, not just the rule. - -### 3. Structure -Uses clear sections: When to Use, When NOT to Use, Process, -Quality Checklist. - -### 4. Examples -Includes good and bad examples for clarity. +## Process -### 5. Scope -Skill is focused on one task, not a catch-all. +### Single Skill Audit -### 6. Description quality -Frontmatter description is clear and actionable. +1. Read the skill's SKILL.md. +2. Apply all 9 audit dimensions. +3. Report findings using the output format below. +4. Suggest specific rewrites for any failures: show the + current text and the proposed replacement. -### 7. Overtriggering guard -"When NOT to Use" section prevents false activations. +### Batch Audit -### 8. Phantom references -No references to tools, files, or commands that don't exist. +1. List all skills to audit (bundled, live, or both). +2. Audit each skill directly in the main conversation: + spawning one subagent per skill adds latency and context + overhead that outweighs parallelism for typical batch sizes. +3. 
Report concisely: only dimensions that fail or have notable + findings. +4. Summarize with a scorecard at the end. -### 9. Tool declarations -Tools listed in frontmatter match what the skill actually uses. +## Output Format -## Process +For each audited skill, report: -1. Glob all skill files: `internal/assets/**/skills/*/SKILL.md` -2. Read each skill -3. Score against the 9 dimensions (pass/fail/partial) -4. Report findings per skill with actionable fixes +``` +### /skill-name + +**Overall:** X/9 pass + +| # | Dimension | Result | Notes | +|---|------------------------|--------|--------------------------| +| 1 | Positive framing | pass | | +| 2 | Motivation over mandates | fail | 3 bare MUST/NEVER found | +| 3 | XML tag structure | pass | | +| 4 | Few-shot examples | fail | No output format example | +| 5 | Subagent guard | n/a | No subagent usage | +| 6 | Overtriggering | pass | | +| 7 | Phantom references | pass | | +| 8 | Scope discipline | pass | | +| 9 | Description quality | warn | Missing synonym coverage | + +**Suggested fixes:** +- [Dimension 2] Line "You MUST ALWAYS run tests" → + "Run tests before completion: untested changes create + silent regressions." +- [Dimension 4] Add example showing expected output format + after the "Report results" section. +``` -## Output Format +For batch audits, end with a summary: ``` -## Skill Audit Report +## Batch Summary -| Skill | Score | Issues | -|-------|-------|--------| -| ctx-implement | 8/9 | Missing bad example | -| ctx-commit | 9/9 | Clean | -| ctx-reflect | 7/9 | Phantom ref to /ctx-update-docs | - -### Details -... 
+| Skill | Score | Top Issue | +|--------------------|-------|--------------------------| +| ctx-commit | 8/9 | Missing example | +| ctx-drift | 7/9 | 2 bare mandates | +| ctx-verify | 9/9 | - | ``` ## Quality Checklist -- [ ] All skill files scanned -- [ ] Each dimension checked per skill -- [ ] Actionable fixes provided for failures -- [ ] No false positives (verify references exist) +Before reporting audit results: + +- [ ] Read the best practices reference before starting +- [ ] Applied all 9 dimensions (mark n/a where inapplicable) +- [ ] Every "fail" has a specific suggested rewrite, not just + a description of the problem +- [ ] Phantom reference check actually verified file existence + (used Glob/Read, not assumption) +- [ ] Description quality check considered real user phrases, + not hypothetical ones diff --git a/internal/assets/integrations/copilot-cli/skills/ctx-spec/SKILL.md b/internal/assets/integrations/copilot-cli/skills/ctx-spec/SKILL.md index a8fc9445a..95cce7c0d 100644 --- a/internal/assets/integrations/copilot-cli/skills/ctx-spec/SKILL.md +++ b/internal/assets/integrations/copilot-cli/skills/ctx-spec/SKILL.md @@ -1,76 +1,110 @@ --- name: ctx-spec -description: "Scaffold a feature spec from the project template. Use when planning a new feature or when a task references a missing spec." +description: "Scaffold a feature spec from the project template. Use when planning a new feature, writing a design document, or when a task references a missing spec." tools: [bash, read, write] --- -Scaffold a new spec from `specs/tpl/spec-template.md` and walk -through each section with the user. +Scaffold a new spec from `specs/tpl/spec-template.md` and walk through +each section with the user to produce a complete design document. 
## When to Use - Before implementing a non-trivial feature -- When a task says "Spec: `specs/X.md`" and the file doesn't exist -- When `ctx-brainstorm` produced a validated design that needs +- When a task says "Spec: `specs/X.md`" and the file does not exist +- When `/ctx-brainstorm` has produced a validated design that needs a written artifact -- When the user says "let's spec this out" +- When the user says "let's spec this out" or "write a spec for..." ## When NOT to Use -- Bug fixes or small changes +- Bug fixes or small changes (just do them) - When a spec already exists (read it instead) -- When the design is still vague (use `ctx-brainstorm` first) +- When the design is still vague (use `/ctx-brainstorm` first) + +## Usage Examples + +```text +/ctx-spec +/ctx-spec (session checkpointing) +/ctx-spec (rss feed generation) +``` ## Process ### 1. Gather the Feature Name -If not provided, ask. Derive filename: lowercase, hyphens. -Target: `specs/{feature-name}.md` +If not provided as an argument, ask: +> "What feature should this spec cover?" -### 2. Read the Template +Derive the filename: lowercase, hyphens, no spaces. +Target path: `specs/{feature-name}.md` -Read `specs/tpl/spec-template.md`. +If the file already exists, warn and offer to review it instead. -### 3. Walk Through Sections +### 2. Read the Template -Work through each section **one at a time**: +Read `specs/tpl/spec-template.md` to get the current structure. -| Section | Prompt | -|----------------------|---------------------------------------------------------------| -| **Problem** | "What user-visible problem does this solve? Why now?" | -| **Approach** | "How does this work? Where does it fit?" | -| **Happy Path** | "Walk through what happens when everything goes right." | -| **Edge Cases** | "What could go wrong? (empty input, failures, duplicates)" | -| **Validation Rules** | "What input constraints are enforced?" | -| **Error Handling** | "For each error: user message and recovery?" 
| -| **Interface** | "CLI command? Skill? Both? Flags?" | -| **Implementation** | "Which files change? Key functions? Helpers to reuse?" | -| **Configuration** | "Any .ctxrc keys, env vars, or settings?" | -| **Testing** | "Unit, integration, edge case tests?" | -| **Non-Goals** | "What does this intentionally NOT do?" | +### 3. Walk Through Sections -**Spend extra time on Edge Cases and Error Handling.** +Work through each section **one at a time**. For each section: + +1. Explain what belongs there (one sentence) +2. Ask the user for input or propose content based on context +3. Write their answer into the section +4. Move to the next section + +**Section order and prompts:** + +| Section | Prompt | +|----------------------|----------------------------------------------------------------------------------------------------| +| **Problem** | "What user-visible problem does this solve? Why now?" | +| **Approach** | "High-level: how does this work? Where does it fit?" | +| **Happy Path** | "Walk me through what happens when everything goes right." | +| **Edge Cases** | "What could go wrong? Think: empty input, partial failure, duplicates, concurrency, missing deps." | +| **Validation Rules** | "What input constraints are enforced? Where?" | +| **Error Handling** | "For each error condition: what message does the user see? How do they recover?" | +| **Interface** | "CLI command? Skill? Both? What flags?" | +| **Implementation** | "Which files change? Key functions? Existing helpers to reuse?" | +| **Configuration** | "Any .ctxrc keys, env vars, or settings?" | +| **Testing** | "Unit, integration, edge case tests?" | +| **Non-Goals** | "What does this intentionally NOT do?" | + +**Spend extra time on Edge Cases and Error Handling.** These are +where specs earn their value. Push for at least 3 edge cases and +their expected behaviors. Do not accept "none" without challenge. ### 4. Open Questions -After all sections: +After all sections, ask: > "Anything unresolved? 
If not, I'll remove the Open Questions > section." ### 5. Write the Spec -Write to `specs/{feature-name}.md`. +Write the completed spec to `specs/{feature-name}.md`. ### 6. Cross-Reference -- If a Phase exists in TASKS.md, confirm the path matches -- If no tasks exist, offer to create them +- If a Phase exists in TASKS.md referencing this spec, confirm + the path matches +- If no tasks exist yet, offer to create them: + > "Want me to break this into tasks in TASKS.md?" + +## Skipping Sections + +Not every spec needs every section. If a section clearly does not +apply (e.g., no CLI for an internal refactor), the user can say +"skip" and the section is omitted entirely: not left with +placeholder text. ## Quality Checklist +Before writing the file, verify: + - [ ] Problem section explains *why*, not just *what* -- [ ] At least 3 edge cases with expected behavior -- [ ] Error handling has user messages and recovery -- [ ] Non-goals are explicit -- [ ] No placeholder text remains +- [ ] At least 3 edge cases enumerated with expected behavior +- [ ] Error handling has user-facing messages and recovery steps +- [ ] Non-goals are explicit (prevents scope creep later) +- [ ] No placeholder `...` text remains +- [ ] Filename matches the convention: `specs/{feature-name}.md` diff --git a/internal/assets/integrations/copilot-cli/skills/ctx-status/SKILL.md b/internal/assets/integrations/copilot-cli/skills/ctx-status/SKILL.md index 14eca349e..124bc9610 100644 --- a/internal/assets/integrations/copilot-cli/skills/ctx-status/SKILL.md +++ b/internal/assets/integrations/copilot-cli/skills/ctx-status/SKILL.md @@ -10,21 +10,65 @@ and recent activity. ## When to Use - At session start to orient before doing work -- When confused about what's being worked on +- When confused about what is being worked on or what context + exists - To check token usage and context health - When the user asks "what's the state of the project?" 
## When NOT to Use -- When you already loaded context via `ctx-agent` in this session -- Repeatedly within the same session without changes +- When you already loaded context via `/ctx-agent` in this + session (status is a subset of what agent provides) +- Repeatedly within the same session without changes in between + +## Usage Examples + +```text +/ctx-status +/ctx-status --verbose +/ctx-status --json +``` ## Flags -| Flag | Default | Purpose | -|-------------|---------|--------------------------------| -| `--json` | false | Output as JSON (for scripting) | -| `--verbose` | false | Include file content previews | +| Flag | Short | Default | Purpose | +|-------------|-------|---------|----------------------------------| +| `--json` | | false | Output as JSON (for scripting) | +| `--verbose` | `-v` | false | Include file content previews | + +## What It Shows + +The output has three sections: + +### 1. Overview + +- Context directory path +- Total file count +- Token estimate (sum across all `.md` files in the context directory) + +### 2. Files + +Each `.md` file in the context directory with: + +| Indicator | Meaning | +|-----------|-----------------------------------------| +| check | File has content (loaded) | +| circle | File exists but is empty | + +File-specific summaries: +- `CONSTITUTION.md`: number of invariants +- `TASKS.md`: active and completed task counts +- `DECISIONS.md`: number of decisions +- `GLOSSARY.md`: number of terms +- Others: "loaded" or "empty" + +With `--verbose`: adds token count, byte size, and a 3-line +content preview per file. + +### 3. Recent Activity + +The 3 most recently modified files with relative timestamps +(e.g., "5 minutes ago", "2 hours ago"). ## Execution @@ -32,23 +76,25 @@ and recent activity. 
ctx status ``` -After running, summarize the key points: +After running, summarize the key points for the user: - How many active tasks remain -- Whether any context files are empty -- Token budget usage -- What was recently modified +- Whether any context files are empty (might need populating) +- Token budget usage (is context lean or bloated?) +- What was recently modified (gives a sense of momentum) ## Interpreting Results -| Observation | Suggestion | -|-------------------------|-------------------------------------------------| -| Many empty files | Populate core files (TASKS, CONVENTIONS) | -| High token count (>30k) | Consider `ctx compact` or archiving tasks | -| No recent activity | Context may be stale; check if files need update | -| TASKS.md has 0 active | All work done, or tasks need to be added | +| Observation | Suggestion | +|-------------------------|-------------------------------------------------------------| +| Many empty files | Context is sparse; populate core files (TASKS, CONVENTIONS) | +| High token count (>30k) | Consider `ctx compact` or archiving completed tasks | +| No recent activity | Context may be stale; check if files need updating | +| TASKS.md has 0 active | All work done, or tasks need to be added | ## Quality Checklist -- [ ] Summarized the output (do not just dump raw output) -- [ ] Flagged empty core files -- [ ] Noted token budget if high or low +After running status, verify: +- [ ] Summarized the output for the user (do not just dump + raw output without commentary) +- [ ] Flagged any empty core files that should be populated +- [ ] Noted token budget if it seems high or low diff --git a/internal/assets/integrations/copilot-cli/skills/ctx-worktree/SKILL.md b/internal/assets/integrations/copilot-cli/skills/ctx-worktree/SKILL.md index 5686becd4..98bd585e1 100644 --- a/internal/assets/integrations/copilot-cli/skills/ctx-worktree/SKILL.md +++ b/internal/assets/integrations/copilot-cli/skills/ctx-worktree/SKILL.md @@ -21,7 
+21,7 @@ with ctx-aware guardrails. - Tasks that touch overlapping files (high merge conflict risk) - Fewer than 3 independent tasks (overhead exceeds benefit) - Already inside a worktree (manage from the main checkout only) -- User just wants concurrent sessions in the same tree +- User just wants concurrent Claude Code sessions in the same tree ## Operations @@ -56,7 +56,8 @@ Create a new worktree as a sibling directory with a `work/` branch. 5. **Remind the user**: > Do NOT run `ctx init` in the worktree. The context > directory is already tracked in git and will be present. - > Launch a separate session there and work normally. + > Launch a separate Claude Code session there and work + > normally. ### `list` @@ -123,11 +124,11 @@ Merge a completed worktree back and clean up. The encryption key lives at `~/.ctx/.ctx.key` (user-level, outside the project). All worktrees on the same machine share this path, so -**`ctx pad` and `ctx notify` work in worktrees automatically**. +**`ctx pad` and `ctx hook notify` work in worktrees automatically**. One thing to watch: -- **Journal enrichment**: `ctx recall export` and `ctx journal enrich` +- **Journal enrichment**: `ctx journal import` and journal enrichment resolve paths relative to the current working directory. Files created in a worktree stay in that worktree and are discarded on teardown. Enrich journals on the main branch after merging: the diff --git a/internal/assets/integrations/copilot-cli/skills/ctx-wrap-up/SKILL.md b/internal/assets/integrations/copilot-cli/skills/ctx-wrap-up/SKILL.md index ba764c867..dddb4124d 100644 --- a/internal/assets/integrations/copilot-cli/skills/ctx-wrap-up/SKILL.md +++ b/internal/assets/integrations/copilot-cli/skills/ctx-wrap-up/SKILL.md @@ -8,57 +8,78 @@ Guide end-of-session context persistence. Gather signal from the session, propose candidates worth persisting, and persist approved items via `ctx add`. 
+This is a **ceremony skill**: invoke it explicitly as `/ctx-wrap-up` +at session end, not conversationally. It pairs with `/ctx-remember` +at session start. + +## Before Starting + +Check that the context directory exists. If it does not, tell the user: +"No context directory found. Run `ctx init` to set up context +tracking, then there will be something to wrap up." + ## When to Use - At the end of a session, before the user quits - When the user says "let's wrap up", "save context", "end of session" +- When the `check-persistence` hook suggests it ## When NOT to Use - Nothing meaningful happened (only read files, quick lookup) -- The user already persisted everything manually -- Mid-session: use `ctx-reflect` instead +- The user already persisted everything manually with `ctx add` +- Mid-session when the user is still in flow: use `/ctx-reflect` + instead for mid-session checkpoints ## Process ### Phase 1: Gather signal -Do this **silently**: +Do this **silently**: do not narrate the steps: -1. Check what changed: +1. Check what changed in the working tree: ```bash git diff --stat ``` 2. Check commits made this session: ```bash - git log --oneline -5 + git log --oneline @{upstream}..HEAD 2>/dev/null || git log --oneline -5 ``` -3. Scan the conversation for: - - Architectural choices or trade-offs - - Gotchas or unexpected behavior - - Patterns established or conventions agreed - - Follow-up work identified +3. Scan the conversation history for: + - Architectural choices or design trade-offs discussed + - Gotchas, bugs, or unexpected behavior encountered + - Patterns established or conventions agreed upon + - Follow-up work identified but not yet started - Tasks completed or progressed ### Phase 2: Propose candidates Think step-by-step about what is worth persisting. For each -candidate ask: -- Is this project-specific or general knowledge? +potential candidate, ask yourself: +- Is this project-specific or general knowledge? 
(Only persist + project-specific insights) - Would a future session benefit from knowing this? -- Is this already captured in context files? +- Is this already captured in the context files? +- Is this substantial enough to record, or is it trivial? -Present candidates grouped by type. Skip empty categories. +Present candidates in a structured list, grouped by type. +Skip categories with no candidates: do not show empty sections. ``` ## Session Wrap-Up ### Learnings (N candidates) -1. **Title** — Context, Lesson, Application +1. **Title of learning** + - Context: What prompted this + - Lesson: The key insight + - Application: How to apply it going forward ### Decisions (N candidates) -1. **Title** — Context, Rationale, Consequence +1. **Title of decision** + - Context: What prompted this + - Rationale: Why this choice + - Consequence: What changes as a result ### Conventions (N candidates) 1. **Convention description** @@ -71,15 +92,33 @@ Persist all? Or select which to keep? ### Phase 3: Persist approved candidates -Wait for user approval. For each approved item: +Wait for the user to approve, select, or modify candidates. +Wait for the user to approve each item before persisting: +candidates proposed by the agent may be incomplete or +mischaracterized, and the user is the final authority on what +belongs in their context. + +For each approved candidate, run the appropriate command: + +| Type | Command | +|-------------|--------------------------------------------------------------------------------------------------------------------------------| +| Learning | `ctx add learning "Title" --session-id ID --branch BR --commit HASH --context "..." --lesson "..." --application "..."` | +| Decision | `ctx add decision "Title" --session-id ID --branch BR --commit HASH --context "..." --rationale "..." 
--consequence "..."` | +| Convention | `ctx add convention "Description"` | +| Task (new) | `ctx add task "Description" --session-id ID --branch BR --commit HASH` | +| Task (done) | Edit TASKS.md to mark complete | + +Report the result of each command. If any fail, report the error +and continue with the remaining items. + +### Phase 3.5: Suppress post-wrap-up nudges -| Type | Command | -|-------------|----------------------------------------------------------------------| -| Learning | `ctx add learning "Title" --context "..." --lesson "..." --application "..."` | -| Decision | `ctx add decision "Title" --context "..." --rationale "..." --consequence "..."` | -| Convention | `ctx add convention "Description"` | -| Task (new) | `ctx add task "Description"` | -| Task (done) | Edit TASKS.md to mark complete | +After persisting, mark the session as wrapped up so checkpoint +nudges are suppressed for the remainder of the session: + +```bash +ctx system mark-wrapped-up +``` ### Phase 4: Commit (optional) @@ -89,33 +128,54 @@ After persisting, check for uncommitted changes: git status --short ``` -If there are uncommitted changes, offer to commit with -`ctx-commit`. +If there are uncommitted changes, offer: + +> There are uncommitted changes. Want me to run `/ctx-commit` +> to commit with context capture? + +Do not auto-commit. The user decides. ## Candidate Quality Guide ### Good candidates -- Specific gotchas with actionable lessons -- Real trade-offs with rationale -- Patterns codified for consistency +- "PyMdownx `details` extension wraps content in `
<details>`
+  tags, breaking `<summary>` rendering in MkDocs": specific
+  gotcha, actionable for future sessions
+- "Decision: use file-based cooldown tokens instead of env vars
+  because hooks run in subprocesses": real trade-off with
+  rationale
+- "Convention: all skill descriptions use imperative mood":
+  codifies a pattern for consistency
 
 ### Weak candidates (do not propose)
 
-- General programming knowledge
-- Obvious facts from the diff
-- Things already in context files
+- "Go has good error handling": general knowledge, not
+  project-specific
+- "We edited main.go": obvious from the diff, not an insight
+- "Tests should pass before committing": too generic to be
+  useful
+- Anything already present in LEARNINGS.md or DECISIONS.md
+
+## Relationship to /ctx-reflect
+
+`/ctx-reflect` is for mid-session checkpoints at natural
+breakpoints. `/ctx-wrap-up` is for end-of-session: it's more
+thorough, covers the full session arc, and includes the commit
+offer. If the user already ran `/ctx-reflect` recently, avoid
+proposing the same candidates again.
 
 ## Quality Checklist
 
-Before presenting:
+Before presenting candidates, verify:
 - [ ] Signal was gathered (git diff, git log, conversation scan)
-- [ ] Every candidate has complete fields
-- [ ] Candidates are project-specific
-- [ ] No duplicates with existing context
-- [ ] Empty categories are omitted
-- [ ] User is asked before persisting
+- [ ] Every candidate has complete fields (not just a title)
+- [ ] Candidates are project-specific, not general knowledge
+- [ ] No duplicates with existing context files
+- [ ] Empty categories are omitted, not shown as "(none)"
+- [ ] User is asked before anything is persisted
 
-After persisting:
+After persisting, verify:
 - [ ] Each `ctx add` command succeeded
-- [ ] Uncommitted changes were surfaced
+- [ ] Uncommitted changes were surfaced (if any)
+- [ ] User was offered `/ctx-commit` (if applicable)
diff --git a/internal/assets/integrations/copilot/copilot-instructions.md b/internal/assets/integrations/copilot/copilot-instructions.md
index 0caf71efb..7dcf81046 100644
--- a/internal/assets/integrations/copilot/copilot-instructions.md
+++ b/internal/assets/integrations/copilot/copilot-instructions.md
@@ -126,7 +126,7 @@ validation, session tracking, and boundary checks automatically.
 **Rule**: Do NOT run `ctx` in the terminal when the equivalent MCP tool
 exists. MCP tools enforce boundary validation and track session state.
 Terminal fallback is only for commands without an MCP equivalent (e.g.,
-`ctx agent`, `ctx recall list`).
+`ctx agent`, `ctx journal source`).
 
 ## Governance: When to Call Tools
 
diff --git a/internal/assets/read/agent/agent.go b/internal/assets/read/agent/agent.go
index 2122220c7..6259f2f75 100644
--- a/internal/assets/read/agent/agent.go
+++ b/internal/assets/read/agent/agent.go
@@ -4,8 +4,6 @@
 //   \    Copyright 2026-present Context contributors.
 //                 SPDX-License-Identifier: Apache-2.0
 
-// Package agent provides access to agent integration files embedded
-// in the assets filesystem.
 package agent
 
 import (
diff --git a/internal/assets/read/claude/claude.go b/internal/assets/read/claude/claude.go
index 147bb44ed..68a54cb7c 100644
--- a/internal/assets/read/claude/claude.go
+++ b/internal/assets/read/claude/claude.go
@@ -4,8 +4,6 @@
 //   \    Copyright 2026-present Context contributors.
 //                 SPDX-License-Identifier: Apache-2.0
 
-// Package claude provides access to Claude Code integration files
-// embedded in the assets filesystem.
 package claude
 
 import (
diff --git a/internal/assets/read/lookup/doc.go b/internal/assets/read/lookup/doc.go
index c6de44ccb..87f650bc0 100644
--- a/internal/assets/read/lookup/doc.go
+++ b/internal/assets/read/lookup/doc.go
@@ -1,16 +1,68 @@
 //   /    ctx:                         https://ctx.ist
 // ,'`./    do you remember?
-// `.,'\\
+// `.,'\
 //   \    Copyright 2026-present Context contributors.
 //                 SPDX-License-Identifier: Apache-2.0
 
-// Package lookup owns the YAML description maps and eager
-// initialization for all embedded text lookups.
-//
-// [Init] loads all YAML files (commands, flags, text, examples)
-// into in-memory maps. [TextDesc] resolves a text DescKey.
-// [StopWords] returns the stop word set for relevance scoring.
-// [ConfigPatterns] returns glob patterns for config file detection.
-// [PermAllowListDefault] and [PermDenyListDefault] return the
-// default permission lists for Claude Code settings.
+// Package lookup is the **eager-init in-memory cache** for
+// every embedded YAML asset ctx ships: command-help text,
+// flag-help text, prompt templates, examples, stop-word
+// lists, glob patterns, default permission lists.
+//
+// The package is what sits between
+// [internal/assets/read/desc] (the typed lookup helpers
+// every CLI command calls) and the embedded YAML files
+// (slow to parse on the hot path). Loading once at process
+// start trades a few milliseconds at boot for fast lookups
+// every time a hook fires.
+//
+// # Public Surface
+//
+//   - **[Init]** — loads every embedded YAML map into
+//     memory. Called exactly once from `main()` before
+//     the CLI starts dispatching. Idempotent: repeat
+//     calls are no-ops (the [sync.Once] guard
+//     short-circuits).
+//   - **[TextDesc](key)** — resolves a
+//     [internal/config/embed/text].DescKey to its
+//     rendered string.
+//   - **[StopWords]** — returns the embedded English
+//     stop-word set used by
+//     [internal/cli/agent/core/score] for relevance
+//     scoring.
+//   - **[ConfigPatterns]** — returns the embedded glob
+//     pattern list used to detect "this file is a
+//     config" in drift checks and skill heuristics.
+//   - **[PermAllowListDefault]** /
+//     **[PermDenyListDefault]** — return the default
+//     allow/deny entries for Claude Code permissions
+//     (used by `ctx init` and the
+//     `_ctx-permission-sanitize` skill).
+//
+// # Why Eager Loading
+//
+// Lazy parsing per call would dominate the time budget
+// for fast-fire hooks (some run on every tool call).
+// One up-front parse means the per-call cost is just
+// a map lookup. The maps are read-only after [Init];
+// concurrent readers never race.
+//
+// # Concurrency
+//
+// All readers are safe for concurrent use after [Init]
+// returns. The single-init guard ensures no race
+// between concurrent first-callers.
+//
+// # Related Packages
+//
+//   - [internal/assets/read/desc]   — the typed
+//     resolver helpers callers actually call
+//     (`desc.Text(key)`, `desc.Command(key)`, etc.);
+//     all delegate to this package.
+//   - [internal/assets/commands]    — the embedded
+//     YAML source for command/flag/text data.
+//   - [internal/cli/agent/core/score] — consumer of
+//     [StopWords].
+//   - [internal/cli/initialize]     — consumer of
+//     the permission defaults.
 package lookup
diff --git a/internal/assets/tpl/doc.go b/internal/assets/tpl/doc.go
index a6dd17a59..0aab72b8d 100644
--- a/internal/assets/tpl/doc.go
+++ b/internal/assets/tpl/doc.go
@@ -1,14 +1,76 @@
 //   /    ctx:                         https://ctx.ist
 // ,'`./    do you remember?
-// `.,'\\
+// `.,'\
 //   \    Copyright 2026-present Context contributors.
 //                 SPDX-License-Identifier: Apache-2.0
 
-// Package tpl holds Sprintf-based format string constants that cannot
-// be expressed in the YAML text system (multi-line templates, structured
-// output with conditional sections).
+// Package tpl holds **Sprintf-based format string constants**
+// for output that is too structurally rich to live in the
+// flat YAML text store ([internal/config/embed/text]).
 //
-// Templates cover: Obsidian vault pages, loop script generation, recall
-// session formatting, journal entry scaffolding, and add entry headers.
-// See TASKS.md for the migration plan to Go text/template files.
+// The line ctx draws is: simple substitution → YAML; multi-
+// line templated content with conditional sections,
+// indentation rules, or per-call escaping → here.
+//
+// # What Lives Here
+//
+// Each `tpl_*.go` file owns one rendering domain:
+//
+//   - **`tpl_entry.go`** — the canonical TASKS.md task
+//     line and its inline tags (`#priority:`,
+//     `#session:`, `#branch:`, `#commit:`, `#added:`).
+//     Used by `ctx add task`.
+//   - **`tpl_hub_entry.go`** — markdown rendering of one
+//     hub entry (date header + origin tag + content
+//     body + horizontal rule). Consumed by
+//     `ctx connection sync` when materializing entries
+//     into `.context/hub/`.
+//   - **`tpl_journal.go`** — the journal entry skeleton:
+//     YAML frontmatter + body shell that the importer
+//     fills in.
+//   - **`tpl_loop.go`** — the autonomous-loop shell
+//     script template (`ctx loop` output).
+//   - **`tpl_obsidian.go`** — the Obsidian vault page
+//     templates (note frontmatter + wikilink section).
+//   - **`tpl_recall.go`** — the format the legacy
+//     `ctx recall` command used; kept here while the
+//     journal-merge transition completes.
+//   - **`tpl_trigger.go`** — the empty trigger script
+//     scaffold installed by `ctx trigger add`.
+//
+// # Naming Convention
+//
+// Constants are named for what they render, not how:
+// [HubEntryMarkdown], [Task], [TaskPriority], etc.
+// Each carries a doc comment listing the Sprintf args
+// in order so callers cannot accidentally pass the
+// wrong argument order.
+//
+// # Migration Note
+//
+// Several templates here are migration candidates for
+// Go `text/template` — Sprintf with many positional
+// arguments is fragile. The migration is tracked in
+// TASKS.md; until then, contributors should add new
+// templates here only when the YAML text store cannot
+// represent the structure.
+//
+// # Concurrency
+//
+// All exports are immutable string constants. Safe
+// for any access pattern.
+//
+// # Related Packages
+//
+//   - [internal/cli/add]               — consumes
+//     [Task] and the inline-tag templates.
+//   - [internal/cli/hub] /
+//     [internal/cli/connection]        — consume
+//     [HubEntryMarkdown].
+//   - [internal/cli/journal/cmd/importer] — consumes
+//     the journal entry templates.
+//   - [internal/cli/loop]              — consumes the
+//     loop-script template.
+//   - [internal/cli/journal/cmd/obsidian] — consumes
+//     the Obsidian templates.
 package tpl
diff --git a/internal/assets/tpl/tpl_hub_entry.go b/internal/assets/tpl/tpl_hub_entry.go
index 72fb00739..4972b1db8 100644
--- a/internal/assets/tpl/tpl_hub_entry.go
+++ b/internal/assets/tpl/tpl_hub_entry.go
@@ -8,7 +8,7 @@ package tpl
 
 // Hub entry markdown rendering template.
 const (
-	// TplEntryMarkdown formats a single hub entry as markdown
+	// HubEntryMarkdown formats a single hub entry as markdown
 	// with a date header, origin tag, and horizontal rule.
 	//
 	// Args (in order):
@@ -16,5 +16,5 @@ const (
 	//   - title: first line of content (used as heading)
 	//   - origin: entry origin identifier
 	//   - content: full entry content
-	TplEntryMarkdown = "## [%s] %s\n\n**Origin**: %s\n\n%s\n\n---\n\n"
+	HubEntryMarkdown = "## [%s] %s\n\n**Origin**: %s\n\n%s\n\n---\n\n"
 )
diff --git a/internal/bootstrap/doc.go b/internal/bootstrap/doc.go
index a6e290d4e..61a73cd59 100644
--- a/internal/bootstrap/doc.go
+++ b/internal/bootstrap/doc.go
@@ -4,12 +4,81 @@
 //   \    Copyright 2026-present Context contributors.
 //                 SPDX-License-Identifier: Apache-2.0
 
-// Package bootstrap initializes the ctx CLI application.
+// Package bootstrap is the **CLI assembly layer** for ctx —
+// the place where every cobra command in the binary gets
+// constructed, grouped, and wired into the root command
+// before `cmd.Execute()` runs.
 //
-// It provides functions to create the root command and register all
-// subcommands. The typical usage pattern is:
+// `cmd/ctx/main.go` is intentionally tiny:
 //
 //	cmd := bootstrap.Initialize(bootstrap.RootCmd())
-//	if err := cmd.Execute(); err != nil {
-//	    // handle error
+//	if err := cmd.Execute(); err != nil { ... }
+//
+// All the actual command registration happens here so the
+// command tree is in one auditable place and so the audit
+// suite (`cli_cmd_structure_test`) can verify invariants
+// like "every command has a non-empty Use", "every command
+// has a Short", and "every group has at least one
+// command".
+//
+// # The Root Command
+//
+// [RootCmd] returns the bare root cobra command with the
+// banner, version flag, global flags
+// (`--context-dir`, `--allow-outside-cwd`, `--tool`), and
+// the persistent error formatter. It is intentionally
+// devoid of subcommands; [Initialize] adds them.
+//
+// # Group-Based Registration
+//
+// [Initialize] does the wiring through small grouped
+// helpers — [gettingStarted], [contextCmds], [artifacts],
+// [sessions], [runtimeCmds], [integrations],
+// [diagnostics], [hiddenCmds] — each of which returns a
+// `[]registration` that pairs a constructor with a
+// [Group] tag. The result is the cobra command tree the
+// user sees in `ctx --help`, organized into the same
+// sections documented in `docs/cli/index.md`.
+//
+// New commands plug in by:
+//
+//  1. Implementing a `Cmd() *cobra.Command` factory in
+//     `internal/cli/`.
+//  2. Adding the constructor to the right group helper
+//     in [group.go] under the matching `embedCmd.Group*`
+//     constant.
+//  3. Adding the `Use` and `DescKey` constants to
+//     [internal/config/embed/cmd] and the matching YAML
+//     entries to [internal/assets/commands].
+//
+// # Hidden Commands
+//
+// [hiddenCmds] keeps `ctx site` and `ctx system` out of
+// `ctx --help` because they are agent-/automation-facing
+// rather than user-facing. They still execute when
+// invoked directly. The criterion for "hidden" is "no
+// human is expected to type this".
+//
+// # Version Stamping
+//
+// The build embeds the version string into the package
+// at link time via `-ldflags` (see Makefile `build`
+// target); the value is exposed through [Version] and
+// surfaced by `ctx --version`.
+//
+// # Concurrency
+//
+// Bootstrap runs once at process start. Concurrent
+// execution is not a concern; cobra serializes
+// subcommand dispatch.
+//
+// # Related Packages
+//
+//   - [internal/cli/...]                 — every
+//     subcommand package this layer wires together.
+//   - [internal/config/embed/cmd]        — the typed
+//     `Use` and `DescKey` constants the registered
+//     commands consume.
+//   - [internal/audit]                   — enforces
+//     structural invariants on the resulting tree.
 package bootstrap
diff --git a/internal/cli/add/cmd/root/doc.go b/internal/cli/add/cmd/root/doc.go
index 320685b49..e568d843f 100644
--- a/internal/cli/add/cmd/root/doc.go
+++ b/internal/cli/add/cmd/root/doc.go
@@ -1,13 +1,51 @@
 //   /    ctx:                         https://ctx.ist
 // ,'`./    do you remember?
-// `.,'\\
+// `.,'\
 //   \    Copyright 2026-present Context contributors.
 //                 SPDX-License-Identifier: Apache-2.0
 
-// Package root implements the ctx add command.
+// Package root implements **`ctx add`** — the command
+// that adds a new entry (task / decision / learning /
+// convention) to the corresponding `.context/` file with
+// validated provenance, canonical formatting, and an
+// auto-updated index table.
 //
-// [Cmd] builds the cobra.Command with type-specific flags.
-// [Run] validates arguments, extracts content from args or
-// --from-file, formats the entry using core/format, and inserts
-// it into the target context file using core/insert.
+// # Public Surface
+//
+//   - **[Cmd]** — cobra command with the type
+//     selector (`-t task|decision|learning|convention`)
+//     plus type-specific flags (`--priority`,
+//     `--rationale`, `--consequence`, `--lesson`,
+//     `--branch`, `--commit`, `--session-id`,
+//     `--from-file`, `--application`, etc.).
+//   - **[Run]** — validates the supplied flags
+//     against the type's required-fields list,
+//     extracts content from positional args or
+//     `--from-file`, formats the entry via the
+//     `core/format` siblings, and inserts it via
+//     [internal/cli/add/core/insert].
+//
+// # Validation Boundaries
+//
+// All hard checks (required fields, secret patterns,
+// length limits, provenance requirements per
+// `.ctxrc`) live in [internal/entry] so the rules
+// are identical regardless of caller (CLI here, MCP
+// `ctx_add` tool elsewhere).
+//
+// # Concurrency
+//
+// Single-process, sequential.
+//
+// # Related Packages
+//
+//   - [internal/cli/add/core/insert]   — section-
+//     aware insertion at the right place in the
+//     target file.
+//   - [internal/entry]                 — shared
+//     write-side validation + write API.
+//   - [internal/index]                 — auto-
+//     updated quick-reference table.
+//   - [internal/mcp/handler]           — sister
+//     entry path for MCP tool callers.
 package root
diff --git a/internal/cli/add/core/insert/doc.go b/internal/cli/add/core/insert/doc.go
index 00ff022ab..6fac6a1bc 100644
--- a/internal/cli/add/core/insert/doc.go
+++ b/internal/cli/add/core/insert/doc.go
@@ -1,15 +1,55 @@
 //   /    ctx:                         https://ctx.ist
 // ,'`./    do you remember?
-// `.,'\\
+// `.,'\
 //   \    Copyright 2026-present Context contributors.
 //                 SPDX-License-Identifier: Apache-2.0
 
-// Package insert handles section-aware insertion of entries into
-// context files.
-//
-// [AppendEntry] is the main entry point — it reads the target file,
-// finds the correct insertion point, and writes the updated content.
-// [AfterHeader] inserts below a specific heading, [Task] handles
-// task-specific logic (phase sections), and [AppendAtEnd] adds to
-// the file bottom as a fallback.
+// Package insert handles **section-aware insertion** of
+// new entries into context files — picking the right
+// location inside the target file (under the matching
+// Phase header, after the latest entry of the same type,
+// or at the file bottom as a fallback) instead of just
+// appending blindly.
+//
+// This package is the engine behind `ctx add` knowing
+// where to put things. Without it every add would dump at the
+// bottom and tasks would lose their phase grouping.
+//
+// # Public Surface
+//
+//   - **[AppendEntry](file, entry, opts)** — top-level
+//     entry point. Reads `file`, decides where to
+//     insert based on `opts.Type` and `opts.Phase`,
+//     writes the result back.
+//   - **[AfterHeader](lines, header, content)** —
+//     pure helper: insert `content` immediately after
+//     `header` (or at the end of `header`'s
+//     section, depending on the rule). Returns the
+//     new line slice.
+//   - **[Task](lines, entry, phase)** — task-specific
+//     placement: finds the right Phase header (per
+//     CONSTITUTION, tasks must stay in their Phase
+//     forever) and inserts under it.
+//   - **[AppendAtEnd](lines, content)** — fallback
+//     when no smarter location can be inferred.
+//
+// # Constitutional Honors
+//
+// The TASKS.md rule "tasks stay in their Phase
+// section permanently" is enforced here by
+// [Task]: a new task always gets the explicit Phase
+// header it was added under, never floats free.
+//
+// # Concurrency
+//
+// Filesystem-bound. Sequential within a single call.
+//
+// # Related Packages
+//
+//   - [internal/cli/add/cmd/root]    — chief
+//     consumer.
+//   - [internal/entry]               — calls into
+//     this package after validation.
+//   - [internal/index]               — runs after
+//     insert to update the file's index table.
 package insert
diff --git a/internal/cli/agent/cmd/root/doc.go b/internal/cli/agent/cmd/root/doc.go
index 2363bd5c1..6986559dc 100644
--- a/internal/cli/agent/cmd/root/doc.go
+++ b/internal/cli/agent/cmd/root/doc.go
@@ -1,13 +1,57 @@
 //   /    ctx:                         https://ctx.ist
 // ,'`./    do you remember?
-// `.,'\\
+// `.,'\
 //   \    Copyright 2026-present Context contributors.
 //                 SPDX-License-Identifier: Apache-2.0
 
-// Package root implements the ctx agent command for generating
-// AI-ready context packets.
+// Package root implements **`ctx agent`** — the command
+// that produces an AI-ready, token-budgeted context
+// packet for injection into the next prompt.
 //
-// [Cmd] builds the cobra.Command with --budget, --format, and
-// --json flags. [Run] loads context, assembles a budget-aware
-// packet via core/budget, and renders it as Markdown or JSON.
+// `ctx agent` is the most-called user-facing command in
+// production: tool integrations (Claude Code's
+// `PreToolUse` hook, Copilot CLI's session-start hook,
+// the Cursor MCP server) all invoke it on every prompt
+// to assemble what the AI sees.
+//
+// # Public Surface
+//
+//   - **[Cmd]** — cobra command with `--budget N`
+//     (default 8000), `--format markdown|json`,
+//     `--include-hub`, and the `--prompt <text>`
+//     companion that lets the budget allocator score
+//     for relevance against the user's actual prompt.
+//   - **[Run]** — loads context via
+//     [internal/context/load], optionally folds in
+//     hub entries (`--include-hub`), assembles the
+//     packet via [internal/cli/agent/core/budget],
+//     scores entries via
+//     [internal/cli/agent/core/score], and renders
+//     to stdout.
+//
+// # Performance
+//
+// The whole call typically completes in 50–150 ms on
+// a project with hundreds of entries. The cost is
+// dominated by file IO (the per-file token estimator
+// is fast), which is why
+// [internal/context/load] reads the smallest set of
+// files needed and the budget allocator stops as
+// soon as the budget is exhausted.
+//
+// # Concurrency
+//
+// Single-process, sequential.
+//
+// # Related Packages
+//
+//   - [internal/cli/agent/core/budget]   — the
+//     allocator that decides what makes the cut.
+//   - [internal/cli/agent/core/score]    — entry
+//     relevance scorer.
+//   - [internal/context/load]            — the
+//     context-loading layer.
+//   - [internal/cli/hub] /
+//     [internal/cli/connection]          — supply
+//     hub entries for `--include-hub`.
 package root
diff --git a/internal/cli/agent/core/budget/doc.go b/internal/cli/agent/core/budget/doc.go
index 1b5cfc462..96922b15f 100644
--- a/internal/cli/agent/core/budget/doc.go
+++ b/internal/cli/agent/core/budget/doc.go
@@ -1,16 +1,92 @@
 //   /    ctx:                         https://ctx.ist
 // ,'`./    do you remember?
-// `.,'\\
+// `.,'\
 //   \    Copyright 2026-present Context contributors.
 //                 SPDX-License-Identifier: Apache-2.0
 
-// Package budget implements the token-budgeted context assembly
-// algorithm for the agent command.
-//
-// [AssemblePacket] allocates tokens across seven tiers (constitution,
-// tasks, conventions, decisions, learnings, steering, skill). [Split] divides
-// remaining budget between two scored sections. [FillSection]
-// applies two-tier degradation: full entries then title-only
-// summaries. [FitItems] and [EstimateSliceTokens] handle
-// per-item token accounting.
+// Package budget implements the **token-budgeted context
+// assembly algorithm** behind `ctx agent`. Given a token budget
+// (`--budget N`, default 8000) and a loaded [entity.Context],
+// it produces an AI-ready packet that maximizes information
+// density without exceeding the budget — the single most
+// performance-sensitive operation in ctx because it runs at
+// the head of every prompt in tool integrations that use the
+// hook+MCP pipeline.
+//
+// # The Seven-Tier Allocation
+//
+// [AssemblePacket] walks the seven content tiers in priority
+// order, each with its own share of the budget:
+//
+//  1. **CONSTITUTION** — always full; never truncated.
+//  2. **TASKS** — current and pending work.
+//  3. **CONVENTIONS** — coding patterns the AI must follow.
+//  4. **DECISIONS** — index table, then full entries as
+//     budget permits.
+//  5. **LEARNINGS** — same shape as decisions.
+//  6. **STEERING** — matched files for this prompt.
+//  7. **SKILL** — bundled instructions if a skill matched.
+//
+// Lower tiers see whatever budget the higher tiers leave
+// behind. The constitution invariant — "context loading is the
+// first step of every session" — translates into "the
+// constitution is always in the packet, no exceptions".
+//
+// # Two-Tier Degradation
+//
+// [FillSection] handles the per-section degradation: when full
+// entries do not fit, it **falls back to title-only summaries**
+// (the index-table form) so the AI still sees that an entry
+// exists and can request it by ID. The degradation point is
+// chosen to maximize the count of entries the AI sees,
+// trading depth for breadth.
+//
+// # Splitting Between Two Sections
+//
+// [Split] divides the remaining budget between two scored
+// sections (typically DECISIONS vs LEARNINGS) using a
+// score-weighted ratio: a section with twice the relevance
+// score gets twice the budget share. Score comes from
+// [internal/cli/agent/core/score]; budget enforces.
+//
+// # Token Accounting
+//
+// [EstimateSliceTokens] is the rough-but-stable estimator
+// used throughout: ~4 chars per token for English Markdown,
+// with adjustments for code-fence-heavy content. It is not
+// the exact count the model will see but is consistent
+// enough to keep the assembled packet under budget.
+// [FitItems] is the greedy item picker: takes the highest-
+// scored items first, stops when the next one would push
+// the running total over budget.
+//
+// # Render Path
+//
+// [render.go] formats the assembled tiers into the final
+// markdown packet with section headers, separators, and
+// the read-order preamble the AI uses to navigate the
+// content. [out.go] writes the packet to stdout (or to the
+// MCP response, depending on caller).
+//
+// # Concurrency
+//
+// All functions are pure data transformations over the
+// loaded context. Concurrent callers never race; the
+// algorithm holds no module-level state.
+//
+// # Related Packages
+//
+//   - [internal/cli/agent]            — the `ctx agent`
+//     CLI surface that drives this package.
+//   - [internal/cli/agent/core/score] — the relevance
+//     scorer that ranks entries before [FillSection]
+//     consumes them.
+//   - [internal/context/load]         — produces the
+//     [entity.Context] this package consumes.
+//   - [internal/index]                — produces the
+//     index-table form used as the fallback.
+//   - [internal/steering]             — supplies matched
+//     steering files for the steering tier.
+//   - [internal/skill]                — supplies matched
+//     skill bundles for the skill tier.
 package budget
diff --git a/internal/cli/agent/core/score/doc.go b/internal/cli/agent/core/score/doc.go
index d5ad96275..70d72cd94 100644
--- a/internal/cli/agent/core/score/doc.go
+++ b/internal/cli/agent/core/score/doc.go
@@ -1,15 +1,66 @@
 //   /    ctx:                         https://ctx.ist
 // ,'`./    do you remember?
-// `.,'\\
+// `.,'\
 //   \    Copyright 2026-present Context contributors.
 //                 SPDX-License-Identifier: Apache-2.0
 
-// Package score computes relevance scores for context entries to
-// prioritize budget allocation.
-//
-// [Recency] scores by age (7d=1.0, 30d=0.7, 90d=0.4, older=0.2).
-// [Relevance] scores by keyword overlap with active tasks (0.0-1.0).
-// [Score] combines both into a 0.0-2.0 range. [All] scores a
-// batch of entries. [ExtractTaskKeywords] builds the keyword set
-// from active task text.
+// Package score computes **per-entry relevance scores** so
+// the budgeted context-assembly algorithm in
+// [internal/cli/agent/core/budget] can decide which
+// decisions, learnings, and conventions to inject when there
+// is not enough budget to inject all of them.
+//
+// The score is a deliberately simple two-component number:
+// **recency** plus **relevance to current work**. Either
+// component alone produces a poor ranking; together they
+// approximate "what would a helpful colleague pull off the
+// shelf?".
+//
+// # The Two Components
+//
+//   - **[Recency](entry)** — bucketed by age:
+//
+//     ≤  7 days   → 1.0
+//     ≤ 30 days   → 0.7
+//     ≤ 90 days   → 0.4
+//     older       → 0.2
+//
+//     Buckets (rather than a continuous decay) keep the
+//     ordering stable across small input shifts and make
+//     the scoring trivially debuggable.
+//
+//   - **[Relevance](entry, taskKeywords)** — fraction of
+//     the entry's salient tokens that overlap with
+//     [ExtractTaskKeywords](activeTasks). Range 0.0–1.0.
+//     Stop words come from the embedded list in
+//     [internal/assets/read/lookup.StopWords].
+//
+// [Score](entry, taskKeywords) sums the two for a 0.0–2.0
+// composite. [All](entries, taskKeywords) is the bulk
+// scorer that returns parallel slices for the budget
+// allocator.
+//
+// # Why Bucketed Recency
+//
+// A continuous exponential decay would be technically
+// purer but produces "score jitter" — entries reorder
+// minute-to-minute as their ages cross the decimal
+// boundary. Bucketed recency means an entry's relative
+// rank only changes when it crosses a real threshold
+// (week, month, quarter), which is the cadence at which
+// users actually expect their context to age.
+//
+// # Concurrency
+//
+// All functions are pure. Concurrent callers never race.
+//
+// # Related Packages
+//
+//   - [internal/cli/agent/core/budget]   — consumer of
+//     [All]; uses scores to allocate per-section budget
+//     and to pick top-N entries within a section.
+//   - [internal/assets/read/lookup]      — supplies
+//     [StopWords] for the keyword extractor.
+//   - [internal/entity]                  — entry domain
+//     types this package scores.
 package score
diff --git a/internal/cli/change/core/detect/doc.go b/internal/cli/change/core/detect/doc.go
index 62303cd45..8f0ca0716 100644
--- a/internal/cli/change/core/detect/doc.go
+++ b/internal/cli/change/core/detect/doc.go
@@ -1,13 +1,62 @@
 //   /    ctx:                         https://ctx.ist
 // ,'`./    do you remember?
-// `.,'\\
+// `.,'\
 //   \    Copyright 2026-present Context contributors.
 //                 SPDX-License-Identifier: Apache-2.0
 
-// Package detect resolves reference timestamps for change detection.
+// Package detect resolves the **reference timestamp**
+// `ctx change` uses to compute "what changed since". The
+// CLI offers three signals; this package picks the best
+// one and returns it.
 //
-// [FromMarkers] reads the session marker file for the last known
-// timestamp. [FromEvents] reads the event log. [ReferenceTime]
-// combines both with the --since flag to pick the best reference.
-// [ParseSinceFlag] parses user-provided duration or date strings.
+// `ctx change` answers "what moved since I was last in
+// this project?" — context file edits, code commits,
+// directories touched. Picking the right "since when"
+// is the package's only job.
+//
+// # The Three Signals
+//
+// In priority order:
+//
+//  1. **Explicit `--since`** — `ctx change --since
+//     2026-04-12` or `--since 3d`. Parsed by
+//     [ParseSinceFlag] into a time.Time.
+//  2. **Session marker** — `[FromMarkers]` reads
+//     `state/session-event.jsonl` for the timestamp
+//     of the last session-end event. The most useful
+//     "since" for "since I was last here".
+//  3. **Event log** — `[FromEvents]` falls back to
+//     the newest hook event timestamp when no
+//     session-end marker exists.
+//
+// [ReferenceTime] composes the three: returns the
+// `--since` value when set; otherwise the more recent
+// of the marker / event timestamps; falls back to
+// "30 days ago" when nothing is known.
+//
+// # Flag Parsing
+//
+// [ParseSinceFlag] accepts:
+//
+//   - **Date**     — `2026-04-12` (parsed as
+//     midnight UTC).
+//   - **Duration** — `3d`, `12h`, `2w`, `1m` (rich
+//     duration syntax beyond Go's stdlib).
+//   - **`yesterday`**, **`today`** — relative
+//     keywords.
+//
+// # Concurrency
+//
+// Filesystem-bound and stateless. Concurrent
+// callers never race.
+//
+// # Related Packages
+//
+//   - [internal/cli/change]    — chief consumer.
+//   - [internal/log/event]     — supplies the event
+//     log [FromEvents] reads.
+//   - [internal/cli/system/cmd/session_event] —
+//     writes the markers [FromMarkers] reads.
+//   - [internal/parse]         — supplies [Date]
+//     used by [ParseSinceFlag].
 package detect
diff --git a/internal/cli/config/core/profile/doc.go b/internal/cli/config/core/profile/doc.go
index 3e0628881..1614839ff 100644
--- a/internal/cli/config/core/profile/doc.go
+++ b/internal/cli/config/core/profile/doc.go
@@ -4,8 +4,59 @@
 //   \    Copyright 2026-present Context contributors.
 //                 SPDX-License-Identifier: Apache-2.0
 
-// Package profile manages .ctxrc profile detection, copying,
-// and switching. Supports toggling between dev and base
-// profiles, with prod as an alias for base. Also provides
-// git root detection for profile file operations.
+// Package profile manages **`.ctxrc` profile detection,
+// copying, and switching** — the engine behind
+// `ctx config switch` that lets a user toggle between
+// (typically) `dev` and `base` configurations without
+// hand-editing `.ctxrc`.
+//
+// The package is the *mutator*; the read-side cache
+// lives in [internal/rc].
+//
+// # The Profile Convention
+//
+// Profiles are stored as **per-profile files** in the
+// project root:
+//
+//   - `.ctxrc`             — the active configuration.
+//   - `.ctxrc.dev`         — the dev profile (verbose
+//     logs, webhook events, ...).
+//   - `.ctxrc.base`        — the base / production
+//     profile (clean defaults).
+//
+// `prod` is recognized as an alias for `base`. New
+// profiles plug in as `.ctxrc.<name>`.
+//
+// # Public Surface
+//
+//   - **[Active]** — returns the name of the
+//     currently-active profile (read from `.ctxrc`'s
+//     `profile:` field).
+//   - **[Detect](root)** — lists every available
+//     profile (by glob).
+//   - **[Switch](root, name)** — copies
+//     `.ctxrc.<name>` over `.ctxrc`. Atomic via the
+//     standard write-temp-rename pattern. Refuses
+//     to switch to an unknown profile.
+//   - **[GitRoot]** — resolves the project's git
+//     root for path operations (the profile files
+//     live there, not in the current working
+//     subdirectory).
+//
+// # Concurrency
+//
+// Filesystem-bound and stateless. `ctx` is
+// single-process; concurrent switches are not a
+// design concern.
+//
+// # Related Packages
+//
+//   - [internal/cli/config/cmd/switchcmd]   — the
+//     `ctx config switch` CLI surface.
+//   - [internal/cli/config/cmd/status]      — the
+//     `ctx config status` surface that calls
+//     [Active] and [Detect].
+//   - [internal/rc]                         — the
+//     read-side cache that picks up the new
+//     `.ctxrc` after a switch.
 package profile
diff --git a/internal/cli/connection/core/render/doc.go b/internal/cli/connection/core/render/doc.go
index 3f4301a11..e7ed5c115 100644
--- a/internal/cli/connection/core/render/doc.go
+++ b/internal/cli/connection/core/render/doc.go
@@ -4,10 +4,56 @@
 //   \    Copyright 2026-present Context contributors.
 //                 SPDX-License-Identifier: Apache-2.0
 
-// Package render converts hub entries to markdown files
-// in .context/hub/ with origin tags and date headers.
+// Package render is the **client-side renderer** that
+// turns hub entries received from `ctx connection sync` /
+// `ctx connection listen` into markdown files under
+// `.context/hub/` so the local agent can read them.
 //
-// Key exports: [WriteEntries].
-// See source files for implementation details.
-// Part of the internal subsystem.
+// Each entry becomes a markdown block with a date
+// header, an origin tag (which project published it),
+// and the entry body, all separated by horizontal
+// rules. The format is the same one defined by
+// [internal/assets/tpl.HubEntryMarkdown] so what
+// ships through the gRPC pipe is what lands on disk.
+//
+// # Public Surface
+//
+//   - **[WriteEntries](dir, entries)** — appends
+//     each entry to the matching per-type file
+//     (`decisions.md`, `learnings.md`,
+//     `conventions.md`, `tasks.md`) under `dir`,
+//     formatting via [HubEntryMarkdown]. Idempotent
+//     by entry sequence number — re-running with
+//     the same sequence range produces no
+//     duplicates because the importer tracks last-
+//     seen sequence per file.
+//
+// # File Layout
+//
+//   - `.context/hub/decisions.md`
+//   - `.context/hub/learnings.md`
+//   - `.context/hub/conventions.md`
+//   - `.context/hub/tasks.md`
+//   - `.context/hub/.sync-state.json`  — last-seen
+//     sequence per type so resume is exact.
+//
+// # Concurrency
+//
+// Filesystem-bound. Concurrent renderers against
+// the same hub directory would race; the
+// `ctx connection listen` daemon is single-instance
+// per project by convention.
+//
+// # Related Packages
+//
+//   - [internal/cli/connection]      — the
+//     `ctx connection sync` / `listen` CLI
+//     surface that drives [WriteEntries].
+//   - [internal/hub]                 — server side;
+//     produces the entries this package writes.
+//   - [internal/assets/tpl]          — supplies
+//     [HubEntryMarkdown].
+//   - [internal/cli/agent]           —
+//     `--include-hub` reads the files this
+//     package writes.
 package render
diff --git a/internal/cli/connection/core/render/format.go b/internal/cli/connection/core/render/format.go
index 39c9a0bd9..710b0c359 100644
--- a/internal/cli/connection/core/render/format.go
+++ b/internal/cli/connection/core/render/format.go
@@ -75,7 +75,7 @@ func writeEntry(b *strings.Builder, e *hub.EntryMsg) {
 	ts := time.Unix(e.Timestamp, 0).UTC()
 	date := ts.Format(cfgTime.DateFormat)
 	if _, err := fmt.Fprintf(b,
-		tpl.TplEntryMarkdown,
+		tpl.HubEntryMarkdown,
 		date, firstLine(e.Content),
 		e.Origin, e.Content,
 	); err != nil {
diff --git a/internal/cli/decision/cmd/reindex/cmd.go b/internal/cli/decision/cmd/reindex/cmd.go
index e8a47eb33..4ac00b675 100644
--- a/internal/cli/decision/cmd/reindex/cmd.go
+++ b/internal/cli/decision/cmd/reindex/cmd.go
@@ -4,7 +4,6 @@
 //   \    Copyright 2026-present Context contributors.
 //                 SPDX-License-Identifier: Apache-2.0
 
-// Package reindex provides the "ctx decisions reindex" subcommand.
 package reindex
 
 import (
diff --git a/internal/cli/decision/decision.go b/internal/cli/decision/decision.go
index b2afa6079..c220c1ab3 100644
--- a/internal/cli/decision/decision.go
+++ b/internal/cli/decision/decision.go
@@ -4,7 +4,6 @@
 //   \    Copyright 2026-present Context contributors.
 //                 SPDX-License-Identifier: Apache-2.0
 
-// Package decision provides commands for managing DECISIONS.md.
 package decision
 
 import (
diff --git a/internal/cli/doctor/core/check/doc.go b/internal/cli/doctor/core/check/doc.go
index f5785cea0..0833fd299 100644
--- a/internal/cli/doctor/core/check/doc.go
+++ b/internal/cli/doctor/core/check/doc.go
@@ -4,9 +4,75 @@
 //   \    Copyright 2026-present Context contributors.
 //                 SPDX-License-Identifier: Apache-2.0
 
-// Package check runs health checks and builds a diagnostic
-// report. Covers context initialization, required files,
-// .ctxrc validation, drift detection, plugin enablement,
-// event logging, reminders, task completion, token budgets,
-// and system resource metrics.
+// Package check is the **brain** of `ctx doctor`: a battery of
+// independent health probes that together produce a single
+// diagnostic report covering everything that can plausibly be
+// wrong with a ctx installation or with the project's
+// `.context/` state.
+//
+// The package is the only thing the doctor CLI calls on the
+// "produce findings" side. The doctor command itself
+// orchestrates output; this package decides what to look at
+// and how to grade it.
+//
+// # The Probe Surface
+//
+// Each probe answers exactly one yes/no question and emits a
+// [CheckResult] with a name, status (Ok / Warning / Error),
+// and a one-line message. The full battery, run by [Run]:
+//
+//   - **Context initialization** — `.context/` exists and
+//     is populated.
+//   - **Required files** — TASKS, DECISIONS, LEARNINGS,
+//     CONVENTIONS, ARCHITECTURE, CONSTITUTION present.
+//   - **`.ctxrc` validation** — file parses, all values
+//     within range.
+//   - **Drift** — wraps [internal/drift.Detect] and
+//     surfaces the report's status.
+//   - **Plugin enablement** — Claude Code plugin
+//     installed AND enabled in `~/.claude/settings.json`.
+//   - **Event logging** — if `event_log: true`, the log
+//     file exists and is writable.
+//   - **Reminders** — pending reminder count and freshness.
+//   - **Task completion** — open task count, oldest open
+//     task age (consolidation nudge threshold).
+//   - **Token budgets** — currently injected size against
+//     the configured `injection_token_warn` and
+//     `context_window`.
+//   - **System resource metrics** — wraps
+//     [internal/sysinfo] to surface load/memory/disk
+//     pressure.
+//
+// New probes plug in by adding one more entry to the
+// dispatch table in [check.go] and one more constant to
+// [config/check.Name] (audited to keep CLI output stable).
+//
+// # Severity Roll-Up
+//
+// Each probe produces its own status. The doctor CLI rolls
+// the slice up to a single banner per the same rule the
+// drift package uses: any **Error** beats any **Warning**
+// beats **Ok**. JSON output preserves the per-probe detail
+// for tooling.
+//
+// # Stateless and Concurrency-Safe
+//
+// Probes hold no state and do not coordinate. They could
+// be parallelized; they currently run sequentially because
+// the slowest probe (`sysinfo` shelling out on macOS) is
+// still under 100ms and the simpler ordering keeps output
+// deterministic.
+//
+// # Related Packages
+//
+//   - [internal/cli/doctor]       — the `ctx doctor` CLI
+//     surface that consumes [Run] and renders the report.
+//   - [internal/drift]            — the drift detector
+//     this package wraps.
+//   - [internal/sysinfo]          — the resource probes
+//     this package wraps.
+//   - [internal/rc]               — supplies thresholds
+//     and feature flags.
+//   - [internal/config/check]     — probe-name
+//     constants used as keys in the report.
 package check
diff --git a/internal/cli/doctor/doc.go b/internal/cli/doctor/doc.go
index d4892f700..dc165a671 100644
--- a/internal/cli/doctor/doc.go
+++ b/internal/cli/doctor/doc.go
@@ -1,13 +1,58 @@
 //   /    ctx:                         https://ctx.ist
 // ,'`./    do you remember?
-// `.,'\\
+// `.,'\
 //   \    Copyright 2026-present Context contributors.
 //                 SPDX-License-Identifier: Apache-2.0
 
-// Package doctor implements the ctx doctor command group for
-// troubleshooting context health.
+// Package doctor implements **`ctx doctor`** — the
+// one-stop structural-health command users (and
+// onboarding scripts) run when something feels off:
+// hooks not firing, drift accumulating, plugin not
+// enabled, settings file half-merged, etc.
 //
-// Runs structural health checks, analyzes event log patterns,
-// and presents findings with suggested actions. Supports both
-// human-readable checklist and JSON output formats.
+// The doctor is a *shell*: it asks
+// [internal/cli/doctor/core/check] for the full battery
+// of probes, then renders the results in either
+// human-readable checklist form (default) or structured
+// JSON form (`--json`).
+//
+// # Default Output
+//
+// The checklist groups probes by category (Setup,
+// Context, Plugin, State, Resources) and renders each
+// with a status glyph (`✓`, `⚠`, `✗`) plus a one-line
+// message. The roll-up banner at the end summarizes:
+// "all good", "warnings present", or "violations
+// present" — matching the same severity ladder
+// [internal/drift] uses.
+//
+// # JSON Output
+//
+// `ctx doctor --json` emits one record per probe with
+// `name`, `status`, `message`, and any structured
+// detail. Used by CI and by the `_ctx-doctor` skill
+// when the AI is the consumer.
+//
+// # Exit Codes
+//
+//   - **0** — all checks passed.
+//   - **1** — warnings present.
+//   - **3** — violations present (so CI scripts can
+//     gate on `>= 3`).
+//
+// # Sub-Packages
+//
+//   - **[core/check]**   — the actual probe battery
+//     (no UI, no CLI parsing).
+//
+// # Related Packages
+//
+//   - [internal/cli/doctor/core/check]   — the
+//     measurement layer.
+//   - [internal/drift]                   — wrapped by
+//     the drift probe.
+//   - [internal/sysinfo]                 — wrapped by
+//     the resource probes.
+//   - [internal/write/doctor]            — terminal
+//     output formatters.
 package doctor
diff --git a/internal/cli/doctor/doctor.go b/internal/cli/doctor/doctor.go
index 0ef4086df..661d18ad0 100644
--- a/internal/cli/doctor/doctor.go
+++ b/internal/cli/doctor/doctor.go
@@ -4,8 +4,6 @@
 //   \    Copyright 2026-present Context contributors.
 //                 SPDX-License-Identifier: Apache-2.0
 
-// Package doctor provides the "ctx doctor" command for structural
-// health checks across context, hooks, and configuration.
 package doctor
 
 import (
diff --git a/internal/cli/drift/core/fix/doc.go b/internal/cli/drift/core/fix/doc.go
index 16b54a085..5006b4a13 100644
--- a/internal/cli/drift/core/fix/doc.go
+++ b/internal/cli/drift/core/fix/doc.go
@@ -4,8 +4,58 @@
 //   \    Copyright 2026-present Context contributors.
 //                 SPDX-License-Identifier: Apache-2.0
 
-// Package fix auto-fixes drift issues like staleness and
-// missing required files. Archives completed tasks from the
-// Completed section and creates missing context files from
-// templates. Issues like dead paths and secrets are skipped.
+// Package fix is the **auto-remediation half** of
+// `ctx drift`: given a [drift.Report], it applies the
+// fixes the package knows how to apply safely (archiving
+// completed tasks, creating missing required files from
+// templates) and skips the issues that need human
+// judgment (dead paths, leaked secrets, constitution
+// violations).
+//
+// The package is the conservative side of the drift
+// loop. Anything that could be wrong if applied
+// blindly stays in the report and the user fixes it
+// by hand.
+//
+// # What Gets Auto-Fixed
+//
+//   - **Stale-completed tasks** — tasks marked `[x]`
+//     in the body of TASKS.md (not in a Completed
+//     section) are archived via [internal/tidy].
+//   - **Missing required files** — empty placeholders
+//     for the foundation files (CONSTITUTION,
+//     CONVENTIONS, etc.) are deployed from the
+//     embedded templates.
+//
+// # What Stays Manual
+//
+//   - **Dead path references** — the package cannot
+//     know whether a path is genuinely gone or just
+//     temporarily missing.
+//   - **Leaked secrets** — the user must redact and
+//     rotate; auto-removal could corrupt history.
+//   - **Constitution violations** — the user agreed
+//     to the rule and must un-violate it.
+//   - **File-age warnings** — staleness is
+//     informational, not fixable.
+//
+// # Public Surface
+//
+//   - **[Apply](report, contextDir)** — walks the
+//     report, applies fixable issues, returns a
+//     summary of what was changed and what was
+//     skipped.
+//
+// # Concurrency
+//
+// Filesystem-bound. Single-process, sequential.
+//
+// # Related Packages
+//
+//   - [internal/cli/drift]    — the `ctx drift
+//     --fix` CLI surface.
+//   - [internal/drift]        — produces the
+//     report this package consumes.
+//   - [internal/tidy]         — supplies the
+//     archive primitives.
 package fix
diff --git a/internal/cli/drift/core/out/doc.go b/internal/cli/drift/core/out/doc.go
index 666c171d6..ea9ae85db 100644
--- a/internal/cli/drift/core/out/doc.go
+++ b/internal/cli/drift/core/out/doc.go
@@ -4,8 +4,50 @@
 //   \    Copyright 2026-present Context contributors.
 //                 SPDX-License-Identifier: Apache-2.0
 
-// Package out formats drift reports as text or JSON. Text
-// output groups violations, warnings by type (path refs,
-// staleness, other), and passed checks with icons. JSON
-// output includes a UTC timestamp and full issue details.
+// Package out is the **rendering half** of `ctx drift`:
+// given a [drift.Report], it formats the report for
+// either humans (terminal text with icons and
+// section grouping) or machines (JSON for tooling and
+// CI pipelines).
+//
+// # Public Surface
+//
+//   - **[Text](report, w)** — writes the
+//     human-readable report to `w`. Groups
+//     violations and warnings by issue type
+//     (path refs, staleness, missing files,
+//     other) so similar issues cluster. Renders
+//     each with status glyphs (`✗`, `⚠`, `✓`)
+//     and a one-line message; passed checks are
+//     listed at the bottom.
+//   - **[JSON](report, w)** — writes a
+//     structured JSON document with a UTC
+//     timestamp, the per-issue detail (file,
+//     line, type, message, path, rule), and the
+//     passed-check list. Stable shape suitable
+//     for `jq` parsing in CI scripts.
+//
+// # Why Two Renderers
+//
+// Humans want skimmable output with visual
+// grouping; CI wants stable JSON with explicit
+// types. Hoisting both into a single output
+// package keeps the formatting choices in one
+// place and the underlying data shape (the
+// [drift.Report]) decoupled from how it's
+// presented.
+//
+// # Concurrency
+//
+// Pure data → io.Writer transformation.
+// Concurrent callers never race.
+//
+// # Related Packages
+//
+//   - [internal/cli/drift]   — chief consumer.
+//   - [internal/drift]       — produces the
+//     [Report] this package renders.
+//   - [internal/cli/doctor]  — sister renderer
+//     for the doctor's report (similar shape,
+//     different roll-up).
 package out
diff --git a/internal/cli/event/cmd.go b/internal/cli/event/cmd.go
index f2b19645d..230f98787 100644
--- a/internal/cli/event/cmd.go
+++ b/internal/cli/event/cmd.go
@@ -17,7 +17,7 @@ import (
 	"github.com/ActiveMemory/ctx/internal/flagbind"
 )
 
-// Cmd returns the "ctx event" top-level command.
+// Cmd returns the "ctx hook event" command.
 //
 // Returns:
 //   - *cobra.Command: Configured event command
diff --git a/internal/cli/event/doc.go b/internal/cli/event/doc.go
index 70d6fd892..21e54bc48 100644
--- a/internal/cli/event/doc.go
+++ b/internal/cli/event/doc.go
@@ -4,7 +4,7 @@
 //   \    Copyright 2026-present Context contributors.
 //                 SPDX-License-Identifier: Apache-2.0
 
-// Package event implements the ctx event top-level command.
+// Package event implements the ctx hook event command.
 //
 // Queries the local hook event log, filtered by hook name,
 // session ID, and event type. Supports JSON output and count
diff --git a/internal/cli/hub/core/server/doc.go b/internal/cli/hub/core/server/doc.go
index cc7b749d9..7fc9b3a78 100644
--- a/internal/cli/hub/core/server/doc.go
+++ b/internal/cli/hub/core/server/doc.go
@@ -4,10 +4,59 @@
 //   \    Copyright 2026-present Context contributors.
 //                 SPDX-License-Identifier: Apache-2.0
 
-// Package server implements the ctx Hub server
-// startup logic for ctx serve --hub.
+// Package server is the **server-side runtime** for
+// `ctx hub start` — daemon lifecycle, PID file management,
+// and the wire-up between the [internal/hub] package and
+// the user-facing CLI flags (`--port`, `--peers`,
+// `--daemon`).
 //
-// Key exports: [Run], [DefaultPort].
-// See source files for implementation details.
-// Part of the internal subsystem.
+// The package is the bridge: [internal/hub] knows how to
+// be a hub, this package knows how to *run* one as a
+// daemon process.
+//
+// # Public Surface
+//
+//   - **[Run](opts)** — foreground server boot. Binds
+//     the listener, instantiates the [hub.Server],
+//     wires the optional [hub.Cluster] when `--peers`
+//     is passed, blocks on serve. Honors signals
+//     (SIGINT, SIGTERM) for graceful shutdown.
+//   - **[DefaultPort]** — the canonical port (9900)
+//     used by docs, examples, and the recipes.
+//
+// # Daemon Mode
+//
+// When the user passes `--daemon`, the parent forks a
+// detached child, writes `<data-dir>/hub.pid` with the
+// child's PID, and exits. The PID file is what
+// `ctx hub stop` consumes to send SIGTERM.
+//
+// # PID File Lifecycle
+//
+//   - **Created** atomically on daemon start.
+//   - **Removed** by the child on graceful shutdown.
+//   - **Stale-detected** by `ctx hub status` (PID does
+//     not refer to a running process) so a crashed
+//     hub does not block a fresh start.
+//
+// # Concurrency
+//
+// The server runs in the same process as gRPC
+// dispatch; this package starts it and waits. No
+// in-process concurrency primitives beyond what
+// [internal/hub] and the gRPC runtime already provide.
+//
+// # Related Packages
+//
+//   - [internal/hub]                    — the actual
+//     hub implementation (storage, RPC, fan-out, raft
+//     election).
+//   - [internal/cli/hub/cmd/start] /
+//     [internal/cli/hub/cmd/stop] /
+//     [internal/cli/hub/cmd/status] /
+//     [internal/cli/hub/cmd/peer] /
+//     [internal/cli/hub/cmd/stepdown]   — CLI
+//     commands that call into this package.
+//   - [internal/config/hub]             — port,
+//     PID-file name, and data-dir constants.
 package server
diff --git a/internal/cli/initialize/core/claude_check/doc.go b/internal/cli/initialize/core/claude_check/doc.go
index f88128218..b94f2303d 100644
--- a/internal/cli/initialize/core/claude_check/doc.go
+++ b/internal/cli/initialize/core/claude_check/doc.go
@@ -4,17 +4,51 @@
 //   \    Copyright 2026-present Context contributors.
 //                 SPDX-License-Identifier: Apache-2.0
 
-// Package claude_check detects the state of Claude Code and
-// the ctx plugin so `ctx init` and `ctx setup claude-code`
-// can print stage-aware setup guidance.
+// Package claude_check detects the install state of Claude
+// Code and the ctx plugin so `ctx init` and `ctx setup
+// claude-code` can print **stage-aware** guidance instead of
+// dumping every possible setup step at once.
 //
-// The detector answers four questions, ordered:
+// The detector answers four questions in order, with each
+// negative answer short-circuiting the cascade:
 //
-//  1. Is the `claude` binary on PATH?
-//  2. Is the ctx plugin registered in
-//     ~/.claude/plugins/installed_plugins.json?
-//  3. Is the plugin enabled globally or locally?
-//  4. (derived) Is the setup ready to use?
+//  1. **Is the `claude` binary on PATH?** If not, suggest
+//     installing Claude Code.
+//  2. **Is the ctx plugin registered** in
+//     `~/.claude/plugins/installed_plugins.json`? If not,
+//     suggest `claude plugin install ...`.
+//  3. **Is the plugin enabled** globally or in the project's
+//     `.claude/settings.local.json`? If not, suggest the
+//     enable command.
+//  4. **Are MCP, hooks, and slash commands ready?** If not,
+//     suggest the missing pieces.
 //
-// Key exports: [State], [Detect].
+// # Public Surface
+//
+//   - **[State]** — the four-bool detection result plus a
+//     [PluginDetails] struct with version, install path,
+//     and registration scope.
+//   - **[Detect]** — runs the cascade, returns a [State].
+//     Pure detection: no installation, no mutation.
+//   - **[Details]** — loads rich metadata about the
+//     installed plugin (version, marketplace pin,
+//     install timestamp). Returns a zero value with
+//     `ok == false` when the plugin is not registered.
+//
+// # Concurrency
+//
+// All functions are read-only against the user's home
+// directory; concurrent calls never race. Results are
+// not cached because users frequently install /
+// uninstall mid-session and stale-cache bugs are worse
+// than the trivial re-read cost.
+//
+// # Related Packages
+//
+//   - [internal/cli/initialize]            — top-level
+//     `ctx init` orchestrator that consumes [Detect].
+//   - [internal/cli/initialize/core/plugin] — the
+//     mutator counterpart that flips the enabled bit.
+//   - [internal/cli/setup]                 — the
+//     `ctx setup claude-code` CLI surface.
 package claude_check
diff --git a/internal/cli/initialize/core/merge/doc.go b/internal/cli/initialize/core/merge/doc.go
index 4e2ffd1c4..f441bf08a 100644
--- a/internal/cli/initialize/core/merge/doc.go
+++ b/internal/cli/initialize/core/merge/doc.go
@@ -1,14 +1,75 @@
 //   /    ctx:                         https://ctx.ist
 // ,'`./    do you remember?
-// `.,'\\
+// `.,'\
 //   \    Copyright 2026-present Context contributors.
 //                 SPDX-License-Identifier: Apache-2.0
 
-// Package merge handles create-or-merge file operations during init.
+// Package merge implements the **create-or-merge** file
+// operations that make `ctx init` safely idempotent: each
+// foundation file is either created from a template or, if
+// already present, has only its **ctx-managed marker
+// section** updated — never the user-edited surrounding
+// content.
 //
-// [OrCreate] creates a file from template, or merges the template's
-// marked section into an existing file. [UpdateMarkedSection]
-// replaces content between start/end markers. [SettingsPermissions]
-// merges Claude Code permission settings. [Permissions] deduplicates
-// and merges allow/deny permission lists.
+// The package solves the "I edited my CONSTITUTION.md and
+// re-ran `ctx init` — did I lose my edits?" problem by
+// making "yes, you keep them" the only possible answer.
+//
+// # Public Surface
+//
+//   - **[OrCreate](path, template, vars)** — file does
+//     not exist → write the template (with `vars`
+//     interpolated). File exists → run
+//     [UpdateMarkedSection] on it. Always creates a
+//     timestamped `.bak` before writing. Returns a
+//     report indicating which path was taken.
+//   - **[UpdateMarkedSection](existing, newSection,
+//     start, end)** — finds the `start` and `end` marker
+//     lines in `existing` and replaces only the content
+//     between them. If the markers are missing, the
+//     section is inserted at the bottom of the file with
+//     the markers added so the next run becomes a true
+//     in-place update.
+//   - **[SettingsPermissions](path, allow, deny)** —
+//     specialized merger for Claude Code permission
+//     lists; preserves user-added entries while ensuring
+//     the ctx-required entries are present.
+//   - **[Permissions](existing, additions)** —
+//     deduplicating list union used by the settings
+//     merger and by `_ctx-permission-sanitize`.
+//
+// # Marker Convention
+//
+// ctx-managed sections are bracketed by HTML-comment
+// markers:
+//
+//	<!-- ctx:begin:<section> -->
+//	... ctx-managed content ...
+//	<!-- ctx:end:<section> -->
+//
+// The markers are invisible in rendered Markdown but
+// trivially greppable. Constants for the well-known
+// pairs live in [internal/config/marker].
+//
+// # Backup Policy
+//
+// Every write goes through a timestamped backup
+// (`.bak.YYYY-MM-DD-HHMMSS`). Backups accumulate;
+// `ctx prune` cleans them on schedule. The trade-off is
+// disk space for accident recovery, which the user
+// always wants.
+//
+// # Concurrency
+//
+// Filesystem-bound and stateless; serialized through
+// process-level execution.
+//
+// # Related Packages
+//
+//   - [internal/cli/initialize]    — invokes [OrCreate]
+//     for every foundation file.
+//   - [internal/config/marker]     — marker-pair
+//     constants.
+//   - [internal/assets]            — provides the
+//     templates passed to [OrCreate].
 package merge
diff --git a/internal/cli/initialize/core/plugin/doc.go b/internal/cli/initialize/core/plugin/doc.go
index 6576631ef..fc97bdcad 100644
--- a/internal/cli/initialize/core/plugin/doc.go
+++ b/internal/cli/initialize/core/plugin/doc.go
@@ -1,14 +1,59 @@
 //   /    ctx:                         https://ctx.ist
 // ,'`./    do you remember?
-// `.,'\\
+// `.,'\
 //   \    Copyright 2026-present Context contributors.
 //                 SPDX-License-Identifier: Apache-2.0
 
-// Package plugin handles Claude Code plugin detection and enablement
-// during initialization.
+// Package plugin handles **Claude Code plugin enablement**
+// during `ctx init` — the read/write side of the same
+// settings files that
+// [internal/cli/initialize/core/claude_check] only reads.
 //
-// [EnableGlobally] registers the ctx plugin in Claude Code's global
-// settings. [Installed] checks if the plugin binary exists.
-// [EnabledGlobally] and [EnabledLocally] check registration status
-// in global and project-level settings respectively.
+// Claude Code keeps two layers of plugin state:
+//
+//   - **Global** — `~/.claude/settings.json`'s
+//     `enabledPlugins` map. Affects every project on the
+//     machine.
+//   - **Local** — `<project>/.claude/settings.local.json`'s
+//     `enabledPlugins` map. Affects only this project.
+//
+// Both can independently mark a plugin as enabled. ctx
+// prefers global enablement so users do not have to
+// re-flip the bit per project, but supports local-only
+// enablement for users who segment configs.
+//
+// # Public Surface
+//
+//   - **[Installed](pluginID)** — true when the plugin
+//     binary is registered in
+//     `~/.claude/plugins/installed_plugins.json`.
+//   - **[EnabledGlobally](pluginID)** — true when the
+//     plugin is enabled in the global settings file.
+//   - **[EnabledLocally](projectRoot, pluginID)** —
+//     true when the plugin is enabled in the project's
+//     local settings file.
+//   - **[EnableGlobally](pluginID)** — atomically merges
+//     the plugin into the global `enabledPlugins` map.
+//     Idempotent. Creates the settings file if missing.
+//
+// # Settings-File Editing Contract
+//
+// All writes are **JSON-merge-aware**: existing keys are
+// preserved, only `enabledPlugins.<plugin-id>` is touched.
+// A pre-write backup (`.bak`) is created so a manual
+// rollback is one `mv` away.
+//
+// # Concurrency
+//
+// Filesystem-bound and stateless; serialized through
+// process-level execution.
+//
+// # Related Packages
+//
+//   - [internal/cli/initialize]                — top-level
+//     orchestrator.
+//   - [internal/cli/initialize/core/claude_check] —
+//     read-only detection.
+//   - [internal/config/claude]                 — settings
+//     file paths and JSON key constants.
 package plugin
diff --git a/internal/cli/initialize/core/project/doc.go b/internal/cli/initialize/core/project/doc.go
index ea9ea4ca8..22615fb60 100644
--- a/internal/cli/initialize/core/project/doc.go
+++ b/internal/cli/initialize/core/project/doc.go
@@ -1,13 +1,63 @@
 //   /    ctx:                         https://ctx.ist
 // ,'`./    do you remember?
-// `.,'\\
+// `.,'\
 //   \    Copyright 2026-present Context contributors.
 //                 SPDX-License-Identifier: Apache-2.0
 
-// Package project handles project-root directory and file creation
-// during initialization.
+// Package project handles **project-root scaffolding** during
+// `ctx init` — creating the `.context/` directory tree with
+// the right permissions and deploying optional Makefile
+// integration when the host project already uses Make.
 //
-// [CreateDirs] creates the .context/ directory tree with proper
-// permissions. [HandleMakefileCtx] deploys the Makefile.ctx
-// template if it does not already exist.
+// The package is the *filesystem layer* of init; the foundation
+// **content** comes from [internal/cli/initialize/core/merge]
+// and [internal/assets].
+//
+// # Public Surface
+//
+//   - **[CreateDirs](contextDir)** — creates the
+//     `.context/` tree:
+//   - `.context/`          (0o755)
+//   - `.context/archive/`  for archived tasks/decisions
+//   - `.context/state/`    for per-session markers,
+//     events, trace history (mode 0o755 — readable by
+//     hooks)
+//   - `.context/journal/`  for enriched journal entries
+//   - `.context/memory/`   for the Claude-Code memory
+//     mirror
+//   - `.context/steering/` for steering files
+//   - `.context/hooks/`    for project-authored
+//     lifecycle scripts
+//     Idempotent: existing directories are left in place
+//     with their existing permissions.
+//   - **[HandleMakefileCtx](projectRoot)** — when a
+//     `Makefile` already exists at the project root,
+//     deploys `Makefile.ctx` from the embedded template
+//     so users can run `make ctx-status`, `make
+//     ctx-agent`, etc. Skipped when the project has no
+//     Makefile (avoids polluting non-Make projects).
+//
+// # Permissions Rationale
+//
+// The hooks directory needs `0o755` (not `0o700`) because
+// child hook scripts launched by AI tools may inherit
+// reduced privileges; making the directory world-readable
+// avoids "cannot stat" failures across user/agent
+// boundaries. State files are `0o644` for the same
+// reason.
+//
+// # Concurrency
+//
+// Filesystem-bound and stateless. Concurrent invocations
+// against the same root would race on `MkdirAll` writes;
+// in practice ctx is single-process.
+//
+// # Related Packages
+//
+//   - [internal/cli/initialize]              — top-level
+//     orchestrator.
+//   - [internal/cli/initialize/core/merge]   — populates
+//     the directories with template content.
+//   - [internal/config/dir], [internal/config/fs]   —
+//     directory-name and permission constants.
 package project
diff --git a/internal/cli/initialize/doc.go b/internal/cli/initialize/doc.go
index cd10c687b..59528f415 100644
--- a/internal/cli/initialize/doc.go
+++ b/internal/cli/initialize/doc.go
@@ -4,11 +4,88 @@
 //   \    Copyright 2026-present Context contributors.
 //                 SPDX-License-Identifier: Apache-2.0
 
-// Package initialize implements the "ctx init" command for initializing a
-// .context/ directory with template files.
+// Package initialize implements **`ctx init`** — the first
+// command a user runs against a project to bootstrap the
+// `.context/` directory, scaffold the foundation files, and
+// optionally wire the Claude Code plugin and other tool
+// integrations.
 //
-// The init command creates the foundation for persistent AI context by
-// generating template files for constitution rules, tasks, decisions,
-// learnings, conventions, and architecture documentation. It also sets
-// up Claude Code integration with hooks and slash commands.
+// `ctx init` is the entry point that turns "a directory" into
+// "a ctx-managed project". Its idempotency is a hard
+// requirement: running it twice in a row must produce no
+// destructive changes — only fresh foundation files where
+// they were missing, and merge-aware updates to settings
+// files that already exist.
+//
+// # What `ctx init` Creates
+//
+// On a clean directory the command produces:
+//
+//   - **`.context/` tree** — the dir itself plus
+//     `archive/`, `state/`, `journal/`, `memory/`,
+//     `steering/`, `hooks/` subdirectories with sane
+//     permissions ([core/project]).
+//   - **Foundation files** — `CONSTITUTION.md`,
+//     `TASKS.md`, `DECISIONS.md`, `LEARNINGS.md`,
+//     `CONVENTIONS.md`, `ARCHITECTURE.md`, `GLOSSARY.md`,
+//     each from a template with the project name
+//     interpolated.
+//   - **Steering scaffold** — four foundation steering
+//     files (`product.md`, `tech.md`, `structure.md`,
+//     `workflow.md`) under `.context/steering/`.
+//   - **`Makefile.ctx`** — optional; deployed when the
+//     project has a `Makefile` so users can `make
+//     ctx-status` etc.
+//   - **Tool wiring** — Claude Code plugin enablement,
+//     Copilot instructions, VS Code tasks, MCP config,
+//     etc., depending on what the host environment has
+//     installed.
+//
+// # Sub-Packages
+//
+//   - **[cmd/root]**         — the cobra command +
+//     flag wiring.
+//   - **[core/project]**     — directory tree and
+//     foundation file creation.
+//   - **[core/plugin]**      — Claude Code plugin
+//     detection and global enablement.
+//   - **[core/claude_check]** — stage-aware detection of
+//     Claude Code state used to print contextual
+//     guidance during init.
+//   - **[core/merge]**       — create-or-merge file
+//     operations with marker-bracketed sections so
+//     re-running init never clobbers user edits.
+//   - **[core/vscode]**      — `.vscode/` workspace
+//     artifacts (tasks.json, mcp.json, extensions.json).
+//
+// # Idempotency Contract
+//
+// Every action performed by init must satisfy:
+//
+//  1. **Existing files are merged, not overwritten** — the
+//     [core/merge] helpers find the marker pair, replace
+//     only the bracketed content, and leave everything
+//     else alone.
+//  2. **Permissions are deduplicated** — Claude Code
+//     `allow`/`deny` lists are merged; existing entries
+//     are preserved.
+//  3. **Templated values are stable** — the project name
+//     interpolation uses `git remote` data when
+//     available so re-running produces byte-identical
+//     output.
+//  4. **No destructive operations without an explicit
+//     `--force`** — `init` does not delete or move user
+//     files.
+//
+// # Related Packages
+//
+//   - [internal/bootstrap]                — wires
+//     `ctx init` into the root cobra tree.
+//   - [internal/assets]                   — the embedded
+//     templates init writes to disk.
+//   - [internal/steering]                 — supplies
+//     [FoundationFiles] for the steering scaffold.
+//   - [internal/cli/setup]                — separate CLI
+//     for re-running just the tool-integration step
+//     after `ctx init`.
 package initialize
diff --git a/internal/cli/journal/cmd/importer/doc.go b/internal/cli/journal/cmd/importer/doc.go
index aaceb4338..bad836142 100644
--- a/internal/cli/journal/cmd/importer/doc.go
+++ b/internal/cli/journal/cmd/importer/doc.go
@@ -1,13 +1,64 @@
 //   /    ctx:                         https://ctx.ist
 // ,'`./    do you remember?
-// `.,'\\
+// `.,'\
 //   \    Copyright 2026-present Context contributors.
 //                 SPDX-License-Identifier: Apache-2.0
 
-// Package importer implements the ctx journal import subcommand.
+// Package importer implements **`ctx journal import`** —
+// the subcommand that ingests raw AI session files from
+// `~/.claude/projects/<project>/` (and the equivalent paths
+// for other tools) into enriched, git-tracked journal
+// entries under `.context/journal/`.
 //
-// [Cmd] builds the cobra.Command with --all, --regenerate,
-// --dry-run, and --keep-frontmatter flags. [Run] plans the import
-// (which sessions to create, regenerate, or skip), confirms with
-// the user, and executes the plan.
+// # Public Surface
+//
+//   - **[Cmd]** — cobra command with `--all`,
+//     `--regenerate`, `--dry-run`, and
+//     `--keep-frontmatter` flags.
+//
+//   - **[Run]** — three-phase orchestration:
+//
+//     1. **Plan** — diff the source set against the
+//     journal state file ([internal/journal/state])
+//     to produce an [entity.ImportPlan]: which
+//     sources to create, regenerate, or skip.
+//     2. **Confirm** — print the plan and ask for
+//     confirmation (skipped under `--dry-run`).
+//     3. **Execute** — for each action: parse via
+//     [internal/journal/parser], reduce/collapse
+//     /normalize, write the entry, update the
+//     state file. Locked entries
+//     ([internal/cli/journal/core/lock]) are
+//     skipped with a notice.
+//
+// # `--regenerate` Semantics
+//
+// Without `--regenerate`, only sources that have not
+// been imported produce new entries. With
+// `--regenerate`, **every** source is re-imported,
+// preserving any frontmatter the user added by
+// default (`--keep-frontmatter true`). Pass
+// `--keep-frontmatter=false` to discard enrichments
+// — destructive; the importer warns explicitly.
+//
+// # Concurrency
+//
+// Sequential. Concurrent imports against the same
+// journal directory would race on state-file writes;
+// ctx is single-process.
+//
+// # Related Packages
+//
+//   - [internal/journal/parser]               — turns
+//     raw sources into [entity.Session].
+//   - [internal/journal/state]                — the
+//     state file the plan diffs against.
+//   - [internal/cli/journal/core/lock]        — the
+//     locked-entry checks the importer respects.
+//   - [internal/cli/journal/core/{reduce,collapse,
+//     normalize,wrap}]                        —
+//     per-entry transformation passes the importer
+//     runs in order.
+//   - [internal/cli/journal/core/slug]        —
+//     filename slug generation.
 package importer
diff --git a/internal/cli/journal/cmd/obsidian/doc.go b/internal/cli/journal/cmd/obsidian/doc.go
index 1c5e82f86..a210c60ad 100644
--- a/internal/cli/journal/cmd/obsidian/doc.go
+++ b/internal/cli/journal/cmd/obsidian/doc.go
@@ -1,12 +1,52 @@
 //   /    ctx:                         https://ctx.ist
 // ,'`./    do you remember?
-// `.,'\\
+// `.,'\
 //   \    Copyright 2026-present Context contributors.
 //                 SPDX-License-Identifier: Apache-2.0
 
-// Package obsidian implements the ctx journal obsidian subcommand.
+// Package obsidian implements **`ctx journal obsidian`** —
+// the subcommand that exports the project's enriched
+// journal entries as a complete **Obsidian vault** (with
+// MOC pages, wikilinks, and graph-friendly frontmatter)
+// for users who consume the journal in Obsidian rather
+// than the zensical site.
 //
-// [Cmd] builds the cobra.Command with --output flag. [Run]
-// delegates to core/obsidian.BuildVault to generate an
-// Obsidian vault from journal entries.
+// # Public Surface
+//
+//   - **[Cmd]** — cobra command with `--output` to
+//     control the destination directory (default
+//     `vault/`).
+//   - **[Run]** — delegates to
+//     [internal/cli/journal/core/obsidian.BuildVault]
+//     which handles the full file generation pipeline
+//     (scan, transform frontmatter, convert links to
+//     `[[wikilinks]]`, build MOC pages, write
+//     `Home.md`).
+//
+// # Why a Separate Vault
+//
+// Obsidian and the zensical site both consume the same
+// raw entries but render them very differently
+// (wikilinks vs markdown links, MOC vs topic index,
+// graph view vs sidebar nav). Producing two output
+// trees from one input set keeps each rendering
+// idiomatic for its environment.
+//
+// # Concurrency
+//
+// Single-process, sequential. `O(N)` over journal
+// entries.
+//
+// # Related Packages
+//
+//   - [internal/cli/journal/core/obsidian] — the
+//     vault-building engine.
+//   - [internal/cli/journal/core/wikilink] — markdown
+//     → wikilink conversion.
+//   - [internal/cli/journal/core/frontmatter] —
+//     Obsidian-flavored frontmatter assembly.
+//   - [internal/cli/journal/core/moc]      — MOC
+//     pages (Obsidian flavor).
+//   - [internal/cli/journal/cmd/site]      — sister
+//     command for the zensical-flavored output.
 package obsidian
diff --git a/internal/cli/journal/cmd/site/doc.go b/internal/cli/journal/cmd/site/doc.go
index a05ad7522..0d00692e7 100644
--- a/internal/cli/journal/cmd/site/doc.go
+++ b/internal/cli/journal/cmd/site/doc.go
@@ -1,13 +1,50 @@
 //   /    ctx:                         https://ctx.ist
 // ,'`./    do you remember?
-// `.,'\\
+// `.,'\
 //   \    Copyright 2026-present Context contributors.
 //                 SPDX-License-Identifier: Apache-2.0
 
-// Package site implements the ctx journal site subcommand.
+// Package site implements **`ctx journal site`** — the
+// subcommand that turns the project's enriched journal
+// entries into a browsable static site, optionally
+// invoking the zensical builder to produce the HTML.
 //
-// [Cmd] builds the cobra.Command with --build and --output flags.
-// [Run] generates a static journal site: parses entries, builds
-// month-grouped pages, topic indexes, and a zensical configuration.
-// With --build, it also invokes zensical to produce HTML.
+// # Public Surface
+//
+//   - **[Cmd]** — cobra command with `--build` (also
+//     run zensical) and `--output` (override the
+//     destination directory).
+//   - **[Run]** — orchestrates the full generation:
+//     parse entries (parse), normalize each (normalize),
+//     build month-grouped pages and topic indexes
+//     (section + generate + moc), write the zensical
+//     `README.md` (generate.SiteReadme), and — when
+//     `--build` is set — shell out to `zensical build`.
+//
+// # Output Layout
+//
+//   - `<out>/README.md`        — zensical config
+//   - `<out>/index.md`         — chronological index
+//   - `<out>/topics/index.md`  — topic overview MOC
+//   - `<out>/topics/<topic>.md` — per-topic pages
+//   - `<out>/<year>/<month>/<slug>.md` — entries
+//
+// # Concurrency
+//
+// Single-process, sequential. The site build is
+// `O(N)` over journal entries and typically
+// completes in seconds.
+//
+// # Related Packages
+//
+//   - [internal/cli/journal/core/section] — topic
+//     index builders.
+//   - [internal/cli/journal/core/moc]     — Map of
+//     Content pages.
+//   - [internal/cli/journal/core/generate] — top-
+//     level page templates.
+//   - [internal/cli/journal/core/normalize] — runs
+//     per-entry before rendering.
+//   - [internal/cli/serve]                  — the
+//     `ctx serve` command that hosts the built site.
 package site
diff --git a/internal/cli/journal/core/collapse/doc.go b/internal/cli/journal/core/collapse/doc.go
index 529c7a6fd..da20a97e9 100644
--- a/internal/cli/journal/core/collapse/doc.go
+++ b/internal/cli/journal/core/collapse/doc.go
@@ -1,13 +1,61 @@
 //   /    ctx:                         https://ctx.ist
 // ,'`./    do you remember?
-// `.,'\\
+// `.,'\
 //   \    Copyright 2026-present Context contributors.
 //                 SPDX-License-Identifier: Apache-2.0
 
-// Package collapse condenses large tool output sections in journal
-// markdown.
+// Package collapse condenses **large tool-output blocks** in
+// journal markdown — the multi-thousand-line shell pastes
+// and `ls`/`grep` outputs that bloat an entry without
+// adding much signal — into expandable summaries that show
+// the first few lines and offer the rest under a
+// `
` toggle. // -// [ToolOutputs] finds tool output blocks in the content and -// replaces them with collapsed summaries, preserving the first -// few lines as context. +// The package complements [reduce]: reduce strips bona-fide +// noise (system reminders, orphan fences); collapse +// preserves output but **hides** the bulk so reviewers can +// skim and only expand the tool calls they care about. +// +// # Public Surface +// +// - **[ToolOutputs](content, opts)** — finds tool-output +// code blocks larger than a configurable line +// threshold and replaces them with a `
` +// summary block: +// +//
Tool output (NNN lines) +// +// ``` +// ...full output... +// ``` +// +//
+// +// with the first 5 lines shown above the +// collapsed block as anchor context. Threshold and +// preview line count are tunable via [opts]. +// +// # Why Not Just Truncate? +// +// Truncating loses information. The journal entry is a +// **record** — the user may need the full output later +// to reconstruct what happened. Collapsing wins on both +// fronts: the rendered page is short and skimmable, the +// raw markdown still contains every byte of the original +// output. +// +// # Concurrency +// +// Pure data transformation. Concurrent callers never +// race. +// +// # Related Packages +// +// - [internal/cli/journal/cmd/importer] — chief +// consumer; runs [ToolOutputs] before normalize so +// the disk representation is the readable form. +// - [internal/cli/journal/core/reduce] — sister +// pass that strips outright noise. +// - [internal/cli/journal/core/normalize] — runs +// after both reduce and collapse. package collapse diff --git a/internal/cli/journal/core/frontmatter/doc.go b/internal/cli/journal/core/frontmatter/doc.go index bf2536b06..464837d6b 100644 --- a/internal/cli/journal/core/frontmatter/doc.go +++ b/internal/cli/journal/core/frontmatter/doc.go @@ -1,14 +1,60 @@ // / ctx: https://ctx.ist // ,'`./ do you remember? -// `.,'\\ +// `.,'\ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package frontmatter handles YAML frontmatter transformation -// for journal entries and Obsidian vault generation. +// Package frontmatter handles the **YAML frontmatter +// transformations** that journal entries undergo as they +// pass through the pipeline: importer → normalizer → +// renderer (site or vault). // -// [Transform] converts raw frontmatter into a normalized format. -// [ExtractStringSlice] safely extracts []string from parsed YAML -// maps. The Obsidian struct provides the vault-specific frontmatter -// schema. 
+// The package owns the per-renderer adapters that map the +// canonical [entity.JournalFrontmatter] into the slightly +// different shapes each downstream renderer expects. +// +// # Public Surface +// +// - **[Transform](raw)** — converts a raw frontmatter +// map (untyped, just-parsed YAML) into the +// normalized journal frontmatter shape: enforces +// field types, fills in defaults, drops fields the +// schema does not recognize. Used by the importer +// when ingesting hand-edited entries. +// - **[ExtractStringSlice](m, key)** — safely pulls a +// `[]string` from a `map[string]any`, tolerating +// both `[]string` and `[]any` source types (YAML +// decoders produce one or the other depending on +// content). Returns nil when the key is missing. +// - **[Obsidian]** — the Obsidian-vault frontmatter +// struct: subset/extension of the canonical shape +// with additional `aliases:`, `tags:`, and graph +// metadata Obsidian renders. +// +// # Why a Separate Package +// +// Frontmatter handling looks trivial on the surface but +// is one of the most bug-prone surfaces in any markdown +// pipeline because YAML's loose typing produces +// `[]string` in some cases and `[]any` in others for +// "the same" structure. Hoisting the conversions here +// means every renderer benefits from the same +// safe-decode helpers. +// +// # Concurrency +// +// All functions are pure. Concurrent callers never +// race. +// +// # Related Packages +// +// - [internal/cli/journal/cmd/importer] / +// [internal/cli/journal/cmd/site] / +// [internal/cli/journal/cmd/obsidian] — chief +// consumers. +// - [internal/parse] — +// [SplitFrontmatter] returns the raw YAML this +// package transforms. +// - [internal/entity] — the +// canonical [JournalFrontmatter] type. 
package frontmatter diff --git a/internal/cli/journal/core/generate/doc.go b/internal/cli/journal/core/generate/doc.go index 8e5569605..7e006abf6 100644 --- a/internal/cli/journal/core/generate/doc.go +++ b/internal/cli/journal/core/generate/doc.go @@ -1,13 +1,65 @@ // / ctx: https://ctx.ist // ,'`./ do you remember? -// `.,'\\ +// `.,'\ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package generate builds journal site pages from parsed entries. +// Package generate builds the **top-level pages** of the +// journal site from parsed entries — the README zensical +// reads at build time, the chronological index page, and the +// AI-generated summary insertion that decorates pages already +// produced upstream. // -// [SiteReadme] creates the site README with zensical configuration. -// [Index] generates the main index page with month-grouped entries. -// [InjectedSummary] inserts an AI-generated summary into existing -// page content. +// The package is the third leg of the site-building tripod +// alongside [section] (topic indexes) and [moc] (Maps of +// Content). Together they cover everything the journal site +// renders. +// +// # The Surface +// +// - **[SiteReadme](opts)** — produces the +// `site/README.md` zensical reads at build time. +// Embeds the zensical configuration block (theme, +// navigation, search settings) and the site-wide +// description. Idempotent: a call with identical +// `opts` produces byte-identical output. +// - **[Index](entries)** — produces the chronological +// index page: entries grouped by month, newest at +// the top. Output is markdown ready to land at +// `site/index.md` (or `site/journal/index.md`, +// depending on layout). 
+// - **[InjectedSummary](existing, summary)** — splices +// an AI-generated summary into existing page +// content **at a stable insertion point** (a +// marker comment) so re-running site generation +// does not duplicate the summary or push other +// content around. The marker pattern matches what +// `/ctx-blog` and `/ctx-blog-changelog` skills emit. +// +// # Idempotency Contract +// +// All three generators are idempotent under the same +// inputs. This is what makes `ctx journal site` safe to +// re-run during a CI build: identical entries → identical +// output → no spurious git diffs. +// +// # Concurrency +// +// All functions are pure data transformations. Concurrent +// callers never race. +// +// # Related Packages +// +// - [internal/cli/journal/cmd/site] — invokes +// [SiteReadme] / [Index] when assembling the +// zensical site. +// - [internal/cli/journal/core/section] / +// [internal/cli/journal/core/moc] — produce the +// topic and MOC pages that complement the +// top-level pages this package generates. +// - [internal/cli/journal/core/normalize] — runs +// before generation to sanitize each entry's +// markdown. +// - [internal/entity] — [Entry], +// [GroupedIndex] domain types. package generate diff --git a/internal/cli/journal/core/index/doc.go b/internal/cli/journal/core/index/doc.go index 5f2ea0a47..5d2b9a61a 100644 --- a/internal/cli/journal/core/index/doc.go +++ b/internal/cli/journal/core/index/doc.go @@ -1,13 +1,60 @@ // / ctx: https://ctx.ist // ,'`./ do you remember? -// `.,'\\ +// `.,'\ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package index provides session ID indexing for journal files. +// Package index builds the **session-ID-to-filename map** that +// every other journal subcommand uses to find a journal entry +// when given only a session ID. // -// [Session] builds a map from session IDs to filenames by -// scanning journal markdown frontmatter. 
[ExtractSessionID] pulls -// the session_id from a single file. [LookupSessionFile] resolves -// a session ID to its filename. +// The map matters because users (and skills) routinely refer +// to a session by its ID — a short alphanumeric tag like +// `abc123` — but on disk the journal entry filename is keyed +// by date and slug. The mapping has to be built on demand +// from the entry frontmatter; it cannot be derived from the +// filename alone. +// +// # The Surface +// +// - **[Session](dir)** — walks the journal directory, +// reads the YAML frontmatter of every `*.md` entry, +// extracts each `session_id`, and returns a +// `map[sessionID]filename`. Entries without a +// `session_id` field are silently skipped. +// - **[ExtractSessionID](path)** — reads one file and +// returns its `session_id` (empty string if not +// present, error if the file cannot be read or the +// frontmatter cannot be parsed). +// - **[LookupSessionFile](dir, sessionID)** — convenience +// wrapper: calls [Session] and returns the matching +// filename, or empty string if not found. +// +// # Performance +// +// [Session] reads the frontmatter only — not the full +// body — so the cost scales with `O(N)` files but with +// a small per-file constant. For a journal with a few +// hundred entries, the build typically completes well +// under 100 ms. Callers that need many lookups in a +// row should call [Session] once and cache the map +// rather than calling [LookupSessionFile] repeatedly. +// +// # Concurrency +// +// All functions are stateless. Concurrent callers +// against the same directory each pay the full read +// cost; no module-level cache is implemented because +// the journal directory mutates between sessions and +// stale-cache bugs are worse than the perf cost. +// +// # Related Packages +// +// - [internal/cli/journal/cmd/source] — invokes +// [Session] when resolving `--show `. 
+// - [internal/cli/journal/cmd/importer] — uses +// the inverse mapping (session ID → exists?) to +// skip already-imported sessions. +// - [internal/parse] — supplies +// [SplitFrontmatter] used internally. package index diff --git a/internal/cli/journal/core/lock/doc.go b/internal/cli/journal/core/lock/doc.go index 72cfc6f3d..94f4410d2 100644 --- a/internal/cli/journal/core/lock/doc.go +++ b/internal/cli/journal/core/lock/doc.go @@ -1,13 +1,72 @@ // / ctx: https://ctx.ist // ,'`./ do you remember? -// `.,'\\ +// `.,'\ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package lock manages journal entry lock state. +// Package lock manages **journal entry lock state** — the +// `locked: true` frontmatter flag that protects an enriched +// journal entry from being clobbered by a re-import of its +// raw source session. // -// [MatchJournalFiles] finds journal files matching a pattern. -// [MultipartBase] extracts the base name from multipart filenames. -// [UpdateFrontmatter] sets or clears the locked: field in a -// file's YAML frontmatter. +// Locking is the journal pipeline's "do not touch" affordance. +// Without it, every `ctx journal import --regenerate` would +// risk overwriting the careful edits an author made to an +// enriched entry. With it, the importer sees `locked: true`, +// skips that file, and reports it in the import summary. +// +// # The Surface +// +// - **[MatchJournalFiles](dir, pattern)** — finds journal +// files matching a CLI pattern (slug, date, ID, glob). +// Used by `ctx journal lock ` and `ctx journal +// unlock ` to expand a pattern to a concrete +// list of files. Pattern semantics match what the +// user-facing CLI documents. +// - **[MultipartBase](filename)** — extracts the base +// name from a multipart filename (e.g. +// `2026-04-12-foo--part2.md` → `2026-04-12-foo`). The +// lock state for a multipart entry lives on the **base +// part**, and other parts inherit it. 
+// - **[UpdateFrontmatter](path, lock)** — atomic update +// of the `locked:` field in a file's YAML frontmatter. +// Adds the field if missing; removes it when `lock` is +// false (rather than writing `locked: false`, which +// would still bypass the importer's omit-default +// check). +// +// # State File Sync +// +// The lock state can also be read from +// `.context/journal/.state.json` (per +// [internal/journal/state]). Frontmatter is the source of +// truth; the state file is a denormalized index for fast +// queries from `ctx journal sync` and the importer. The +// `ctx journal sync` command (in +// [internal/cli/journal/cmd/sync]) reconciles drift in +// either direction. +// +// # Concurrency +// +// All operations are file-local and hold the file open +// only for the duration of the read+write. Concurrent +// invocations against different files never race; +// concurrent updates to the same file would race on the +// final write (no per-file locking is implemented — the +// CLI is single-process anyway). +// +// # Related Packages +// +// - [internal/cli/journal/cmd/lock] / +// [internal/cli/journal/cmd/unlock] — the user-facing +// CLI surface. +// - [internal/cli/journal/cmd/sync] — frontmatter ↔ +// state-file reconciliation. +// - [internal/journal/state] — the denormalized +// state file the importer consults at scan time. +// - [internal/cli/journal/cmd/importer] — the importer +// that respects `locked: true` and reports skipped +// files. +// - [internal/parse] — [SplitFrontmatter] +// used by [UpdateFrontmatter]. package lock diff --git a/internal/cli/journal/core/moc/doc.go b/internal/cli/journal/core/moc/doc.go index eabea79ca..1fb270cf5 100644 --- a/internal/cli/journal/core/moc/doc.go +++ b/internal/cli/journal/core/moc/doc.go @@ -1,14 +1,71 @@ // / ctx: https://ctx.ist // ,'`./ do you remember? -// `.,'\\ +// `.,'\ // \ Copyright 2026-present Context contributors. 
// SPDX-License-Identifier: Apache-2.0 -// Package moc generates Maps of Content for journal sites and -// Obsidian vaults. +// Package moc generates **Maps of Content** — the navigational +// index pages that sit at the top of both the journal site and +// the Obsidian vault and tell a human "here are the high-level +// topics, here are the key files, here are the recent entries +// that matter most". // -// [Home] generates the main topic/key-file MOC page for the site. -// [ObsidianTopics] generates the Obsidian topics index page. -// [GenerateObsidianTopicPage] generates a single topic page with -// wikilinks to matching entries. +// "MOC" is borrowed from the personal-knowledge-management +// world (Obsidian / Linking Your Thinking) where it names the +// curated dashboard page that aggregates by topic and key +// entity rather than by chronology. +// +// # The Surface +// +// - **[Home](entries, opts)** — generates the **site +// homepage MOC**: top topics, key files, recent +// entries, all in a single page. Output is markdown +// ready to land at `site/index.md`. +// - **[ObsidianTopics](entries)** — generates the +// Obsidian-vault topics index using `[[wikilink]]` +// syntax. Lives at `vault/MOC.md`. +// - **[GenerateObsidianTopicPage](topic, entries)** — +// generates a per-topic page in Obsidian format with +// wikilinks back to each matching entry. Lives at +// `vault/topics/.md`. +// +// # Site MOC vs Obsidian MOC +// +// The two flavors share the *aggregation logic* (topic +// counts, key-file detection, recency ranking) but +// diverge in **link syntax**: +// +// - The site uses standard `[text](url.md)` markdown +// links so zensical can resolve them through its +// navigation graph. +// - Obsidian uses `[[wikilinks]]` so its native graph +// view picks them up. +// +// Each helper assembles the link in the right dialect; +// the aggregation results are reused. 
+// +// # Inputs +// +// All MOC generators take a slice of [entity.Entry] and +// optionally a [TopicIndex] (built by +// [internal/cli/journal/core/section.BuildTopicIndex]). +// The MOC is a *projection* of the entry set, not a +// transformation: original entries are unchanged. +// +// # Concurrency +// +// All functions are pure data transformations over +// the entry slice. Concurrent callers never race. +// +// # Related Packages +// +// - [internal/cli/journal/cmd/site] — invokes +// [Home] when building the zensical site. +// - [internal/cli/journal/cmd/obsidian] — invokes +// [ObsidianTopics] / [GenerateObsidianTopicPage] +// when exporting the vault. +// - [internal/cli/journal/core/section] — produces +// the [TopicIndex] this package consumes. +// - [internal/entity] — [Entry], +// [TopicData], [KeyFileData] domain types. package moc diff --git a/internal/cli/journal/core/normalize/doc.go b/internal/cli/journal/core/normalize/doc.go index f5d02c992..d92e50f57 100644 --- a/internal/cli/journal/core/normalize/doc.go +++ b/internal/cli/journal/core/normalize/doc.go @@ -1,14 +1,76 @@ // / ctx: https://ctx.ist // ,'`./ do you remember? -// `.,'\\ +// `.,'\ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package normalize sanitizes journal markdown for site rendering. +// Package normalize sanitizes journal markdown for two +// downstream renderers: the **zensical site builder** and the +// **Obsidian vault exporter**. Raw enriched journal entries +// often carry constructs that one or both renderers cannot +// handle (or render confusingly): unbalanced code fences, H1 +// headings that collide with the page title, raw HTML, etc. +// This package smoothes those out without losing meaning. // -// [MatchTurnHeader] parses conversation turn headers. [FindTurnBoundary] -// locates turn boundaries in content. [TrimBlankLines] removes -// leading and trailing blank lines from a slice. 
The main Content -// function (not exported here) handles fence stripping, heading -// demotion, and HTML escaping. +// # The Transformations +// +// The main `Content(text, opts)` entry point performs, in +// order: +// +// 1. **Fence stripping at boundaries** — orphan opening or +// closing fences left over from incomplete code blocks +// are removed so the renderer does not enter "code +// mode" for the rest of the document. +// 2. **Heading demotion** — every H1 in the body is +// demoted to H2 so it does not collide with the +// frontmatter-derived page title rendered by both +// zensical and Obsidian. +// 3. **HTML escaping** — bare `` patterns that are +// not legitimate HTML are escaped so they do not get +// swallowed silently. +// 4. **Turn-boundary normalization** — turn headers like +// `## [12:34:56] User:` are recognized and given a +// consistent shape via [MatchTurnHeader] / +// [FindTurnBoundary] so the per-turn navigator on the +// site can find them. +// 5. **Trim** — leading and trailing blank-line runs are +// reduced to a single blank line via [TrimBlankLines]. +// +// # The Public Helpers +// +// - **[MatchTurnHeader](line)** — returns true plus the +// parsed turn role + timestamp when the line matches +// the canonical turn-header shape. +// - **[FindTurnBoundary](lines, start)** — locates the +// index of the next turn boundary at or after `start`, +// used for slicing out a specific turn. +// - **[TrimBlankLines](lines)** — strips leading and +// trailing blank entries from a `[]string`. +// +// # Idempotency +// +// Every transformation is **idempotent**: running +// `Content` twice in a row produces no further changes. +// This is what makes the package safe to call from both +// the import pipeline (writes the normalized form to disk) +// and the renderers (re-normalize what they read). +// +// # Concurrency +// +// All exported functions are pure data transformations +// over `string` / `[]string`. Concurrent callers never +// race. 
+// +// # Related Packages +// +// - [internal/cli/journal/cmd/site] — zensical +// site builder; calls into [Content] before +// templating. +// - [internal/cli/journal/cmd/obsidian] — Obsidian +// vault exporter; same. +// - [internal/cli/journal/core/section] — section +// index builder downstream of normalization. +// - [internal/wrap] — re-wraps +// long lines after normalization for the +// `ctx fmt` flow. package normalize diff --git a/internal/cli/journal/core/obsidian/doc.go b/internal/cli/journal/core/obsidian/doc.go index 2061432d8..f92bb2ab7 100644 --- a/internal/cli/journal/core/obsidian/doc.go +++ b/internal/cli/journal/core/obsidian/doc.go @@ -1,12 +1,51 @@ // / ctx: https://ctx.ist // ,'`./ do you remember? -// `.,'\\ +// `.,'\ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package obsidian builds an Obsidian vault from journal entries. +// Package obsidian builds an **Obsidian vault** from the +// project's enriched journal entries — the engine behind +// the user-facing `ctx journal obsidian` command. // -// [BuildVault] handles the full file generation pipeline: -// scan entries, create directories, transform frontmatter, -// convert links, build MOC pages, and write Home.md. +// The vault is a complete Obsidian-friendly directory +// tree: per-entry notes with vault-specific frontmatter, +// `[[wikilinks]]` instead of markdown links, MOC pages +// for navigation, and a `Home.md` landing page that +// surfaces recent entries and top topics. +// +// # Public Surface +// +// - **[BuildVault](journalDir, vaultDir, opts)** — +// end-to-end pipeline: scan entries (parse), +// create directory structure, transform +// frontmatter (frontmatter), convert links +// (wikilink), build MOC pages (moc), write +// `Home.md`. Idempotent: re-running with the +// same inputs produces byte-identical output. 
+// +// # Layout Produced +// +// - `/Home.md` — landing MOC +// - `/MOC.md` — topics overview +// - `/topics/.md` — per-topic pages +// - `///.md` — entries +// +// # Concurrency +// +// Single-process, sequential. `O(N)` over journal +// entries. +// +// # Related Packages +// +// - [internal/cli/journal/cmd/obsidian] — the CLI +// surface. +// - [internal/cli/journal/core/parse] — entry +// scanner. +// - [internal/cli/journal/core/wikilink] — link +// conversion. +// - [internal/cli/journal/core/frontmatter] — +// vault frontmatter shape. +// - [internal/cli/journal/core/moc] — MOC +// pages. package obsidian diff --git a/internal/cli/journal/core/parse/doc.go b/internal/cli/journal/core/parse/doc.go index 8c1a20d32..90c5f874d 100644 --- a/internal/cli/journal/core/parse/doc.go +++ b/internal/cli/journal/core/parse/doc.go @@ -1,12 +1,53 @@ // / ctx: https://ctx.ist // ,'`./ do you remember? -// `.,'\\ +// `.,'\ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package parse scans journal directories and parses entries. +// Package parse scans a journal directory and turns each +// markdown file into a typed [entity.JournalEntry] by +// reading and validating its YAML frontmatter — the +// upstream of every site-builder, Obsidian-exporter, MOC +// generator, and lock-state syncer in the journal pipeline. // -// [ScanJournalEntries] reads all markdown files in the journal -// directory, parsing frontmatter into JournalEntry structs. -// [JournalEntry] parses a single file by path. +// # Public Surface +// +// - **[ScanJournalEntries](dir)** — walks `dir`, +// parses every `*.md` file, and returns +// `[]*JournalEntry` plus an error slice for files +// that failed to parse. The walk continues past +// bad files so a single malformed entry does not +// abort the whole scan. +// - **[JournalEntry](path)** — parses one file by +// path. 
Used by single-entry callers (the +// `--show ` lookup, the lock CLI, and the +// drift checker). +// +// # Frontmatter Schema +// +// Each entry's frontmatter must satisfy the journal +// schema documented in +// `internal/entity/journal.go.JournalFrontmatter`: +// `id`, `date`, `title`, `slug`, optional `topics`, +// optional `locked`, optional `enriched`, optional +// `part` / `parts` for multipart entries. Unknown +// fields are preserved (round-trip safe). +// +// # Concurrency +// +// Stateless and filesystem-bound. Concurrent calls +// against the same directory each pay the full read +// cost. +// +// # Related Packages +// +// - [internal/cli/journal/cmd/site] / +// [internal/cli/journal/cmd/obsidian] — chief +// consumers of [ScanJournalEntries]. +// - [internal/cli/journal/core/lock] — uses +// [JournalEntry] for single-file lock checks. +// - [internal/cli/system/cmd/check_journal] — uses +// the count to nudge about pending imports. +// - [internal/parse] — supplies +// [SplitFrontmatter] used internally. package parse diff --git a/internal/cli/journal/core/reduce/doc.go b/internal/cli/journal/core/reduce/doc.go index 07b42c8df..a596016e5 100644 --- a/internal/cli/journal/core/reduce/doc.go +++ b/internal/cli/journal/core/reduce/doc.go @@ -1,13 +1,53 @@ // / ctx: https://ctx.ist // ,'`./ do you remember? -// `.,'\\ +// `.,'\ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package reduce strips formatting artifacts from session JSONL -// for clean journal markdown. +// Package reduce strips noise out of raw AI session JSONL so +// the journal markdown a user reads is the conversation, not +// the wire format. The package is the **noise-removal pass** +// the importer runs before the entry hits disk. // -// [StripFences] removes code fence markers. [StripSystemReminders] -// removes system-reminder XML tags. [CleanToolOutputJSON] simplifies -// tool output JSON for readability. 
+// # What Gets Reduced +// +// - **[StripFences](text)** — removes orphan code-fence +// markers left by the model when it abandoned a code +// block mid-response. Without this the renderer +// enters "code mode" for the rest of the document. +// - **[StripSystemReminders](text)** — Claude Code +// injects `` tags into tool results +// to nudge the model. The user did not write them +// and they should not appear in the journal. (See +// also [internal/parse.StripSystemReminders] which +// is the shared underlying helper.) +// - **[CleanToolOutputJSON](text)** — collapses raw +// JSON tool output into a more readable summary +// (top-level keys + first values + truncation +// notice) so a 5,000-line `ls` does not balloon +// the journal entry. The original is preserved +// under a `
` toggle for archival. +// +// # Idempotency +// +// All three functions are idempotent: running them +// twice in a row on the same input produces the same +// output as running them once. This is what makes +// them safe to run again during re-import. +// +// # Concurrency +// +// All functions are pure data transformations. +// Concurrent callers never race. +// +// # Related Packages +// +// - [internal/cli/journal/cmd/importer] — chief +// consumer; runs reduce before normalize. +// - [internal/cli/journal/core/normalize] — runs +// after reduce; assumes its input is already +// noise-stripped. +// - [internal/cli/journal/core/collapse] — sister +// pass that condenses *large* tool-output blocks +// into expandable summaries. package reduce diff --git a/internal/cli/journal/core/schema/doc.go b/internal/cli/journal/core/schema/doc.go index 60bb65b03..7bc2e8326 100644 --- a/internal/cli/journal/core/schema/doc.go +++ b/internal/cli/journal/core/schema/doc.go @@ -1,16 +1,56 @@ // / ctx: https://ctx.ist // ,'`./ do you remember? -// `.,'\\ +// `.,'\ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package schema provides core logic for journal schema -// validation. -// -// It resolves which directories to scan based on CLI flags, -// runs validation across all JSONL files in those directories, -// and manages the drift report lifecycle in .context/reports/. -// Used by both the standalone check command and the import -// integration, which validates source files after importing -// sessions and prints a summary if drift is found. +// Package schema is the **CLI-side wrapper** around the +// underlying [internal/journal/schema] validator: it +// resolves which directories to scan based on user +// flags, runs validation across every JSONL session file +// it finds, and persists the resulting drift report +// under `.context/reports/`. +// +// Used by two surfaces: +// +// 1. 
**`ctx journal schema`** — the standalone +// drift-check command users run when investigating +// parser issues. +// 2. **`ctx journal import`** — runs validation +// **after** an import as a post-flight check; +// prints a summary if drift is found so users know +// the next Claude Code release may need a parser +// update. +// +// # Public Surface +// +// - **[Run](opts)** — orchestration: resolve scan +// paths from flags, dispatch validation per file +// via [internal/journal/schema], aggregate the +// [Report], optionally write it to +// `.context/reports/schema-drift-.md`. +// +// # The Drift Report +// +// Drift is **informational, not fatal** — a session +// with unknown fields still imports cleanly. The +// report exists so maintainers can update +// [internal/journal/parser]'s schema declarations +// when a new Claude Code release adds fields. See +// the [internal/journal/schema] doc.go for the +// upstream semantics. +// +// # Concurrency +// +// Sequential. The validation itself is fast (a few +// milliseconds per JSONL file). +// +// # Related Packages +// +// - [internal/journal/schema] — the +// validator engine. +// - [internal/cli/journal/cmd/schema] — the +// standalone `ctx journal schema` CLI surface. +// - [internal/cli/journal/cmd/importer] — runs +// this package after every import. package schema diff --git a/internal/cli/journal/core/section/doc.go b/internal/cli/journal/core/section/doc.go index 75e8f71d0..00a6f6a4a 100644 --- a/internal/cli/journal/core/section/doc.go +++ b/internal/cli/journal/core/section/doc.go @@ -1,15 +1,64 @@ // / ctx: https://ctx.ist // ,'`./ do you remember? -// `.,'\\ +// `.,'\ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package section builds topic index pages and section-based site -// output for the journal. -// -// [BuildTopicIndex] aggregates entries by topic with popularity -// thresholds. [GenerateTopicsIndex] renders the topics index page. 
-// [GenerateTopicPage] renders a single topic's entry list. -// [WriteFormatted] and [WriteMonths] render section content into -// string builders. +// Package section builds **topic-based index pages** for the +// journal site: the page that lists every entry tagged with +// `#auth`, the page that lists every entry tagged with +// `#hooks`, and so on. It also assembles the section content +// (collated month/topic groupings) the site renderer drops +// into the navigation tree. +// +// The package is one of three site-rendering helpers — the +// other two are [moc] (Maps of Content for both the site and +// Obsidian) and [generate] (top-level page templates). +// +// # The Surface +// +// - **[BuildTopicIndex](entries, threshold)** — buckets +// entries by topic (frontmatter `topics:` list). +// Topics that appear in fewer than `threshold` entries +// are folded into a tail "other" bucket so the index +// stays readable as the journal grows. Returns a +// [TopicIndex] keyed by canonical topic slug. +// - **[GenerateTopicsIndex](idx)** — renders the +// topics-overview page: every topic name + entry +// count, sorted by popularity descending. Output is +// a `string` ready to be written to +// `site/topics/index.md`. +// - **[GenerateTopicPage](topic, entries)** — renders a +// single topic's entry list (date + title + slug +// link). Used per-topic by the site builder to +// produce `site/topics/.md`. +// - **[WriteFormatted](sb, entries)** — appends a +// formatted entry list into a `*strings.Builder`. +// - **[WriteMonths](sb, entries)** — appends entries +// grouped by year-month with month sub-headings. +// +// # Popularity Threshold +// +// The threshold is configurable via the site builder +// invocation; the default is "show topics with 3+ +// entries individually, fold the rest into 'other'". +// This is a tunable balance: too low and the topics +// page is dominated by one-off tags; too high and +// long-tail tags disappear entirely. 
+//
+// # Concurrency
+//
+// All functions are pure data transformations over
+// `[]Entry` / topic maps. Concurrent callers never race.
+//
+// # Related Packages
+//
+// - [internal/cli/journal/cmd/site] — the zensical
+// site builder that consumes this package.
+// - [internal/cli/journal/core/moc] — Map-of-Content
+// pages (homepage MOC, Obsidian topics MOC).
+// - [internal/cli/journal/core/generate] — top-level
+// site page templates (README, index page).
+// - [internal/entity] — [Entry],
+// [TopicData], [GroupedIndex] domain types.
 package section diff --git a/internal/cli/journal/core/slug/doc.go b/internal/cli/journal/core/slug/doc.go index f8c4856d3..72c725c41 100644 --- a/internal/cli/journal/core/slug/doc.go +++ b/internal/cli/journal/core/slug/doc.go @@ -1,13 +1,58 @@ // / ctx: https://ctx.ist // ,'`./ do you remember? -// `.,'\\ +// `.,'\ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package slug generates URL-safe identifiers from session titles. +// Package slug generates **URL-safe, filesystem-safe +// identifiers** from session titles and handles the +// deduplication logic that keeps two sessions with the +// same title from colliding on disk. // -// [FromTitle] converts a title to a lowercase hyphenated slug. -// [CleanTitle] removes non-alphanumeric characters. [ForTitle] -// generates both a slug and cleaned title for a session, handling -// deduplication against existing slugs. +// Slugs are how journal entries are addressed throughout +// ctx: filenames are `YYYY-MM-DD-<slug>.md`, links use +// the slug as the path component, and `ctx journal source +// --show <slug>` looks up by slug. +// +// # Public Surface
+// +// - **[FromTitle](title)** — converts a title to a
+// lowercase, hyphenated, alphanumeric slug.
+// Strips punctuation, collapses runs of separators,
+// trims leading/trailing hyphens. Idempotent. 
+// - **[CleanTitle](title)** — strips non-alphanumeric
+// characters from a display title (kept for the
+// filename's human-readable suffix when one is
+// wanted in addition to the slug).
+// - **[ForTitle](title, existing)** — the dedup-aware
+// wrapper: produces both a slug and a cleaned
+// title, appending `-2`, `-3`, etc. when the
+// base slug already exists in `existing`. Used by
+// the importer when two sessions share a topic
+// summary.
+//
+// # Stability Contract
+//
+// The slug for a given (title, dedup-context) pair is
+// **deterministic**: re-running the importer against
+// the same source produces the same slug. This is
+// what makes the importer idempotent and what lets
+// `git diff` show a meaningful patch when an entry
+// is re-enriched.
+//
+// # Concurrency
+//
+// All functions are pure. Concurrent callers never
+// race.
+//
+// # Related Packages
+//
+// - [internal/cli/journal/cmd/importer] — chief
+// consumer; calls [ForTitle] when materializing
+// a session as a journal entry.
+// - [internal/cli/journal/core/parse] — reads
+// slugs out of frontmatter when reconciling
+// state.
+// - [internal/cli/journal/cmd/source] — looks
+// up entries by slug.
 package slug diff --git a/internal/cli/journal/core/source/doc.go b/internal/cli/journal/core/source/doc.go index 82a4dae24..1a5927009 100644 --- a/internal/cli/journal/core/source/doc.go +++ b/internal/cli/journal/core/source/doc.go @@ -1,13 +1,51 @@ // / ctx: https://ctx.ist // ,'`./ do you remember? -// `.,'\\ +// `.,'\ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package source contains helpers for rendering session source -// content to journal markdown. +// Package source contains the **rendering helpers** that +// turn a parsed AI session ([entity.Session]) into the +// markdown that `ctx journal source --show <slug>` and +// `ctx journal import` write to disk. 
// -// Subpackages: format (part navigation, duration, tool use -// formatting), frontmatter (heading resolution, YAML field -// writing). +// The package is split into two focused subpackages: +// +// - **[format]** — small format primitives: +// part-navigation links for multipart sessions +// ([PartNavigation]), [Duration] for human-readable +// time spans, and [ToolUse] for one-line tool-call +// summaries. +// - **[frontmatter]** — YAML frontmatter assembly: +// heading resolution from session content, field +// writing, and ordering so re-import produces +// byte-identical output. +// +// The top-level `source.go` here defines the [Opts] +// flag-bag the `ctx journal source` subcommand fills in +// (`--show`, `--latest`, `--limit`, `--full`, `--project`, +// `--since`, etc.) and used by callers that need to ask +// "which session(s) does the user mean?". +// +// # Public Surface +// +// - **[Opts]** — flag-bag for source +// selection. +// - **[format]** — see subpackage docs. +// - **[frontmatter]** — see subpackage docs. +// +// # Concurrency +// +// All helpers are pure data transformations over +// [entity.Session]. Concurrent callers never race. +// +// # Related Packages +// +// - [internal/cli/journal/cmd/source] — the +// `ctx journal source` CLI surface. +// - [internal/cli/journal/cmd/importer] — the +// importer that consumes the same renderer to +// write enriched journal entries to disk. +// - [internal/journal/parser] — produces +// the [entity.Session] this package renders. package source diff --git a/internal/cli/journal/core/source/format/doc.go b/internal/cli/journal/core/source/format/doc.go index 2055fdb07..93eb70724 100644 --- a/internal/cli/journal/core/source/format/doc.go +++ b/internal/cli/journal/core/source/format/doc.go @@ -1,12 +1,58 @@ // / ctx: https://ctx.ist // ,'`./ do you remember? -// `.,'\\ +// `.,'\ // \ Copyright 2026-present Context contributors. 
// SPDX-License-Identifier: Apache-2.0 -// Package format provides formatting for session source content. +// Package format provides the **fine-grained formatting +// primitives** used to render a parsed AI session into +// human-readable markdown — part navigation, duration +// strings, tool-call summaries, file references. // -// [PartNavigation] generates Previous/Next links for multipart -// sessions. [Duration] formats a time.Duration for display. -// [ToolUse] formats a tool invocation as a readable string. +// The package sits one level below +// [internal/cli/journal/core/source]: this package answers +// "how do I render *this fragment*", `source` answers +// "how do I render the whole session". +// +// # Public Surface +// +// - **[PartNavigation](currentPart, totalParts, slug)** +// — generates Previous / Next links for multipart +// sessions (sessions long enough to be split across +// several files). Returns markdown ready to splice +// into the per-part frontmatter. +// - **[Duration](d)** — formats a `time.Duration` as +// "23m 14s" / "2h 5m" / "3 days" depending on +// magnitude. Empty when zero. +// - **[ToolUse](tu)** — one-line summary of a tool +// call: tool name, key argument(s), success/error. +// Used in the per-turn header and in the +// compressed view. +// - **[ToolResult](tr)** — one-line summary of the +// tool's output, truncated to a configurable +// preview length. +// +// # Local Time vs UTC +// +// Date headers use **local time** so the user sees +// timestamps in their own timezone. UTC is reserved for +// stored timestamps (frontmatter fields) where +// timezone-stable comparisons matter. +// +// # Concurrency +// +// All functions are pure. Concurrent callers never race. +// +// # Related Packages +// +// - [internal/cli/journal/core/source] — parent +// package that orchestrates the per-fragment +// renderers exposed here. 
+// - [internal/cli/journal/core/source/frontmatter] — +// sister sub-package for YAML frontmatter +// assembly. +// - [internal/format] — general +// time/number formatters this package builds on. +// - [internal/entity] — [Session], +// [Message], [ToolUse], [ToolResult] domain types. package format diff --git a/internal/cli/journal/core/turn/doc.go b/internal/cli/journal/core/turn/doc.go index 35a253545..d2f482535 100644 --- a/internal/cli/journal/core/turn/doc.go +++ b/internal/cli/journal/core/turn/doc.go @@ -1,12 +1,45 @@ // / ctx: https://ctx.ist // ,'`./ do you remember? -// `.,'\\ +// `.,'\ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package turn handles conversation turn parsing and merging. +// Package turn handles **conversation turn parsing and +// merging** in journal markdown — finding where each +// user/assistant turn begins and ends, and merging +// adjacent turns from the same role into one block when +// the original transcript had artificial splits. // -// [Body] extracts the body text of a conversation turn starting -// from a given line index. [MergeConsecutive] combines adjacent -// turns from the same role into a single block. +// The package operates on already-normalized journal +// content (after [normalize] and friends have run); it +// is the per-turn slicer the renderers and the +// per-turn-anchor navigator both rely on. +// +// # Public Surface +// +// - **[Body](lines, startIdx)** — extracts the body +// text of a single turn starting at `startIdx`. +// Reads forward to the next turn header (or EOF) +// and returns the in-between lines. +// - **[MergeConsecutive](lines)** — collapses +// adjacent turns from the same role into a +// single combined block. Useful when Claude +// Code split a long assistant response across +// two consecutive `assistant:` turns due to +// internal pacing. +// +// # Concurrency +// +// Pure data transformation. Concurrent callers never +// race. 
+// +// # Related Packages +// +// - [internal/cli/journal/core/normalize] — runs +// before this package; defines the canonical +// turn-header shape that [Body] looks for. +// - [internal/cli/journal/cmd/site] / +// [internal/cli/journal/cmd/obsidian] — +// consumers that need per-turn slicing for +// anchor navigation and per-turn rendering. package turn diff --git a/internal/cli/journal/core/wikilink/doc.go b/internal/cli/journal/core/wikilink/doc.go index 97f9300f8..26823ff58 100644 --- a/internal/cli/journal/core/wikilink/doc.go +++ b/internal/cli/journal/core/wikilink/doc.go @@ -4,10 +4,53 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package wikilink converts standard Markdown links to Obsidian -// wikilink format for vault generation. +// Package wikilink converts standard Markdown links to +// **Obsidian-style `[[wikilinks]]`** during vault export so +// Obsidian's graph view, backlinks, and unlinked-mentions +// features pick up the journal's cross-references natively. // -// [ConvertMarkdownLinks] rewrites [text](url) links to [[target|text]] -// syntax. [Format] builds a single wikilink string. [FormatEntry] -// builds a wikilink for a journal entry using its filename and title. +// The package is one of the per-renderer adapters in the +// site/vault pipeline; the site renderer keeps standard +// `[text](url.md)` markdown, the vault renderer routes +// links through here. +// +// # Public Surface +// +// - **[ConvertMarkdownLinks](text)** — rewrites every +// `[text](url.md)` in `text` to `[[target|text]]` +// (Obsidian's display-text wikilink form). +// Preserves URLs that are not journal entries (raw +// `https://...` links, anchor-only refs). +// - **[Format](target, display)** — builds a single +// wikilink string. `display` is optional; pass +// empty to get `[[target]]`. 
+// - **[FormatEntry](entry)** — convenience that +// produces the canonical wikilink for a journal +// entry using its slug as target and its title +// as display text. +// +// # The "Why Obsidian Form" Question +// +// Obsidian's wikilinks resolve **by note name**, not by +// path. A vault expects `[[my-note]]` regardless of where +// `my-note.md` lives in the folder hierarchy. Standard +// markdown links break the moment the vault is +// reorganized; wikilinks survive. +// +// # Concurrency +// +// All functions are pure. Concurrent callers never +// race. +// +// # Related Packages +// +// - [internal/cli/journal/cmd/obsidian] — chief +// consumer; calls [ConvertMarkdownLinks] on every +// entry body. +// - [internal/cli/journal/core/moc] — uses +// [Format] / [FormatEntry] when emitting the MOC +// pages. +// - [internal/cli/journal/core/frontmatter] — the +// vault's per-entry frontmatter sister +// transformation. package wikilink diff --git a/internal/cli/learning/cmd/reindex/cmd.go b/internal/cli/learning/cmd/reindex/cmd.go index 07c686d4a..43a8ac735 100644 --- a/internal/cli/learning/cmd/reindex/cmd.go +++ b/internal/cli/learning/cmd/reindex/cmd.go @@ -4,7 +4,6 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package reindex provides the "ctx learning reindex" subcommand. package reindex import ( diff --git a/internal/cli/learning/doc.go b/internal/cli/learning/doc.go index 701058698..8eddd028a 100644 --- a/internal/cli/learning/doc.go +++ b/internal/cli/learning/doc.go @@ -4,9 +4,51 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package learning implements the "ctx learning" command for managing. +// Package learning implements the **`ctx learning`** +// command group for managing `LEARNINGS.md` — currently +// just the `reindex` subcommand that regenerates the +// quick-reference index table at the top of the file. // -// Key exports: [Cmd]. 
-// See source files for implementation details. -// Part of the cli subsystem. +// `LEARNINGS.md` is the project's running record of +// gotchas, "gotcha" notes, and hard-won lessons. The +// quick-reference index lets `ctx agent` inject a +// token-cheap **table of contents** instead of the full +// prose, so the AI can scan available learnings and +// request the ones it needs by ID. +// +// # Subcommands +// +// - **`ctx learning reindex`** — rebuilds the index +// table by parsing every entry header in +// `LEARNINGS.md` and emitting a fresh +// chronologically-sorted table between the +// `` / `` +// markers. Idempotent. See +// [internal/cli/learning/cmd/reindex] for the +// implementation. +// +// # Adding Entries +// +// New learnings are added through `ctx add learning` +// (the `add` family lives in [internal/cli/add]); this +// package currently only owns the index-maintenance +// side. The `_ctx-learning-add` skill wraps the add +// flow with a guided prompt. +// +// # Concurrency +// +// Stateless. The CLI command runs once and exits. +// +// # Related Packages +// +// - [internal/cli/add] — the +// `ctx add learning` entry path. +// - [internal/cli/decision] — sister +// command for `DECISIONS.md`. +// - [internal/cli/reindex] — the +// `ctx reindex` convenience that runs both +// learning and decision reindex in one call. +// - [internal/index] — the +// index-table generator and parser this package +// ultimately drives. package learning diff --git a/internal/cli/mcp/mcp.go b/internal/cli/mcp/mcp.go index 39afdcd5d..21d0b20ec 100644 --- a/internal/cli/mcp/mcp.go +++ b/internal/cli/mcp/mcp.go @@ -4,7 +4,6 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package mcp provides the CLI command for running the MCP server. 
package mcp import ( diff --git a/internal/cli/memory/cmd/status/cmd.go b/internal/cli/memory/cmd/status/cmd.go index 06d246d8c..671790dee 100644 --- a/internal/cli/memory/cmd/status/cmd.go +++ b/internal/cli/memory/cmd/status/cmd.go @@ -4,7 +4,6 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package status implements the "ctx memory status" subcommand. package status import ( diff --git a/internal/cli/memory/cmd/sync/cmd.go b/internal/cli/memory/cmd/sync/cmd.go index 59c1c458b..1ad142405 100644 --- a/internal/cli/memory/cmd/sync/cmd.go +++ b/internal/cli/memory/cmd/sync/cmd.go @@ -4,7 +4,6 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package sync implements the "ctx memory sync" subcommand. package sync import ( diff --git a/internal/cli/message/cmd.go b/internal/cli/message/cmd.go index 376381e58..ba23a9e36 100644 --- a/internal/cli/message/cmd.go +++ b/internal/cli/message/cmd.go @@ -12,7 +12,7 @@ import ( "github.com/ActiveMemory/ctx/internal/cli/message/cmd/root" ) -// Cmd returns the "ctx message" top-level command. +// Cmd returns the "ctx hook message" command. // // Returns: // - *cobra.Command: Configured message command with subcommands diff --git a/internal/cli/message/cmd/edit/cmd.go b/internal/cli/message/cmd/edit/cmd.go index cc41eb82d..c0afa33ef 100644 --- a/internal/cli/message/cmd/edit/cmd.go +++ b/internal/cli/message/cmd/edit/cmd.go @@ -13,7 +13,7 @@ import ( "github.com/ActiveMemory/ctx/internal/config/embed/cmd" ) -// Cmd returns the "ctx message edit" subcommand. +// Cmd returns the "ctx hook message edit" subcommand. 
//
// Returns:
// - *cobra.Command: Configured edit subcommand
diff --git a/internal/cli/message/cmd/edit/doc.go b/internal/cli/message/cmd/edit/doc.go
index 3d1716349..2ba4aeb99 100644
--- a/internal/cli/message/cmd/edit/doc.go
+++ b/internal/cli/message/cmd/edit/doc.go
@@ -4,7 +4,7 @@
// \ Copyright 2026-present Context contributors.
// SPDX-License-Identifier: Apache-2.0
-// Package edit provides the ctx system message edit subcommand for.
+// Package edit provides the ctx hook message edit subcommand.
// 
// Key exports: [Cmd], [Run].
// Follows the cmd/root + core taxonomy.
diff --git a/internal/cli/message/cmd/list/cmd.go b/internal/cli/message/cmd/list/cmd.go
index c9ea76de1..1ac114287 100644
--- a/internal/cli/message/cmd/list/cmd.go
+++ b/internal/cli/message/cmd/list/cmd.go
@@ -15,7 +15,7 @@ import (
cFlag "github.com/ActiveMemory/ctx/internal/config/flag"
)
-// Cmd returns the "ctx message list" subcommand.
+// Cmd returns the "ctx hook message list" subcommand.
// 
// Returns:
// - *cobra.Command: Configured list subcommand
diff --git a/internal/cli/message/cmd/list/doc.go b/internal/cli/message/cmd/list/doc.go
index 3b128e9c5..ebfff5b24 100644
--- a/internal/cli/message/cmd/list/doc.go
+++ b/internal/cli/message/cmd/list/doc.go
@@ -4,7 +4,7 @@
// \ Copyright 2026-present Context contributors.
// SPDX-License-Identifier: Apache-2.0
-// Package list provides the ctx system message list subcommand for.
+// Package list provides the ctx hook message list subcommand.
// 
// Key exports: [Cmd], [Run].
// Follows the cmd/root + core taxonomy.
diff --git a/internal/cli/message/cmd/reset/cmd.go b/internal/cli/message/cmd/reset/cmd.go
index b962cf832..d7b43f91d 100644
--- a/internal/cli/message/cmd/reset/cmd.go
+++ b/internal/cli/message/cmd/reset/cmd.go
@@ -13,7 +13,7 @@ import (
"github.com/ActiveMemory/ctx/internal/config/embed/cmd"
)
-// Cmd returns the "ctx message reset" subcommand.
+// Cmd returns the "ctx hook message reset" subcommand.
// // Returns: // - *cobra.Command: Configured reset subcommand diff --git a/internal/cli/message/cmd/reset/doc.go b/internal/cli/message/cmd/reset/doc.go index ab5b62fcd..df78c4112 100644 --- a/internal/cli/message/cmd/reset/doc.go +++ b/internal/cli/message/cmd/reset/doc.go @@ -4,7 +4,7 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package reset provides the ctx system message reset subcommand. +// Package reset provides the ctx hook message reset subcommand. // // Key exports: [Cmd], [Run]. // Follows the cmd/root + core taxonomy. diff --git a/internal/cli/message/cmd/root/cmd.go b/internal/cli/message/cmd/root/cmd.go index 57ec5a957..fb93a5ec9 100644 --- a/internal/cli/message/cmd/root/cmd.go +++ b/internal/cli/message/cmd/root/cmd.go @@ -17,7 +17,7 @@ import ( "github.com/ActiveMemory/ctx/internal/config/embed/cmd" ) -// Cmd returns the "ctx message" top-level command. +// Cmd returns the "ctx hook message" command. // // Returns: // - *cobra.Command: Configured message command diff --git a/internal/cli/message/cmd/root/doc.go b/internal/cli/message/cmd/root/doc.go index 1487a291e..04d981cf6 100644 --- a/internal/cli/message/cmd/root/doc.go +++ b/internal/cli/message/cmd/root/doc.go @@ -4,7 +4,7 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package root provides the parent command for ctx system message. +// Package root provides the parent command for ctx hook message. // // Key exports: [Cmd]. // Follows the cmd/root + core taxonomy. diff --git a/internal/cli/message/cmd/show/cmd.go b/internal/cli/message/cmd/show/cmd.go index e5cccaf76..48fbc1c1f 100644 --- a/internal/cli/message/cmd/show/cmd.go +++ b/internal/cli/message/cmd/show/cmd.go @@ -13,7 +13,7 @@ import ( "github.com/ActiveMemory/ctx/internal/config/embed/cmd" ) -// Cmd returns the "ctx message show" subcommand. +// Cmd returns the "ctx hook message show" subcommand. 
// // Returns: // - *cobra.Command: Configured show subcommand diff --git a/internal/cli/message/cmd/show/doc.go b/internal/cli/message/cmd/show/doc.go index 13e4d7206..422695fd8 100644 --- a/internal/cli/message/cmd/show/doc.go +++ b/internal/cli/message/cmd/show/doc.go @@ -4,7 +4,7 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package show provides the ctx system message show subcommand. +// Package show provides the ctx hook message show subcommand. // // Key exports: [Cmd], [Run]. // Follows the cmd/root + core taxonomy. diff --git a/internal/cli/message/doc.go b/internal/cli/message/doc.go index d55d87d2e..cb81084b6 100644 --- a/internal/cli/message/doc.go +++ b/internal/cli/message/doc.go @@ -4,7 +4,7 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package message provides the parent command for ctx system message. +// Package message provides the parent command for ctx hook message. // // Key exports: [Cmd]. // Follows the cmd/root + core taxonomy. diff --git a/internal/cli/notify/cmd/setup/cmd.go b/internal/cli/notify/cmd/setup/cmd.go index e0f82f331..373d8f0c0 100644 --- a/internal/cli/notify/cmd/setup/cmd.go +++ b/internal/cli/notify/cmd/setup/cmd.go @@ -15,7 +15,7 @@ import ( "github.com/ActiveMemory/ctx/internal/config/embed/cmd" ) -// Cmd returns the "ctx notify setup" subcommand. +// Cmd returns the "ctx hook notify setup" subcommand. // // Returns: // - *cobra.Command: Configured setup subcommand diff --git a/internal/cli/notify/cmd/setup/doc.go b/internal/cli/notify/cmd/setup/doc.go index ac3d3e8df..2ec859fb2 100644 --- a/internal/cli/notify/cmd/setup/doc.go +++ b/internal/cli/notify/cmd/setup/doc.go @@ -4,7 +4,7 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package setup implements the ctx notify setup subcommand. +// Package setup implements the ctx hook notify setup subcommand. 
// // Key exports: [Cmd], [Run]. // Follows the cmd/root + core taxonomy. diff --git a/internal/cli/notify/cmd/test/cmd.go b/internal/cli/notify/cmd/test/cmd.go index 9ba7c9139..3e0699249 100644 --- a/internal/cli/notify/cmd/test/cmd.go +++ b/internal/cli/notify/cmd/test/cmd.go @@ -13,7 +13,7 @@ import ( "github.com/ActiveMemory/ctx/internal/config/embed/cmd" ) -// Cmd returns the "ctx notify test" subcommand. +// Cmd returns the "ctx hook notify test" subcommand. // // Returns: // - *cobra.Command: Configured test subcommand diff --git a/internal/cli/notify/cmd/test/doc.go b/internal/cli/notify/cmd/test/doc.go index ebb95c4fd..f46575903 100644 --- a/internal/cli/notify/cmd/test/doc.go +++ b/internal/cli/notify/cmd/test/doc.go @@ -4,7 +4,7 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package test implements the ctx notify test subcommand. +// Package test implements the ctx hook notify test subcommand. // // Key exports: [Cmd], [Run]. // Follows the cmd/root + core taxonomy. diff --git a/internal/cli/notify/doc.go b/internal/cli/notify/doc.go index 419cf4bd8..ec412a6f4 100644 --- a/internal/cli/notify/doc.go +++ b/internal/cli/notify/doc.go @@ -4,9 +4,42 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package notify implements the ctx notify command for sending webhook. +// Package notify implements the **`ctx hook notify`** +// command surface — webhook send, setup, and test — +// that wraps the in-process [internal/notify] engine for +// CLI use. // -// Key exports: [Cmd]. -// See source files for implementation details. -// Part of the cli subsystem. +// The command lives under `ctx hook` rather than at root +// because notifications are part of the **hook subsystem** +// (delivered when hooks fire) — see +// `internal/cli/hook/hook.go` for the parent registration. +// +// # Subcommands +// +// - **`ctx hook notify [message]`** — fire-and-forget +// send. 
Required: `--event `. Optional: +// `--session-id`, `--hook`, `--variant`. Honors +// the `notify.events` filter in `.ctxrc`; silent +// no-op when the event is not whitelisted. +// - **`ctx hook notify setup`** — interactive prompt +// to capture and encrypt the webhook URL. See +// [internal/cli/notify/cmd/setup]. +// - **`ctx hook notify test`** — sends a test event, +// **bypassing** the event filter so users can +// verify connectivity without subscribing the test +// event first. See [internal/cli/notify/cmd/test]. +// +// # Concurrency +// +// Stateless. The CLI command spawns one HTTP request +// and exits. +// +// # Related Packages +// +// - [internal/notify] — the engine; encrypt / +// decrypt URL, send HTTP, evaluate event filter. +// - [internal/cli/hook] — the parent command that +// registers this one. +// - [internal/cli/loop] — autonomous loops post +// `loop` events through this surface. package notify diff --git a/internal/cli/notify/notify.go b/internal/cli/notify/notify.go index ce82829af..53dc326f2 100644 --- a/internal/cli/notify/notify.go +++ b/internal/cli/notify/notify.go @@ -24,7 +24,7 @@ import ( iNotify "github.com/ActiveMemory/ctx/internal/notify" ) -// Cmd returns the "ctx notify" parent command. +// Cmd returns the "ctx hook notify" parent command. // // Returns: // - *cobra.Command: Configured notify command with subcommands diff --git a/internal/cli/pad/cmd/edit/doc.go b/internal/cli/pad/cmd/edit/doc.go index 2148b1a02..145b9d95a 100644 --- a/internal/cli/pad/cmd/edit/doc.go +++ b/internal/cli/pad/cmd/edit/doc.go @@ -4,9 +4,34 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package edit implements the ctx pad edit subcommand. +// Package edit implements **`ctx pad edit`** — the +// subcommand that decrypts the scratchpad to a temp file, +// spawns the user's `$EDITOR` against it, and re-encrypts +// the result on save. // -// Key exports: [Cmd], [Run]. 
-// Follows the cmd/root + core taxonomy. -// Registered by the cmd parent command. +// # Behavior +// +// - **Cleartext temp file** lives in the secure +// temp directory and is `0o600`. +// - **Editor invocation** uses `$EDITOR` (or +// `vi` as fallback). Foreground; ctx blocks +// until the editor exits. +// - **Re-encrypt** on successful exit. Editor +// non-zero exit aborts the write and leaves the +// scratchpad untouched. +// - **Cleanup** — the temp file is removed in a +// deferred handler regardless of outcome so a +// crashed editor does not leak plaintext. +// +// # Concurrency +// +// Single-process, sequential. +// +// # Related Packages +// +// - [internal/cli/pad/core/store] — the scratchpad +// read/write helpers. +// - [internal/crypto] — encrypt/decrypt +// primitives. +// - [internal/cli/pad] — parent CLI. package edit diff --git a/internal/cli/pad/core/parse/doc.go b/internal/cli/pad/core/parse/doc.go index 939099598..601a919a5 100644 --- a/internal/cli/pad/core/parse/doc.go +++ b/internal/cli/pad/core/parse/doc.go @@ -4,9 +4,39 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package parse splits raw scratchpad content into individual entry lines. +// Package parse splits raw scratchpad content into +// individual entries — the helper every `ctx pad` +// subcommand uses to turn the on-disk blob into a +// `[]Entry` it can filter, render, or mutate. // -// Key exports: [Entries], [FormatEntries]. -// Shared helpers used by sibling cmd/ packages. -// Used by core cmd/ packages. +// # Public Surface +// +// - **[Entries](raw)** — returns the entry slice +// parsed from the scratchpad text. Recognizes +// the `## YYYY-MM-DD HH:MM:SS` entry header; +// everything between two headers (or between a +// header and EOF) is one entry's body. +// - **[FormatEntries](entries)** — the inverse: +// serializes a `[]Entry` back to the raw on-disk +// shape so writes round-trip cleanly. 
+// +// # Round-Trip Stability +// +// `FormatEntries(Entries(x))` is byte-identical to +// `x` when `x` is well-formed. This invariant is +// what makes `ctx pad edit` safe: the user's edits +// only land where the user typed. +// +// # Concurrency +// +// Pure data transformation. Concurrent callers +// never race. +// +// # Related Packages +// +// - [internal/cli/pad/core/store] — chief consumer +// during read/write. +// - [internal/cli/pad/core/tag] — extracts +// `#tags` from each entry's body. +// - [internal/cli/pad] — top-level CLI. package parse diff --git a/internal/cli/pad/core/store/doc.go b/internal/cli/pad/core/store/doc.go index 66aa013a4..ae227eb8f 100644 --- a/internal/cli/pad/core/store/doc.go +++ b/internal/cli/pad/core/store/doc.go @@ -4,10 +4,54 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package store manages scratchpad file persistence including. +// Package store manages **scratchpad file persistence**: +// the encrypted on-disk file, the AES-256-GCM key, and +// the read/write helpers every `ctx pad` subcommand uses. // -// Key exports: [ScratchpadPath], [KeyPath], -// [EnsureKey], [ReadEntries], [WriteEntries]. -// Shared helpers used by sibling cmd/ packages. -// Used by core cmd/ packages. +// # Public Surface +// +// - **[ScratchpadPath](contextDir)** — returns the +// absolute path of `.context/.scratchpad.enc`. +// - **[KeyPath]** — returns the per-machine key +// path (`~/.ctx/.ctx.key`); shared with +// [internal/notify]. +// - **[EnsureKey]** — creates the key on first use +// and returns it. Subsequent calls just return +// the existing key. +// - **[ReadEntries](contextDir)** — decrypts the +// scratchpad, parses it via +// [internal/cli/pad/core/parse], returns a +// `[]Entry`. Returns an empty slice (not an +// error) when the file does not exist yet. +// - **[WriteEntries](contextDir, entries)** — +// formats, encrypts, atomically writes. 
Backup +// is unnecessary because re-encryption never +// produces a partial file when the rename +// succeeds. +// +// # Atomic Writes +// +// Writes go through `tmpfile + os.Rename` so a +// crashed process never leaves a half-written +// `.scratchpad.enc`. The temp file is always created +// in the same directory as the destination so the +// rename is on the same filesystem (atomic on +// POSIX). +// +// # Concurrency +// +// Single-process. Concurrent writers would race on +// the temp filename collision; ctx is single-process +// by design. +// +// # Related Packages +// +// - [internal/cli/pad/core/parse] — parser used +// during reads. +// - [internal/crypto] — encrypt / +// decrypt primitives. +// - [internal/notify] — shares the +// same per-machine key. +// - [internal/cli/pad] — top-level +// CLI. package store diff --git a/internal/cli/pad/core/tag/doc.go b/internal/cli/pad/core/tag/doc.go index 863704bb1..f72fdaa9a 100644 --- a/internal/cli/pad/core/tag/doc.go +++ b/internal/cli/pad/core/tag/doc.go @@ -4,9 +4,45 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package tag extracts and matches #word tags in scratchpad entries. +// Package tag extracts and matches **`#word` tags** in +// scratchpad entries. Tags are convention-based: any +// `#word` token in entry text counts as a tag. The +// package owns the predicates that the `ctx pad` CLI +// uses for filtering and the `ctx pad tags` subcommand +// uses to list every tag in the scratchpad. // -// Key exports: [Extract], [Has], [Match], [MatchAll], [ScanText]. -// Tags are convention-based: any #word token in entry text is a tag. -// Used by the pad root command for filtering and the tags subcommand. +// # Public Surface +// +// - **[Extract](text)** — returns every `#word` +// occurrence in `text` as a `[]string`. +// De-duplicates and lower-cases. +// - **[Has](text, tag)** — predicate: does `text` +// contain `#tag`? 
+// - **[Match](entry, query)** — true when `entry` +// matches the query (single tag). +// - **[MatchAll](entry, queries)** — true when +// `entry` matches every tag in `queries` (AND +// semantics). +// - **[ScanText](text, fn)** — visitor: invoke +// `fn` for every tag in `text`. +// +// # Tag Syntax +// +// `#word` where `word` is `[a-z0-9_-]+`. Anchored +// to a word boundary (so `class#1` does not produce +// a `1` tag). Comparison is case-insensitive. +// +// # Concurrency +// +// All functions are pure. Concurrent callers never +// race. +// +// # Related Packages +// +// - [internal/cli/pad] — chief consumer +// for filtering. +// - [internal/cli/pad/cmd/tags] — the dedicated +// tags subcommand. +// - [internal/cli/pad/core/parse] — supplies the +// entry stream this package scans. package tag diff --git a/internal/cli/pause/cmd/root/cmd.go b/internal/cli/pause/cmd/root/cmd.go index 3d301d0c4..bfef23359 100644 --- a/internal/cli/pause/cmd/root/cmd.go +++ b/internal/cli/pause/cmd/root/cmd.go @@ -15,7 +15,7 @@ import ( cFlag "github.com/ActiveMemory/ctx/internal/config/flag" ) -// Cmd returns the top-level "ctx pause" command. +// Cmd returns the "ctx hook pause" command. // // Returns: // - *cobra.Command: Configured pause command diff --git a/internal/cli/pause/cmd/root/doc.go b/internal/cli/pause/cmd/root/doc.go index 2764c594f..95d46d37b 100644 --- a/internal/cli/pause/cmd/root/doc.go +++ b/internal/cli/pause/cmd/root/doc.go @@ -4,7 +4,7 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package root implements the ctx pause command. +// Package root implements the ctx hook pause command. // // Key exports: [Cmd], [Run]. // Follows the cmd/root + core taxonomy. 
diff --git a/internal/cli/pause/pause.go b/internal/cli/pause/pause.go index 4e3564120..fd83bf7cf 100644 --- a/internal/cli/pause/pause.go +++ b/internal/cli/pause/pause.go @@ -12,7 +12,7 @@ import ( pauseRoot "github.com/ActiveMemory/ctx/internal/cli/pause/cmd/root" ) -// Cmd returns the top-level "ctx pause" command. +// Cmd returns the "ctx hook pause" command. // // Returns: // - *cobra.Command: The pause command with subcommands registered diff --git a/internal/cli/permission/doc.go b/internal/cli/permission/doc.go index 96dbd8b29..6b251d1d2 100644 --- a/internal/cli/permission/doc.go +++ b/internal/cli/permission/doc.go @@ -4,13 +4,63 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package permission implements the "ctx permission" command for managing -// Claude Code permission snapshots. +// Package permission implements **`ctx permission`** — the +// CLI for capturing and restoring **golden-image +// snapshots** of `.claude/settings.local.json` so a team +// can maintain a curated permission baseline and +// automatically drop session-accumulated permissions at +// session start. // -// The permission package provides subcommands to: -// - snapshot: Save settings.local.json as a golden image -// - restore: Reset settings.local.json from the golden image +// # The Problem It Solves // -// Golden images allow teams to maintain a curated permission baseline and -// automatically drop session-accumulated permissions at session start. +// During a Claude Code session, the user often grants +// one-off permissions (a `Bash(git commit*)`, a +// `Read(/tmp/...)`) that they did not mean to keep +// permanently. By session end the `allow:` list has +// drifted from the team's intended baseline. Manually +// pruning the file every few days is tedious and +// error-prone. +// +// # The Workflow +// +// 1. **Snapshot** — once, after a careful curation +// pass, the user runs `ctx permission snapshot`. 
+// The current `settings.local.json` is copied to +// `.context/permissions.golden.json` and committed +// to git as the team's baseline. +// 2. **Restore** — at session start (often via the +// `_ctx-permission-sanitize` skill or a simple +// `make` target), `ctx permission restore` resets +// `settings.local.json` to the golden image. +// Today's session starts clean. +// 3. **Iterate** — when the user finds a +// permission they actually want to keep, they +// re-snapshot to lock it in. +// +// # Subcommands +// +// - **snapshot** — copy `settings.local.json` → +// `permissions.golden.json` (overwrites previous +// golden image; the git history is the safety +// net). +// - **restore** — copy `permissions.golden.json` → +// `settings.local.json` (creates a `.bak` first). +// +// # Concurrency +// +// Filesystem-bound and stateless. Concurrent +// invocations would race on the destination file; +// single-process is the assumed model. +// +// # Related Packages +// +// - [internal/cli/initialize/core/merge] — the +// same merger used by `ctx init` for permission +// defaults; complementary to this package's +// wholesale snapshot/restore. +// - [internal/assets/claude/skills/ctx-permission-sanitize] +// — the skill that wraps `restore` for +// interactive use. +// - [internal/config/claude] — settings +// file path constants. package permission diff --git a/internal/cli/remind/doc.go b/internal/cli/remind/doc.go index 0f299e6a9..ad48e2af3 100644 --- a/internal/cli/remind/doc.go +++ b/internal/cli/remind/doc.go @@ -4,9 +4,53 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package remind manages session-scoped reminders stored in. +// Package remind implements **`ctx remind`** — the +// session-scoped reminders that surface automatically at +// session start and repeat until dismissed. // -// Key exports: [Cmd]. -// See source files for implementation details. -// Part of the cli subsystem. 
+// Reminders are how a user (or skill) leaves a note for +// **the next session**: "remember to update CHANGELOG +// before merging", "the failing test in `auth_test.go` +// needs review", "Bob owes us the production credentials +// by Friday". +// +// # Subcommands +// +// - **`ctx remind add `** — appends a reminder +// with optional `--after ` date gate +// and `--once` (auto-dismiss after first surface). +// - **`ctx remind list`** — prints all open +// reminders (with date-gating respected). +// - **`ctx remind dismiss `** — marks one +// reminder dismissed (or `--all`). +// +// # The Surface Path +// +// At session start, the `check_reminder` system hook +// (`internal/cli/system/cmd/check_reminder`) reads the +// reminder store, filters by date and dismissal +// status, and emits the un-dismissed entries through +// the VERBATIM relay so the user (and the agent) both +// see them as the first interaction of the session. +// +// # Storage +// +// `.context/state/reminders.jsonl` — append-only, +// one [Reminder] per line. Dismissals are recorded as +// new lines (state-machine-over-log style); the +// reader collapses to the latest state per ID. +// +// # Concurrency +// +// Filesystem-bound and stateless. Concurrent writes +// would race on the JSONL append; ctx is single-process. +// +// # Related Packages +// +// - [internal/cli/system/cmd/check_reminder] — the +// hook that surfaces reminders at session start. +// - [internal/cli/remind/core/store] — the +// append-only reminder store. +// - [internal/cli/remind/core/normalize] — the +// bulk maintenance command (de-dup, validate). package remind diff --git a/internal/cli/resume/cmd/root/cmd.go b/internal/cli/resume/cmd/root/cmd.go index f1bb33a6d..e30aae6e1 100644 --- a/internal/cli/resume/cmd/root/cmd.go +++ b/internal/cli/resume/cmd/root/cmd.go @@ -15,7 +15,7 @@ import ( cFlag "github.com/ActiveMemory/ctx/internal/config/flag" ) -// Cmd returns the top-level "ctx resume" command. 
+// Cmd returns the "ctx hook resume" command. // // Returns: // - *cobra.Command: Configured resume command diff --git a/internal/cli/resume/cmd/root/doc.go b/internal/cli/resume/cmd/root/doc.go index 392a10e9c..3e6374deb 100644 --- a/internal/cli/resume/cmd/root/doc.go +++ b/internal/cli/resume/cmd/root/doc.go @@ -4,7 +4,7 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package root implements the ctx resume command. +// Package root implements the ctx hook resume command. // // Key exports: [Cmd], [Run]. // Follows the cmd/root + core taxonomy. diff --git a/internal/cli/resume/resume.go b/internal/cli/resume/resume.go index b81f8af2d..dba97f01c 100644 --- a/internal/cli/resume/resume.go +++ b/internal/cli/resume/resume.go @@ -12,7 +12,7 @@ import ( resumeRoot "github.com/ActiveMemory/ctx/internal/cli/resume/cmd/root" ) -// Cmd returns the top-level "ctx resume" command. +// Cmd returns the "ctx hook resume" command. // // Returns: // - *cobra.Command: The resume command with subcommands registered diff --git a/internal/cli/setup/core/cline/cline.go b/internal/cli/setup/core/cline/cline.go index 65f974db2..5cf03a091 100644 --- a/internal/cli/setup/core/cline/cline.go +++ b/internal/cli/setup/core/cline/cline.go @@ -4,7 +4,6 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package cline generates Cline MCP and steering configuration files. package cline import ( diff --git a/internal/cli/setup/core/copilot/doc.go b/internal/cli/setup/core/copilot/doc.go index 7137755db..a4cf293d8 100644 --- a/internal/cli/setup/core/copilot/doc.go +++ b/internal/cli/setup/core/copilot/doc.go @@ -4,13 +4,57 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package copilot deploys GitHub Copilot integration files. 
+// Package copilot deploys the **GitHub Copilot integration +// files** that give Copilot Chat in VS Code access to +// project-specific context through the ctx MCP server. // -// [DeployInstructions] generates .github/copilot-instructions.md and -// the accompanying .vscode/mcp.json for VS Code Copilot MCP support. -// These files give Copilot access to project context through the ctx -// MCP server, enabling context-aware completions and chat responses. +// The package is the per-tool deployer called by the +// setup orchestrator when the user opts into Copilot +// integration (`ctx setup copilot`, or as part of +// `ctx init` when Copilot is detected). // -// Key exports: [DeployInstructions]. -// Called by the setup core orchestrator during ctx init. +// # What Gets Deployed +// +// [DeployInstructions] writes two artifacts to the +// project root: +// +// - **`.github/copilot-instructions.md`** — the +// prose Copilot Chat reads on session start. Tells +// it where context lives, which CLI commands it can +// ask the user to run, and the rule that MCP tool +// calls beat raw shell commands when both are +// available. Idempotent: ctx-managed sections are +// bracketed by markers so user edits outside the +// markers survive re-deployment. +// - **`.vscode/mcp.json`** — VS Code Copilot's MCP +// server registry. Spawns `ctx mcp` over +// stdin/stdout. Created if missing; merged if +// present (other MCP servers in the file are +// preserved). +// +// # Marker Convention +// +// `copilot-instructions.md` uses the same +// ` ... ` +// marker pattern as other ctx-managed files so users +// can edit non-ctx prose freely. See +// [internal/cli/initialize/core/merge] for the +// underlying mechanism. +// +// # Concurrency +// +// Filesystem-bound and stateless. Callers serialize +// through process-level execution. +// +// # Related Packages +// +// - [internal/cli/setup] — the +// `ctx setup copilot` CLI surface. 
+// - [internal/cli/setup/core/copilot_cli] — sister +// package for the Copilot **CLI** integration +// (different beast: hook scripts under `.github/ +// hooks/`, not VS Code chat). +// - [internal/cli/initialize/core/merge] — the +// marker-aware merger this package uses for the +// instructions file. package copilot diff --git a/internal/cli/setup/core/copilot_cli/doc.go b/internal/cli/setup/core/copilot_cli/doc.go index 621e3feb9..20367959d 100644 --- a/internal/cli/setup/core/copilot_cli/doc.go +++ b/internal/cli/setup/core/copilot_cli/doc.go @@ -4,9 +4,63 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package copilotcli deploys GitHub Copilot CLI hook scripts, -// agent definitions, instructions, skills, and MCP configuration. +// Package copilotcli deploys the **GitHub Copilot CLI +// integration**: hook scripts, agent definitions, skills, +// instructions, and MCP configuration that give the Copilot +// CLI feature parity with the Claude Code integration ctx +// ships natively. // -// [DeployHooks] is the public entry point called from the setup command. -// Helper functions handle individual artifact types. +// Copilot CLI is a different beast from Copilot Chat: it +// runs in the terminal, dispatches via shell scripts under +// `.github/hooks/`, and consumes a different config file +// format. This package handles all of that. +// +// # Public Surface +// +// - **[DeployHooks](projectRoot)** — the single +// public entry point called from the setup +// command. Orchestrates every artifact below. +// +// # What Gets Deployed +// +// - **`.github/hooks/ctx-hooks.json`** — the hook +// manifest declaring which `ctx system` command +// fires on each lifecycle event (sessionStart, +// preToolUse, postToolUse, sessionEnd). Skipped +// if a non-ctx version already exists. +// - **`.github/hooks/scripts/`** — wrapper shell +// scripts for any non-stdin hooks Copilot CLI +// expects. 
+// - **`.github/copilot/skills/`** — the same skills +// ctx ships under +// `internal/assets/integrations/copilot-cli/skills/`. +// - **`.github/copilot/agents/`** — agent definitions. +// - **`.github/copilot/INSTRUCTIONS.md`** — the +// persistent rules Copilot CLI loads on every +// prompt. +// - **MCP config** for Copilot CLI's MCP client so +// `ctx mcp` is available. +// +// # Idempotency +// +// Each deployment helper checks for an existing +// destination before writing; a present file is left +// alone (preserving user edits) unless the user passes +// `--force`. Skill files are stamped with a content +// hash so `ctx doctor` can detect drift between +// shipped and installed versions. +// +// # Concurrency +// +// Filesystem-bound and stateless. +// +// # Related Packages +// +// - [internal/cli/setup] — the +// `ctx setup copilot-cli` CLI surface. +// - [internal/cli/setup/core/copilot] — sister +// package for VS Code Copilot Chat. +// - [internal/assets/integrations/copilot-cli] — +// the embedded source for everything this +// package deploys. package copilotcli diff --git a/internal/cli/setup/core/cursor/cursor.go b/internal/cli/setup/core/cursor/cursor.go index e56f59a53..b6cbab22e 100644 --- a/internal/cli/setup/core/cursor/cursor.go +++ b/internal/cli/setup/core/cursor/cursor.go @@ -4,7 +4,6 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package cursor generates Cursor MCP and steering configuration files. package cursor import ( diff --git a/internal/cli/setup/core/kiro/kiro.go b/internal/cli/setup/core/kiro/kiro.go index 372ef769f..798516759 100644 --- a/internal/cli/setup/core/kiro/kiro.go +++ b/internal/cli/setup/core/kiro/kiro.go @@ -4,7 +4,6 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package kiro generates Kiro MCP and steering configuration files. 
package kiro import ( diff --git a/internal/cli/site/cmd/feed/cmd.go b/internal/cli/site/cmd/feed/cmd.go index 1533b5f11..5b9d574ad 100644 --- a/internal/cli/site/cmd/feed/cmd.go +++ b/internal/cli/site/cmd/feed/cmd.go @@ -4,7 +4,6 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package feed provides the "ctx site feed" subcommand. package feed import ( diff --git a/internal/cli/site/cmd/feed/doc.go b/internal/cli/site/cmd/feed/doc.go index 9f0fd7730..e0c965229 100644 --- a/internal/cli/site/cmd/feed/doc.go +++ b/internal/cli/site/cmd/feed/doc.go @@ -4,9 +4,42 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package feed implements the ctx site feed subcommand. +// Package feed implements **`ctx site feed`** — the +// hidden subcommand that generates the **RSS / Atom +// feed** for the project's published blog under the +// zensical site directory. // -// Key exports: [Cmd], [Run]. -// Follows the cmd/root + core taxonomy. -// Registered by the cmd parent command. +// # Public Surface +// +// - **[Cmd]** — cobra command with `--input` +// (blog directory; default `docs/blog/`) and +// `--output` (feed file; default +// `site/feed.xml`). +// - **[Run]** — scans the input directory via +// [internal/cli/site/core/scan], converts each +// post into a feed item (title, link, summary, +// pub date), and writes a valid feed XML +// document. +// +// # Why "Hidden" +// +// `ctx site` lives under [hiddenCmds] in the root +// registration because feed generation is part of +// the publish pipeline (called by `make site` and +// the `_ctx-blog-changelog` skill), not something +// users invoke at the prompt directly. +// +// # Concurrency +// +// Single-process, sequential. +// +// # Related Packages +// +// - [internal/cli/site] — parent +// command that hosts feed. +// - [internal/cli/site/core/scan] — the +// blog-post scanner. 
+// - [internal/cli/journal/cmd/site] — sister
+// command that generates the journal site (this
+// package targets the *blog*, not the journal).
package feed
diff --git a/internal/cli/site/core/scan/doc.go b/internal/cli/site/core/scan/doc.go
index 20aed8273..9db0e8ff5 100644
--- a/internal/cli/site/core/scan/doc.go
+++ b/internal/cli/site/core/scan/doc.go
@@ -4,9 +4,48 @@
 // \ Copyright 2026-present Context contributors.
 // SPDX-License-Identifier: Apache-2.0
 
-// Package scan reads blog post directories, parses frontmatter,.
+// Package scan reads **blog-post directories** and turns
+// them into the typed records the feed generator and
+// blog-changelog skill need: title, slug, date, summary,
+// canonical URL.
 //
-// Key exports: [BlogPosts], [ParsePost], [ExtractSummary].
-// Shared helpers used by sibling cmd/ packages.
-// Used by core cmd/ packages.
+// # Public Surface
+//
+// - **[BlogPosts](dir)** — walks `dir` (typically
+// `docs/blog/`), parses every `*.md`'s
+// frontmatter via [internal/parse], and returns
+// a slice of [BlogPost] sorted by `date`
+// descending.
+// - **[ParsePost](path)** — reads one file and
+// returns a [BlogPost]. Used when the caller
+// needs a single post by path.
+// - **[ExtractSummary](body)** — extracts a feed-
+// friendly summary from the post body: the first
+// paragraph, or the explicit `<!-- more -->`-
+// marked block when present.
+//
+// # Frontmatter Schema
+//
+// A blog-post frontmatter must declare:
+//
+// - `title` — the post's display title.
+// - `date` — the publication date (`YYYY-MM-DD`).
+//
+// Optional: `topics`, `summary`, `author`. Posts
+// missing required fields are skipped with a
+// warning.
+//
+// # Concurrency
+//
+// Filesystem-bound. Concurrent calls each pay the
+// full read cost; no module-level cache.
+//
+// # Related Packages
+//
+// - [internal/cli/site/cmd/feed] — chief
+// consumer for RSS/Atom generation.
+// - [internal/parse] — supplies
+// [SplitFrontmatter] used here.
+// - [internal/entity] — [BlogPost] +// domain type (when promoted to entity). package scan diff --git a/internal/cli/sync/core/doc.go b/internal/cli/sync/core/doc.go index dd835bd3c..6bce90683 100644 --- a/internal/cli/sync/core/doc.go +++ b/internal/cli/sync/core/doc.go @@ -4,9 +4,41 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package core provides shared helpers for the sync command: action. +// Package core holds the **shared helpers** that the +// `ctx sync` subcommands all rely on: the action +// resolver that turns a scanner finding into a typed +// "consider documenting this" suggestion, the renderer +// that formats the resulting list, and the predicate +// helpers that decide what counts as "undocumented". // -// Shared helpers used by sibling cmd/ packages. -// Provides constants and definitions for core operations. -// See package source files for implementation details. +// # Public Surface +// +// Each suggestion produced by the scanner is an +// [Action] with a kind (package-file / config / +// directory), a path, and a one-line "consider +// documenting in X" pointer. The helpers here build +// the slice; the CLI surface ([internal/cli/sync]) +// orchestrates the run and the rendering. +// +// # Sub-Packages +// +// - **[validate]** — the predicate package: the +// type-aware "is this an undocumented X" +// checks. +// +// # Concurrency +// +// Filesystem-bound and stateless. Concurrent +// invocations against the same project each pay +// the full scan cost. +// +// # Related Packages +// +// - [internal/cli/sync] — the +// `ctx sync` CLI surface. +// - [internal/cli/sync/core/validate] — the +// undocumented-artifact predicates. +// - [internal/assets/read/lookup] — supplies +// the config-file pattern set the predicates +// consult. 
package core diff --git a/internal/cli/sync/core/validate/doc.go b/internal/cli/sync/core/validate/doc.go index f09c1aba0..506eff20a 100644 --- a/internal/cli/sync/core/validate/doc.go +++ b/internal/cli/sync/core/validate/doc.go @@ -4,9 +4,50 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package validate checks for undocumented. +// Package validate is the **measurement layer** behind +// `ctx sync`: walks the project tree looking for files +// and directories that are not yet documented in +// `.context/`, and returns one suggestion per find. // -// Key exports: [CheckPackageFiles], [CheckConfigFiles], [CheckNewDirectories]. -// Shared helpers used by sibling cmd/ packages. -// Used by core cmd/ packages. +// The package is the *predicates*; the CLI +// ([internal/cli/sync]) is the *orchestrator*; neither +// mutates context files — sync is suggestion-only by +// design. +// +// # Public Surface +// +// - **[CheckPackageFiles](root, ctxFiles)** — +// walks for package-manager descriptors +// (`package.json`, `Cargo.toml`, `go.mod`, +// `pyproject.toml`, …) and returns suggestions +// when any are unmentioned in CONVENTIONS.md / +// ARCHITECTURE.md. +// - **[CheckConfigFiles](root, ctxFiles)** — +// same shape but for CI / tooling configs +// (`.github/workflows/*`, `.eslintrc*`, +// `.pre-commit-config.yaml`, …). +// - **[CheckNewDirectories](root, ctxFiles)** — +// same shape but for top-level directories +// that ARCHITECTURE.md does not mention. +// +// # Pattern Source +// +// The "what counts as a config file" patterns come +// from [internal/assets/read/lookup.ConfigPatterns] +// so a single edit there updates every check. +// +// # Concurrency +// +// Filesystem-bound and stateless. Concurrent +// invocations against the same project each pay +// the full scan cost. +// +// # Related Packages +// +// - [internal/cli/sync] — chief +// consumer. 
+// - [internal/assets/read/lookup] — supplies +// the pattern set. +// - [internal/entity] — context +// file types. package validate diff --git a/internal/cli/sync/doc.go b/internal/cli/sync/doc.go index cba2475a2..5a91ecc66 100644 --- a/internal/cli/sync/doc.go +++ b/internal/cli/sync/doc.go @@ -4,11 +4,68 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package sync implements the "ctx sync" command for reconciling -// context files with codebase changes. +// Package sync implements **`ctx sync`** — the command +// that scans the project for new directories, package- +// manager files, and configuration files that are not yet +// represented in the context files, and surfaces them as +// "consider documenting these" suggestions. // -// The sync command scans the project for new directories, package -// manager files, and configuration files that are not yet documented -// in context files. It suggests actions to keep context aligned with -// the actual project structure. +// Sync is a *suggester*, not a *mutator*: it never edits +// `ARCHITECTURE.md` or `CONVENTIONS.md` on its own. The +// user (or the AI through a skill) sees the report and +// decides what to add. This boundary is intentional — +// auto-population would silently amplify whatever +// scanner mistakes are made. +// +// # The Scan +// +// The scanner looks for: +// +// - **New top-level directories** — anything in the +// project root not already mentioned in +// ARCHITECTURE.md. +// - **Package-manager files** — `package.json`, +// `Cargo.toml`, `go.mod`, `pyproject.toml`, +// `Gemfile`, `requirements.txt`, etc. (full list +// from [internal/assets/read/lookup.ConfigPatterns]). +// Their presence often indicates a language or +// framework that should be documented in +// CONVENTIONS.md. +// - **CI / tooling configs** — `.github/workflows/`, +// `.gitlab-ci.yml`, `.pre-commit-config.yaml`, +// `.eslintrc*`, `.prettierrc*`, etc. 
+// - **Hidden ctx-relevant directories** — `.devbox`, +// `.vscode`, `.idea`, etc. +// +// # The Output +// +// Each suggestion includes the file/directory path, +// the type of artifact, and a one-line "consider +// documenting in X" pointer. The user runs `ctx sync` +// periodically (or after a major code review) to keep +// `.context/` aligned with reality. +// +// # Sub-Packages +// +// - **[core/validate]** — the validation predicates +// used by the scanner (is this a real package +// manager file or a vendored copy?). +// +// # Concurrency +// +// Filesystem-bound and stateless. Concurrent invocations +// against the same project would each pay the full +// scan cost. +// +// # Related Packages +// +// - [internal/assets/read/lookup] — supplies +// [ConfigPatterns] used by the scanner. +// - [internal/drift] — separate +// "is what's documented still accurate" check +// (sync handles the inverse: what's real but +// undocumented). +// - [internal/cli/initialize/core/project] — knows +// the canonical `.context/` layout the scanner +// compares against. 
package sync diff --git a/internal/cli/system/cmd/block_dangerous_command/run.go b/internal/cli/system/cmd/block_dangerous_command/run.go index 5d0319525..87550a081 100644 --- a/internal/cli/system/cmd/block_dangerous_command/run.go +++ b/internal/cli/system/cmd/block_dangerous_command/run.go @@ -52,7 +52,7 @@ func Run(cmd *cobra.Command, stdin *os.File) error { fallback = desc.Text(text.DescKeyBlockMidSudo) } - if variant == "" && regex.MidGitPush.MatchString(command) { + if variant == "" && regex.GitPush.MatchString(command) { variant = hook.VariantMidGitPush fallback = desc.Text(text.DescKeyBlockMidGitPush) } diff --git a/internal/cli/system/cmd/check_context_size/doc.go b/internal/cli/system/cmd/check_context_size/doc.go index 0282575f6..0fd600d07 100644 --- a/internal/cli/system/cmd/check_context_size/doc.go +++ b/internal/cli/system/cmd/check_context_size/doc.go @@ -4,9 +4,51 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package check_context_size implements the ctx system check-context-size -// subcommand. +// Package check_context_size implements the +// **`ctx system check-context-size`** hook — the prompt- +// counter that fires the periodic "context checkpoint" +// nudge so users remember to wrap up their session +// before the context window fills. // -// It counts prompts per session and emits adaptive checkpoint reminders -// prompting the user to consider wrapping up. +// The hook reads the per-session prompt counter from +// `.context/state/`, increments it, and emits the +// VERBATIM checkpoint banner when the counter crosses +// any of the configured graduated thresholds (e.g. +// every 20 prompts, or at 50% / 75% / 90% of the +// configured `context_window` budget). +// +// # Public Surface +// +// - **[Cmd]** — cobra command (hidden under +// `ctx system`; users do not invoke this +// directly). 
+// - **[Run]** — reads the JSON envelope from +// stdin (session ID, current usage), decides +// whether to fire, increments the counter, +// and writes the nudge through +// [internal/cli/system/core/nudge.EmitCheckpoint]. +// +// # Throttling +// +// To avoid nudging on every prompt, the hook +// honors the per-check throttle in +// [internal/config/hook] — at most one fire per +// configured prompt-count interval, with a +// graduated cadence as the budget pressure +// grows. +// +// # Concurrency +// +// Single-process per session. The hook is +// invoked by the AI tool's hook runtime +// serially per turn. +// +// # Related Packages +// +// - [internal/cli/system/core/nudge] — the +// emission helper. +// - [internal/cli/system/core/session] — reads +// and writes the per-session counters. +// - [internal/rc] — supplies +// the configured `context_window` budget. package check_context_size diff --git a/internal/cli/system/cmd/check_persistence/doc.go b/internal/cli/system/cmd/check_persistence/doc.go index c938a9ac0..041dfa918 100644 --- a/internal/cli/system/cmd/check_persistence/doc.go +++ b/internal/cli/system/cmd/check_persistence/doc.go @@ -4,9 +4,49 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package check_persistence implements the ctx system check-persistence -// subcommand. +// Package check_persistence implements the +// **`ctx system check-persistence`** hook — the nudge +// that tells the user (and the agent) "you have done a +// lot of work without persisting anything to +// `.context/`; consider adding a learning, decision, or +// task update before the session ends". // -// It tracks prompts since the last .context/ file modification and nudges -// the agent to persist learnings, decisions, or task updates. 
+// The hook tracks **prompts since the last `.context/` +// file mtime change**: every time the user submits a +// prompt, the hook increments a counter; every time a +// `.context/` file is touched, the counter resets. +// When the counter crosses the configured threshold, +// the hook emits the persistence reminder via the +// VERBATIM relay. +// +// # Public Surface +// +// - **[Cmd]** — cobra command (hidden under +// `ctx system`). +// - **[Run]** — reads the JSON envelope, scans +// `.context/` for the most recent mtime, +// compares against the prompt counter, and +// fires the nudge when the threshold is +// crossed. +// +// # Why "Mtime" Not "Edits" +// +// Mtime is the simplest proxy for "the user +// captured something" that does not require +// instrumenting every write path. It catches +// `ctx add` writes, the agent's direct edits, +// and even out-of-band `vi` edits. +// +// # Concurrency +// +// Single-process per session. +// +// # Related Packages +// +// - [internal/cli/system/core/nudge] — the +// emission helper. +// - [internal/cli/system/core/session] — reads +// and writes the per-session counter. +// - [internal/rc] — supplies +// the prompt-count threshold. package check_persistence diff --git a/internal/cli/system/core/archive/doc.go b/internal/cli/system/core/archive/doc.go index 85114cd79..244f610dd 100644 --- a/internal/cli/system/core/archive/doc.go +++ b/internal/cli/system/core/archive/doc.go @@ -4,10 +4,59 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package archive creates compressed tar archives of context data. +// Package archive creates **compressed tar archives** of ctx +// state for the `ctx backup` family of commands — bundling +// `.context/`, `.claude/`, `ideas/`, and (optionally) the +// global `~/.claude/` directory into a timestamped `.tar.gz` +// for off-machine storage on an SMB share or local disk. 
 //
-// Key exports: [Create], [BackupProject], [BackupGlobal],
-// [CheckSMBMountWarnings], [CheckBackupMarker].
-// Shared helpers used by sibling cmd/ packages.
-// Used by core cmd/ packages.
+// The package is the engine. The user-facing knobs
+// (`--scope project|global|all`, `--no-smb`, etc.) live in
+// `internal/cli/backup`.
+//
+// # Public Surface
+//
+// - **[Create](outPath, entries)** — builds a tar.gz at
+// `outPath` containing every entry in `entries`.
+// Optional entries that resolve to a missing path
+// emit a diagnostic message but do not fail the
+// operation (a common case: `ideas/` may not exist
+// in fresh projects).
+// - **[BackupProject](contextDir, outDir)** — wraps
+// [Create] with the project-scope entry list:
+// `.context/`, `.claude/`, `ideas/`, `.bashrc`-like
+// project shell extensions.
+// - **[BackupGlobal](outDir)** — global scope:
+// `~/.claude/` minus `todos/` (excluded because
+// it can be huge and is regenerated by Claude Code).
+// - **[CheckSMBMountWarnings](url)** — sanity-checks
+// the configured SMB mount and warns about common
+// misconfigurations (mount path missing, write
+// permission missing, etc.).
+// - **[CheckBackupMarker](contextDir)** — reads the
+// last-backup timestamp marker so
+// `ctx system check-backup-age` can nudge the user.
+//
+// # Output Naming
+//
+// Archives are named `ctx-<scope>-YYYY-MM-DD-HHMMSS.tar.gz`
+// for unambiguous chronological sorting in the destination
+// directory.
+//
+// # Concurrency
+//
+// Filesystem-bound and stateless. Concurrent invocations
+// against the same destination would compete for the same
+// timestamped filename — single-process is the assumed
+// model.
+//
+// # Related Packages
+//
+// - [internal/cli/backup] — the
+// `ctx backup` CLI surface.
+// - [internal/cli/system/cmd/check_backup_age] — the
+// hook that nudges when [CheckBackupMarker] reports
+// stale.
+// - [internal/config/archive] — file-name
+// prefix and SMB env-var name constants.
package archive diff --git a/internal/cli/system/core/drift/doc.go b/internal/cli/system/core/drift/doc.go index d7b5dd9e4..c10b5e1da 100644 --- a/internal/cli/system/core/drift/doc.go +++ b/internal/cli/system/core/drift/doc.go @@ -4,10 +4,55 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package drift detects version drift across VERSION, plugin, and. +// Package drift detects **version drift** across the three +// places ctx's version can diverge: the source-of-truth +// `VERSION` file, the installed binary's +// `ctx --version`, and the marketplace plugin manifest. The +// `check_version` hook calls into here to nudge users when +// any of the three drift apart. // -// Key exports: [FormatStaleEntries], [CheckVersion], [ReadVersionFile], -// [ReadMarketplaceVersion]. -// Shared helpers used by sibling cmd/ packages. -// Used by core cmd/ packages. +// (This is the *system-hook* drift package and is unrelated +// to [internal/drift], which detects context-file drift.) +// +// # Public Surface +// +// - **[CheckVersion]** — runs the full three-way +// comparison and returns a [DriftReport]. +// - **[ReadVersionFile]** — reads the `VERSION` file +// from the install dir. +// - **[ReadMarketplaceVersion]** — reads the +// plugin manifest's pinned version from +// `~/.claude/marketplaces/...`. +// - **[FormatStaleEntries]** — formats a [DriftReport] +// as the user-facing nudge body (delivered via +// [internal/cli/system/core/message]). +// +// # The Three Sources, Why They Drift +// +// 1. **`VERSION` file** — bumped by maintainers as part +// of the release runbook. The source of truth. +// 2. **Installed binary** — the result of the user's +// last `make install` / `brew upgrade`. Drifts +// downward if the user has not updated. +// 3. **Marketplace plugin manifest** — pinned by the +// user's most recent `claude plugin install`. +// Drifts downward if the user has not run +// `claude plugin update`. 
+// +// Any pair-wise mismatch is a candidate nudge; the hook +// picks the most actionable phrasing per case. +// +// # Concurrency +// +// All functions are filesystem-bound and stateless. +// Concurrent invocations never race. +// +// # Related Packages +// +// - [internal/cli/system/cmd/check_version] — the +// hook that fires the nudge. +// - [internal/cli/system/core/message] — renders +// the nudge body. +// - [internal/bootstrap] — owns +// the `Version` constant baked into the binary. package drift diff --git a/internal/cli/system/core/health/doc.go b/internal/cli/system/core/health/doc.go index 0ff4dc8e9..30aa89a1b 100644 --- a/internal/cli/system/core/health/doc.go +++ b/internal/cli/system/core/health/doc.go @@ -4,10 +4,52 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package health monitors context health by detecting stale. +// Package health holds the **shared helpers** that the +// architecture-map staleness, knowledge-file growth, and +// background-task cleanup hooks all use to evaluate +// "context health" signals — things that are not strictly +// drift but indicate the project is drifting from its own +// process invariants. // -// Key exports: [ReadMapTracking], [CountModuleCommits], -// [EmitMapStalenessWarning], [UUIDPattern], [AutoPrune]. -// Shared helpers used by sibling cmd/ packages. -// Used by core cmd/ packages. +// The package is the *measurement layer*; the hooks +// (`check_map_staleness`, `check_knowledge`, +// background pruners) decide what to do with the numbers. +// +// # Public Surface +// +// - **[ReadMapTracking]** — reads the persisted +// architecture-map last-update tracking record. +// Used by `check_map_staleness` to decide whether +// ARCHITECTURE.md has fallen behind code changes. +// - **[CountModuleCommits](module, since)** — counts +// git commits touching a module path since a given +// timestamp. Used to score map staleness. 
+// - **[EmitMapStalenessWarning](staleModules)** — +// produces the formatted nudge sent to the agent +// via the VERBATIM relay path. +// - **[UUIDPattern]** — compiled regex for matching +// session UUIDs in state file names. Used by the +// auto-pruner. +// - **[AutoPrune](dir, age)** — removes per-session +// state files older than `age`. Idempotent and +// safe to run during a session (skips the active +// session's marker file). +// +// # Concurrency +// +// All functions are filesystem-bound and stateless. +// Concurrent invocations are safe; the auto-pruner +// uses `os.Remove` which is atomic on POSIX. +// +// # Related Packages +// +// - [internal/cli/system/cmd/check_map_staleness] — +// the hook that consumes [ReadMapTracking] and +// [EmitMapStalenessWarning]. +// - [internal/cli/system/cmd/check_knowledge] — +// consumes the entry-count helpers via +// [internal/drift]. +// - [internal/prune] — +// the `ctx prune` command that calls [AutoPrune] +// under explicit user invocation. package health diff --git a/internal/cli/system/core/journal/doc.go b/internal/cli/system/core/journal/doc.go index db123c516..d3680854e 100644 --- a/internal/cli/system/core/journal/doc.go +++ b/internal/cli/system/core/journal/doc.go @@ -4,10 +4,52 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package journal scans journal directories for the newest file. +// Package journal holds the **shared helpers** that the +// `check_journal` hook calls when deciding whether to nudge +// the user about unimported or unenriched session entries. // -// Key exports: [NewestMtime], [CountNewerFiles], [CountUnenriched], -// [CheckStage], [MarkStage]. -// Shared helpers used by sibling cmd/ packages. -// Used by core cmd/ packages. +// The package is the *measurement layer*; the hook decides +// when the numbers warrant a nudge. 
+// +// # Public Surface +// +// - **[NewestMtime](dir)** — returns the modification +// time of the most recently changed file in `dir`, +// or zero time when the directory is empty/missing. +// Used to compare the journal directory's freshness +// to the raw-source directory. +// - **[CountNewerFiles](dir, since)** — counts files +// in `dir` modified strictly after `since`. The +// hook calls this with the last-known-import +// timestamp to surface "N new sessions to import". +// - **[CountUnenriched](journalDir)** — counts entries +// in `journalDir` that have not been enriched +// (frontmatter `enriched: false` or missing). +// Surfaces the "N entries waiting for enrichment" +// nudge. +// - **[CheckStage](path, stage)** — predicate: is +// `path`'s frontmatter at or past `stage`? +// [internal/journal/state] supplies the canonical +// stage strings. +// - **[MarkStage](path, stage)** — atomically updates +// the frontmatter `stage:` field. Used by the +// enrichment pipeline to advance an entry through +// normalize → enrich → wrap. +// +// # Concurrency +// +// All functions are filesystem-bound and stateless. +// [MarkStage] uses the same atomic-rename pattern as +// the rest of the journal pipeline so a partial write +// never leaves an entry in an undefined state. +// +// # Related Packages +// +// - [internal/cli/system/cmd/check_journal] — the +// hook that consumes these helpers and emits the +// user-facing nudge. +// - [internal/journal/state] — the +// canonical stage strings. +// - [internal/cli/journal/cmd/importer] — the +// importer that advances entries through stages. package journal diff --git a/internal/cli/system/core/knowledge/doc.go b/internal/cli/system/core/knowledge/doc.go index 0829140cd..0ffebd2aa 100644 --- a/internal/cli/system/core/knowledge/doc.go +++ b/internal/cli/system/core/knowledge/doc.go @@ -4,9 +4,50 @@ // \ Copyright 2026-present Context contributors. 
// SPDX-License-Identifier: Apache-2.0 -// Package knowledge scans knowledge files against configured. +// Package knowledge is the **measurement helper** the +// `check_knowledge` system hook uses to evaluate whether +// a project's knowledge files (DECISIONS.md, LEARNINGS.md, +// CONVENTIONS.md) have outgrown the configured per-file +// thresholds and warrant a consolidation nudge. // -// Key exports: [ScanFiles], [FormatWarnings], [EmitWarning], [CheckHealth]. -// Shared helpers used by sibling cmd/ packages. -// Used by core cmd/ packages. +// # Public Surface +// +// - **[ScanFiles](contextDir)** — counts entries +// in DECISIONS.md and LEARNINGS.md, and lines +// in CONVENTIONS.md, and returns the result. +// - **[FormatWarnings](report, thresholds)** — +// turns the scan into the human-readable +// warning text emitted via the VERBATIM relay. +// - **[EmitWarning](text)** — writes the warning +// through the standard nudge path. +// - **[CheckHealth](contextDir)** — convenience: +// scan + threshold compare in one call; +// returns the warning text or empty. +// +// # Thresholds +// +// Per-file thresholds come from `.ctxrc`: +// +// - `entry_count_decisions` (default 20; +// 0 disables) +// - `entry_count_learnings` (default 30; +// 0 disables) +// - `convention_line_count` (default 200; +// 0 disables) +// +// Crossing a threshold means "consider running +// `/ctx-consolidate`", not "stop adding entries". +// +// # Concurrency +// +// Filesystem-bound. Stateless. +// +// # Related Packages +// +// - [internal/cli/system/cmd/check_knowledge] — +// the hook that fires the warning. +// - [internal/rc] — +// supplies the thresholds. +// - [internal/index] — +// used to count entry blocks. 
package knowledge diff --git a/internal/cli/system/core/message/doc.go b/internal/cli/system/core/message/doc.go index e5ab52884..143f36aed 100644 --- a/internal/cli/system/core/message/doc.go +++ b/internal/cli/system/core/message/doc.go @@ -4,10 +4,65 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package message loads and renders hook message templates with. +// Package message loads, renders, and lets users override +// the **hook message templates** every nudge in ctx prints +// — the small bits of text like "Context checkpoint +// reached" or "Consider archiving completed tasks" that +// hooks emit through the VERBATIM relay channel. // -// Key exports: [Load], [BoxLines], [NudgeBox], -// [FormatTemplateVars], [OverridePath]. -// Shared helpers used by sibling cmd/ packages. -// Used by core cmd/ packages. +// The package backs `ctx hook message list / show / edit / +// reset` plus the in-process consumers in every +// `cmd/check_*` hook. +// +// # Public Surface +// +// - **[Load](key, vars)** — resolves a message key to +// its rendered text. Looks up the user's per-project +// override at [OverridePath](key) first; falls back +// to the embedded default from +// [internal/assets/hooks/messages] when no override +// exists. Sprintf-substitutes [vars] into the +// template. +// - **[BoxLines](text)** — wraps text in the +// box-drawing border ctx uses to make hook nudges +// visually distinct from normal output. +// - **[NudgeBox](text)** — boxed-and-prefixed +// convenience for the standard nudge banner. +// - **[FormatTemplateVars](vars)** — exposes the +// normalized key/value pairs hooks pass to [Load]. +// - **[OverridePath](key)** — returns the per-project +// override file path for `key`. The CLI uses this +// for the `edit` subcommand; the resolver uses it +// to detect override presence. +// +// # Override Workflow +// +// Users edit messages by running `ctx hook message edit +// `. 
The CLI: +// +// 1. Computes [OverridePath](key). +// 2. Materializes the embedded default at that path +// so the user has something concrete to edit. +// 3. Spawns `$EDITOR` on the file. +// 4. Subsequent loads pick up the override +// automatically. +// +// `ctx hook message reset ` deletes the override +// so the embedded default takes over again. +// +// # Concurrency +// +// File reads are scoped per call; no module-level +// caches. Concurrent loads are safe. +// +// # Related Packages +// +// - [internal/cli/message] — the +// `ctx hook message *` CLI surface. +// - [internal/assets/hooks/messages] — the +// embedded default templates. +// - [internal/cli/system/cmd/check_*] — every hook +// consumes a message through this package. +// - [internal/cli/system/core/nudge] — the +// emitter that renders [NudgeBox] output. package message diff --git a/internal/cli/system/core/nudge/doc.go b/internal/cli/system/core/nudge/doc.go index 94978d1bb..4d498d105 100644 --- a/internal/cli/system/core/nudge/doc.go +++ b/internal/cli/system/core/nudge/doc.go @@ -4,9 +4,73 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package nudge emits checkpoint, pause, and oversize nudge. +// Package nudge holds the **shared nudge-emission helpers** +// every `ctx system check_*` hook calls when it has decided to +// surface a message to the user (or to the agent through the +// VERBATIM relay path). // -// Key exports: [EmitCheckpoint], [EmitWindowWarning], -// [EmitBillingWarning], [EvaluateTrigger], [PauseMarkerPath], [Paused]. -// Shared helpers used by sibling cmd/ packages. +// The package is the *muscle*; each `cmd/check_*` package is +// the *brain*. The check decides when a nudge fires; this +// package decides what it looks like and where it lands. That +// split keeps the per-check files small and ensures that +// every nudge — checkpoint, oversize, billing window, pause +// banner — has the same shape and routing. 
+// +// # Emission Path +// +// - **[EmitCheckpoint](msg)** — fires a "context +// checkpoint reached" nudge: prompt-counter trip, +// persistence-stale signal, etc. Routes through the +// VERBATIM relay so the user (and the agent) both see +// the exact text. +// - **[EmitWindowWarning](used, total)** — fires when +// session token usage crosses the configured +// `injection_token_warn` (or `context_window`) +// threshold. One-shot per session. +// - **[EmitBillingWarning](used)** — fires the one-shot +// "you've exceeded your included token allowance" +// nudge for Claude Pro 1M-context users; gated by +// `billing_token_warn` in `.ctxrc`. +// +// All three honor the **session pause** flag (see +// [Paused]) so a user who has explicitly silenced ceremony +// nudges sees nothing — except for the security-relevant +// hooks, which fire regardless. +// +// # Pause Semantics +// +// [PauseMarkerPath](sessionID) returns the per-session +// marker file. [Pause] / [Resume] write/remove it +// (exported for use by `ctx hook pause` / +// `ctx hook resume`). [Paused] returns the configured +// state and the turn count since pause began so the +// graduated reminder can render `ctx:paused (N)`. +// +// # Trigger Evaluation +// +// [EvaluateTrigger] is the per-check predicate +// dispatcher: takes a check name + threshold, reads the +// per-session counter, decides fire-or-not, increments +// the counter atomically. It is what every `cmd/check_*` +// hook calls before emitting. +// +// # Concurrency +// +// All functions are stateless or transact through the +// per-session marker files (which are atomic on POSIX +// for the small writes we issue). Concurrent hook fires +// across separate sessions never race. +// +// # Related Packages +// +// - [internal/cli/system/cmd/check_*] — the per-check +// hooks that call into this package. +// - [internal/cli/pause], [internal/cli/resume] — +// `ctx hook pause` / `ctx hook resume` consume +// [Pause] and [Resume]. 
+// - [internal/notify] — receives the +// nudge as a `nudge` event when webhook +// notifications are configured. +// - [internal/write/session] — terminal +// output formatters for the pause banner. package nudge diff --git a/internal/cli/system/core/nudge/pause.go b/internal/cli/system/core/nudge/pause.go index 816d46007..350712d67 100644 --- a/internal/cli/system/core/nudge/pause.go +++ b/internal/cli/system/core/nudge/pause.go @@ -74,7 +74,7 @@ func PausedMessage(turns int) string { } // Pause creates the session pause marker. Exported for use by the -// top-level ctx pause command. +// ctx hook pause command. // // Parameters: // - sessionID: Session identifier @@ -83,7 +83,7 @@ func Pause(sessionID string) { } // Resume removes the session pause marker. Exported for use by the -// top-level ctx resume command. No-op if not paused. +// ctx hook resume command. No-op if not paused. // // Parameters: // - sessionID: Session identifier diff --git a/internal/cli/system/core/session/doc.go b/internal/cli/system/core/session/doc.go index 683434cee..a87ed9501 100644 --- a/internal/cli/system/core/session/doc.go +++ b/internal/cli/system/core/session/doc.go @@ -4,10 +4,60 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package session builds hook JSON responses and manages session. +// Package session provides the **shared session-state +// helpers** every `ctx system` hook calls when it needs to +// (a) read the JSON envelope Claude Code sent on stdin, +// (b) extract the session ID, or (c) write the session-stats +// counters that drive ceremony nudges. // -// Key exports: [FormatContext], [ReadInput], [ReadID], -// [LatestPct], [WriteStats]. -// Shared helpers used by sibling cmd/ packages. -// Used by core cmd/ packages. 
+// The package owns the per-session state files under +// `.context/state/session-<id>.json` — the lightweight +// counters that hooks like `check_ceremony`, +// `check_persistence`, and `check_context_size` evaluate +// each time they fire. +// +// # Public Surface +// +// - **[ReadInput]** — decodes the JSON envelope Claude +// Code writes to a hook's stdin. Returns a typed +// [HookInput] regardless of which event fired. +// - **[ReadID]** — convenience: pulls just the +// session ID from the envelope. +// - **[FormatContext](payload)** — formats a JSON +// payload as the canonical "context block" that +// hooks emit on stdout to inject content into the +// agent's next prompt. +// - **[LatestPct](contextDir)** — returns the most +// recent context-window-usage percentage written by +// the agent CLI. Used by the size-checkpoint hook +// to decide whether to nudge. +// - **[WriteStats](sessionID, stats)** — atomically +// updates the per-session stats file. Each `check_*` +// hook owns a slice of the stats struct. +// +// # State File Lifecycle +// +// `session-<id>.json` is created lazily on first hook +// fire, updated in place by subsequent hooks, and pruned +// by `ctx prune` after the session has been idle for +// the configured threshold. +// +// # Concurrency +// +// Hooks within a session fire serially (Claude Code +// drives them one at a time per turn) so the in-process +// concurrency model is single-writer. The atomic +// rename pattern in [WriteStats] guards the rare case +// where a sibling process inspects the file mid-write. +// +// # Related Packages +// +// - [internal/cli/system/cmd/check_*] — every +// `check_*` hook reads stdin and stats through this +// package. +// - [internal/cli/system/cmd/session_event] — writes +// start/end markers consumed by ceremony hooks. +// - [internal/cli/system/core/nudge] — fires the +// actual nudge based on the stats this package +// maintains. 
package session diff --git a/internal/cli/system/core/stats/doc.go b/internal/cli/system/core/stats/doc.go index c0b500c7f..0b6d93fed 100644 --- a/internal/cli/system/core/stats/doc.go +++ b/internal/cli/system/core/stats/doc.go @@ -4,10 +4,46 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package stats provides session token usage statistics reading and formatting. +// Package stats reads, parses, and formats **per-session +// token-usage statistics** for the `ctx usage` command +// and the system hooks that rely on token-pressure +// signals. // -// Key exports: [ReadDir], [ExtractSessionID], [ParseFile], -// [FormatDump], [FormatJSON]. -// Shared helpers used by sibling cmd/ packages. -// Used by core cmd/ packages. +// Statistics are stored as JSON files under +// `.context/state/` — one per session, named for the +// session ID. Each record carries a timestamp, the +// running token count, the budget, and the percentage +// used. +// +// # Public Surface +// +// - **[ReadDir](contextDir)** — returns every +// session-stats file as a typed [Stats] slice, +// sorted by mtime descending. +// - **[ExtractSessionID](path)** — pulls the +// session ID out of a stats filename. +// - **[ParseFile](path)** — reads one stats +// file and returns the typed [Stats]. +// - **[FormatDump](stats)** — renders the +// collection as the human-readable table the +// `ctx usage` command displays. +// - **[FormatJSON](stats)** — renders the same +// collection as a JSON document for tooling. +// +// # Concurrency +// +// Filesystem-bound. Concurrent reads are safe; +// writers are the per-session tracking hook +// which is single-process per session. +// +// # Related Packages +// +// - [internal/cli/usage] — the +// `ctx usage` CLI surface. +// - [internal/cli/system/cmd/check_context_size] +// — writes the stats this package reads. +// - [internal/cli/system/cmd/heartbeat] — also +// contributes mtime updates. 
+// - [internal/format] — the +// Number / TimeAgo formatters used here. package stats diff --git a/internal/cli/task/cmd/complete/cmd.go b/internal/cli/task/cmd/complete/cmd.go index 1c59b4eb3..de82c13b5 100644 --- a/internal/cli/task/cmd/complete/cmd.go +++ b/internal/cli/task/cmd/complete/cmd.go @@ -13,7 +13,7 @@ import ( "github.com/ActiveMemory/ctx/internal/config/embed/cmd" ) -// Cmd returns the "ctx complete" command for marking tasks as done. +// Cmd returns the "ctx task complete" command for marking tasks as done. // // Tasks can be specified by number, partial text match, or full text. // The command updates TASKS.md by changing "- [ ]" to "- [x]". diff --git a/internal/cli/task/doc.go b/internal/cli/task/doc.go index cde1379a3..936eeed2f 100644 --- a/internal/cli/task/doc.go +++ b/internal/cli/task/doc.go @@ -4,9 +4,67 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package task implements the ctx task command for managing task archival. +// Package task implements the **`ctx task`** command group: +// task completion, archival, and snapshots — the lifecycle +// operations on `TASKS.md`. // -// Key exports: [Cmd]. -// See source files for implementation details. -// Part of the cli subsystem. +// `TASKS.md` is the project's living checklist. Phase +// headers are constitutional structure (never moved or +// renamed); items are append-only with status flips +// (`[ ]` → `[x]` or `[-]`) when work completes or is +// skipped. This package owns the safe transitions. +// +// # Subcommands +// +// - **`ctx task complete [number|text]`** — flips a +// task from `[ ]` to `[x]`. Match by phase-relative +// number (e.g. `3`), partial text, or full text in +// quotes. See [internal/cli/task/cmd/complete]. +// - **`ctx task archive [--dry-run]`** — moves +// completed top-level tasks into a dated +// `.context/archive/tasks-YYYY-MM-DD.md` file, +// preserving phase structure. 
See +// [internal/cli/task/cmd/archive] (delegates to +// [internal/tidy.WriteArchive]). +// - **`ctx task snapshot [name]`** — copies the +// current TASKS.md verbatim to +// `.context/archive/snapshots/-.md`. No +// mutation of the source. Used before a major +// restructure to give the user a known-good +// restore point. +// +// # Constitutional Invariants +// +// The CONSTITUTION.md rules: +// +// - **Tasks stay in their Phase section permanently**. +// - **Phase headers are never removed or renamed**. +// - **Tasks are never deleted** — only marked +// `[x]` (completed) or `[-]` (skipped). +// - **Archival ≠ deletion** — archived tasks land in +// the archive file, not `/dev/null`. +// +// This package enforces all four. `ctx task complete` +// uses status flips, never moves. `ctx task archive` +// uses [internal/tidy] which preserves phase structure +// in the archive output. +// +// # Concurrency +// +// Filesystem-bound and stateless. Single-process +// assumption. +// +// # Related Packages +// +// - [internal/tidy] — the +// archival engine that powers `ctx task archive`. +// - [internal/task] — the parsing +// primitives (Completed, Pending, Indent, +// Content, Sub) used by Complete to find the +// right line. +// - [internal/mcp/handler] — the MCP +// `ctx_complete` tool delegates here. +// - [internal/cli/system/cmd/check_task_completion] +// — the hook that nudges when an edit looks +// task-completing but the task was not flipped. package task diff --git a/internal/cli/task/task.go b/internal/cli/task/task.go index 50c8e0542..1fcef8604 100644 --- a/internal/cli/task/task.go +++ b/internal/cli/task/task.go @@ -4,15 +4,6 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package task implements the "ctx tasks" command for managing task archival -// and snapshots. 
-// -// The task package provides subcommands to: -// - archive: Move completed tasks to timestamped archive files -// - snapshot: Create point-in-time copies of TASKS.md -// -// Archive files preserve phase structure for traceability, while snapshots -// copy the entire file as-is without modification. package task import ( diff --git a/internal/cli/watch/core/apply/doc.go b/internal/cli/watch/core/apply/doc.go index e0c80c341..2a06d25c3 100644 --- a/internal/cli/watch/core/apply/doc.go +++ b/internal/cli/watch/core/apply/doc.go @@ -4,9 +4,58 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package apply routes context updates to the appropriate handler. +// Package apply routes **structured context-update +// commands** the AI emitted in its output (XML-tagged +// blocks) to the appropriate per-type writer (task, +// decision, learning, convention, complete). // -// Key exports: [Update]. -// Shared helpers used by sibling cmd/ packages. -// Used by core cmd/ packages. +// The package is the dispatch layer behind `ctx watch`, +// which reads stdin (typically piped from `tail -f` +// against an AI's transcript) and applies whatever +// `` blocks the AI wrote. +// +// # Public Surface +// +// - **[Update](upd)** — accepts a parsed +// [entity.PendingUpdate], dispatches to the +// right backend ([internal/entry] for add, +// [internal/cli/task] for complete, etc.), +// and reports the result. +// +// # Update Types +// +// Recognized blocks (each enclosed in +// `...`): +// +// - **task** — add a new task. +// - **decision** — add a new decision. +// - **learning** — add a new learning. +// - **convention** — add a new convention. +// - **complete** — mark a task as done. +// +// Unknown types are ignored with a warning so a +// future expansion does not break older installs. +// +// # Idempotency +// +// Add operations are deduplicated by content hash: +// the same block applied twice produces one entry. 
+// Complete operations match by ID/text and are +// no-ops on already-completed tasks. +// +// # Concurrency +// +// Single-process, sequential within a single +// `ctx watch` invocation. +// +// # Related Packages +// +// - [internal/cli/watch] — the `ctx watch` +// CLI surface. +// - [internal/entry] — backend for the +// four add types. +// - [internal/cli/task] — backend for +// `complete`. +// - [internal/entity] — [PendingUpdate] +// domain type. package apply diff --git a/internal/cli/watch/core/doc.go b/internal/cli/watch/core/doc.go index b98b790fd..e7d162fda 100644 --- a/internal/cli/watch/core/doc.go +++ b/internal/cli/watch/core/doc.go @@ -4,9 +4,48 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package core provides shared helpers for the watch command: stream. +// Package core holds the **shared helpers** behind +// `ctx watch`: the stdin streamer that scans an AI's +// output for `` blocks, the parser that +// turns a block into a typed [PendingUpdate], and the +// dispatcher that routes the update to the right +// backend. // -// Shared helpers used by sibling cmd/ packages. -// Provides constants and definitions for core operations. -// See package source files for implementation details. +// `ctx watch` is the bridge between an AI's +// transcript and the project's `.context/` files: it +// reads stdin, picks out the structured update blocks +// the AI emits, and writes the corresponding entries. +// +// # Sub-Packages +// +// - **[apply]** — dispatches a parsed update to +// the right backend (entry, task, etc.). +// +// # Public Surface +// +// The shared helpers in this package include the +// stream scanner that reads stdin line by line, the +// XML-block extractor that finds `` / +// `` boundaries, and the rate limiter +// that throttles re-application when the same block +// reappears within a short window (Claude Code +// echoes the same update across multiple tool +// results sometimes). 
+// +// # Concurrency +// +// Single goroutine per `ctx watch` invocation. The +// scanner is sequential by design — order matters +// when adds and completes interleave. +// +// # Related Packages +// +// - [internal/cli/watch] — the +// `ctx watch` CLI surface. +// - [internal/cli/watch/core/apply] — the +// dispatcher. +// - [internal/entry] — the +// add-side backend. +// - [internal/cli/task] — the +// complete-side backend. package core diff --git a/internal/config/copilot/copilot.go b/internal/config/copilot/copilot.go index b5b4f8168..d371ea7b3 100644 --- a/internal/config/copilot/copilot.go +++ b/internal/config/copilot/copilot.go @@ -4,12 +4,6 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package copilot defines constants for Copilot Chat and Copilot CLI -// session parsing and integration. -// -// Provides JSON key paths, response item kinds, scanner buffer sizes, -// VS Code storage paths, and Copilot CLI directory names used by the -// journal parser and setup command. package copilot // JSON key paths in Copilot Chat JSONL session files. diff --git a/internal/config/embed/cmd/doc.go b/internal/config/embed/cmd/doc.go index c687fa2be..f0b30c716 100644 --- a/internal/config/embed/cmd/doc.go +++ b/internal/config/embed/cmd/doc.go @@ -4,9 +4,64 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package cmd defines description key constants for CLI commands. +// Package cmd holds the **lookup keys** for every CLI command's +// `Use`, short-description, long-description, and example +// strings, plus the cobra `Group` identifiers that organize +// `ctx --help`. // -// Constants are referenced by domain packages via config/cmd.*. -// Provides constants and definitions for cmd operations. -// Constants are referenced by domain packages. +// Two kinds of constants live here: +// +// - **`UseXxx`** — the cobra `Use` field. 
The literal +// command word as users type it. `UseBackup = "backup"`, +// `UsePause = "pause"`, etc. +// - **`DescKeyXxx`** — the lookup key for that command's +// short / long / example text. Resolved at run-time via +// [internal/assets/read/desc.Command](key) and +// [.Example](key), which read from the embedded YAML +// under [internal/assets/commands]. +// +// The same two-step indirection [internal/config/embed/text] +// uses for general display text applies here: copy lives in +// YAML so it can be edited without a Go toolchain, and every +// reference is a typed Go constant so a typo fails to +// compile. +// +// # File Layout — One Command per File +// +// Each file in this package corresponds to one command in +// the cobra tree (`backup.go`, `bootstrap.go`, `connect.go`, +// `system.go`, …) and owns that command's `Use`, `DescKey`, +// and any subcommand `Use`/`DescKey` constants. Adding a new +// command means: add the file here, add the matching YAML +// entry in [internal/assets/commands], wire the cobra +// command in [internal/cli/], and register it in +// [internal/bootstrap]. +// +// # Naming Convention +// +// Constants follow `Use` and +// `DescKey`. The dotted form of the path is +// the YAML key (`backup`, `system.bootstrap`, +// `hub.peer.add`). The audit suite enforces this both ways +// so a constant without a YAML entry — or a YAML entry +// without a constant — fails CI. +// +// # Group Constants +// +// `Group
` constants (e.g. `GroupGettingStarted`, +// `GroupContext`, `GroupRuntime`) name the cobra command +// groups that organize `ctx --help` output. The grouping is +// applied at registration time in [internal/bootstrap]. +// +// # Related Packages +// +// - [internal/assets/read/desc] — `desc.Command(key)` +// and `desc.Example(key)` resolve at run-time. +// - [internal/assets/commands] — the YAML store of +// short / long / example text. +// - [internal/bootstrap] — wires every cobra +// command and assigns each to a `Group
`. +// - [internal/config/embed/text], +// [internal/config/embed/flag] — sister key +// registries for general text and flag help. package cmd diff --git a/internal/config/embed/cmd/system.go b/internal/config/embed/cmd/system.go index 05c61a669..a3780b868 100644 --- a/internal/config/embed/cmd/system.go +++ b/internal/config/embed/cmd/system.go @@ -8,10 +8,16 @@ package cmd // Use strings for system subcommands. // -// The ctx system namespace hosts hook plumbing only. User-facing -// maintenance commands (backup, bootstrap, event, message, prune, -// resource, stats) have been promoted to top-level commands; their -// Use constants live in their own per-command files in this package. +// The ctx system namespace hosts hook plumbing plus the +// agent-only `bootstrap` command. Other user-facing maintenance +// commands (backup, event, message, prune, resource, stats) have +// been promoted to top-level commands; their Use constants live in +// their own per-command files in this package. +// +// `bootstrap` is intentionally NOT promoted to top-level — it is +// invoked by AI agents on session start, not by humans. Keeping it +// under `ctx system` keeps `ctx --help` focused on user-facing +// commands. The canonical invocation is `ctx system bootstrap`. const ( // UseSystemBlockDangerousCommand is the cobra Use string for the system // block dangerous command command. diff --git a/internal/config/embed/flag/doc.go b/internal/config/embed/flag/doc.go index 6cfc75d5b..f6dc9dcb0 100644 --- a/internal/config/embed/flag/doc.go +++ b/internal/config/embed/flag/doc.go @@ -4,9 +4,59 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package flag defines description key constants for CLI flags. +// Package flag holds the **lookup keys** for every CLI flag's +// help text — the short blurb cobra prints next to `--name` +// in `ctx --help` output. // -// Constants are referenced by domain packages via config/flag.*. 
-// Provides constants and definitions for flag operations. -// Constants are referenced by domain packages. +// The package is the flag-help half of the same two-step +// indirection used by [internal/config/embed/text] and +// [internal/config/embed/cmd]: +// +// 1. **Here** — `DescKeyXxx` Go constants, one per flag +// across every command in the binary. +// 2. **In** [internal/assets/commands/text/*.yaml] — the +// actual help string. Resolved at run-time via +// [internal/assets/read/desc.Flag](key). +// +// The split keeps flag wording editable without a Go +// rebuild, lets the audit suite catch typos at CI time, and +// makes per-locale flag help structurally possible. +// +// # File Layout — One Command per File +// +// Each file groups the flag-key constants for one command +// (`add.go`, `agent.go`, `backup.go`, …). Within a file, +// constants follow the alphabetical-by-flag-name order +// cobra itself uses. +// +// # Naming Convention +// +// `DescKey<Command><Flag>` for the constant; the YAML +// key is the dotted form `<command>.<flag>`. The audit +// suite (`desckey_namespace_test`) verifies every constant +// has a matching YAML entry and every YAML entry has a +// matching constant. +// +// # Usage +// +// import ( +// "github.com/ActiveMemory/ctx/internal/assets/read/desc" +// "github.com/ActiveMemory/ctx/internal/config/embed/flag" +// ) +// c.Flags().Bool("dry-run", false, desc.Flag(flag.DescKeyAddDryRun)) +// +// In practice, most flag binding goes through +// [internal/flagbind] which already knows how to look up +// the desc key, so callers rarely call `desc.Flag` directly. +// +// # Related Packages +// +// - [internal/assets/read/desc] — `desc.Flag(key)` +// run-time lookup. +// - [internal/assets/commands] — the YAML store. +// - [internal/flagbind] — flag-binding +// helpers that consume these keys. +// - [internal/config/embed/text], +// [internal/config/embed/cmd] — sister key +// registries. 
package flag diff --git a/internal/config/embed/text/doc.go b/internal/config/embed/text/doc.go index bcfee31e2..c0598e15d 100644 --- a/internal/config/embed/text/doc.go +++ b/internal/config/embed/text/doc.go @@ -4,9 +4,67 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package text defines description key constants for user-facing text. +// Package text holds the **lookup keys** for every piece of +// user-facing display text emitted anywhere in ctx — error +// messages, status banners, MCP responses, hook nudges, +// admonition templates, you name it. // -// Constants are referenced by domain packages via config/text.*. -// Provides constants and definitions for text operations. -// Constants are referenced by domain packages. +// The package is one half of a deliberate two-step indirection: +// +// 1. **Here** — typed `DescKeyXxx` Go constants. Compile-time +// guarantees that every reference is a real key. +// 2. **In** [internal/assets/commands/text/*.yaml] — the +// actual strings, embedded into the binary at build time. +// Reachable via [internal/assets/read/desc.Text](key). +// +// The split exists for three reasons: +// +// - **Editing copy stops touching Go code** — copywriters, +// translators, and product owners can edit YAML without +// a Go toolchain. +// - **i18n is structurally possible** — adding a locale is +// a parallel YAML tree, not a fork of every package. +// - **One sentence cannot quietly drift between two +// callers** — both grab the same key, both render the +// same text. +// +// # File Layout +// +// One Go file per subsystem (`agent.go`, `bootstrap.go`, +// `mcp.go`, `journal.go`, `steering.go`, …). Each file groups +// the `DescKeyXxx` constants for that subsystem so adding a +// new message to the agent flow only edits `agent.go` and the +// corresponding YAML file. 
+// +// # Naming Convention +// +// Constants follow `DescKey<Subsystem><Name>`; the +// underlying YAML key follows the dotted form +// `<subsystem>.<name>`. Both halves are validated by the +// `desckey_namespace_test` audit so a typo in either side +// fails CI. +// +// # Consumers +// +// Pretty much every CLI subcommand, every MCP handler, and +// every write-side terminal-output package imports this +// package. Common usage: +// +// import ( +// "github.com/ActiveMemory/ctx/internal/assets/read/desc" +// "github.com/ActiveMemory/ctx/internal/config/embed/text" +// ) +// msg := desc.Text(text.DescKeyAgentInstruction) +// +// # Related Packages +// +// - [internal/assets/read/desc] — the Text / +// Command / Flag lookup helpers that resolve a key into +// a string at run-time. +// - [internal/assets/commands/text] — the embedded +// YAML files that hold the actual strings. +// - [internal/config/embed/cmd] and +// [internal/config/embed/flag] — sister +// packages with the same key/YAML pattern for command +// short-help and flag-help text. package text diff --git a/internal/config/event/log.go b/internal/config/event/log.go index 04f49b59a..16599f431 100644 --- a/internal/config/event/log.go +++ b/internal/config/event/log.go @@ -18,6 +18,6 @@ const ( HookLogMaxBytes = 1 << 20 // RotationSuffix is the suffix appended to log files during rotation. RotationSuffix = ".1" - // DefaultLast is the default number of events shown by ctx system events. + // DefaultLast is the default number of events shown by ctx hook event. DefaultLast = 50 ) diff --git a/internal/config/hook/doc.go b/internal/config/hook/doc.go index 273d2f79b..65ed5750d 100644 --- a/internal/config/hook/doc.go +++ b/internal/config/hook/doc.go @@ -4,9 +4,56 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package hook defines hook names, event lifecycle stages,. 
+// Package hook holds the **constants** every package +// touching the hook subsystem references: hook names +// (`check_persistence`, `check_context_size`, …), +// lifecycle stages (sessionStart, preToolUse, …), +// supported AI tool identifiers (`claude`, `cursor`, +// `cline`, `kiro`, `codex`), category tags +// (`Customizable`, `CtxSpecific`), and the per-hook +// throttling thresholds. // -// Constants are referenced by domain packages via config/hook.*. -// Provides constants and definitions for hook operations. -// Constants are referenced by domain packages. +// The package is a constants registry — no logic. Its +// existence keeps consumers free of magic strings and +// lets the audit suite catch references to non-existent +// hook names at compile time. +// +// # Constant Families +// +// - **Hook names** — one per `cmd/check_*` and +// `cmd/block_*` package under +// [internal/cli/system/cmd]. Used by the +// `ctx hook event` query layer and the message +// loader. +// - **Lifecycle stages** — `sessionStart`, +// `sessionEnd`, `preToolUse`, `postToolUse`, +// `userPromptSubmit`, etc. The Claude Code +// hook config and the trigger dispatcher both +// speak this vocabulary. +// - **Tool identifiers** — `ToolClaude`, +// `ToolCursor`, `ToolCline`, `ToolKiro`, +// `ToolCodex` — the `tool:` field in `.ctxrc` +// and the `tools:` filter in steering files +// reference these. +// - **Categories** — `CategoryCustomizable`, +// `CategoryCtxSpecific` — used by +// [internal/assets/hooks/messages] to label +// each message in `ctx hook message list`. +// +// # Concurrency +// +// All exports are immutable. Safe for any access +// pattern. +// +// # Related Packages +// +// - [internal/cli/system/cmd/check_*] — hook +// implementations referenced by name here. +// - [internal/trigger] — uses +// the lifecycle-stage constants. +// - [internal/steering] — +// validates `tools:` against the tool ID set. 
+// - [internal/assets/hooks/messages] — uses +// category constants when emitting the +// message catalog. package hook diff --git a/internal/config/http/http.go b/internal/config/http/http.go index b799d3f94..a8acfbc27 100644 --- a/internal/config/http/http.go +++ b/internal/config/http/http.go @@ -4,11 +4,6 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package http provides constants for HTTP operations (webhook -// notifications, outbound requests). -// -// Provides MIME types, timeouts, and URL masking constants. -// Import as config/http. package http // MIME type constants. diff --git a/internal/config/marker/doc.go b/internal/config/marker/doc.go index 8238ba5bf..c910150da 100644 --- a/internal/config/marker/doc.go +++ b/internal/config/marker/doc.go @@ -4,9 +4,60 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package marker defines HTML comment markers for parsing and. +// Package marker defines the **HTML-comment marker pairs** +// ctx uses to bracket auto-managed sections inside +// otherwise user-edited markdown files. The markers are +// invisible in rendered Markdown and trivially +// `grep`-able. // -// Constants are referenced by domain packages via config/marker.*. -// Provides constants and definitions for marker operations. -// Constants are referenced by domain packages. +// The package is the single source of truth for these +// strings so [internal/cli/initialize/core/merge] and +// every other consumer (drift detector, doctor, sync) +// reference the same constants. +// +// # The Convention +// +// Every ctx-managed section in a user-editable file is +// bracketed by: +// +// +// ... ctx-managed content ... +// +// +// Edits *outside* the markers survive `ctx init` +// re-runs; edits *inside* are blown away on the next +// re-run. The contract is documented to the user in +// `docs/home/configuration.md` so the destruction is +// not a surprise. 
+// +// # Marker Pairs Defined Here +// +// - **ctx:context** — the persistent-context block +// in `CLAUDE.md`-style files. +// - **ctx:copilot** — the persistent-context block +// in `.github/copilot-instructions.md`. +// - **ctx:agents** — the equivalent block in +// `AGENTS.md`. +// - **ctx:permissions** — the auto-managed allow/ +// deny entries in `settings.local.json`-style +// comments. +// - **INDEX:START / INDEX:END** — the +// auto-generated index table inside +// DECISIONS.md / LEARNINGS.md. +// +// # Concurrency +// +// All exports are immutable string constants. Safe +// for any access pattern. +// +// # Related Packages +// +// - [internal/cli/initialize/core/merge] — the +// marker-aware editor that respects these +// constants. +// - [internal/index] — uses +// `INDEX:START`/`INDEX:END` to locate the +// index table. +// - [internal/drift] — checks +// marker pairs are intact (header alignment). package marker diff --git a/internal/config/regex/cmd.go b/internal/config/regex/cmd.go index f957bf325..c541a4e97 100644 --- a/internal/config/regex/cmd.go +++ b/internal/config/regex/cmd.go @@ -11,8 +11,48 @@ import "regexp" // MidSudo matches mid-command sudo after && || ; var MidSudo = regexp.MustCompile(`(;|&&|\|\|)\s*sudo\s`) -// MidGitPush matches mid-command git push after && || ; -var MidGitPush = regexp.MustCompile(`(;|&&|\|\|)\s*git\s+push`) +// GitPush matches `git push` invocations across common shell shapes. 
+// +// Covered entry points (the leading anchor group: start of string, or +// any of `;`, `&`, `|`, `(`, backtick, newline, plus optional +// whitespace): +// - Bare `git push` at start of the command +// - After statement separators: `;`, `&&`, `||`, `|`, `&` +// - Subshells and command substitution: `(git push)`, `$(git push)`, +// backtick-wrapped `git push` +// - New lines in multi-line command input +// +// Covered prefixes (the `(\S+\s+)*` group before `git`): +// - Environment variable assignments: `GIT_DIR=/foo git push` +// - Command wrappers: `time git push`, `nice git push`, `nohup git push` +// +// Covered flag shapes between `git` and `push` (the `(\s+\S+)*` group): +// - Short flags with values: `-C /path`, `-c key=value` +// - Short boolean flags: `-p`, `-P`, `-h`, `-v` +// - Long flags (boolean or `=value`): `--git-dir=PATH`, `--no-pager`, +// `--bare`, `--work-tree=PATH`, etc. +// +// Trailing anchor `([^a-zA-Z0-9._/-]|$)`: matches any shell terminator +// (whitespace, `)`, backtick, `;`, `|`, `&`, `>`, `<`, quote, newline, +// end-of-string) but rejects ref-name continuations like `push-to-remote` +// or `push_branch` so `git push-to-remote` (an imagined alias) does not +// false-positive as a push subcommand. +// +// Known blind spots: +// - False-positives on literal `push` as an argument in other +// subcommands, e.g. `git log push` when `push` is a branch name. +// Accepted as a safer-than-sorry trade-off for a push guard: +// over-blocking is recoverable, under-blocking is not. +// - Does not match through `eval` or `sh -c` quoting, e.g. +// `eval "git push"` or `sh -c "git push"`. Parsing through arbitrary +// shell quoting is undecidable in the general case. +// - Shell aliases (`alias p=push; git p`) are invisible to static +// regex matching. +// +// Uses Go's RE2 engine, so `(\S+\s+)*` is linear-time despite its +// nested-quantifier appearance. Do not port this regex to a PCRE +// engine without reviewing backtracking behavior. 
+var GitPush = regexp.MustCompile( + `(^|[;&|(` + "`" + `\n]\s*)(\S+\s+)*git(\s+\S+)*\s+push([^a-zA-Z0-9._/-]|$)`, +) // CpMvToBin matches cp/mv to bin directories. var CpMvToBin = regexp.MustCompile( diff --git a/internal/config/regex/cmd_test.go b/internal/config/regex/cmd_test.go new file mode 100644 index 000000000..81e37086b --- /dev/null +++ b/internal/config/regex/cmd_test.go @@ -0,0 +1,93 @@ +// / ctx: https://ctx.ist +// ,'`./ do you remember? +// `.,'\ +// \ Copyright 2026-present Context contributors. +// SPDX-License-Identifier: Apache-2.0 + +package regex + +import "testing" + +func TestGitPush(t *testing.T) { + cases := []struct { + name string + input string + want bool + }{ + // Positive: bare + {"bare", "git push", true}, + {"bare_with_args", "git push origin main", true}, + {"bare_with_force", "git push --force origin main", true}, + + // Positive: after statement separators + {"after_semicolon", "cd foo; git push", true}, + {"after_and", "make && git push", true}, + {"after_and_no_space", "make &&git push", true}, + {"after_or", "foo || git push", true}, + {"after_pipe", "echo x | git push", true}, + {"after_bg", "sleep 1 & git push", true}, + + // Positive: subshells and command substitution + {"subshell", "(git push)", true}, + {"command_sub_dollar", "$(git push)", true}, + {"command_sub_backtick", "`git push`", true}, + + // Positive: newline-separated multi-line + {"newline", "git status\ngit push origin main", true}, + + // Positive: env var prefix + {"env_var", "GIT_DIR=/tmp/foo git push", true}, + {"multi_env", "GIT_DIR=/x GIT_SSH_COMMAND=ssh git push", true}, + + // Positive: command wrappers + {"time_wrapper", "time git push", true}, + {"nice_wrapper", "nice git push", true}, + {"nohup_wrapper", "nohup git push", true}, + + // Positive: git top-level flags + {"dash_c_path", "git -C /path push", true}, + {"dash_c_config", "git -c push.default=simple push", true}, + {"long_git_dir", "git --git-dir=/path push", true}, + {"long_work_tree", 
"git --work-tree=/path push", true}, + {"long_no_pager", "git --no-pager push", true}, + {"long_bare", "git --bare push", true}, + {"short_paginate", "git -p push", true}, + {"short_no_pager", "git -P push", true}, + {"mixed_flags", "git -C /path --no-pager push origin", true}, + {"flags_other_order", "nice git --no-pager -C /path push", true}, + + // Negative: not a push + {"empty", "", false}, + {"no_git", "echo hello", false}, + {"other_subcommand", "git status", false}, + {"git_pull", "git pull origin main", false}, + {"git_log", "git log --oneline", false}, + {"git_log_with_grep_push", "git log --grep=push", false}, + + // Negative: ref-name starting with push (tail anchor rejects) + {"push_hyphen", "git push-to-remote", false}, + {"push_underscore", "git push_branch", false}, + {"push_slash", "git push/foo", false}, + {"push_dot", "git push.default", false}, + + // Negative: not the `git` program + {"mygit", "mygit push", false}, + {"gitpush_joined", "gitpush", false}, + {"git_push_joined", "gitpush origin", false}, + + // Accepted false positives: `push` as a literal arg after another + // subcommand. Documented trade-off — over-blocking is preferred + // to under-blocking for a push guard. + {"false_positive_log_push", "git log push", true}, + {"false_positive_commit_msg_push", "git commit -m push", true}, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + got := GitPush.MatchString(c.input) + if got != c.want { + t.Errorf("GitPush.MatchString(%q) = %v, want %v", c.input, got, c.want) + } + }) + } +} diff --git a/internal/config/regex/doc.go b/internal/config/regex/doc.go index 853ea6783..295f6af99 100644 --- a/internal/config/regex/doc.go +++ b/internal/config/regex/doc.go @@ -4,10 +4,68 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package regex defines compiled regular expressions for. 
+// Package regex centralizes every **compiled regular +// expression** ctx uses anywhere in its codebase, so they are +// compiled exactly once at process start (`regexp.MustCompile` +// at package-init time) and so the patterns themselves live in +// a single, auditable place. // -// Constants are referenced by domain packages via config/regex.*. -// Provides constants and definitions for regex operations. -// Exports: [OversizeTokens], [LineNumber], [MidSudo], -// [MidGitPush], [CpMvToBin], [InstallToLocalBin]. +// Two motivating problems: +// +// - **Cost** — `regexp.MustCompile` is non-trivial; calling +// it inside a hot loop is wasteful. Hoisting every pattern +// into a package-level `var` guarantees init-time +// compilation. +// - **Audit-ability** — patterns scattered through a +// codebase drift silently. Co-locating them lets +// reviewers eyeball the full surface in one place and +// lets the test suite (`regexp_location_test`, +// `regexp_test`) verify that no other package compiles +// its own regex. +// +// # File Layout — One Concern per File +// +// Each file groups patterns for a single concern: command-safety +// scanners (`cmd.go` — [MidSudo], [GitPush], +// [CpMvToBin], [InstallToLocalBin]), context tracing (`task_ref.go`), +// non-PATH-ctx detection (`ctx_absolute_start.go`, +// `ctx_relative_start.go`), source-tree linters +// (`oversize_tokens.go`, `line_number.go`), and so on. +// +// # Naming Convention +// +// Each exported `var` is a `*regexp.Regexp` named for what +// it matches, not how. `MidSudo`, `GitPush`, +// `InstallToLocalBin`, `OversizeTokens`, `TaskRef`. The doc +// comment above each variable documents the pattern, the +// captured groups, and the call sites that consume it. +// +// # Concurrency +// +// `*regexp.Regexp` is safe for concurrent use after +// compilation — the standard library guarantees it. No +// caller needs to lock when invoking `Match`, `Find`, +// `Replace`, or `Submatch`. 
+// +// # Audit +// +// Two AST tests defend the contract: +// +// - `regexp_location_test` — fails if any source file +// outside this package calls `regexp.Compile` or +// `regexp.MustCompile`. +// - `regexp_test` — fails if a pattern in this package is +// dead (unused) or if a referenced variable is missing. +// +// New patterns therefore must land here; new call sites +// reference the variable by name. +// +// # Related Packages +// +// - [internal/cli/system/cmd/block_dangerous_command] — +// consumes the safety-scanner patterns. +// - [internal/trace], [internal/drift] — consume the +// content-detection patterns. +// - [internal/cli/system/cmd/block_non_path_ctx] — +// consumes the absolute/relative ctx detection patterns. package regex diff --git a/internal/config/stats/doc.go b/internal/config/stats/doc.go index 1109675e0..1f5aa171e 100644 --- a/internal/config/stats/doc.go +++ b/internal/config/stats/doc.go @@ -4,9 +4,39 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package stats defines constants for context size monitoring,. +// Package stats holds the **constants** used by ctx's +// context-size monitoring and status reporting: the +// status-icon glyphs (`✓`, `⚠`, `✗`), the threshold +// percentages that decide which icon to use, and the +// per-line format strings the renderers consume. // -// Constants are referenced by domain packages via config/stats.*. -// Exports: [StatusIcon]. -// Constants are referenced by domain packages. +// The package is a typed constants registry — no logic. +// +// # Public Surface +// +// - **[StatusIcon]** — map from status level to +// the rendered glyph; consumed by `ctx status`, +// `ctx doctor`, and the per-section size lines +// in the agent context packet. +// - **Threshold percentage constants** — `ok` / +// `warn` / `danger` boundaries the renderers +// use to pick the matching icon. 
+// - **Format-string constants** — the per-line +// templates used to render "FILE: N tokens +// (PCT%)" rows. +// +// # Concurrency +// +// All exports are immutable. Safe for any access +// pattern. +// +// # Related Packages +// +// - [internal/cli/status] — chief +// consumer for the status one-liner. +// - [internal/cli/agent] — uses +// [StatusIcon] in the per-section budget +// summary at the head of the context packet. +// - [internal/cli/doctor] — uses +// the same icons in its checklist output. package stats diff --git a/internal/config/sysinfo/sysinfo.go b/internal/config/sysinfo/sysinfo.go index 6f6038f1d..faf004489 100644 --- a/internal/config/sysinfo/sysinfo.go +++ b/internal/config/sysinfo/sysinfo.go @@ -4,8 +4,6 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package sysinfo provides constants for system information -// collection commands and keys. package sysinfo // macOS system command names. diff --git a/internal/config/token/doc.go b/internal/config/token/doc.go index 2bbf4ac08..f56a1cb88 100644 --- a/internal/config/token/doc.go +++ b/internal/config/token/doc.go @@ -4,9 +4,61 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package token defines string tokens, delimiters, and content. +// Package token defines the **string-token vocabulary** ctx +// uses everywhere it concatenates output, parses input, or +// scans content: delimiters (newline, comma, slash), markers +// (frontmatter fence, code fence, ellipsis), prefixes (URL +// schemes, file paths), and small fixed phrases (`Spec:`, +// `Status:`). // -// Constants are referenced by domain packages via config/token.*. -// Provides constants and definitions for token operations. -// Exports: [TopicSeparators], [SecretPatterns], [TemplateMarkers]. 
+// Centralizing them eliminates two whole classes of bug: +// +// - **Typo drift** — `","` vs `", "` vs `" ,"` no longer +// happen across 40 files; everyone uses +// [token.CommaSpace]. +// - **Magic-string hunts** — searches for a marker that +// appears in three places now resolve to one constant +// declaration with backlinks via `go references`. +// +// The audit suite enforces "no string literal duplication +// across packages" so adding a new common token here is +// the only sustainable path. +// +// # Token Families +// +// Each `*.go` file groups one family: +// +// - **delimiter** — newlines (`\n` / `\r\n`), commas, +// spaces, ellipsis, separator runs. +// - **marker** — Markdown fences, frontmatter fences, +// ctx HTML markers. +// - **prefix** — URL schemes (`http://`, `https://`, +// `ftp://`, `file://`, `//`), absolute path +// marker. +// - **slash / quote** — single character constants +// that have a name to avoid raw `'/'` / `'"'` in +// calling code. +// - **content** — common content-pattern groups +// ([SecretPatterns], [TopicSeparators], +// [TemplateMarkers]) used by drift, classify, and +// search. +// +// # Concurrency +// +// All exports are immutable. Safe for any access +// pattern. +// +// # Related Packages +// +// - [internal/config/embed/text], +// [internal/config/embed/cmd], +// [internal/config/embed/flag] — sister registries +// of typed lookup keys (text, command, flag help). +// - [internal/parse] — consumes the +// marker constants when splitting frontmatter. +// - [internal/wrap], [internal/index] — consume the +// newline + ellipsis constants for output +// formatting. +// - [internal/drift] — consumes +// [SecretPatterns] for the secret-leak check. package token diff --git a/internal/config/vscode/vscode.go b/internal/config/vscode/vscode.go index 15072c334..c4e865133 100644 --- a/internal/config/vscode/vscode.go +++ b/internal/config/vscode/vscode.go @@ -4,8 +4,6 @@ // \ Copyright 2026-present Context contributors. 
// SPDX-License-Identifier: Apache-2.0 -// Package vscode defines constants for VS Code workspace configuration -// artifacts generated by ctx init. package vscode // Dir is the VS Code workspace configuration directory. @@ -54,6 +52,6 @@ var Tasks = []struct { {"ctx: status", "ctx status"}, {"ctx: drift", "ctx drift"}, {"ctx: agent", "ctx agent --budget 4000"}, - {"ctx: journal", "ctx recall export --all && ctx journal site --build"}, + {"ctx: journal", "ctx journal import --all && ctx journal site --build"}, {"ctx: journal-serve", "ctx journal site --serve"}, } diff --git a/internal/config/warn/warn.go b/internal/config/warn/warn.go index 6a59938ea..930b97a57 100644 --- a/internal/config/warn/warn.go +++ b/internal/config/warn/warn.go @@ -4,12 +4,6 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package warn provides format string constants for best-effort -// warning messages routed through log.Warn. -// -// These are Printf-style format strings for common I/O failure -// patterns. Using constants prevents typo drift across 40+ call sites. -// Import as config/warn. package warn // Format strings for file I/O warnings. Each takes (path, error). diff --git a/internal/config/why/why.go b/internal/config/why/why.go index 786558841..38fb09862 100644 --- a/internal/config/why/why.go +++ b/internal/config/why/why.go @@ -4,12 +4,6 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package why provides constants for the ctx why command's embedded -// philosophy documents. -// -// Doc* constants are user-facing alias keys (CLI args, menu items). -// DocAlias* constants are embedded asset names (file stems in assets/why/). -// Import as config/why. package why // User-facing document alias keys (CLI arguments and menu items). 
diff --git a/internal/context/load/doc.go b/internal/context/load/doc.go index c07b706f4..1e8cec5a7 100644 --- a/internal/context/load/doc.go +++ b/internal/context/load/doc.go @@ -4,9 +4,66 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package load reads the .context/ directory and assembles its files. +// Package load reads the `.context/` directory and +// **assembles its files into the in-memory +// [entity.Context]** that every downstream consumer — +// `ctx agent`, `ctx drift`, `ctx doctor`, every MCP +// handler — operates on. // -// Key exports: [Do]. -// See source files for implementation details. -// Part of the context subsystem. +// The package is the single point of truth for "what +// does the user's context look like right now?". Two +// callers running [Do] back-to-back see identical +// snapshots because the package holds no cache; freshness +// matters more than micro-optimization here. +// +// # Public Surface +// +// - **[Do](contextDir)** — reads every file in the +// priority order [internal/rc.PriorityOrder] +// defines, populates an [entity.Context] with +// each file's name, body, byte/token counts, and +// mtime, and returns the assembled snapshot. +// +// # Read Order +// +// Files are loaded in `priority_order` from `.ctxrc` +// (default: TASKS → DECISIONS → CONVENTIONS → +// LEARNINGS → ARCHITECTURE → CONSTITUTION → +// GLOSSARY). Order matters because downstream consumers +// (notably [internal/cli/agent/core/budget]) allocate +// budget tier-by-tier in this order. +// +// # Token Counts +// +// Each file's token count is computed by the rough +// estimator in [internal/cli/agent/core/budget] — +// approximate but stable. The count is what +// [entity.Context.TokenInfo] surfaces. +// +// # Errors +// +// File-not-found for an *expected* file is silently +// tolerated (returns an empty body); the user may +// legitimately not have populated every foundation +// file yet. 
Read errors that are not "not found" are +// returned to the caller. +// +// # Concurrency +// +// Stateless and filesystem-bound. Concurrent +// invocations against the same directory each pay +// the full read cost — by design, see above. +// +// # Related Packages +// +// - [internal/cli/agent] — chief +// consumer. +// - [internal/cli/drift] — also consumes +// the loaded context. +// - [internal/mcp/handler] — every MCP +// tool that operates on context calls [Do]. +// - [internal/rc] — supplies the +// priority order and the resolved context dir. +// - [internal/entity] — the +// [Context] type produced. package load diff --git a/internal/context/summary/doc.go b/internal/context/summary/doc.go index 5b2ba0c68..940645991 100644 --- a/internal/context/summary/doc.go +++ b/internal/context/summary/doc.go @@ -4,9 +4,52 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package summary generates brief human-readable summaries from. +// Package summary generates the **brief, human-readable +// summaries** ctx uses in compact-output contexts: the +// `ctx status` one-line health line, the agent context +// packet's lead paragraph, the doctor's roll-up banner. // -// Key exports: [Generate]. -// See source files for implementation details. -// Part of the context subsystem. +// The summaries operate against an already-loaded +// [entity.Context] so they share data with every other +// downstream consumer — no double-reading the filesystem. +// +// # Public Surface +// +// - **[Generate](ctx, opts)** — produces a multi-line +// summary string covering: +// - file count + total token estimate +// - newest / oldest file mtime +// - drift signal counts (warnings, violations) +// - open task count +// - last session timestamp from +// `state/session-event.jsonl` +// Output shape is tunable via [opts] for the +// different consumers (`ctx status` wants a single +// line; the agent packet wants 3-5 lines). 
+// +// # Why a Dedicated Package +// +// Three callers need the same numbers (`ctx status`, +// `ctx agent`, `ctx doctor`) and three different +// renderings. Hoisting the data computation here means +// each renderer reuses the same byte / token / mtime +// counters, and a fix to the count logic only has to +// happen once. +// +// # Concurrency +// +// All functions are pure data transformations over +// the input [entity.Context]. Concurrent callers +// never race. +// +// # Related Packages +// +// - [internal/cli/status] — chief +// consumer for the one-line summary. +// - [internal/cli/agent] — uses the +// packet-form summary as the lead paragraph. +// - [internal/cli/doctor] — uses the +// roll-up form for the report banner. +// - [internal/context/load] — produces +// the [entity.Context] this package consumes. package summary diff --git a/internal/crypto/crypto.go b/internal/crypto/crypto.go index 0cf46816e..a1dd08de3 100644 --- a/internal/crypto/crypto.go +++ b/internal/crypto/crypto.go @@ -4,11 +4,6 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package crypto provides AES-256-GCM encryption for the scratchpad. -// -// The key is a 256-bit random value stored as a raw file. The nonce is -// 12 bytes of random data prepended to the ciphertext. Each write -// re-encrypts the entire file. package crypto import ( diff --git a/internal/crypto/doc.go b/internal/crypto/doc.go index 16f40ddbf..0f2d45cd9 100644 --- a/internal/crypto/doc.go +++ b/internal/crypto/doc.go @@ -4,9 +4,73 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package crypto provides AES-256-GCM encryption and decryption for the. +// Package crypto provides the **AES-256-GCM** encryption +// primitives ctx uses for the two pieces of state that +// must never land on disk in plaintext: the **scratchpad** +// ([internal/pad]) and the **webhook URL** ([internal/notify]). 
// -// Key exports: [GenerateKey], [Encrypt], [Decrypt], [LoadKey], [SaveKey]. -// See source files for implementation details. -// Part of the internal subsystem. +// The package is deliberately small. Heavy lifting (key +// management policies, rotation cadence, file paths) lives +// in the consumer packages; this package owns only the +// "given a key and bytes, produce ciphertext / produce +// plaintext" primitives plus the on-disk key file format. +// +// # Public Surface +// +// - **[GenerateKey]** — returns a fresh 32-byte +// (256-bit) key from `crypto/rand`. The caller +// persists it via [SaveKey]. +// - **[SaveKey](path, key)** — writes the key to +// `path` with `0o600` permissions. Refuses to +// overwrite an existing file (the caller must +// remove first; `ctx pad rotate` does this +// intentionally). +// - **[LoadKey](path)** — reads the key back. Returns +// a typed error from [internal/err/crypto] when +// the file is missing, world-readable, or the +// wrong size. +// - **[Encrypt](key, plaintext)** — produces nonce +// prepended to ciphertext: a fresh random +// 12-byte nonce per call concatenated with the +// AES-256-GCM ciphertext. +// - **[Decrypt](key, payload)** — splits nonce from +// ciphertext and decrypts. Returns a typed error +// on auth-tag mismatch, short payload, or +// missing key. +// +// # File Format +// +// Both encrypted blobs (`.notify.enc`, +// `.context/.scratchpad.enc`) are the raw output of +// [Encrypt] — no header, no version, no JSON wrapper. +// The format is purely +// `nonce(12) || ciphertext(...) || tag(16)`. +// +// # Per-Machine Key +// +// The key lives at `~/.ctx/.ctx.key` (one key per user, +// shared by every project on that machine). Cross-machine +// scratchpad sync requires copying that key — see +// `docs/recipes/scratchpad-sync.md` for the user-facing +// procedure. +// +// # Concurrency +// +// All functions are stateless. 
AES-256-GCM is safe for +// concurrent calls — each [Encrypt] generates a fresh +// nonce internally, and the underlying cipher is +// reentrant. +// +// # Related Packages +// +// - [internal/pad] — chief consumer; +// the scratchpad encrypts every write through +// this package. +// - [internal/notify] — encrypts the +// webhook URL the same way. +// - [internal/rc] — owns the +// `KeyPath()` resolver this package's callers +// use. +// - [internal/err/crypto] — typed error +// constructors. package crypto diff --git a/internal/drift/detector.go b/internal/drift/detector.go index 774cf6363..4c361a105 100644 --- a/internal/drift/detector.go +++ b/internal/drift/detector.go @@ -4,7 +4,6 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package drift provides functionality for detecting stale or invalid context. package drift import ( diff --git a/internal/drift/doc.go b/internal/drift/doc.go index be494737e..57aae12a4 100644 --- a/internal/drift/doc.go +++ b/internal/drift/doc.go @@ -4,9 +4,112 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package drift detects stale or invalid context files and structural. +// Package drift detects ways a project's `.context/` files have +// drifted from the codebase or from project conventions, and +// surfaces the findings as a structured [Report] that the CLI +// (`ctx drift`), the doctor (`ctx doctor`), and the steering / +// trigger nudges all consume. // -// Key exports: [Detect]. -// See source files for implementation details. -// Part of the internal subsystem. +// The package is the *evaluator*; it never modifies state. It +// reads the loaded [entity.Context], runs a battery of +// independent checks, and returns a categorized list of issues. +// Whether a given issue stops the user, prints a yellow nudge, +// or is silently archived is the caller's concern. 
+// +// # The Public Surface +// +// One function does the work — [Detect](ctx) — plus the result +// types it returns: +// +// - [Report] — Warnings, Violations, Passed checks. +// - [Issue] — File, Line, Type, Message, Path, Rule. +// - [Report.Status] — rolls the report up to a single +// [config/drift.StatusType]: Violation > Warning > Ok. +// +// Everything else in the package is an internal `check*` helper +// that appends to the [Report] passed by reference. +// +// # The Checks +// +// [Detect] runs the following checks in order; each is +// independent and contributes to the same [Report]: +// +// - **Path references** ([checkPathReferences]) — scans +// ARCHITECTURE.md and CONVENTIONS.md for backtick-enclosed +// file paths and verifies each exists on disk. Skips URLs, +// glob patterns, and template placeholders. +// - **Staleness** ([checkStaleness]) — flags content that +// contradicts current code (placeholder markers left in +// CONSTITUTION.md, missing `.context/` markers, etc). +// - **Constitution heuristics** ([checkConstitution]) — +// basic rule presence checks against CONSTITUTION.md. +// - **Required files** ([checkRequiredFiles]) — flags empty +// files that the schema expects to be populated. +// - **File age** ([checkFileAge]) — warns when a context +// file has not been touched in `stale_age_days` (configured +// in `.ctxrc`; default 30; 0 disables). [staleAgeExclude] +// skips files that are intentionally static (CONSTITUTION). +// - **Entry counts** ([checkEntryCount]) — warns when +// DECISIONS.md / LEARNINGS.md exceed the per-file +// thresholds (consolidation nudge). +// - **Missing internal packages** ([checkMissingPackages]) — +// flags packages mentioned in ARCHITECTURE.md that no +// longer exist on disk; also normalizes Go internal +// package paths via [normalizeInternalPkg]. 
+// - **Template headers** ([checkTemplateHeaders]) — checks +// each context file's comment-header banner against the +// ctx-managed template; mismatch suggests `ctx init +// --force`. +// - **Steering tools** ([checkSteeringTools]) — every +// steering file's `tools:` field must reference a +// supported tool ID ([supportedTools]). +// - **Hook permissions** ([checkHookPerms]) — flags any +// trigger script in `.context/hooks/` that lacks the +// executable bit (matches the trigger-package security +// contract). +// - **Sync staleness** ([checkSyncStaleness]) — warns when +// a tool-native steering file is older than its source +// `.context/steering/*.md` (the user needs to run +// `ctx steering sync`). +// - **Tool field** ([checkRCTool]) — `.ctxrc`'s `tool:` +// field must be one of the supported AI tool IDs. +// +// New checks are added by appending one more `checkX` call in +// [Detect] and a constant to [config/drift.CheckName]. +// +// # Issues vs Warnings vs Violations +// +// Severity is decided per-check, not per-package: +// +// - **Violations** — things the user has to fix +// (constitution rule break, dead path in +// ARCHITECTURE.md). [Report.Status] returns +// `StatusViolation` if any violation exists. +// - **Warnings** — things the user *should* look at but can +// defer (stale file, oversize entry count). Reported +// individually; do not block a `ctx doctor` exit. +// - **Passed** — names of checks that ran clean. Used by +// `ctx doctor --json` to render a positive checklist. +// +// # Stateless and Concurrency-Safe +// +// The package holds no global state. Callers may invoke +// [Detect] concurrently as long as the [entity.Context] they +// pass is not mutated mid-call. Filesystem reads are scoped to +// the resolved context directory. +// +// # Related Packages +// +// - [github.com/ActiveMemory/ctx/internal/cli/drift] — the +// `ctx drift` CLI surface. 
+// - [github.com/ActiveMemory/ctx/internal/cli/doctor] — +// `ctx doctor` consumes the same Report and adds health +// checks. +// - [github.com/ActiveMemory/ctx/internal/config/drift] — +// check-name constants, status types, issue-type tags. +// - [github.com/ActiveMemory/ctx/internal/steering] and +// [github.com/ActiveMemory/ctx/internal/trigger] — sources +// of the steering / trigger checks. +// - [github.com/ActiveMemory/ctx/internal/rc] — supplies +// thresholds (stale_age_days, entry counts). package drift diff --git a/internal/entity/doc.go b/internal/entity/doc.go index c5089a819..cf502fd81 100644 --- a/internal/entity/doc.go +++ b/internal/entity/doc.go @@ -4,9 +4,121 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package entity defines the core domain types shared across ctx. +// Package entity is the **shared domain-types layer** for ctx — +// the structs that carry information across package boundaries +// without coupling the producer to the consumer. // -// Types here are used by multiple packages (CLI, MCP, write). When a -// type is only used by one package, it belongs there instead. Entity -// types carry data; they have no I/O methods or business logic. +// # The Two-Sentence Rule +// +// A type belongs in `entity` if and only if **at least two +// packages need it AND it carries no I/O or business logic**. +// Single-package types live with their consumer; types with +// methods that touch the filesystem, the network, or the +// process environment live with the package that performs that +// I/O. Entity types are pure data carriers. +// +// This rule keeps the package free of import cycles: every +// package may depend on `entity`, but `entity` depends on +// nothing except the standard library and a handful of typed +// configuration constants. +// +// # File Layout — One Domain per File +// +// Types are grouped by **the subsystem that owns the data**, +// not by their Go shape. 
A non-exhaustive tour: +// +// - **`context.go`** — [Context], the assembled +// `.context/` snapshot every reader sees: file list, token +// stats, drift signals. +// - **`add.go`** — [EntryParams], [AddConfig], +// [EntryOpts] for the `ctx add` family. +// - **`change.go`** — [ContextChange], [CodeSummary] for +// `ctx change`. +// - **`message.go`** — [Message], [ToolUse], [ToolResult] +// — the normalized session-message shape produced by +// [internal/journal/parser] and consumed everywhere +// downstream. +// - **`session.go`** — the [Session] aggregate (start / +// end / duration / project / branch / messages / rollups) +// that flows from parser → journal pipeline → site / +// obsidian renderers. +// - **`journal.go`** — [JournalEntry], [JournalFrontmatter] +// — the on-disk shape of an enriched journal entry. +// - **`import.go`** — [ImportPlan], [ImportResult], +// [FileAction], [RenameOp] used by the journal-import +// pipeline. +// - **`index.go`** — [IndexEntry], [GroupedIndex], +// [TopicData], [KeyFileData], [TypeData] — index-table +// primitives consumed by `internal/index`. +// - **`hook.go`** — [HookInput], [ToolInput], +// [BlockResponse] — the payload shapes for ctx-system +// hook plumbing. +// - **`trigger.go`** — [TriggerSession], [TriggerInput] +// — payloads for project-authored lifecycle scripts (see +// [internal/trigger]). +// - **`system.go`** — system-hook input/output types. +// - **`event.go`** — [EventQueryOpts] and event log +// types used by `ctx hook event`. +// - **`notify.go`** — [NotifyPayload], [TemplateRef] — +// the webhook delivery payloads. +// - **`task.go`** — task-related domain types +// (priority, completion state, snapshot shapes). +// - **`mcp_session.go`**, **`mcp_deps.go`**, +// **`mcp_prompt.go`** — the per-session state, runtime +// dependency container, and prompt-spec types passed +// between the MCP server and its handler package. 
+// - **`bootstrap.go`** — [BootstrapOutput] — the JSON +// emitted by `ctx system bootstrap` for AI agents at +// session start. +// - **`deploy.go`**, **`merge.go`** — pipeline params for +// deploy/merge orchestration. +// - **`meta.go`** — [Stats], [TokenInfo] — rollup +// metadata attached to many other types. +// +// New types should slot into the file whose subsystem owns +// the data; create a new file only when a genuinely new +// subsystem appears. +// +// # The "No Behavior" Constraint +// +// Methods on entity types are limited to: +// +// - **Pure predicates** (e.g. `Message.BelongsToUser()`, +// `Task.IsDone()`) — they read fields and return derived +// facts. +// - **Pure derivations** (e.g. `Session.Duration()`). +// - **Display helpers** (e.g. `String()` overrides for +// debug logging). +// +// Anything that needs to read a file, hit the network, run +// `git`, or call `time.Now` belongs in the consumer package. +// `entity` types are safe to construct in tests with literal +// field assignment; they need no constructor and have no +// hidden state. +// +// # Concurrency +// +// All entity types are **plain data**. Mutation is the +// responsibility of whoever owns the value; concurrent +// readers of an immutable value are safe by definition. +// Several types (notably [MCPSession]) are documented as +// owned by a single goroutine at a time and must not be +// shared across requests; see their per-type doc comments +// for the full contract. +// +// # Related Packages +// +// Producers and consumers (non-exhaustive): +// +// - [internal/context/load] — assembles [Context]. +// - [internal/journal/parser] — produces [Session], +// [Message], [ToolUse], [ToolResult]. +// - [internal/index] — produces [IndexEntry] +// and the grouped variants. +// - [internal/drift] — consumes [Context]. +// - [internal/mcp/handler] — consumes [MCPDeps], +// [MCPSession], produces [PromptEntrySpec]. 
+// - [internal/trigger] — consumes [TriggerInput] +// and emits trigger output. +// - [internal/notify] — consumes [NotifyPayload]. package entity diff --git a/internal/entry/doc.go b/internal/entry/doc.go index e75b9226d..9e324eafe 100644 --- a/internal/entry/doc.go +++ b/internal/entry/doc.go @@ -4,9 +4,80 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package entry provides the domain API for adding entries to context files. +// Package entry is the **shared write-side API** for +// adding entries to context files (DECISIONS.md, +// LEARNINGS.md, CONVENTIONS.md). Both the CLI add +// commands and the MCP `ctx_add` tool route through here +// so the validation rules and the on-disk format are +// applied uniformly regardless of caller. // -// Key exports: [Validate], [Write], [ValidateAndWrite]. -// See source files for implementation details. -// Part of the internal subsystem. +// # Public Surface +// +// - **[Validate](params)** — applies all the rules +// a new entry must satisfy before it can be +// written: required fields present, body +// non-empty, identifier-like fields well-formed, +// provenance flags satisfy the per-project +// [internal/rc.ProvenanceConfig]. Returns a typed +// error per failure for actionable messages. +// - **[Write](params)** — writes the entry: builds +// the timestamped header, formats the body with +// the canonical attribute order, appends to the +// target file, and updates the file's index +// table via [internal/index]. +// - **[ValidateAndWrite](params)** — convenience +// wrapper that runs [Validate] and then [Write] +// when validation passes; this is the function +// the CLI commands and the MCP handler actually +// call. +// +// # Validation Rules +// +// Beyond presence checks, validation enforces: +// +// - **Title length** — fits the index-table +// column width without truncation. 
+// - **Body has at least one substantive line** — +// not just whitespace or template placeholders. +// - **Provenance** — `--session-id`, `--branch`, +// `--commit` are required when +// `provenance_required` enables them in +// `.ctxrc`. +// - **No secrets** — body is scanned against +// [internal/config/token.SecretPatterns]; a +// match aborts with a typed error so the user +// can scrub before retry. +// +// # On-Disk Format +// +// Entries follow the canonical shape: +// +// ## [YYYY-MM-DD-HHMMSS] Title text here +// +// Body paragraph(s)... +// +// **Attribute**: value +// **Attribute**: value +// +// Attributes are emitted in a fixed order so re-runs +// produce stable diffs. +// +// # Concurrency +// +// Single-process write assumption. Concurrent writes +// to the same file would race on the append; ctx +// CLI is single-process by design. +// +// # Related Packages +// +// - [internal/cli/add] — CLI surface +// that calls [ValidateAndWrite]. +// - [internal/mcp/handler] — MCP +// `ctx_add` tool that calls [ValidateAndWrite]. +// - [internal/index] — index-table +// updater invoked by [Write]. +// - [internal/rc] — supplies +// [ProvenanceConfig] for validation rules. +// - [internal/config/token] — supplies +// the secret-pattern set for the scan. package entry diff --git a/internal/err/backup/doc.go b/internal/err/backup/doc.go index edd271458..ce876bef4 100644 --- a/internal/err/backup/doc.go +++ b/internal/err/backup/doc.go @@ -1,13 +1,46 @@ // / ctx: https://ctx.ist // ,'`./ do you remember? -// `.,'\\ +// `.,'\ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package backup provides error constructors for backup operations. +// Package backup defines the **typed error constructors** +// the backup subsystem returns. 
Every failure that can +// happen during `ctx backup` flows through one of these +// constructors so the call site upstream sees a +// sentinel-able error and the renderer downstream knows +// which user-facing text to surface. // -// Error constructors return structured errors with context for -// user-facing messages routed through internal/assets text lookups. -// Exports: [Create], [CreateArchive], -// [CreateArchiveDir], [WriteArchive], [SMBConfig]. +// # Why Typed Errors +// +// Three reasons: +// +// - **Stability** — error categories are part of +// the public API; adding a constructor is an +// intentional change a reviewer can see. +// - **Routing** — the write-side +// ([internal/write/backup]) maps error types to +// localized text via [internal/assets/read/desc]. +// - **Wrapping** — every constructor wraps its +// underlying cause via `%w` so callers can +// `errors.Is` / `errors.As` against system +// errors when needed. +// +// # Public Surface +// +// Constructors (one per failure mode): [Create], +// [CreateArchive], [CreateArchiveDir], +// [WriteArchive], [SMBConfig]. +// +// # Concurrency +// +// Pure constructors. Concurrent callers never race. +// +// # Related Packages +// +// - [internal/cli/backup] — chief consumer. +// - [internal/cli/system/core/archive] — also +// returns these. +// - [internal/write/backup] — the renderer that +// maps each error to its user-facing text. package backup diff --git a/internal/err/crypto/doc.go b/internal/err/crypto/doc.go index 8a34e438c..3d5d41cc9 100644 --- a/internal/err/crypto/doc.go +++ b/internal/err/crypto/doc.go @@ -1,13 +1,51 @@ // / ctx: https://ctx.ist // ,'`./ do you remember? -// `.,'\\ +// `.,'\ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package crypto provides error constructors for encryption and key management. 
+// Package crypto defines the **typed error constructors** +// returned by [internal/crypto] and its consumers +// ([internal/pad], [internal/notify]). Every encryption, +// decryption, and key-management failure flows through +// one of these constructors. // -// Error constructors return structured errors with context for -// user-facing messages routed through internal/assets text lookups. -// Exports: [LoadKey], [EncryptFailed], [DecryptFailed], +// # Why Typed Errors +// +// - **Stability** — error categories are part of +// the public API. +// - **Routing** — write-side packages map error +// types to localized text via +// [internal/assets/read/desc]. +// - **Wrapping** — constructors wrap the +// underlying cause via `%w` so callers can +// `errors.Is` against system errors +// (`io.EOF`, `os.ErrNotExist`) when needed. +// +// # Public Surface +// +// Constructors (one per failure mode): +// [LoadKey], [EncryptFailed], [DecryptFailed], // [NoKeyAt], [SaveKey], [MkdirKeyDir]. +// +// # Why "NoKeyAt" Is Distinct from "LoadKey" +// +// "Key file does not exist yet" is the *normal* +// state on first use; consumers ([pad], [notify]) +// treat it as "generate one" rather than "fail". +// Other load failures (permission denied, wrong +// size) are real errors and surface through +// [LoadKey]. +// +// # Concurrency +// +// Pure constructors. Concurrent callers never race. +// +// # Related Packages +// +// - [internal/crypto] — chief producer. +// - [internal/pad] / +// [internal/notify] — also produce these. +// - [internal/write/pad] / +// [internal/write/notify] — render them. package crypto diff --git a/internal/err/fs/doc.go b/internal/err/fs/doc.go index 73958540e..b7599216e 100644 --- a/internal/err/fs/doc.go +++ b/internal/err/fs/doc.go @@ -1,13 +1,50 @@ // / ctx: https://ctx.ist // ,'`./ do you remember? -// `.,'\\ +// `.,'\ // \ Copyright 2026-present Context contributors. 
// SPDX-License-Identifier: Apache-2.0 -// Package fs provides error constructors for filesystem operations. +// Package fs defines the **typed error constructors** +// for filesystem-level operations every other ctx +// package eventually performs: directory creation, +// reading, writing, amending. The package is the +// lowest level of the typed-error layer. // -// Error constructors return structured errors with context for -// user-facing messages routed through internal/assets text lookups. -// Exports: [Mkdir], [ReadDir], [DirNotFound], +// # Why Typed Errors +// +// - **Stability** — error categories are part of +// the public API. +// - **Routing** — write-side packages map error +// types to localized text via +// [internal/assets/read/desc]. +// - **Wrapping** — constructors wrap the +// underlying cause via `%w` so callers can +// `errors.Is(err, os.ErrNotExist)` against +// system errors when needed. +// +// # Public Surface +// +// Constructors: [Mkdir], [ReadDir], [DirNotFound], // [FileWrite], [FileRead], [FileAmend]. +// +// # When to Use [DirNotFound] vs [ReadDir] +// +// [DirNotFound] is for the actionable case "the +// directory the user expects to exist does not"; +// [ReadDir] wraps the underlying generic read +// failure (permission denied, IO error, etc.). +// The CLI surfaces them differently — the former +// suggests `ctx init`, the latter suggests +// checking permissions. +// +// # Concurrency +// +// Pure constructors. Concurrent callers never race. +// +// # Related Packages +// +// - [internal/io] — chief producer +// for low-level filesystem failures. +// - Most CLI commands — surface them +// when an expected file is missing. package fs diff --git a/internal/err/hub/hub.go b/internal/err/hub/hub.go index 2498acb5a..222e16033 100644 --- a/internal/err/hub/hub.go +++ b/internal/err/hub/hub.go @@ -4,7 +4,6 @@ // \ Copyright 2026-present Context contributors. 
// SPDX-License-Identifier: Apache-2.0 -// Package hub provides error constructors for the hub subsystem. package hub import ( diff --git a/internal/err/journal/doc.go b/internal/err/journal/doc.go index db63ad535..f1f194550 100644 --- a/internal/err/journal/doc.go +++ b/internal/err/journal/doc.go @@ -1,13 +1,43 @@ // / ctx: https://ctx.ist // ,'`./ do you remember? -// `.,'\\ +// `.,'\ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package journal provides error constructors for journal pipeline operations. +// Package journal defines the **typed error constructors** +// returned by the journal pipeline — state-file load +// and save failures, missing journal directories, and +// related operational errors that reach the user +// through `ctx journal *` subcommands. // -// Error constructors return structured errors with context for -// user-facing messages routed through internal/assets text lookups. -// Exports: [LoadState], [SaveState], [LoadStateErr], -// [LoadStateFailed], [SaveStateFailed], [NoDir]. +// # Why Typed Errors +// +// - **Stability** — error categories are part of +// the public API. +// - **Routing** — write-side packages map error +// types to localized text via +// [internal/assets/read/desc]. +// - **Wrapping** — constructors wrap the +// underlying cause via `%w` so callers can +// `errors.Is` against system errors when +// needed. +// +// # Public Surface +// +// Constructors: [LoadState], [SaveState], +// [LoadStateErr], [LoadStateFailed], +// [SaveStateFailed], [NoDir]. +// +// # Concurrency +// +// Pure constructors. Concurrent callers never race. +// +// # Related Packages +// +// - [internal/journal/state] — chief +// producer for state-file errors. +// - [internal/cli/journal/cmd/importer] — also +// producer. +// - [internal/write/journal] — the +// renderer that maps these to user text. 
package journal diff --git a/internal/err/memory/doc.go b/internal/err/memory/doc.go index ddf034e7c..6669db609 100644 --- a/internal/err/memory/doc.go +++ b/internal/err/memory/doc.go @@ -1,13 +1,52 @@ // / ctx: https://ctx.ist // ,'`./ do you remember? -// `.,'\\ +// `.,'\ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package memory provides error constructors for memory bridge operations. +// Package memory defines the **typed error constructors** +// returned by [internal/memory] — the Claude Code auto- +// memory bridge — for discovery, diff, mirror, and +// publish failures. // -// Error constructors return structured errors with context for -// user-facing messages routed through internal/assets text lookups. -// Exports: [NotFound], [DiscoverFailed], [DiffFailed], -// [SelectContentFailed], [PublishFailed], [Read]. +// # Why Typed Errors +// +// - **Stability** — error categories are part of +// the public API. +// - **Routing** — write-side packages map error +// types to localized text via +// [internal/assets/read/desc]. +// - **Wrapping** — constructors wrap the +// underlying cause via `%w` so callers can +// `errors.Is` against system errors when +// needed. +// +// # Public Surface +// +// Constructors: [NotFound], [DiscoverFailed], +// [DiffFailed], [SelectContentFailed], +// [PublishFailed], [Read]. +// +// # Why "NotFound" Is Distinct from "DiscoverFailed" +// +// "Auto memory does not exist for this project" +// ([NotFound]) is a normal state Claude Code +// returns for projects with no recorded memory; +// the CLI surfaces it as "no memory yet, run a +// session first". Discover failures (path +// resolution errors, permission denied) are real +// errors that need user attention. +// +// # Concurrency +// +// Pure constructors. Concurrent callers never race. +// +// # Related Packages +// +// - [internal/memory] — chief +// producer. +// - [internal/cli/memory] — also +// producer. 
+// - [internal/write/memory] — the renderer +// that maps these to user text. package memory diff --git a/internal/err/skill/doc.go b/internal/err/skill/doc.go index ace31f991..97b7ef028 100644 --- a/internal/err/skill/doc.go +++ b/internal/err/skill/doc.go @@ -1,16 +1,46 @@ // / ctx: https://ctx.ist // ,'`./ do you remember? -// `.,'\\ +// `.,'\ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package skill provides error constructors for skill operations. -// -// Error constructors return structured errors with context for -// user-facing messages routed through internal/assets text lookups. -// Exports: [CreateDest], [Install], [InvalidManifest], -// [InvalidYAML], [List], [Load], [MissingClosingDelimiter], -// [MissingName], [MissingOpeningDelimiter], [NotFound], -// [NotValidDir], [NotValidSource], [Read], [ReadDir], -// [Remove], [SkillLoad]. +// Package skill defines the **typed error constructors** +// returned by [internal/skill] (the install / list / +// load / remove engine) and its CLI consumers. +// +// # Why Typed Errors +// +// - **Stability** — error categories are part of +// the public API. +// - **Routing** — write-side packages map error +// types to localized text via +// [internal/assets/read/desc]. +// - **Wrapping** — constructors wrap the +// underlying cause via `%w` so callers can +// `errors.Is` against system errors when +// needed. +// +// # Public Surface +// +// Constructors fall into three groups: +// +// - **Install / Remove** — [CreateDest], +// [Install], [NotFound], [Remove], [List], +// [ReadDir], [NotValidDir], [NotValidSource]. +// - **Load / Read** — [Load], [SkillLoad], +// [Read], [InvalidYAML]. +// - **Manifest validation** — [InvalidManifest], +// [MissingName], [MissingClosingDelimiter], +// [MissingOpeningDelimiter]. +// +// # Concurrency +// +// Pure constructors. Concurrent callers never race. +// +// # Related Packages +// +// - [internal/skill] — chief producer. 
+// - [internal/cli/skill] — also producer. +// - [internal/write/skill] — the renderer +// that maps these to user text. package skill diff --git a/internal/err/steering/doc.go b/internal/err/steering/doc.go index edf2ba343..da5895ce8 100644 --- a/internal/err/steering/doc.go +++ b/internal/err/steering/doc.go @@ -1,17 +1,61 @@ // / ctx: https://ctx.ist // ,'`./ do you remember? -// `.,'\\ +// `.,'\ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package steering provides error constructors for steering operations. -// -// Error constructors return structured errors with context for -// user-facing messages routed through internal/assets text lookups. -// Exports: [ComputeRelPath], [ContextDirMissing], [CreateDir], -// [FileExists], [InvalidYAML], [MissingClosingDelimiter], -// [MissingOpeningDelimiter], [NoTool], [OutputEscapesRoot], -// [Parse], [ReadDir], [ReadFile], [ResolveOutput], -// [ResolveRoot], [SyncAll], [SyncName], [UnsupportedTool], -// [WriteFile], [WriteSteeringFile], [WriteInitFile]. +// Package steering defines the **typed error +// constructors** returned by [internal/steering] — +// frontmatter parse failures, sync target validation, +// path-boundary violations, and missing-tool errors. +// +// # Why Typed Errors +// +// - **Stability** — error categories are part of +// the public API. +// - **Routing** — write-side packages map error +// types to localized text via +// [internal/assets/read/desc]. +// - **Wrapping** — constructors wrap the +// underlying cause via `%w` so callers can +// `errors.Is` against system errors when +// needed. +// +// # Public Surface +// +// Constructors fall into three groups: +// +// - **Parse / IO** — [Parse], [InvalidYAML], +// [MissingClosingDelimiter], +// [MissingOpeningDelimiter], [ReadFile], +// [ReadDir], [WriteFile], [WriteSteeringFile], +// [WriteInitFile]. 
+// - **Sync** — [SyncAll], [SyncName], +// [UnsupportedTool], [NoTool], +// [ResolveOutput], [ResolveRoot], +// [OutputEscapesRoot], [ComputeRelPath], +// [CreateDir], [FileExists]. +// - **Context** — [ContextDirMissing]. +// +// # The Boundary Check +// +// [OutputEscapesRoot] is fired when a sync +// target's resolved absolute path would land +// outside the project root — a defensive check +// that prevents a malicious or buggy steering +// file from writing to arbitrary filesystem +// locations. +// +// # Concurrency +// +// Pure constructors. Concurrent callers never race. +// +// # Related Packages +// +// - [internal/steering] — chief +// producer. +// - [internal/cli/steering] — also +// producer. +// - [internal/write/steering] — renders +// these into user-facing text. package steering diff --git a/internal/err/trigger/doc.go b/internal/err/trigger/doc.go index 30cc273aa..342e32ebc 100644 --- a/internal/err/trigger/doc.go +++ b/internal/err/trigger/doc.go @@ -1,17 +1,65 @@ // / ctx: https://ctx.ist // ,'`./ do you remember? -// `.,'\\ +// `.,'\ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package trigger provides error constructors for trigger operations. -// -// Error constructors return structured errors with context for -// user-facing messages routed through internal/assets text lookups. -// Exports: [Chmod], [CreateDir], [DiscoverFailed], -// [EmbeddedTemplateNotFound], [Exit], [InvalidJSONOutput], -// [InvalidType], [MarshalInput], [NotFound], [OverrideExists], -// [RemoveOverride], [ResolveHooksDir], [ResolvePath], -// [ScriptExists], [Stat], [StatPath], [Timeout], [Unknown], -// [UnknownVariant], [Validate], [WriteScript], [WriteOverride]. +// Package trigger defines the **typed error +// constructors** returned by [internal/trigger] — every +// validation, discovery, and execution failure the +// trigger lifecycle can produce. 
+// +// # Why Typed Errors +// +// - **Stability** — error categories are part of +// the public API. +// - **Routing** — write-side packages map error +// types to localized text via +// [internal/assets/read/desc]. +// - **Wrapping** — constructors wrap the +// underlying cause via `%w` so callers can +// `errors.Is` against system errors when +// needed. +// +// # Public Surface +// +// Constructors fall into four groups: +// +// - **Validation** — [Validate], [InvalidType], +// [Symlink] (boundary check), +// [ResolveHooksDir], [ResolvePath], [Boundary], +// [Stat], [StatPath], [NotFound], +// [ScriptExists]. +// - **Discovery / Lifecycle** — [DiscoverFailed], +// [Chmod], [CreateDir], [Unknown], +// [UnknownVariant]. +// - **Override Management** — [OverrideExists], +// [WriteOverride], [RemoveOverride], +// [EmbeddedTemplateNotFound], [WriteScript]. +// - **Execution** — [Exit] (non-zero hook +// exit), [Timeout] (hook ran past the +// configured timeout), [InvalidJSONOutput] +// (hook stdout failed to parse), +// [MarshalInput] (input encoding failed). +// +// # Why So Many Constructors +// +// Triggers run **untrusted code** at a security- +// sensitive boundary. Every distinct failure mode +// gets its own typed error so the user-facing +// message is precise about *which* invariant the +// script violated and *what* to do about it. +// +// # Concurrency +// +// Pure constructors. Concurrent callers never race. +// +// # Related Packages +// +// - [internal/trigger] — chief +// producer. +// - [internal/cli/trigger] — also +// producer. +// - [internal/write/trigger] — renders +// these into user-facing text. package trigger diff --git a/internal/format/doc.go b/internal/format/doc.go index b934d132e..aa92a31c6 100644 --- a/internal/format/doc.go +++ b/internal/format/doc.go @@ -4,10 +4,67 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package format converts typed values into human-readable display. 
+// Package format converts typed Go values into the +// **human-readable display strings** ctx prints in CLI +// output, hook nudges, and journal headers — relative time +// ("3 hours ago"), durations ("23m 14s"), truncated previews, +// and grouped numbers ("1,234,567"). // -// Key exports: [TimeAgo], [Duration], [DurationAgo], [TruncateFirstLine], -// [Number]. -// See source files for implementation details. -// Part of the internal subsystem. +// The package is the small, well-tested layer below every +// renderer; centralizing the formatters keeps presentation +// consistent across the CLI and prevents subtle drift like +// "3h ago" in one place vs "3 hours ago" in another. +// +// # Public Surface +// +// - **[TimeAgo](t)** — relative time vs `now`: +// "just now", "5 minutes ago", "3 hours ago", +// "yesterday", "3 days ago", "Mar 12". The +// break-points and phrasing match what most CLIs +// have converged on. +// - **[Duration](d)** — formats a `time.Duration` as +// "23m 14s" / "2h 5m" / "3d 4h" depending on +// magnitude. Drops the smaller unit when the +// larger is ≥ 10 (so "12h 0m" → "12h"). +// - **[DurationAgo](d)** — convenience: takes a +// duration and renders the [TimeAgo] form for "now +// minus d". +// - **[TruncateFirstLine](text, n)** — returns the +// first line of `text`, truncated to `n` runes +// (rune-aware, not byte-aware) with an ellipsis +// when truncation occurs. +// - **[Number](n)** — thousands-grouped integer +// formatting ("1,234,567"). Uses comma regardless +// of locale (ctx is English-only at present). +// +// # Design Choices +// +// - **Rune-aware truncation** — byte truncation +// would split multi-byte characters and produce +// mojibake. [TruncateFirstLine] counts runes. +// - **Stable break-points** — relative-time +// phrasing is deterministic per input, so a +// re-render after a small clock advance does not +// produce noisy diffs in journal output. 
+// - **No localization** — single-locale today; +// when localization arrives, the per-locale +// phrase tables will plug in here without +// changing call sites. +// +// # Concurrency +// +// All functions are pure. Concurrent callers never +// race. +// +// # Related Packages +// +// - [internal/cli/journal/core/source/format] — +// domain-specific formatters that compose these +// primitives. +// - [internal/write/*] — every +// terminal-output package consumes these +// formatters. +// - [internal/cli/system/core/nudge] — uses +// [TimeAgo] for "last seen 3 hours ago" lines in +// ceremony nudges. package format diff --git a/internal/hub/doc.go b/internal/hub/doc.go index 8bd6ef166..d1f3a2e07 100644 --- a/internal/hub/doc.go +++ b/internal/hub/doc.go @@ -4,17 +4,119 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package hub implements the ctx Hub server and client. +// Package hub implements the ctx Hub: a gRPC server that fans +// structured entries (decisions, learnings, conventions, tasks) +// across multiple ctx projects and the client primitives those +// projects use to talk to it. // -// The hub is a gRPC service that aggregates published entries -// (decisions, learnings, conventions) from multiple ctx instances -// and streams them to subscribers in real-time. +// The hub is deliberately small in scope. It is not a wiki, an +// audit log, or a multi-tenant service. It is a fan-out channel +// for the entries that should travel between trusted projects +// without dragging the rest of `.context/` along. // -// Storage is append-only JSONL. Auth is token-based (admin token -// for registration, per-client tokens for RPCs). Connection config -// is encrypted locally using AES-256-GCM via [internal/crypto]. +// # Architecture at a Glance // -// Key exports: [Store], [Entry], [Auth], [Server], [Client]. -// See source files for implementation details. -// Part of the internal subsystem. 
+// The package layers four concerns: +// +// - Storage [Store] append-only JSONL + sequence +// numbers + per-client tokens +// - Transport [Server] gRPC Register / Publish / Sync +// / Listen / Status RPCs +// - Cluster [Cluster] Raft *for leader election only* +// (see "Raft-Lite" below) +// - Client [Client] connection registration, sync +// catch-up, push streaming, and +// ordered-peer failover +// +// Auth, validation, and fan-out broadcast support the four pillars: +// +// - Auth [GenerateAdminToken], [GenerateClientToken], +// bearer-token authentication on every RPC. +// - Validate [ValidateEntry] enforces the entry schema and +// normalizes provenance fields. +// - Fan-out internal broadcaster delivers each new entry to +// all live Listen subscribers without coupling +// them. +// +// # Storage Model +// +// The store is **append-only JSONL** under a hub data directory: +// +// - entries.jsonl one [Entry] per line, monotonically +// sequence-numbered +// - clients.json registered client tokens + per-client +// subscription filters +// - meta.json schema version + admin token hash +// +// Sequence numbers make replication and resume strictly +// idempotent: a follower or a returning client only asks for +// "entries after seq N" and the leader streams the tail. Because +// the log is append-only, there is no "edit" operation to +// reconcile and no conflict resolution to perform. +// +// # Raft-Lite +// +// The package embeds HashiCorp Raft (see [Cluster] and the no-op +// [leaderFSM]) for **leader election only** — never for data +// consensus. Entry replication is performed independently via the +// sequence-based gRPC sync described above. +// +// The trade-off is explicit and documented in +// docs/recipes/hub-cluster.md: writes are durable on the leader +// at the moment they are accepted, and followers catch up +// asynchronously. If the leader crashes between accepting a +// write and replicating it, that write may be lost. 
We take that +// risk in exchange for a much simpler implementation than full +// Raft log replication, and it is sound because the store is +// append-only and clients are idempotent. +// +// # Trust Model +// +// The hub assumes every holder of a client token is friendly. +// Origin is **self-asserted** by the publishing client; there is +// no per-user attribution and no read ACL beyond subscription +// filters. This is by design — the hub serves "Story 1" (single +// developer, multiple projects) and "Story 2" (small trusted +// team) shapes, not public multi-tenant deployments. +// +// Hostile clients, untrusted networks, and compliance-grade audit +// trails are explicitly out of scope. See docs/security/hub.md +// for the threat-model write-up. +// +// # Concurrency +// +// [Store] guards its in-memory indexes and the appender with a +// single mutex; gRPC handlers serialize through it. Listen +// streams subscribe to a fan-out channel and receive entries in +// publish order; slow subscribers are dropped rather than +// blocking publishers (see [fanOutBuffer]). +// +// # Encryption +// +// Connection state on the client side (the hub address and +// per-client token) is encrypted at rest via the package +// [github.com/ActiveMemory/ctx/internal/crypto] using AES-256-GCM +// with the same per-machine key that protects [internal/pad]. +// +// # Related Packages +// +// - [internal/cli/hub] server-side CLI +// (start/stop/status/peer/stepdown) +// - [internal/cli/connection] client-side CLI +// (register/subscribe/sync/listen/ +// publish/status) +// - [internal/err/hub] typed error constructors used by +// this package +// - [internal/config/hub] protocol/runtime constants +// (ports, tokens prefixes, paths) +// +// # Key Exports +// +// Server side: [Store], [Server], [NewServer], [Cluster], +// [NewCluster], [GenerateAdminToken], [GenerateClientToken]. +// +// Client side: [Client], [Connect], [Publish], [Sync], [Listen], +// [Status], [Register]. 
+// +// Domain types: [Entry], [EntryMsg], [ClientRecord]. package hub diff --git a/internal/index/doc.go b/internal/index/doc.go index 9aab5f124..68a9270d2 100644 --- a/internal/index/doc.go +++ b/internal/index/doc.go @@ -4,10 +4,74 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package index generates and parses indexes for context file entries. +// Package index generates and maintains the **quick-reference +// index tables** at the top of `DECISIONS.md` and +// `LEARNINGS.md` — the markdown tables wrapped in +// `` / `` markers that +// list every entry by ID, date, and title in chronological +// order. // -// Key exports: [ParseEntryBlocks], [ParseHeaders], -// [GenerateTable], [Update], [UpdateDecisions]. -// See source files for implementation details. -// Part of the internal subsystem. +// The index is the affordance that lets `ctx agent` send a +// **token-cheap** version of `DECISIONS.md` / `LEARNINGS.md` +// to the AI: instead of injecting the full prose for hundreds +// of entries, it injects only the index table. The agent +// scans the table, decides which entries it needs, and asks +// for those by ID. +// +// # The Index Format +// +// Each index row mirrors one entry block in the source file: +// +// | ID | Date | Title | +// |----|------|-------| +// | L-43 | 2026-04-12 | Lock acquisition order in fanout | +// +// Entry blocks in the source follow a strict shape: +// +// ## [YYYY-MM-DD-HHMMSS] Title text here +// +// [ParseHeaders] extracts the date + title pair from each +// `## [...]` header. [ParseEntryBlocks] returns full block +// metadata — start/end line, ID, date, title, body — so +// callers can grep, render, or rewrite individual entries. +// +// # Updating in Place +// +// [GenerateTable] turns a parsed entry list into the full +// markdown index (table header + rows). 
+// [Update](path, newTable) finds the marker pair in the +// existing file and replaces only the content between them, +// leaving the rest of the file untouched. If the markers are +// missing, [Update] inserts them under the H1 heading so the +// next run becomes idempotent. [UpdateDecisions] and the +// matching [UpdateLearnings] are convenience wrappers that +// know the canonical file paths. +// +// # Supersession +// +// An entry can be marked **superseded** by a later one +// (a body line starting with `**Status**: Superseded by +// L-99`). The parser tags such entries so renderers can +// gray-out / sort the index accordingly. +// +// # Concurrency +// +// The package is filesystem-IO at the boundary, pure data +// in the middle. Callers serialize updates externally +// (typically by holding the `.context/` directory +// implicitly through process-level execution). +// +// # Related Packages +// +// - [internal/cli/decision], [internal/cli/learning] — +// the `ctx decision reindex` and `ctx learning reindex` +// commands that drive [Update]. +// - [internal/cli/reindex] — +// the `ctx reindex` convenience that runs both at once. +// - [internal/cli/agent] — +// consumes the index table to render the AI-ready +// short form of DECISIONS / LEARNINGS. +// - [internal/entity] — +// [IndexEntry], [GroupedIndex], [TopicData] domain +// types this package returns. package index diff --git a/internal/index/index.go b/internal/index/index.go index 4e24b894a..55af9f079 100644 --- a/internal/index/index.go +++ b/internal/index/index.go @@ -4,7 +4,6 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package index provides index generation and parsing for context files. 
package index import ( diff --git a/internal/journal/parser/doc.go b/internal/journal/parser/doc.go index c67213c89..5ed561cc5 100644 --- a/internal/journal/parser/doc.go +++ b/internal/journal/parser/doc.go @@ -4,10 +4,112 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package parser auto-detects and parses session files from multiple tools. +// Package parser auto-detects and parses AI-coding-assistant session +// transcripts from multiple tool formats into a single normalized +// [github.com/ActiveMemory/ctx/internal/entity.Session] type the rest +// of the journal pipeline can consume uniformly. // -// Key exports: [NewClaudeCode], [NewMarkdownSession], [ParseFile], -// [ScanDirectory], [ScanDirectoryWithErrors]. -// See source files for implementation details. -// Part of the journal subsystem. +// # Why a Parser Layer +// +// Every AI tool ctx integrates with stores its session history in a +// different on-disk format: +// +// - **Claude Code** writes one JSONL file per project under +// `~/.claude/projects//*.jsonl`, with multiple sessions +// interleaved by `sessionId` field. +// - **Copilot** (VS Code) keeps a binary-ish chunked store in +// the workspace state directory. +// - **Copilot CLI** writes a different, JSON-with-metadata layout +// under its own home tree. +// - **MarkdownSession** is the round-trip format ctx itself +// produces when an enriched journal entry is *re-imported* — it +// parses the YAML frontmatter + body that +// `ctx journal import` produced earlier. +// +// Downstream consumers (`ctx journal source`, `ctx journal import`, +// the journal site builder, the obsidian exporter) should never +// have to know which tool wrote a file. They get back +// `[]*entity.Session` and work with that. 
+// +// # Public Surface +// +// Three entry points cover the common use cases: +// +// - [ParseFile](path) — parse one file; returns all +// sessions it contains (a JSONL file may interleave many). +// - [ScanDirectory](dir) — recursively walk a tree, +// parse every parseable file, return sessions sorted +// newest-first; per-file errors are swallowed so one bad file +// does not abort the scan. +// - [ScanDirectoryWithErrors](dir) — same walk, but also +// returns a slice of (path, err) pairs for every parse failure +// so callers can surface them to the user. +// +// Tool-specific constructors ([NewClaudeCode], [NewCopilot], +// [NewCopilotCLI], [NewMarkdownSession]) are exported for callers +// that need to operate on a known format directly (tests, format +// converters, the schema validator). +// +// # Dispatch Mechanism +// +// All tool implementations satisfy the unexported `Session` +// interface (Tool, Matches, ParseFile, ParseLine). The package-level +// `registeredParsers` slice holds one instance of each. Dispatch is +// first-match-wins: [ParseFile] iterates the slice and asks each +// parser whether it `Matches(path)`. Implementations may check +// extension, directory shape, or peek at the first line — order in +// the slice matters when a file could plausibly match more than one +// (in practice, the four formats are disjoint). +// +// **Adding a new tool**: implement the four interface methods on a +// new type, then append a constructor call to `registeredParsers` +// in `parser.go`. No other changes are required. 
+// +// # Output Shape +// +// Every parser yields `*entity.Session` values populated with: +// +// - identity: ID, Slug, Tool, SourceFile +// - context: CWD, Project (basename of CWD), GitBranch +// - timing: StartTime, EndTime, Duration +// - content: a flat []Message in chronological order +// - rollups: TurnCount, FirstUserMsg (preview, truncated at +// [config/session.PreviewMaxLen]) +// +// [ScanDirectory] sorts the aggregated slice by `StartTime` +// descending so the most recent session lands at index 0 — the +// invariant the journal CLI and site generator both rely on. +// +// # Error Handling +// +// Errors fall into three buckets: +// +// - **No matching parser**: [ParseFile] returns +// [internal/err/parser.NoMatch] when no registered parser claims +// the file. Callers should treat this as "skip", not "fail" — +// the directory may legitimately contain unrelated files. +// - **Per-file parse errors**: malformed JSON, truncated stream, +// unexpected schema. [ScanDirectory] swallows these silently; +// [ScanDirectoryWithErrors] surfaces them paired with the path +// for the caller to log. +// - **Filesystem errors**: walk-time IO errors (permission, +// device) are returned directly from the Scan functions and +// terminate the walk. +// +// # Concurrency +// +// Parsers are stateless; the `registeredParsers` slice is read-only +// after package init. A single parser instance is reused across all +// calls. Concurrent [ParseFile] / [ScanDirectory] calls are safe. 
+// +// # Related Packages +// +// - [github.com/ActiveMemory/ctx/internal/journal] pipeline +// orchestration: import, lock, sync, site/obsidian +// - [github.com/ActiveMemory/ctx/internal/journal/state] tracks +// which raw sources have been imported / enriched +// - [github.com/ActiveMemory/ctx/internal/journal/schema] JSONL +// schema validation for raw session files +// - [github.com/ActiveMemory/ctx/internal/entity] the +// Session and Message types this package produces package parser diff --git a/internal/journal/parser/types.go b/internal/journal/parser/types.go index 45b7db557..6498e0692 100644 --- a/internal/journal/parser/types.go +++ b/internal/journal/parser/types.go @@ -4,12 +4,6 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package parser provides JSONL session file parsing for the recall system. -// -// It parses AI coding assistant session transcripts into structured Go types -// that can be rendered, searched, and analyzed. The package -// uses a tool-agnostic -// Session output type with tool-specific parsers (e.g., ClaudeCode). package parser import ( diff --git a/internal/journal/schema/doc.go b/internal/journal/schema/doc.go index ee4b87152..b1bf8f468 100644 --- a/internal/journal/schema/doc.go +++ b/internal/journal/schema/doc.go @@ -1,19 +1,78 @@ // / ctx: https://ctx.ist // ,'`./ do you remember? -// `.,'\\ +// `.,'\ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package schema validates Claude Code JSONL session files. -// -// Claude Code stores sessions as JSONL files with an undocumented, -// unversioned format that changes across releases. This package -// defines the expected record shape (known fields, record types, -// content block types) derived from empirical analysis, and -// validates raw lines against it to detect drift. 
-// -// Validation is strictly informational: it accumulates findings -// into a Collector but never blocks imports or other operations. -// Findings include unknown fields, missing required fields, -// unknown record types, and unknown content block types. +// Package schema is ctx's defense against the **silent +// drift** of Claude Code's session-file format. +// +// Claude Code stores sessions as JSONL files under +// `~/.claude/projects//` with an **undocumented, +// unversioned** record format that changes between +// releases. There is no schema URL, no version tag, no +// compatibility commitment. The only way to know whether +// a new Claude Code release added a field, removed a type, +// or quietly renamed a property is to compare empirical +// reality to a frozen reference shape — which is what this +// package does. +// +// # The Reference Shape +// +// [schema.go] declares the **expected** record shape +// derived from analysis of real session files: the set of +// known top-level fields, the set of known record `type` +// values, the set of known content-block types within +// `assistant` records, and the per-type required-field +// list. +// +// [build.go] / [check.go] / [validate.go] walk an actual +// JSONL file and accumulate findings into a [Collector]: +// +// - **Unknown fields** — a key the reference shape does +// not list (Claude added a property). +// - **Missing required fields** — a key the reference +// shape requires but the record omits (Claude +// removed a property; we may now silently drop data +// downstream). +// - **Unknown record types** — a `type` value not in +// the reference set (a new record kind appeared). +// - **Unknown content block types** — same, but for +// content blocks inside `assistant` records. +// +// # Strictly Informational +// +// Validation **never blocks** an import or other +// operation. 
Findings flow into [Report] which formats a +// markdown drift report consumed by `ctx doctor` and the +// release-prep runbook. The intent is "tell me when the +// upstream shape moved so I can update the parser", not +// "refuse to ingest anything we have not pre-blessed". +// +// # Updating the Reference +// +// When a new Claude Code release introduces fields the +// drift report flags: +// +// 1. Inspect the new records to confirm semantics. +// 2. Update the reference declarations in [schema.go]. +// 3. Update [internal/journal/parser] if the new +// fields carry session-relevant data. +// 4. Add a learning to LEARNINGS.md so the change is +// not repeated when reviewing the next release. +// +// # Concurrency +// +// All exported functions are pure data transformations +// over byte slices and `[]Finding`. Concurrent callers +// never race. +// +// # Related Packages +// +// - [internal/journal/parser] — the actual session +// parser; this package is its **canary**. +// - [internal/cli/journal/cmd/schema] — the +// `ctx journal schema` CLI surface. +// - [internal/entity] — finding and report +// types. package schema diff --git a/internal/journal/state/doc.go b/internal/journal/state/doc.go index 5d9248d4c..ed4335da5 100644 --- a/internal/journal/state/doc.go +++ b/internal/journal/state/doc.go @@ -4,9 +4,72 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package state manages journal processing state via an external JSON file. +// Package state manages the **journal processing state** +// stored in `.context/journal/.state.json` — a denormalized +// JSON index that tracks, for each raw session source, where +// it is in the import → normalize → enrich → wrap pipeline +// and whether it is locked against re-import. // -// Key exports: [Load], [ValidStages]. -// See source files for implementation details. -// Part of the journal subsystem. 
+// # Why an External State File +// +// The original design embedded markers in the journal files +// themselves (``, etc.). That broke +// when a journal entry's body legitimately contained one of +// those marker strings — the parser saw a false positive and +// concluded the entry had been processed when it had not. +// +// Moving state out of the file body fixes the false-positive +// problem and gives the importer a fast index it can scan +// without parsing every entry. +// +// # The State File Shape +// +// `.state.json` is a `map[sourceID]Record` where each +// [Record] tracks: +// +// - **stage** — current pipeline stage (one of +// [ValidStages]: imported, normalized, enriched, +// wrapped, indexed). +// - **locked** — true when the entry is protected +// from re-import regeneration. +// - **part** — for multipart entries, which part this +// record refers to. +// - **filename** — the on-disk filename the importer +// produced for this source. +// +// # Public Surface +// +// - **[Load](journalDir)** — reads `.state.json` and +// returns the deserialized map. Returns an empty +// map (not an error) when the file is missing — +// fresh projects have no state yet. +// - **[ValidStages]** — canonical stage names in +// pipeline order. Stage advancement is forward-only +// in normal flow; re-import (`--regenerate`) resets +// to "imported". +// +// # Sync With Frontmatter +// +// Frontmatter is the source of truth for `locked:`; the +// state file is a denormalized cache. `ctx journal sync` +// reconciles drift between the two so users who edit +// frontmatter directly see the importer respect the +// change on next run. +// +// # Concurrency +// +// File reads are scoped per call. Writes from the +// importer use the atomic-rename pattern so a partial +// write never produces a malformed JSON file. +// +// # Related Packages +// +// - [internal/cli/journal/cmd/importer] — the chief +// reader/writer. 
+// - [internal/cli/journal/core/lock] — the locking +// helpers; mutate frontmatter and re-sync state. +// - [internal/cli/journal/cmd/sync] — explicit +// reconciliation between frontmatter and state. +// - [internal/cli/system/cmd/check_journal] — reads +// state to count unimported sources. package state diff --git a/internal/journal/state/state.go b/internal/journal/state/state.go index e853fed7d..e9610c4fe 100644 --- a/internal/journal/state/state.go +++ b/internal/journal/state/state.go @@ -4,11 +4,6 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package state manages journal processing state via an external JSON file. -// -// Instead of embedding markers () inside journal -// files, which causes false positives when journal content includes those -// exact strings, state is tracked in .context/journal/.state.json. package state import ( diff --git a/internal/log/event/doc.go b/internal/log/event/doc.go index 9ab7ba4c9..e3d03d7bf 100644 --- a/internal/log/event/doc.go +++ b/internal/log/event/doc.go @@ -1,17 +1,72 @@ // / ctx: https://ctx.ist // ,'`./ do you remember? -// `.,'\\ +// `.,'\ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package event provides JSONL event logging for hook lifecycle tracking. +// Package event implements the **JSONL hook event log** — +// the append-only on-disk record of every hook lifecycle +// event ctx generates so users can inspect, audit, or +// timeline what happened in a session. // -// [Append] writes timestamped entries to a rotating JSONL log file in -// the context state directory. [Query] reads entries back with optional -// filters for hook name, session ID, and count limits. Log rotation -// happens automatically when the file exceeds a size threshold. +// Two pieces of the system depend on it: // -// Key exports: [Append], [Query]. 
-// Used by hook handlers and the event query CLI to persist and retrieve
-// lifecycle events.
+// - **`ctx hook event`** — user-facing query: "what did
+// the hooks do during the last session?".
+// - **`ctx system check_persistence`** and friends — read
+// the log to detect "you committed but never wrote a
+// decision" patterns and nudge accordingly.
+//
+// # On-Disk Format
+//
+// The log lives at `.context/state/events.jsonl` and is
+// **append-only JSONL**: one [Event] per line, written via
+// [Append], rotated to `events.1.jsonl` when the file
+// exceeds [config/event.LogMaxBytes] (1 MiB). At most one
+// rotation generation is kept; older history is discarded.
+//
+// # Opt-In
+//
+// Logging is **disabled by default** — many users do not
+// want hook activity persisted. [Append] is a noop when
+// `event_log: false` in `.ctxrc`; setting it to `true`
+// activates collection. The `ctx hook event` query
+// gracefully reports "no events recorded" when the file is
+// missing.
+//
+// # The Query Surface
+//
+// [Query](opts) reads both `events.jsonl` and the rotated
+// `events.1.jsonl` (in chronological order), then applies
+// the filters from [entity.EventQueryOpts]:
+//
+// - **Hook** — match a specific hook name
+// (e.g. `check-persistence`).
+// - **Session** — match a session ID prefix.
+// - **Event** — match an event-type tag (`fired`,
+// `relayed`, `blocked`, …).
+// - **Last N** — keep only the most recent N matches
+// (default [config/event.DefaultLast] = 50).
+//
+// # Concurrency
+//
+// [Append] opens the log with O_APPEND, so POSIX
+// positions each write atomically at end-of-file, and
+// each log line is emitted in a single small write —
+// in practice concurrent appenders interleave whole
+// lines rather than tearing them. (The PIPE_BUF
+// atomicity bound formally applies to pipes, not
+// regular files, hence "in practice".)
+// [Query] reads a snapshot of the file; concurrent
+// appends mid-read are tolerated (the worst case is a
+// half-written final line that the JSONL decoder skips).
+// +// # Related Packages +// +// - [internal/cli/event] — the `ctx hook +// event` CLI surface. +// - [internal/config/event] — file names, +// rotation threshold, default-last constant. +// - [internal/cli/system/cmd/check_persistence] and +// other `check_*` hooks — append events at fire time +// and read them at evaluation time. +// - [internal/entity] — [EventQueryOpts] +// filter struct. package event diff --git a/internal/mcp/handler/doc.go b/internal/mcp/handler/doc.go index e3dbe3f4c..f9951bb40 100644 --- a/internal/mcp/handler/doc.go +++ b/internal/mcp/handler/doc.go @@ -4,10 +4,113 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package handler contains domain logic for MCP tool operations. +// Package handler holds the **domain logic** behind every MCP +// (Model Context Protocol) tool that ctx exposes to Claude +// Code and other MCP-compatible clients. // -// Functions accept typed Go parameters and return (string, error) pairs. -// The server package handles JSON-RPC protocol translation, argument -// extraction from MCP maps, and response wrapping. This separation -// keeps domain logic testable without protocol coupling. +// The package is intentionally protocol-free. Every exported +// function takes typed Go parameters (a `*entity.MCPDeps`, a +// path, a string, a struct) and returns `(string, error)` — +// the formatted user-facing reply and a Go error. The sister +// package [internal/mcp/server] handles JSON-RPC framing, +// argument extraction from `map[string]any`, and response +// wrapping. This split keeps the domain logic +// **unit-testable without standing up a server** and makes it +// reusable from non-MCP callers (notably the CLI's +// `ctx agent`). +// +// # The Tool Surface +// +// The functions in [tool.go] correspond one-to-one with the +// MCP tools advertised by the server. 
A non-exhaustive +// inventory: +// +// - [Status] — context summary (file list, +// token counts, drift signals). +// - **`ctx_add`** — add a task / decision / +// learning / convention. +// - **`ctx_complete`** — flip a task from `[ ]` to +// `[x]` via [taskComplete]. +// - **`ctx_compact`** — invoke [tidy] to archive +// done work. +// - **`ctx_drift`** — run [drift.Detect] and +// render the report. +// - **`ctx_journal_source`** — list raw session +// transcripts via [journal/parser]. +// - **`ctx_search`** — text search across context +// files via [internal/entry]. +// - **`ctx_remind`** — read/dismiss reminders via +// [remindStore]. +// - **`ctx_session_*`** — `session_start`, +// `session_end`, `session_event` lifecycle plumbing +// (covered in [session_hooks.go]). +// - **`ctx_steering_get`** — surface matched steering +// files via [steering.go] (see [internal/steering]). +// - **`ctx_check_task_completion`** — match recent file +// edits to open tasks. +// - **`ctx_watch_update`** — apply context updates the +// agent emits in `` blocks. +// +// Each function loads context fresh via [load.Do] when it +// needs current state — there is no per-tool cache. This +// keeps the response correct after edits the agent itself +// just made. +// +// # Governance — The Append-on-Every-Reply Layer +// +// [governance.go] implements the **governance trailer**: +// short, structured warnings that ride along with every MCP +// reply when the session has accumulated overdue work. +// [CheckGovernance] is invoked by the server **after** the +// tool has produced its answer; it consults the per-session +// state on `entity.MCPDeps`, drains the VS Code extension's +// violations file ([violations.go]), and assembles a +// newline-separated banner of nudges to append. +// +// The function is a free function rather than a method on +// `MCPSession` precisely because it does I/O (reading the +// violations file). 
`toolName` is passed in so the function +// can suppress redundant warnings — e.g. the drift warning +// is not appended to a `ctx_drift` response, since the user +// is already looking at it. +// +// # Violations Drain +// +// The Claude Code VS Code extension records hook-detected +// violations to a JSON file under the context dir. The +// handler reads it with [readViolations], surfaces the +// entries, and **truncates the file** so each violation +// surfaces exactly once. The JSON shape is +// [violationsData] / [violation]. +// +// # Session Hooks +// +// [session_hooks.go] implements the three lifecycle tools +// (`session_start`, `session_end`, `session_event`) the MCP +// client calls to mark transitions. They write to per-session +// state files under `state/` and emit nudge messages when the +// configured ceremonies have been skipped. +// +// # Concurrency +// +// Handler functions are reentrant; they hold no module-level +// state. Per-session state lives on [entity.MCPDeps] (passed +// in by the server) and on the per-session files in `state/`, +// which are written through the package's own append helpers. +// +// # Related Packages +// +// - [github.com/ActiveMemory/ctx/internal/mcp/server] — +// JSON-RPC framing, tool dispatch, response wrapping. +// - [github.com/ActiveMemory/ctx/internal/mcp/proto] — +// wire-protocol types. +// - [github.com/ActiveMemory/ctx/internal/entity] — +// `MCPDeps`, `MCPSession`, and the domain types these +// handlers operate on. +// - [github.com/ActiveMemory/ctx/internal/context/load] — +// reads and assembles the loaded [entity.Context]. +// - [github.com/ActiveMemory/ctx/internal/drift], +// [internal/tidy], [internal/journal/parser], +// [internal/steering] — domain packages this handler +// orchestrates. 
package handler diff --git a/internal/mcp/proto/doc.go b/internal/mcp/proto/doc.go index 6f501ae88..f1e42d289 100644 --- a/internal/mcp/proto/doc.go +++ b/internal/mcp/proto/doc.go @@ -4,9 +4,65 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package proto defines JSON-RPC 2.0 message types and MCP protocol. +// Package proto defines the **JSON-RPC 2.0** wire types and +// the **MCP** (Model Context Protocol) extension types ctx +// speaks with Claude Code and other MCP-compatible clients. // -// Part of the MCP server (JSON-RPC 2.0 over stdin/stdout). -// Provides constants and definitions for proto operations. -// Part of the MCP server (JSON-RPC 2.0 over stdin/stdout). +// The package is wire-protocol-only: structs with `json:` +// tags, sentinel constants for method names and error codes, +// no logic. Domain behavior lives in +// [internal/mcp/handler]; transport in [internal/mcp/server]. +// +// # JSON-RPC 2.0 +// +// - **[Request]** — `jsonrpc`, `id`, `method`, +// `params`. `id` may be string, number, or null per +// the spec. +// - **[Response]** — `jsonrpc`, `id`, plus exactly +// one of `result` / `error`. +// - **[Error]** — `code`, `message`, optional `data`. +// The standard error codes ([CodeParseError], +// [CodeInvalidRequest], etc.) are exported as +// constants. +// - **[Notification]** — `Request` without an `id`, +// used for one-way messages (logging, progress). +// +// # MCP Extensions +// +// MCP layers these methods on top of JSON-RPC: +// +// - **`tools/list` / `tools/call`** — for tool +// dispatch. +// - **`prompts/list` / `prompts/get`** — for +// server-curated prompts. +// - **`resources/list` / `resources/read` / +// `resources/subscribe`** — for server-exposed +// resources. +// +// Each method has a typed request and response struct +// in this package: [ToolsCallRequest], +// [ToolsCallResponse], [Tool], [PromptsGetResponse], +// etc. 
+// +// # Stability +// +// The wire shape is fixed by external specifications +// (JSON-RPC 2.0 and the MCP spec). Changes here +// require coordinated client updates and should not +// happen casually. The audit suite watches for +// accidental field renames. +// +// # Concurrency +// +// All exports are immutable types. Encoding / +// decoding is goroutine-safe at the +// `encoding/json` boundary. +// +// # Related Packages +// +// - [internal/mcp/server] — encodes and decodes +// these types; calls into [internal/mcp/handler] +// for the result. +// - [internal/mcp/handler] — produces the typed +// payloads the server marshals into [Response]. package proto diff --git a/internal/mcp/server/def/tool/doc.go b/internal/mcp/server/def/tool/doc.go index e54a06e4e..aa5f549d9 100644 --- a/internal/mcp/server/def/tool/doc.go +++ b/internal/mcp/server/def/tool/doc.go @@ -1,12 +1,56 @@ // / ctx: https://ctx.ist // ,'`./ do you remember? -// `.,'\\ +// `.,'\ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package tool defines MCP tool definitions and shared property builders. +// Package tool defines the **MCP tool catalog** ctx +// advertises in `tools/list` responses: every callable +// MCP tool's schema, parameter definitions, and the +// shared property builders that keep parameter shapes +// consistent across tools. // -// Part of the MCP server (JSON-RPC 2.0 over stdin/stdout). -// Exports: [MergeProps], [EntryAttrProps], [Defs]. -// Exports: [MergeProps], [EntryAttrProps]. +// The package is the *catalog declaration*; the +// dispatch is in [internal/mcp/server/route/tool] and +// the actual logic is in [internal/mcp/handler]. +// +// # Public Surface +// +// - **[Defs]** — the slice of tool definitions +// advertised in `tools/list`. Each definition +// carries a name, description, JSON-schema +// parameters, and an "annotations" map for +// UI hints. 
+// - **[MergeProps](base, extra)** — composes two +// property maps so a tool can layer its +// specific arguments on top of the shared +// entry-attribute boilerplate. +// - **[EntryAttrProps]** — the canonical property +// map shared by `ctx_add` variants (priority, +// branch, commit, session-id, etc.) so the +// four entry-add tools have an identical +// argument shape. +// +// # Why a Definitions Package +// +// MCP clients consume `tools/list` once at session +// start and cache the schemas. Centralizing the +// declarations makes the surface stable across +// versions: dispatch and handler refactors do not +// change what the client sees. +// +// # Concurrency +// +// All exports are immutable. Safe for concurrent +// reads. +// +// # Related Packages +// +// - [internal/mcp/server/route/tool] — the +// dispatcher that maps `tools/call` to the +// right handler. +// - [internal/mcp/handler] — the +// handlers tools dispatch to. +// - [internal/mcp/proto] — the +// wire-protocol types. package tool diff --git a/internal/mcp/server/dispatch/poll/doc.go b/internal/mcp/server/dispatch/poll/doc.go index d67accb84..bf25b31d4 100644 --- a/internal/mcp/server/dispatch/poll/doc.go +++ b/internal/mcp/server/dispatch/poll/doc.go @@ -1,12 +1,54 @@ // / ctx: https://ctx.ist // ,'`./ do you remember? -// `.,'\\ +// `.,'\ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package poll watches subscribed resources for file changes and. +// Package poll implements the **resource-change watcher** +// behind the MCP `resources/subscribe` notification. When +// a client subscribes to one or more `.context/` files, +// this package polls their mtimes and emits a +// `notifications/resources/updated` JSON-RPC message +// when any of them changes. // -// Part of the MCP server (JSON-RPC 2.0 over stdin/stdout). -// Exports: [NewPoller]. -// Exports: [NewPoller]. 
+// MCP supports change notifications, but the underlying +// substrate (a polling watcher) is opaque to the client. +// This package is that substrate. +// +// # Public Surface +// +// - **[NewPoller](paths, intervalMs)** — builds a +// poller for the given file paths with the +// given polling interval. +// - **Poller methods** — `Start(ctx, ch)` to +// begin emitting change events on `ch`, +// `Stop()` to cease, `Update(paths)` to swap +// the watch set without restart. +// +// # Why Polling, Not fsnotify +// +// Polling at ~1 Hz is reliable across every +// platform ctx supports (Linux, macOS, Windows) +// without per-platform watcher quirks (file +// renames, fsync timing, cross-FS edge cases). +// MCP's notification cadence does not need +// sub-second precision; "saw a change within a +// second" is enough. +// +// # Concurrency +// +// `Start` spawns a single goroutine that ticks +// on the configured interval; `Stop` signals it +// via context cancellation. Concurrent calls to +// `Update` are serialized through the poller's +// mutex. +// +// # Related Packages +// +// - [internal/mcp/server/resource] — the +// `resources/subscribe` handler that creates +// a poller per subscription. +// - [internal/mcp/proto] — the +// `notifications/resources/updated` message +// type emitted by the poller. package poll diff --git a/internal/mcp/server/doc.go b/internal/mcp/server/doc.go index 8b5347c83..7f72b15a9 100644 --- a/internal/mcp/server/doc.go +++ b/internal/mcp/server/doc.go @@ -1,12 +1,98 @@ // / ctx: https://ctx.ist // ,'`./ do you remember? -// `.,'\\ +// `.,'\ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package server implements the MCP server that exposes ctx context. 
+// Package server implements the **Model Context Protocol +// (MCP) server** that exposes ctx context, commands, and +// session-lifecycle hooks to MCP-compatible AI clients — +// primarily Claude Code, but also any other tool that speaks +// the same JSON-RPC 2.0 dialect. // -// Part of the MCP server (JSON-RPC 2.0 over stdin/stdout). -// Exports: [New]. -// Exports: [New]. +// The server runs over **stdin/stdout** as a sub-process +// launched by the AI client; it does not bind a network port. +// Spawn behavior is configured by the client's MCP block (see +// [internal/cli/setup] for what `ctx setup` writes into each +// tool's config). +// +// # Wire Protocol +// +// MCP is JSON-RPC 2.0 with three core verbs ctx implements: +// +// - **`tools/list`** — advertise the catalog of MCP tools +// this server provides (`ctx_status`, `ctx_add`, +// `ctx_complete`, `ctx_drift`, `ctx_journal_source`, +// `ctx_search`, `ctx_steering_get`, `ctx_remind`, +// `ctx_session_*`, `ctx_check_task_completion`, +// `ctx_watch_update`). +// - **`tools/call`** — invoke one tool with a typed +// arguments map. +// - **`prompts/list` / `prompts/get`** — surface +// ctx-curated prompts (e.g. the session-start +// ceremony prompt) as first-class MCP prompts. +// +// Wire types live in [internal/mcp/proto]; this package +// concerns itself with **dispatch** and **state**. +// +// # Architecture +// +// The package layers four sub-concerns: +// +// - **[New]** — constructs a server bound to a +// [entity.MCPDeps] (paths, runtime config). The +// server is single-threaded by design — Claude Code +// spawns one sub-process per session and does not +// pipeline requests. +// - **Routing** — [route/tool], [route/prompt], +// [route/resource] register handlers per MCP verb. +// - **Dispatch** — [dispatch/poll] reads one +// JSON-RPC message at a time from stdin and routes +// it to the right handler. 
+// - **Catalog** ([catalog/data.go]) — the static +// tool/prompt/resource definitions surfaced via +// `*/list` calls. +// +// All actual domain logic — what `ctx_drift` *does*, what +// `ctx_search` returns — lives in [internal/mcp/handler]. +// This package is the protocol-aware shell around it. +// +// # Per-Session State +// +// Each running server instance owns one +// [entity.MCPSession] (turn counter, last-loaded context +// snapshot, governance flags). The session is created on +// the first `tools/call` and persists for the lifetime of +// the sub-process. The handler layer reads/mutates it +// through [entity.MCPDeps]. +// +// # Governance Trailers +// +// After every `tools/call`, the dispatcher invokes +// [internal/mcp/handler.CheckGovernance] to append any +// session-overdue nudges (drift, persistence, journal +// import) to the response. The trailers are appended +// inside the JSON-RPC `result` envelope so they reach the +// AI without changing the protocol shape. +// +// # Concurrency +// +// One goroutine reads from stdin; one goroutine writes +// to stdout; tool dispatch runs in the read goroutine +// to preserve request ordering. Long-running tools +// (currently none) would need to spawn a goroutine and +// signal completion through a channel. +// +// # Related Packages +// +// - [internal/mcp/handler] — domain logic for every +// tool the server exposes. +// - [internal/mcp/proto] — JSON-RPC + MCP wire +// types. +// - [internal/cli/mcp] — the `ctx mcp` CLI +// entry point that boots an instance. +// - [internal/cli/setup] — writes per-tool MCP +// config blocks that spawn this server. +// - [internal/entity] — [MCPDeps], [MCPSession] +// domain types. 
package server diff --git a/internal/mcp/server/route/prompt/doc.go b/internal/mcp/server/route/prompt/doc.go index ce772f3b4..0b2269f6a 100644 --- a/internal/mcp/server/route/prompt/doc.go +++ b/internal/mcp/server/route/prompt/doc.go @@ -1,12 +1,47 @@ // / ctx: https://ctx.ist // ,'`./ do you remember? -// `.,'\\ +// `.,'\ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package prompt dispatches MCP prompt list and get requests to the. +// Package prompt is the **MCP `prompts/*` dispatcher** — +// the layer that takes a JSON-RPC request, validates +// the parameters, and routes to the right handler in +// [internal/mcp/handler] (or, for static catalog +// queries, returns the cached catalog directly). // -// Part of the MCP server (JSON-RPC 2.0 over stdin/stdout). -// Exports: [DispatchList], [DispatchGet]. -// Exports: [DispatchList], [DispatchGet]. +// MCP exposes two prompt RPCs: +// +// - **`prompts/list`** — return the catalog of +// server-curated prompts the client may invoke. +// - **`prompts/get`** — render one prompt by name +// with its argument values filled in. +// +// # Public Surface +// +// - **[DispatchList](req, deps)** — handles +// `prompts/list`. Returns the static catalog +// of ctx-curated prompts (session-start +// ceremony, decision-add wizard, etc.). +// - **[DispatchGet](req, deps)** — handles +// `prompts/get`. Validates the requested +// prompt name + arguments, calls into +// [internal/mcp/handler] for the rendering. +// +// # Concurrency +// +// Each request runs in the read goroutine of +// [internal/mcp/server]; concurrent requests +// against the same `MCPDeps` are sequential by +// MCP design. +// +// # Related Packages +// +// - [internal/mcp/server] — owner of the +// dispatch loop. +// - [internal/mcp/handler] — domain +// logic for prompt rendering. +// - [internal/mcp/server/def/tool] — sister +// package for tool catalog declarations. +// - [internal/mcp/proto] — wire types. 
package prompt diff --git a/internal/mcp/server/route/tool/doc.go b/internal/mcp/server/route/tool/doc.go index 135a03bc3..0875896a8 100644 --- a/internal/mcp/server/route/tool/doc.go +++ b/internal/mcp/server/route/tool/doc.go @@ -1,12 +1,61 @@ // / ctx: https://ctx.ist // ,'`./ do you remember? -// `.,'\\ +// `.,'\ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package tool dispatches MCP tool list and call requests to the. +// Package tool is the **MCP `tools/*` dispatcher** — the +// layer that takes a JSON-RPC `tools/list` or +// `tools/call` request, validates parameters against +// the per-tool schema in [internal/mcp/server/def/tool], +// and routes to the right handler in +// [internal/mcp/handler]. // -// Part of the MCP server (JSON-RPC 2.0 over stdin/stdout). -// Exports: [DispatchList], [DispatchCall]. -// Exports: [DispatchList], [DispatchCall]. +// # Public Surface +// +// - **[DispatchList](req, deps)** — returns the +// full tool catalog from +// [internal/mcp/server/def/tool.Defs]. +// - **[DispatchCall](req, deps)** — extracts the +// tool name and arguments map from the JSON-RPC +// params, dispatches to the matching handler, +// wraps the handler's `(string, error)` return +// into the MCP response envelope, then runs +// [handler.CheckGovernance] to append any +// overdue-work nudges. +// +// # Argument Extraction +// +// MCP tool arguments arrive as `map[string]any` +// (raw JSON). This package owns the typed +// extraction (`mustString`, `optionalInt`, etc.) +// so the handlers see typed Go values, not +// `any`. +// +// # Error Mapping +// +// Handler errors map to JSON-RPC error codes: +// +// - **InvalidParams** — typed validation errors. +// - **InternalError** — anything else. +// +// The original error message is included in the +// `data` field so the client can surface it to +// the user / agent. +// +// # Concurrency +// +// Sequential per server instance; see +// [internal/mcp/server]. 
+//
+// # Related Packages
+//
+// - [internal/mcp/server] — owner of
+// the dispatch loop.
+// - [internal/mcp/handler] — handler
+// functions tools dispatch to; also the
+// governance trailer.
+// - [internal/mcp/server/def/tool] — the tool
+// catalog this dispatcher consumes.
+// - [internal/mcp/proto] — wire types.
 package tool
diff --git a/internal/memory/doc.go b/internal/memory/doc.go
index ed984259e..34f641ec6 100644
--- a/internal/memory/doc.go
+++ b/internal/memory/doc.go
@@ -4,17 +4,87 @@
 // \ Copyright 2026-present Context contributors.
 // SPDX-License-Identifier: Apache-2.0
-// Package memory bridges Claude Code's auto memory (MEMORY.md) into
-// the .context/ directory with discovery, mirroring, and drift detection.
+// Package memory bridges Claude Code's per-project **auto memory**
+// file (`MEMORY.md`) into a project's `.context/` directory so that
+// memory written by Claude — outside the repo, under
+// `~/.claude/projects/<slug>/memory/` — becomes git-tracked,
+// version-controlled, drift-checkable, and importable into the
+// structured context files (DECISIONS / LEARNINGS / CONVENTIONS).
 //
-// Claude Code maintains per-project auto memory at
-// ~/.claude/projects//memory/MEMORY.md. This package locates that
-// file from the project root, mirrors it into .context/memory/mirror.md
-// (git-tracked), and archives previous versions before each sync.
+// # The Problem It Solves
 //
-// Discovery encodes the project root path into the Claude Code slug
-// format: absolute path with "/" replaced by "-", prefixed with "-".
+// Claude Code's auto memory lives at
+// `~/.claude/projects/<slug>/memory/MEMORY.md`. That path:
 //
-// Sync state is tracked in .context/state/memory-import.json to support
-// drift detection and future import/publish phases.
+// - is **not** in the project repo (no peer review, no git +// history, no diff-on-PR); +// - is **per-machine** (a teammate working on the same project +// sees a different file); +// - **silently grows** as Claude takes notes during sessions, +// drifting from `.context/` content over time. +// +// This package makes that file a first-class citizen. +// +// # Pipeline Stages +// +// Stages are wired to the `ctx memory` CLI surface: +// +// - **discover** ([Discover], [discover.go]) — encodes the +// project root path into Claude Code's slug format +// (absolute path, `/` → `-`, `-` prefix) and resolves the +// auto-memory file. Returns an actionable error if the +// auto-memory file does not exist. +// - **status** — reports whether the source file exists, +// when the last sync happened, and whether drift is +// present (size or content). +// - **sync** ([Mirror], [mirror.go]) — copies the source +// into `.context/memory/mirror.md`. Previous mirror is +// archived under `.context/memory/archive/.md` before +// overwrite, so history is preserved. +// - **diff** ([diff.go]) — line-level diff between source +// and mirror; surfaces what Claude wrote since the last +// sync. +// - **import** ([extract.go], [classify.go]) — parses the +// mirror into discrete entries and routes each one to the +// matching `.context/` file based on keyword heuristics +// ([Classify]); rules come from `.ctxrc.classify_rules` +// with built-in fallbacks. The user gets a preview before +// anything is written. +// - **publish** ([publish.go], [promote.go]) — the inverse +// direction: promotes a `.context/` entry into the +// auto-memory file so future Claude sessions get it +// up front. +// +// # State Tracking +// +// Sync and import state lives in +// `.context/state/memory-import.json` (see [state.go]): +// last-synced timestamps, last-imported entry hashes, and the +// drift signal computed from comparing source-vs-mirror file +// sizes. 
The drift hook +// (`internal/cli/system/cmd/check_memory_drift`) reads this +// state and nudges the user when `MEMORY.md` has changed since +// last sync. +// +// # Concurrency and Idempotency +// +// All operations are **read or write-once** — no long-lived +// goroutines. [Mirror] is idempotent: an unchanged source +// produces no archive entry and no mirror write. [Discover] +// caches its result in process memory but the cache is +// keyed on `projectRoot`, so different projects do not +// collide. +// +// # Related Packages +// +// - [internal/cli/memory] — the `ctx memory` CLI surface +// (status, sync, diff, import, publish, unpublish). +// - [internal/config/memory] — slug-format, path constants, +// classification-rule schema. +// - [internal/drift] — consumes the drift state to +// produce the user-facing nudge. +// - [internal/cli/system/cmd/check_memory_drift] — the +// hook that fires the nudge. +// - [internal/write/memory] — terminal output for the CLI +// subcommands. package memory diff --git a/internal/notify/doc.go b/internal/notify/doc.go index 4cb2de627..fc0105725 100644 --- a/internal/notify/doc.go +++ b/internal/notify/doc.go @@ -4,10 +4,86 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package notify provides fire-and-forget webhook notifications with. +// Package notify implements **fire-and-forget webhook +// notifications**: ctx posts a small JSON payload to a +// user-configured URL when something interesting happens +// (loop completion, hook nudge, version mismatch, key-rotation +// reminder, etc.) and never blocks the caller waiting for the +// response. // -// Key exports: [LoadWebhook], [SaveWebhook], -// [EventAllowed], [Send], [PostJSON]. -// See source files for implementation details. -// Part of the internal subsystem. 
+// The package is what backs `ctx hook notify`, +// `ctx hook notify setup`, and `ctx hook notify test` on the +// CLI side, plus the in-process callers like the autonomous +// loop runner. +// +// # End-to-End Flow +// +// 1. **Setup** ([SaveWebhook]) encrypts a webhook URL with +// AES-256-GCM ([internal/crypto]) and writes +// `.context/.notify.enc`. The same per-machine key +// protects the scratchpad — a fresh key is generated and +// saved on first use if none exists. +// 2. **Send** ([Send]) loads + decrypts the URL via +// [LoadWebhook], gates on the configured event filter +// via [EventAllowed], builds an [entity.NotifyPayload], +// and ships it to [PostJSON]. +// 3. **PostJSON** does the actual HTTP — short timeout, +// `Content-Type: application/json`, single attempt, no +// retry. The intent is "best-effort signal", not "guaranteed +// delivery". +// +// All three functions return cleanly when nothing is +// configured: `("", nil)` from [LoadWebhook] when either +// the key or the encrypted URL file is missing, and a +// silent noop from [Send]. +// +// # Event Filter +// +// `notify.events` in `.ctxrc` is **opt-in**: empty list +// means **no events fire** (not "all events"). Recognized +// events: `loop`, `nudge`, `relay`, `heartbeat`. The filter +// is enforced by [EventAllowed]. +// +// **`ctx hook notify test` bypasses the filter** as a +// special case so users can verify connectivity without +// having to subscribe their target event first; the test +// path warns when an unfiltered event would normally have +// been dropped. +// +// # Template References +// +// Some emitters attach a [entity.TemplateRef] (hook name + +// variant) to the payload so downstream relays can render a +// canonical message. [template_ref.go] holds the helpers +// that resolve a [TemplateRef] to its rendered string at +// the receiving end (used by integrations that re-emit +// via Slack/Discord/ntfy.sh). 
+// +// # Encryption Key +// +// The encryption key is shared by both `ctx pad` and +// `ctx hook notify`. Rotating it (every +// `key_rotation_days`, default 90) requires re-running +// `ctx pad init` *and* `ctx hook notify setup`. The +// rotation nudge fires from +// `internal/cli/system/cmd/check_version`. +// +// # Concurrency +// +// All exported functions are safe to call concurrently; +// they hold no module-level state. The HTTP client is the +// stdlib default — connection-pooled and goroutine-safe. +// +// # Related Packages +// +// - [internal/cli/notify] — the `ctx hook notify` +// CLI: setup, test, and the bare send invocation. +// - [internal/cli/hook] — parent command that +// hosts notify under `ctx hook`. +// - [internal/crypto] — AES-256-GCM helpers +// for the encrypted URL store. +// - [internal/rc] — `notify.events` +// filter + key-rotation threshold. +// - [internal/entity] — [NotifyPayload], +// [TemplateRef] domain types. package notify diff --git a/internal/notify/notify.go b/internal/notify/notify.go index 586cd776e..62c8ea4a7 100644 --- a/internal/notify/notify.go +++ b/internal/notify/notify.go @@ -4,11 +4,6 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package notify provides fire-and-forget webhook notifications. -// -// The webhook URL is stored encrypted in .context/.notify.enc using the -// same AES-256-GCM key as the scratchpad (resolved via rc.KeyPath()). -// When no webhook is configured, all operations are silent noops. package notify import ( diff --git a/internal/parse/doc.go b/internal/parse/doc.go index cf836616e..85c44d155 100644 --- a/internal/parse/doc.go +++ b/internal/parse/doc.go @@ -4,9 +4,58 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package parse provides shared text-to-typed-value conversion functions. 
+// Package parse holds the small **string-to-typed-value**
+// converters that more than one ctx package needs: dates,
+// section ranges, frontmatter splits, system-reminder
+// stripping, and word-set helpers. Each function is a thin,
+// well-tested wrapper around standard-library or
+// canonical-format primitives so callers do not have to
+// duplicate the same edge-case handling.
 //
-// Functions here convert string inputs (dates, durations, identifiers)
-// into Go types. They are thin wrappers that handle empty inputs and
-// use canonical format constants from the config package.
+// # Functions
+//
+// - **[Date](s)** — parses `YYYY-MM-DD` into a
+// `time.Time` at midnight UTC. Empty input returns the
+// zero time with no error so callers can branch on
+// `.IsZero()` instead of comparing strings.
+// - **[SplitFrontmatter](data)** — splits a `---`-fenced
+// YAML frontmatter from the markdown body and returns
+// the two byte slices plus a parse error. Used by the
+// skill, steering, and journal-entry parsers.
+// - **[StripSystemReminders](text)** — Claude Code injects
+// `<system-reminder>` tags into tool results that the
+// user did not write. This function strips them so the
+// journal pipeline records what the user actually said.
+// - **[FixCodeFenceSpacing](text)** — users often type
+// `text: ```code` without proper line spacing around
+// the fence; this function normalizes the spacing so
+// the renderer treats it as a code block.
+// - **[WordSet](words)** — builds a `map[string]struct{}`
+// from a slice for O(1) membership; used by the
+// classifier and several lint helpers.
+//
+// # Why a Shared Package
+//
+// Every one of these conversions sat in two or three
+// places before it was hoisted here. The package's
+// existence is enforced by the audit suite: a duplicate
+// implementation in another package fails CI.
+//
+// # Concurrency
+//
+// All functions are pure and stateless. Concurrent
+// callers never race.
+// +// # Related Packages +// +// - [internal/config/time] — date and timestamp +// format constants this package consumes. +// - [internal/steering], [internal/skill], +// [internal/journal/parser] — direct consumers of +// [SplitFrontmatter]. +// - [internal/journal/parser] — consumes +// [StripSystemReminders] when normalizing Claude Code +// transcripts. +// - [internal/memory], [internal/drift] — consume +// [WordSet] for keyword-classification helpers. package parse diff --git a/internal/rc/doc.go b/internal/rc/doc.go index c7e49162e..a9d1d2862 100644 --- a/internal/rc/doc.go +++ b/internal/rc/doc.go @@ -4,9 +4,132 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package rc loads and manages runtime configuration from .ctxrc files. +// Package rc loads, caches, and exposes the **runtime +// configuration** every other ctx package and command depends +// on. It is the single source of truth for "where does the +// context directory live", "what is the token budget", "is +// scratchpad encryption on", and the dozens of other knobs that +// shape ctx behavior. // -// Key exports: [Default], [RC], [ContextDir], [TokenBudget], [PriorityOrder]. -// See source files for implementation details. -// Part of the internal subsystem. +// The package is foundational: most binaries and hooks call +// [RC] (or one of its accessor wrappers like [ContextDir], +// [TokenBudget], [PriorityOrder]) within the first few +// instructions, and many do so on every prompt. Performance and +// determinism here matter to the rest of the system. +// +// # Configuration Sources, in Resolution Order +// +// 1. **CLI overrides** — set via `ctx --context-dir ` +// (and equivalent flags). Highest priority. Stored in +// `rcOverrideDir` under [rcMu]. +// 2. **Environment variables** — `CTX_DIR`, `CTX_TOKEN_BUDGET` +// (see [internal/config/env]) override the corresponding +// `.ctxrc` fields when set. +// 3. 
**`.ctxrc` (YAML)** — read once at process start by +// [load] from the current working directory. Parse errors +// are logged via [internal/write/rc.ParseWarning] and the +// defaults are kept; **a malformed `.ctxrc` never aborts +// ctx**. +// 4. **Defaults** — every field has a hardcoded default in +// [Default]; see the constants in `default.go` +// (`DefaultTokenBudget`, `DefaultArchiveAfterDays`, +// `DefaultContextWindow`, etc.). +// +// The result is the singleton `*CtxRC` returned by [RC]. It is +// memoized via [sync.Once] so the YAML is parsed at most once +// per process (tests can call [Reset] to invalidate the cache). +// +// # Context-Directory Resolution +// +// [ContextDir] returns the absolute path of the project's +// `.context/` directory and is the most-called function in the +// package. Its resolution order is: +// +// 1. CLI override (`rcOverrideDir`) → return absolute, no +// walk. +// 2. Configured **absolute** path (`.ctxrc` or env var) → +// return as-is. +// 3. **Upward walk from CWD** ([walkForContextDir]) — find +// the first ancestor directory that contains a folder +// whose basename matches the configured name. The walk is +// bounded by the **git root** when one is present: a +// candidate that falls outside the git root is discarded +// so commands run in submodules or sibling projects do not +// leak into the wrong project's context. +// 4. **Fallback** — `filepath.Join(cwd, name)` returned +// absolute. This preserves `ctx init`'s ability to create +// a fresh `.context/` at the current location. +// +// The walk result is cached for the life of the process. Hook +// scripts and subcommand binaries invoked from a project +// subdirectory therefore consistently see the same context dir +// the user's terminal does. +// +// # The Configuration Schema +// +// [CtxRC] is the YAML-tagged struct mirrored by `.ctxrc`. 
A +// non-exhaustive tour of the field families: +// +// - **Layout** — `ContextDir`, `KeyPathOverride`, +// `Steering.Dir`, `Hooks.Dir`. Where things live. +// - **Budgets** — `TokenBudget`, `InjectionTokenWarn`, +// `BillingTokenWarn`, `ContextWindow`. How big context +// packets and injections are allowed to be. +// - **Lifecycle** — `AutoArchive`, `ArchiveAfterDays`, +// `KeyRotationDays`, `StaleAgeDays`. Time-based nudges and +// auto-cleanup. +// - **Per-tool** — `Tool` (claude / cursor / cline / kiro / +// codex), `Steering.DefaultTools`. Which AI assistant is +// active and which tools steering files default to. +// - **Provenance** — [ProvenanceConfig] toggles which of +// `--session-id`, `--branch`, `--commit` are required when +// adding entries; default is "all required". +// - **Notifications** — [NotifyConfig] holds the event +// filter for `ctx hook notify` (loop / nudge / relay / +// heartbeat). +// - **Memory bridge** — `ClassifyRules` overrides the +// keyword classifier in [internal/memory] when set. +// - **Spec nudge** — `SpecSignalWords`, `SpecNudgeMinLen` +// control when `ctx add task` suggests writing a spec. +// - **Freshness tracking** — [FreshnessFile] entries make +// the staleness check warn when technology-dependent +// constants in source files have not been reviewed in N +// months. +// +// Pointer-typed `*bool` fields ([CtxRC.ScratchpadEncrypt], +// [HooksRC.Enabled], [ProvenanceConfig.SessionID/Branch/Commit]) +// distinguish "user explicitly set false" from "unset, use the +// default true" — assignment by value would lose that +// distinction. +// +// # Concurrency +// +// [RC] is safe to call from any goroutine; it serializes +// initialization through `rcOnce`. Read accessors hold an +// `RLock` on `rcMu`; the only writer is the test-only [Reset]. +// CLI override mutation goes through a brief `Lock()`. 
+// +// # Profiles +// +// `.ctxrc` supports an optional `profile:` field plus profile +// overlays (`.ctxrc.dev`, `.ctxrc.base`) wired by `ctx config +// switch`. The active profile name is exposed via +// [ActiveProfile]; see [internal/cli/config] for the +// switching logic and [docs/recipes/configuration-profiles.md] +// for the user-facing story. +// +// # Related Packages +// +// - [github.com/ActiveMemory/ctx/internal/config/env] — the +// env-var name constants this package consumes. +// - [github.com/ActiveMemory/ctx/internal/config/file] — +// `.ctxrc` file name constant. +// - [github.com/ActiveMemory/ctx/internal/crypto] — +// resolves the encryption key path; consumed by +// [internal/pad] and [internal/notify]. +// - [github.com/ActiveMemory/ctx/internal/cli/config] — the +// `ctx config` CLI surface (status, switch, schema). +// - [github.com/ActiveMemory/ctx/internal/write/rc] — +// terminal output helpers, including the parse-warning +// formatter used when `.ctxrc` YAML is malformed. package rc diff --git a/internal/rc/rc.go b/internal/rc/rc.go index d999f5012..c29b6c799 100644 --- a/internal/rc/rc.go +++ b/internal/rc/rc.go @@ -4,7 +4,6 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package rc provides runtime configuration loading from .ctxrc files. package rc import ( @@ -231,7 +230,7 @@ func KeyPath() string { // KeyRotationDays returns the configured key rotation threshold in days. // -// The encryption key is shared by both ctx pad and ctx notify, so the +// The encryption key is shared by both ctx pad and ctx hook notify, so the // rotation threshold is a project-wide setting. // // Priority: top-level key_rotation_days > diff --git a/internal/skill/doc.go b/internal/skill/doc.go index 94fdf6958..95e4bc926 100644 --- a/internal/skill/doc.go +++ b/internal/skill/doc.go @@ -4,9 +4,82 @@ // \ Copyright 2026-present Context contributors. 
// SPDX-License-Identifier: Apache-2.0 -// Package skill manages reusable instruction bundles with YAML frontmatter. +// Package skill manages **reusable instruction bundles** — +// the `SKILL.md` + supporting-files trees that ship under +// `.claude/skills//` and tell an AI tool how to perform +// a recurring workflow. // -// Key exports: [Install], [LoadAll], [Load], [Remove]. -// See source files for implementation details. -// Part of the internal subsystem. +// A skill is a self-contained directory: +// +// skills// +// SKILL.md # YAML frontmatter + instructions +// references/... # optional supporting docs +// # optional supporting script +// +// The package's job is to **install, list, load, and remove** +// these bundles. It does not execute them — execution is the +// AI tool's responsibility (Claude Code, Copilot CLI, etc.). +// +// # The Frontmatter Schema +// +// Each `SKILL.md` declares a [Manifest] in YAML +// frontmatter: +// +// - **name** — globally unique identifier; used as the +// directory name and as the slash-command alias. +// - **description** — one-line trigger phrase the AI uses +// to decide when to invoke the skill. +// - **tools** — Copilot-style allowed-tools list (`bash`, +// `read`, `write`, `edit`, `glob`, `grep`). +// - **allowed-tools** — Claude-Code-style permission +// scopes (`Bash(ctx:*)`, `Read`, etc.). +// +// [manifest.go] parses and validates the frontmatter; +// missing required fields produce a typed error from +// [internal/err/skill] that names the file path. +// +// # Public Surface +// +// - **[Install]** — copies a source skill directory into +// the target `skillsDir//`. Refuses to overwrite +// an existing skill (the user must `Remove` first); use +// `--force` at the CLI for replacement. +// - **[Load]** — reads one skill by name, returns its +// full [Skill] with manifest + body + path. +// - **[LoadAll]** — walks the skills directory, returns +// every loadable skill. 
Skills that fail to parse are +// reported in the error slice rather than aborting the +// load. +// - **[Remove]** — deletes a skill directory after +// verifying it lives under the canonical skills +// directory (boundary check guards against `..` +// escape). +// +// # File-Copy Semantics +// +// [copy.go] does the recursive copy with three rules: +// +// 1. **Preserve mode bits** — executable scripts stay +// executable. +// 2. **Skip dotfiles at the source root** — +// `.DS_Store`, `.git`, etc. never end up installed. +// 3. **Validate the destination** lies within the +// skills-dir boundary. +// +// # Concurrency +// +// All operations are filesystem-bound and stateless. +// Callers serialize through process-level execution. +// +// # Related Packages +// +// - [internal/cli/skill] — the `ctx skill install / +// list / remove` CLI surface. +// - [internal/err/skill] — typed error constructors +// used here and by callers for consistent messaging. +// - [internal/entity] — [Skill], [Manifest] +// domain types. +// - [internal/assets/claude/skills] — +// the project's own bundled skills, deployed to +// user environments at `ctx init` time. package skill diff --git a/internal/steering/doc.go b/internal/steering/doc.go index 5bf9d212b..60831e3bb 100644 --- a/internal/steering/doc.go +++ b/internal/steering/doc.go @@ -4,9 +4,136 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package steering manages behavioral guidance files with YAML frontmatter. +// Package steering parses, scores, and synchronizes +// **steering files** — the small frontmattered Markdown +// documents under `.context/steering/` that tell each +// configured AI tool *how to behave* when a specific kind of +// prompt arrives. // -// Key exports: [Parse], [Print], [Filter], [LoadAll], [SyncTool], [SyncAll]. -// See source files for implementation details. -// Part of the internal subsystem. 
+// Steering is the declarative half of ctx's behavior layer +// (the imperative half is [internal/trigger]: scripts that +// *do* things on lifecycle events). A steering file says +// "when the user asks about Y, prepend these rules to the +// prompt"; a trigger says "when X happens, run this code." +// +// # The Steering File +// +// Each `.md` file under the steering directory is a +// [SteeringFile]: a short YAML frontmatter block followed by +// a Markdown body. The schema: +// +// - **name** — unique identifier; matches the manual +// selector in `ctx steering preview --names ...`. +// - **description** — one-line summary; doubles as the +// match phrase for [cfgSteering.InclusionAuto]. +// - **inclusion** — `always` | `auto` | `manual` +// ([cfgSteering.InclusionMode]). Default `manual`. +// - **tools** — list of AI-tool IDs the file applies to; +// empty/nil means "all tools". +// - **priority** — injection order; lower priority is +// injected earlier (default 50). +// +// [Parse] reads bytes + a path and returns a fully populated +// [SteeringFile] with defaults applied; YAML errors are wrapped +// via [internal/err/steering] so the file path is always part +// of the message. [LoadAll] is the bulk variant that walks a +// directory. +// +// # The Inclusion Modes +// +// Three modes determine when a file's body is appended to the +// next prompt: +// +// - **always** — every prompt, every turn, no questions. +// Heaviest on context budget; reserve for genuinely +// foundational rules. +// - **auto** — included when the lowercased prompt contains +// the file's lowercased description (substring match — +// simple, deterministic, fast). The most common mode for +// project-specific guidance. +// - **manual** — only when the file's name appears in the +// `manualNames` argument to [Filter] / [matchInclusion]. +// Used by `ctx steering preview --names ...` and by the +// MCP `steering_get` tool. 
+// +// [matchInclusion] does the per-file decision; [matchTool] +// adds tool-scope filtering on top. [Filter] composes the two +// against a list of files for a given (prompt, tool, manual) +// triple. +// +// # Two Tool Families, Two Delivery Paths +// +// Not every AI editor consumes steering the same way; ctx +// handles two families: +// +// - **Native-rules tools** — Cursor, Cline, Kiro +// ([syncableTools]) — have a built-in rules primitive +// (`.cursor/rules/*.mdc`, `.clinerules`, +// `.kiro/steering/*.md`). [SyncTool] writes +// ctx-managed `.context/steering/*.md` into each tool's +// native format. [SyncAll] does this for every supported +// tool in one call. Idempotent: unchanged content is +// skipped. +// - **Hook-driven tools** — Claude Code and Codex use +// `ctx agent` to assemble the context packet on every +// prompt; their steering arrives via the agent pipeline +// (no file sync). They are deliberately **not** in +// [syncableTools]; calling `SyncTool` for them returns +// [errSteering.UnsupportedTool]. +// +// Mixed setups (project uses both Cursor and Claude Code) +// run `ctx steering sync` for the native-rules tools and let +// the hook+MCP pipeline cover Claude Code automatically. See +// `docs/home/steering.md` for the user-facing summary of this +// split. +// +// # Foundation Files +// +// `ctx init` scaffolds four foundation steering files — +// `product`, `tech`, `structure`, `workflow` — so users have +// real templates to edit instead of an empty directory. +// [FoundationFiles] returns the set; bodies and descriptions +// come from YAML text assets at call time so they stay in sync +// with the embedded copy. Re-running `ctx init` is safe: +// existing files are left alone. +// +// # Format Adapters +// +// Each native tool needs a slightly different frontmatter +// shape: +// +// - [cursorFrontmatter] — `description`, `globs`, +// `alwaysApply`. +// - [kiroFrontmatter] — `name`, `description`, `mode`. 
+// - Cline takes plain Markdown with no frontmatter. +// +// [format.go] holds the per-tool serializers; the unexported +// types in [types.go] keep the YAML shape decoupled from the +// canonical [SteeringFile]. +// +// # Concurrency and Idempotency +// +// Functions are stateless. [SyncTool] reads from the steering +// directory, computes the desired output for each file, +// compares it to what is on disk, and writes only the +// changed files — so running it twice in a row produces no +// `Written` entries the second time, just `Skipped`. Output +// paths are validated to resolve within `projectRoot` before +// writing. +// +// # Related Packages +// +// - [github.com/ActiveMemory/ctx/internal/cli/steering] — +// CLI surface: `add`, `list`, `preview`, `init`, `sync`. +// - [github.com/ActiveMemory/ctx/internal/mcp/handler] — +// MCP `steering_get` tool that surfaces matched files to +// Claude Code via JSON-RPC. +// - [github.com/ActiveMemory/ctx/internal/config/steering] +// — inclusion modes and foundation file names. +// - [github.com/ActiveMemory/ctx/internal/config/hook] — +// supported tool ID constants used by [syncableTools]. +// - [github.com/ActiveMemory/ctx/internal/err/steering] — +// typed error constructors with file-path context. +// - [github.com/ActiveMemory/ctx/internal/parse] — +// [SplitFrontmatter] used by [Parse]. package steering diff --git a/internal/sysinfo/doc.go b/internal/sysinfo/doc.go index ddea198a3..540d564dd 100644 --- a/internal/sysinfo/doc.go +++ b/internal/sysinfo/doc.go @@ -4,10 +4,73 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package sysinfo gathers OS-level resource metrics (memory, swap, disk, load) -// and evaluates them against configurable thresholds to produce alerts at -// WARNING and DANGER severity levels. 
+// Package sysinfo gathers OS-level resource metrics — memory, +// swap, disk, load average — and evaluates them against +// configurable thresholds to produce alerts at **WARNING** and +// **DANGER** severity levels. // -// Platform support uses build tags: Linux reads /proc, macOS shells out to -// sysctl/vm_stat, and other platforms return Supported: false gracefully. +// The package powers two surfaces: +// +// - **`ctx sysinfo`** — the top-level user-facing CLI that +// prints a snapshot of host resources. +// - **`ctx system check_resource`** — the hook that fires a +// pressure warning during sessions when load, memory, or +// disk crosses a danger threshold. +// +// # Per-Platform Implementations +// +// Resource collection is **platform-conditional** via Go build +// tags so the binary stays a single static cross-compile while +// still asking each OS in its native dialect: +// +// - **Linux** — reads `/proc/meminfo` and `/proc/loadavg` +// directly ([memory_linux.go], [load_linux.go]). +// - **macOS / Darwin** — shells out to `sysctl -n vm.loadavg` +// and `vm_stat` and parses their output +// ([memory_darwin.go], [load_darwin.go]). +// - **Other / Windows** — stubs that return +// `Supported: false` ([memory_other.go], [load_other.go], +// [disk_windows.go]). The hook degrades gracefully rather +// than aborting the session. +// +// Disk usage is read uniformly via `syscall.Statfs` on +// Unix-likes ([disk.go]) and stubbed on Windows. +// +// # Threshold Evaluation +// +// [threshold.go] holds the WARNING / DANGER cutoffs and the +// per-metric evaluator that turns a raw measurement into a +// severity. Defaults reflect "headroom you almost certainly +// want": load averages compared against CPU count, memory +// available below a percentage, disk free below a percentage. +// The 5-minute load average — not the 1-minute — is used to +// avoid false positives from transient spikes (a deliberate +// behavior, see commit `5958e558`). 
+// +// # The Output Shape +// +// [Resource] ([types.go]) is the unified record emitted by +// each collector: kind, value, unit, threshold, severity, +// support flag. [calc.go] holds the per-metric arithmetic +// (percent free, ratio computations) with explicit +// zero-division guards. +// +// # Concurrency +// +// Each call to a collector is a one-shot syscall + parse; +// nothing is cached at the package level. Concurrent callers +// produce independent readings. The `vm_stat` / `sysctl` +// shell-out path on macOS uses an external process which is +// the slowest case (~tens of milliseconds). +// +// # Related Packages +// +// - [internal/cli/sysinfo] — the +// top-level `ctx sysinfo` CLI surface. +// - [internal/cli/system/cmd/check_resource] — the +// resource-pressure hook. +// - [internal/config/sysinfo] — procfs +// paths and meminfo field-key constants used by Linux. +// - [internal/write/sysinfo] — terminal +// output formatters consumed by both the CLI and hook. package sysinfo diff --git a/internal/task/doc.go b/internal/task/doc.go index 612aff664..f3bc90255 100644 --- a/internal/task/doc.go +++ b/internal/task/doc.go @@ -4,9 +4,71 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package task provides task item parsing, matching, and domain logic. +// Package task is the **pure-logic core** behind every +// operation against `TASKS.md` lines: parsing one task +// line into its components, classifying it as completed +// or pending, locating its sub-tasks, and matching it +// against user-supplied selectors. // -// Key exports: [Completed], [Pending], [Indent], [Content], [Sub]. -// See source files for implementation details. -// Part of the internal subsystem. +// The package is the foundation of [internal/cli/task] +// (the CLI), [internal/tidy] (the archive engine), and +// [internal/mcp/handler] (the `ctx_complete` MCP tool). 
+// Everything that touches a TASKS.md line passes through +// the predicates here. +// +// # Public Surface +// +// - **[Completed](line)** — true when the line is +// a `- [x] ...` or `- [-] ...` task. +// - **[Pending](line)** — true when the line is a +// `- [ ] ...` task. +// - **[Indent](line)** — returns the leading +// whitespace count for a task line; used to +// determine top-level vs nested. +// - **[Content](line)** — strips the +// `- [x] `/`- [ ] ` prefix and any trailing +// inline tags (`#priority:`, `#session:`, +// `#branch:`, `#commit:`, `#added:`, `#done:`), +// returning just the human-readable task text. +// - **[Sub](lines, parentIdx)** — returns the +// index range of sub-tasks under the task at +// `parentIdx` (those with strictly greater +// indent up until the next sibling/parent). +// +// # Why a Separate Package +// +// Five callers need the same predicates and the same +// "what counts as a task line" definition. Hoisting +// them here means the spec lives in one place and the +// audit suite catches duplication. +// +// # Format Reference +// +// Task lines follow the canonical shape established +// by [internal/assets/tpl.Task]: +// +// - [ ] Implement rate limiting #priority:high +// #session:abc1 #branch:main #commit:def2 +// #added:2026-04-12-093000 +// +// Continuation indents (the wrapped attributes) are +// not separate tasks; [Indent] and the parsers in +// this package treat them as part of the parent +// task's body. +// +// # Concurrency +// +// All functions are pure. Concurrent callers never +// race. +// +// # Related Packages +// +// - [internal/cli/task] — chief consumer for +// `complete` matching. +// - [internal/tidy] — uses the predicates +// to identify archive candidates. +// - [internal/mcp/handler] — uses them in the +// MCP `ctx_complete` tool. +// - [internal/assets/tpl] — defines the task +// line shape this package parses. 
package task diff --git a/internal/task/task.go b/internal/task/task.go index 3e542e419..504826990 100644 --- a/internal/task/task.go +++ b/internal/task/task.go @@ -4,10 +4,6 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package task provides task item parsing and matching. -// -// This package handles the domain logic for task items, independent of -// their Markdown representation. package task import ( diff --git a/internal/tidy/doc.go b/internal/tidy/doc.go index 2a9ea5ca1..3e1ae419b 100644 --- a/internal/tidy/doc.go +++ b/internal/tidy/doc.go @@ -4,11 +4,81 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package tidy provides shared helpers for context file maintenance:. +// Package tidy provides the **archive and compact** primitives +// that keep `.context/` files lean as a project ages — moving +// completed tasks into dated archive files, sweeping empty +// sections, and reorganizing TASKS.md without losing +// provenance. // -// Key exports: [WriteArchive], [ParseTaskBlocks], -// [RemoveBlocksFromLines], [CompactContext], -// [RemoveEmptySections]. -// See source files for implementation details. -// Part of the internal subsystem. +// The package is the *engine*; the user-facing surface is +// `ctx task archive`, `ctx compact`, and the `_ctx-archive` +// skill. All three call into the helpers here so the rules are +// applied identically regardless of caller. +// +// # The Archive Pipeline +// +// [WriteArchive](contextDir) is the top-level entry point. +// Behavior: +// +// 1. **Parse** TASKS.md into [TaskBlock] records via +// [ParseTaskBlocks]. Only **top-level** tasks +// (`indent == 0`) marked `[x]` are candidates; +// nested subtasks ride along with their parent. +// 2. 
**Group** archived tasks by Phase header so the +// archive file preserves the same Phase structure as +// the source (a constitutional invariant — Phase +// identity must survive archival). +// 3. **Write** the archive to +// `.context/archive/tasks-YYYY-MM-DD.md`, creating +// the directory if needed. If today's archive file +// already exists, the new content is *appended*, not +// overwritten. +// 4. **Remove** the archived blocks from TASKS.md via +// [RemoveBlocksFromLines] — the rewriter operates on +// the raw line slice so byte offsets stay aligned. +// +// # Compact and Sanitize +// +// [CompactContext] runs the broader cleanup that `ctx +// compact` performs: archives done tasks **and** sweeps +// empty H2/H3 sections via [RemoveEmptySections] so the +// file does not accumulate dangling headers after every +// archival round. [sanitize.go] holds the helpers that +// trim trailing whitespace, normalize blank-line runs to +// at-most-one, and ensure the file ends with a single +// newline. +// +// # Pure-Logic Core +// +// [block.go] and [parse.go] form the pure-logic core: no +// IO, no time, no flags. They take `[]string` and return +// `[]TaskBlock` / new `[]string`. This split makes the +// archival math testable in isolation; the only IO sits +// in [archive.go] and [compact.go] at the boundary. +// +// # Constitutional Invariants Honored +// +// The CONSTITUTION.md rule "Archival is allowed, deletion +// is not" is enforced at this layer: archival never drops +// content; archive files preserve Phase headers; and +// compaction refuses to touch an entry that has not been +// explicitly marked complete. +// +// # Concurrency +// +// All functions are stateless. Callers serialize through +// process-level execution; concurrent invocations against +// the same context dir would race on file writes (no +// locking is implemented). +// +// # Related Packages +// +// - [internal/cli/task] — `ctx task archive`, +// `ctx task complete`, `ctx task snapshot`. 
+// - [internal/cli/compact] — `ctx compact` CLI +// entry point. +// - [internal/mcp/handler] — MCP `ctx_compact` +// tool calls into this package. +// - [internal/entity] — [TaskBlock] and +// related domain types. package tidy diff --git a/internal/trace/doc.go b/internal/trace/doc.go index cb8cbcfb6..cbc7db107 100644 --- a/internal/trace/doc.go +++ b/internal/trace/doc.go @@ -4,12 +4,134 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package trace provides commit context tracing — linking git commits -// back to the decisions, tasks, learnings, and sessions that motivated them. -// -// Key exports: [Collect], [FormatTrailer], [Record], [Resolve], [ShortHash], -// [ReadHistory], [WriteHistory], [ReadOverrides], [WriteOverride], -// [CollectRefsForCommit], [ResolveCommitHash], [CommitMessage], [CommitDate]. -// See source files for implementation details. -// Part of the internal subsystem. +// Package trace implements **commit context tracing** — the layer +// that links a git commit back to the decisions, learnings, +// conventions, tasks, and AI sessions that motivated it. +// +// The point is to make `git log -p` answer not just "what +// changed" but "*why* it changed", without forcing the developer +// to write hand-curated provenance every time. The package +// gathers context references from three sources at commit time, +// renders them as a structured git trailer, and persists a +// per-commit history record so the link survives even when the +// commit message is later squashed or rewritten. 
+//
+// # Reference Format
+//
+// A "ref" is a short, parseable string that points at one
+// concrete piece of context:
+//
+// - `decision:12` — DECISIONS.md entry #12
+// - `learning:7` — LEARNINGS.md entry #7
+// - `convention:3` — CONVENTIONS.md entry #3
+// - `task:8` — TASKS.md item #8
+// - `session:abc` — AI session ID `abc`
+// - `"free note"` — quoted free-form note
+//
+// [parseRef] turns a string into (type, number, text); [Resolve]
+// looks up the entry and returns a [ResolvedRef] populated with
+// the entry title and a one-line detail preview.
+//
+// # The Three-Source Collection
+//
+// [Collect] runs at commit time (typically from a
+// `prepare-commit-msg` hook) and gathers refs from three independent
+// sources, **in this order**, then deduplicates while preserving
+// first-occurrence order:
+//
+// 1. **Pending records** — refs that were explicitly staged
+// ahead of time via `ctx trace tag` and stored as
+// [PendingEntry] in `state/trace-pending.jsonl`. Cleared
+// after the commit lands.
+// 2. **Staged file diffs** — [StagedRefs] runs `git diff
+// --cached` on each of DECISIONS.md, LEARNINGS.md,
+// CONVENTIONS.md and parses **added entries** into refs of
+// the matching type. For TASKS.md it parses **completed
+// tasks** (lines that flipped from `[ ]` to `[x]`). This is
+// the source that catches "I just wrote a new decision and
+// committed it" without any tagging.
+// 3. **Working state** — [WorkingRefs] adds in-progress task
+// refs (from TASKS.md) plus a `session:` ref derived
+// from `$CTX_SESSION_ID` when an AI session is active.
+//
+// First-source-wins ordering means a ref a developer explicitly
+// pinned via `ctx trace tag` always shows up before one
+// auto-detected from a diff.
+// +// # The Trailer +// +// [FormatTrailer] turns a `[]string` of refs into a single git +// trailer line of the form: +// +// ctx-context: decision:12, task:8, session:abc +// +// Empty input produces an empty string (no trailer is written). +// The trailer is appended to the commit message by the +// `prepare-commit-msg` hook installed by `ctx trace hook +// enable`. +// +// # Persistence +// +// Two append-only JSONL stores live under `state/`: +// +// - **history.jsonl** — one [HistoryEntry] per commit: +// full commit hash, the refs that were attached, the +// commit message, and a UTC timestamp. Written by the +// `post-commit` hook so the link survives later message +// edits or squashes. +// - **overrides.jsonl** — [OverrideEntry] records that let a +// human pin a different set of refs to a commit after the +// fact (`ctx trace tag --note "..."`). Resolution +// prefers the most recent override over the original +// history entry. +// +// Both files are read with [ReadHistory] / [ReadOverrides]; both +// silently skip malformed lines so a corrupt tail does not +// break query commands. [WriteHistory] / [WriteOverride] use +// [appendJSONL] which creates the parent directory on demand and +// stamps a UTC timestamp when the caller leaves it zero. +// +// # Resolution +// +// The CLI side (`ctx trace `, `ctx trace file `) +// asks the package to **resolve** raw refs back to human +// information: +// +// - [Resolve](ref, contextDir) → [ResolvedRef] with title and +// one-line preview (or `Found: false` for stale refs). +// - [CollectRefsForCommit] picks the ref set for a given +// commit, preferring override → history. +// - [ResolveCommitHash] takes a short hash, abbrev, or +// ref-like string and returns the full SHA via `git +// rev-parse`. +// - [CommitMessage] / [CommitDate] are thin `git log` wrappers +// used to render the trace output. 
+// +// # Concurrency and Safety +// +// All filesystem operations go through [appendJSONL] / +// [readJSONL]; writes are append-only so concurrent commits in +// quick succession (rare but possible with parallel worktrees) +// produce interleaved-but-valid JSONL. The package holds no +// process-wide state. +// +// # Related Packages +// +// - [github.com/ActiveMemory/ctx/internal/cli/trace] — CLI +// subcommands (`ctx trace `, `ctx trace file`, +// `ctx trace tag`, `ctx trace hook enable/disable`, +// `ctx trace collect` plumbing). +// - [github.com/ActiveMemory/ctx/internal/config/trace] — +// ref-type keywords, file names, trailer format constants. +// - [github.com/ActiveMemory/ctx/internal/config/dir] — state +// directory layout (history.jsonl, overrides.jsonl, +// trace-pending.jsonl all live under `state/`). +// - [github.com/ActiveMemory/ctx/internal/config/env] — +// `CTX_SESSION_ID` env var read by [WorkingRefs]. +// +// # Background +// +// See `specs/commit-context-tracing.md` and +// `docs/cli/trace.md` for the design rationale and end-user +// documentation. package trace diff --git a/internal/trigger/doc.go b/internal/trigger/doc.go index 0b5c7b621..95184bb53 100644 --- a/internal/trigger/doc.go +++ b/internal/trigger/doc.go @@ -4,10 +4,154 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package trigger manages lifecycle automation scripts for AI tool events. +// Package trigger implements ctx's **lifecycle automation** +// layer: project-owned shell scripts that run when an AI +// session crosses a defined boundary (start, end, before a +// tool call, after a tool call, file save, context-add). // -// Key exports: [Discover], [FindByName], [RunAll], -// [ValidatePath], [ValidTypes]. -// See source files for implementation details. -// Part of the internal subsystem. +// Triggers are the **imperative** counterpart to +// [internal/steering]. 
Steering says "when the user asks
+// about Y, prepend these rules"; triggers say "when X
+// happens, run this code." Together they form the
+// behavior-customization plane the CLI exposes via
+// `ctx trigger ...` and the docs cover in
+// `docs/home/triggers.md`.
+//
+// # Trigger Types
+//
+// Six lifecycle events are supported (returned by
+// [ValidTypes], see [internal/config/trigger]):
+//
+// - **session-start** — a new AI session begins. Common
+// uses: inject standup notes, rotate context, log a
+// start marker.
+// - **session-end** — an AI session ends. Common uses:
+// persist summaries, ship notifications, capture
+// transcripts.
+// - **pre-tool-use** — before a tool call executes. Can
+// **block** the call (cancel:true) — used for safety
+// gates, audit logging, and policy enforcement.
+// - **post-tool-use** — after a tool call completes. Used
+// for reactions, lint-on-save, and post-processing.
+// - **file-save** — a file is saved. Lint, regenerate
+// indices, update derived files.
+// - **context-add** — a new entry was added to
+// `.context/`. Cross-link, notify, enrich.
+//
+// Each script receives a JSON [HookInput] on stdin and is
+// expected to emit a JSON [HookOutput] on stdout (`cancel`,
+// `context`, `message`).
+//
+// # Discovery — Layout under `.context/hooks/`
+//
+// [Discover] scans `.context/hooks/<type>/*.sh` (and any
+// other extension; the executable bit is what counts, not
+// the suffix) and returns one [HookInfo] per discovered
+// script. A script's `Enabled` flag is **the executable
+// permission bit**: an enabled hook fires, a non-executable
+// one is reported by `ctx trigger list` but skipped at
+// run-time.
+//
+// [FindByName] is the lookup helper used by `ctx trigger
+// enable/disable/test` to address a single script by stem.
+// +// # Security — The Disabled-by-Default Contract +// +// **A trigger is a shell script that runs with the same +// privileges as your AI tool.** A buggy or malicious one can +// block tool calls, corrupt context files, or exfiltrate +// data. The package therefore enforces a strict +// security-first workflow: +// +// 1. `ctx trigger add` creates new scripts **without** the +// executable bit. They are inert until the user opts in. +// +// 2. The user reviews the script and runs `ctx trigger +// enable `, which sets the executable bit +// **after** [ValidatePath] has run. +// +// 3. [ValidatePath] enforces three rules at every +// execution: +// +// - **No symlinks** — `os.Lstat` is used; symlinks +// under `.context/hooks/` are rejected outright. +// - **Boundary check** — the resolved absolute path +// must lie within the absolute hooks directory; a +// path that escapes via `..` is rejected. +// - **Executable bit must be set** — the same bit +// that gates discovery. +// +// A failure here returns a typed error from +// [internal/err/trigger] — the runner refuses to +// `exec`. +// +// `docs/home/triggers.md` makes this contract explicit to +// users; the package enforces it. +// +// # Execution — How `RunAll` Behaves +// +// [RunAll] runs every enabled hook for the given type **in +// alphabetical order**, marshals [HookInput] to JSON on +// stdin, reads [HookOutput] from stdout, and aggregates the +// result into an [AggregatedOutput]. Per-hook semantics: +// +// - `cancel:true` in stdout → **halt the chain**, set +// `Cancelled` and `Message` on the aggregate, return +// immediately. +// - non-empty `context` field → append to +// `AggregatedOutput.Context` (concatenated across +// hooks). +// - non-zero exit code → log via [ctxLog], record in +// `Errors`, **continue** with the next hook. +// - invalid JSON on stdout → log warning, record in +// `Errors`, continue. +// - timeout exceeded → kill the process group, log +// warning, continue. 
Default is [DefaultTimeout]; can +// be overridden by the caller. +// +// "One bad hook does not abort the chain" is intentional: +// security gates fire-and-forget, automation hooks fail +// loud-but-non-fatal, and only an explicit +// `cancel:true` short-circuits the rest. +// +// # The Three Hook-Like Layers +// +// The user-facing docs (`docs/home/triggers.md`) call out +// that ctx has **three** distinct hook concepts; only this +// package owns the first: +// +// - **`ctx trigger`** (this package) — project-authored +// scripts under `.context/hooks/`, fire on lifecycle +// events, work with any AI tool. +// - **`ctx system` hooks** — built-in nudges shipped by +// ctx itself (see `internal/cli/system/cmd/check_*`). +// Wired into tool configs at `ctx init` time. +// - **Claude Code hooks** — Claude-Code-specific entries +// in `.claude/settings.local.json`. Tool-native, not +// portable. +// +// # Concurrency +// +// The package holds no mutable global state. [RunAll] runs +// hooks **sequentially** within a single invocation — +// alphabetical order is part of the contract — but +// concurrent invocations from different goroutines are +// safe. +// +// # Related Packages +// +// - [github.com/ActiveMemory/ctx/internal/cli/trigger] — +// the `ctx trigger` CLI: add, list, test, enable, +// disable. +// - [github.com/ActiveMemory/ctx/internal/config/trigger] +// — the [TriggerType] enum and lifecycle constants. +// - [github.com/ActiveMemory/ctx/internal/entity] — +// [TriggerSession], [TriggerInput] — the input payload +// types. +// - [github.com/ActiveMemory/ctx/internal/err/trigger] — +// typed error constructors used by [ValidatePath] and +// the runner. +// - [github.com/ActiveMemory/ctx/internal/drift] — +// `checkHookPerms` flags any trigger script lacking the +// executable bit so the user can re-enable it. 
package trigger diff --git a/internal/validate/doc.go b/internal/validate/doc.go index da08dd85d..177594eb6 100644 --- a/internal/validate/doc.go +++ b/internal/validate/doc.go @@ -4,10 +4,60 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package validate provides input sanitization and validation -// utilities. +// Package validate provides the **input-sanitization** +// helpers ctx uses at every boundary where a user-supplied +// string becomes part of a filesystem path, a filename, or +// a URL: stripping shell metacharacters, enforcing +// boundary-safe components, and rejecting overlong inputs +// that would blow stack frames or filesystem limits. // -// It includes functions for converting user-provided strings into -// safe values for use in filenames, paths, and other contexts where -// special characters could cause problems. +// The package is the safety net under every CLI flag that +// takes free-form text. Direct callers include the +// scratchpad tag normalizer, the journal slug normalizer, +// the pad-import path validator, and the trigger +// path-validator's underlying primitive. +// +// # Public Surface +// +// - **[FilenameSafe](s)** — turns an arbitrary +// string into a filename-safe variant: replaces +// `/`, `\`, NUL, and control characters with +// `-`, collapses runs, trims leading/trailing +// separators. Idempotent. +// - **[PathComponent](s)** — like [FilenameSafe] +// but additionally rejects `.` and `..` entirely +// so the result cannot escape the parent +// directory. +// - **[Bounded](s, max)** — truncates `s` to `max` +// **runes** (rune-aware, not byte-aware) so a +// multi-byte character is never split. Returns +// the original when already short enough. +// - **[NoControl](s)** — verifies `s` contains no +// control characters; used by exec helpers +// before passing arguments to a child process. 
+// +// # Why a Dedicated Package +// +// Every CLI tool eventually grows a "what counts as +// a safe filename" function in seventeen files with +// seventeen subtle differences. Hoisting them here +// means every fix lands in one place and the audit +// suite catches duplication. +// +// # Concurrency +// +// All functions are pure. Concurrent callers never +// race. +// +// # Related Packages +// +// - [internal/cli/pad] — uses [FilenameSafe] +// for tag → filename conversion. +// - [internal/cli/journal/core/slug] — sister +// primitive that targets URL slugs (lowercase +// hyphenation, not just safety). +// - [internal/trigger] — uses [PathComponent] +// in the boundary check. +// - [internal/exec] — uses [NoControl] before +// `os/exec` invocations. package validate diff --git a/internal/wrap/doc.go b/internal/wrap/doc.go index de2d6a05f..930731ab0 100644 --- a/internal/wrap/doc.go +++ b/internal/wrap/doc.go @@ -4,11 +4,64 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package wrap soft-wraps long lines in markdown files to a target -// width (default 80 characters). +// Package wrap soft-wraps long lines in markdown files to a +// target column width (default 80) without breaking +// markdown semantics — preserving fenced code blocks, tables, +// frontmatter, and list continuation indentation. // -// [Content] wraps all lines in a journal entry. [ContextFile] wraps -// lines in a context file (.context/*.md), handling markdown list -// continuation with 2-space indent. [Soft] wraps a single line at -// word boundaries, returning multiple lines. +// The package is what backs `ctx fmt` for journal and context +// files; it is also called by the journal-import pipeline +// before writing enriched entries so reviewers see the same +// shape on disk that they would see in a code review. 
+// +// # The Three Public Functions +// +// - **[Soft](line, width)** — wraps a **single** line at +// word boundaries and returns the resulting `[]string`. +// Preserves the leading indent of the original line on +// each continuation. Never breaks inside a word. +// - **[Content](text, width)** — wraps every line in a +// **journal entry**. Recognizes YAML frontmatter and +// skips it (frontmatter values may not be wrapped), +// skips lines inside fenced code blocks, and skips +// table rows. +// - **[ContextFile](text, width)** — same intent as +// [Content] but tuned for `.context/*.md` files: aware +// of the markdown list continuation convention +// (2-space indent for follow-on lines under a +// bullet) so wrapped continuations look like the +// original input. Used by `ctx fmt` and the post-add +// formatter. +// +// # What Stays Unwrapped +// +// The wrap functions deliberately leave several constructs +// alone: +// +// - **YAML frontmatter** — keys and scalar values must +// stay on one line. +// - **Fenced code blocks** (` ``` ` / ` ~~~ `) — code +// is wrapped by the language, not the markdown +// renderer. +// - **Table rows** (lines that match the markdown table +// pattern) — rewrapping would break column alignment. +// - **Heading lines** — wrapping a heading mid-phrase +// would change semantics in many renderers. +// - **Lines that have no whitespace inside the body +// beyond the column limit** (e.g. a single long URL) +// — better to overflow than to break the link. +// +// # Concurrency +// +// All functions are pure: input string → output string. +// Concurrent callers never race. +// +// # Related Packages +// +// - [internal/cli/fmt] — the `ctx fmt` +// CLI surface. +// - [internal/cli/journal/core/normalize] — invokes +// [Content] when normalizing imported journal entries. +// - [internal/config/wrap] — column-width +// constant. 
package wrap diff --git a/internal/write/complete/doc.go b/internal/write/complete/doc.go index 9fd152f2d..9e44c2661 100644 --- a/internal/write/complete/doc.go +++ b/internal/write/complete/doc.go @@ -5,7 +5,7 @@ // SPDX-License-Identifier: Apache-2.0 // Package complete provides terminal output for the task completion -// command (ctx complete). +// command (ctx task complete). // // The single exported function [Completed] prints a confirmation // message when a task checkbox is toggled from [ ] to [x] in diff --git a/internal/write/drift/doc.go b/internal/write/drift/doc.go index df2c10e9b..f3188f6ae 100644 --- a/internal/write/drift/doc.go +++ b/internal/write/drift/doc.go @@ -1,16 +1,63 @@ // / ctx: https://ctx.ist // ,'`./ do you remember? -// `.,'\\ +// `.,'\ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package drift provides formatted output helpers for the drift command. -// -// All functions take *cobra.Command for output routing. -// Exports: [FixHeader], [FixRecheck], -// [FixedCount], [SkippedCount], [FixError], -// and 19 more. -// Exports: [FixHeader], [FixRecheck], -// [FixedCount], [SkippedCount], [FixError], -// [FixStaleness]. +// Package drift provides the **terminal-output helpers** +// the `ctx drift` and `ctx drift --fix` CLI surfaces use +// to render their per-issue progress and final summary. +// +// All exported functions take a `*cobra.Command` so +// they route through cobra's output stream (which tests +// can wire to a buffer for assertion). +// +// # Public Surface +// +// Output families: +// +// - **Fix progress** — [FixHeader], +// [FixRecheck], [FixedCount], +// [SkippedCount], [FixError], [FixStaleness]. +// Used by `--fix` to narrate what is being +// auto-remediated and what remained. +// - **Per-issue lines** — formatters for the +// individual issue rows (path refs, +// staleness markers, missing files, +// constitution violations) with the matching +// status glyph. 
+// - **Summaries** — final roll-up for `ctx +// drift` (counts of warnings/violations/ +// passed) and for `--fix` (counts of fixed +// vs skipped). +// +// # Why a Separate Output Package +// +// Same data, two surfaces (`ctx drift` and +// `ctx drift --fix`), each with its own preferred +// presentation. Hoisting both renderers keeps the +// drift detector +// ([internal/drift]) free of presentation +// concerns and the fix engine +// ([internal/cli/drift/core/fix]) free of UI +// strings. +// +// # Concurrency +// +// Pure data → io.Writer. Concurrent calls go +// through cobra's output stream which is +// serialized. +// +// # Related Packages +// +// - [internal/cli/drift] — chief +// consumer. +// - [internal/cli/drift/core/out] — JSON +// and text renderers for the report shape. +// - [internal/cli/drift/core/fix] — calls +// [FixHeader], [FixRecheck], etc., during +// auto-remediation. +// - [internal/drift] — the +// detector that produces the report this +// package narrates. package drift diff --git a/internal/write/initialize/doc.go b/internal/write/initialize/doc.go index f50403f0f..457491ba7 100644 --- a/internal/write/initialize/doc.go +++ b/internal/write/initialize/doc.go @@ -1,16 +1,55 @@ // / ctx: https://ctx.ist // ,'`./ do you remember? -// `.,'\\ +// `.,'\ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package initialize provides terminal output functions for the. -// -// All functions take *cobra.Command for output routing. -// Exports: [InfoOverwritePrompt], [InfoAborted], -// [InfoExistsSkipped], [InfoFileCreated], -// [Initialized], and 36 more. -// Exports: [InfoOverwritePrompt], [InfoAborted], -// [InfoExistsSkipped], [InfoFileCreated], -// [Initialized], [InfoWarnNonFatal]. 
+// Package initialize provides the **terminal-output +// helpers** the `ctx init` command uses to narrate every +// step of the initialization workflow — directory +// creation, foundation-file deployment, plugin +// detection, settings merge, hook installation, summary. +// +// The package owns ~40 named output functions, one per +// distinct user-visible event. Centralizing them keeps +// the init flow's terminal text consistent and makes +// localization a single-package change when it +// arrives. +// +// All exported functions take a `*cobra.Command` so +// they route through cobra's output stream (which +// tests can wire to a buffer for assertion). +// +// # Function Families +// +// - **Prompts** — [InfoOverwritePrompt], +// [InfoAborted] for the "should I overwrite?" +// dialog. +// - **Per-file results** — [InfoFileCreated], +// [InfoExistsSkipped], [InfoMerged] etc., one +// line per artifact written. +// - **Plugin / tool detection** — +// [InfoPluginInstalled], +// [InfoPluginEnabled], etc. +// - **Warnings & non-fatal errors** — +// [InfoWarnNonFatal] for issues the user +// should know about but that do not abort +// init. +// - **Summary** — [Initialized] (the final +// "ctx is ready, here's what to do next" +// banner). +// +// # Concurrency +// +// Pure data → io.Writer. cobra serializes +// concurrent writes through its output stream. +// +// # Related Packages +// +// - [internal/cli/initialize] — chief +// consumer. +// - [internal/cli/initialize/core/*] — every +// init sub-package routes its output here. +// - [internal/format] — the +// formatters used for sizes, counts, etc. package initialize diff --git a/internal/write/message/doc.go b/internal/write/message/doc.go index 5cc917042..513287dc6 100644 --- a/internal/write/message/doc.go +++ b/internal/write/message/doc.go @@ -1,16 +1,50 @@ // / ctx: https://ctx.ist // ,'`./ do you remember? -// `.,'\\ +// `.,'\ // \ Copyright 2026-present Context contributors. 
// SPDX-License-Identifier: Apache-2.0 -// Package message provides formatted output helpers for the message command. +// Package message provides the **terminal-output +// helpers** the `ctx hook message` CLI surface uses to +// render its `list`, `show`, `edit`, and `reset` +// subcommands' output. // -// All functions take *cobra.Command for output routing. -// Exports: [TemplateVars], [CtxSpecificWarning], -// [OverrideCreated], [EditHint], -// [SourceOverride], and 6 more. -// Exports: [TemplateVars], [CtxSpecificWarning], -// [OverrideCreated], [EditHint], -// [SourceOverride], [SourceDefault]. +// All exported functions take a `*cobra.Command` so +// they route through cobra's output stream. +// +// # Public Surface +// +// - **[TemplateVars]** — renders a template's +// placeholder variable list (the `%[1]s`-style +// positional parameters). +// - **[CtxSpecificWarning]** — the warning +// shown when the user tries to override a +// ctx-specific (non-customizable) message. +// - **[OverrideCreated]** — the +// "wrote override at PATH" line `edit` and +// `reset` print after a write. +// - **[EditHint]** — the "run `$EDITOR PATH` to +// edit" hint surfaced by `show` when no +// override exists yet. +// - **[SourceOverride] / [SourceDefault]** — +// "[override]" / "[default]" badges shown +// next to each message in `list` so users +// know which entries they have customized. +// +// # Concurrency +// +// Pure data → io.Writer. Concurrent calls +// serialize through cobra's output stream. +// +// # Related Packages +// +// - [internal/cli/message] — +// chief consumer. +// - [internal/cli/system/core/message] — +// the resolver that loads messages and +// decides override vs default; the badges +// this package renders come from there. +// - [internal/assets/hooks/messages] — +// the embedded message catalog `list` +// enumerates. 
package message diff --git a/internal/write/notify/doc.go b/internal/write/notify/doc.go index c12d118ab..830d07bb9 100644 --- a/internal/write/notify/doc.go +++ b/internal/write/notify/doc.go @@ -5,7 +5,7 @@ // SPDX-License-Identifier: Apache-2.0 // Package notify provides terminal output for webhook notification -// setup and testing (ctx notify setup, ctx notify test). +// setup and testing (ctx hook notify setup, ctx hook notify test). // // [SetupPrompt] displays the webhook URL prompt, [SetupDone] // confirms successful configuration. [TestResult] reports the HTTP diff --git a/internal/write/prune/doc.go b/internal/write/prune/doc.go index ff9cb2d66..aab3bb6aa 100644 --- a/internal/write/prune/doc.go +++ b/internal/write/prune/doc.go @@ -5,7 +5,7 @@ // SPDX-License-Identifier: Apache-2.0 // Package prune provides terminal output for the state file pruning -// command (ctx system prune). +// command (ctx prune). // // [DryRunLine] previews each file that would be removed with its // age. [ErrorLine] reports per-file removal failures. [Summary] diff --git a/internal/write/resource/doc.go b/internal/write/resource/doc.go index 9759ef845..6ef7466e1 100644 --- a/internal/write/resource/doc.go +++ b/internal/write/resource/doc.go @@ -1,12 +1,51 @@ // / ctx: https://ctx.ist // ,'`./ do you remember? -// `.,'\\ +// `.,'\ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package resource provides formatted output helpers for the. +// Package resource provides the **terminal-output +// helpers** the resource-related CLI surfaces use to +// render their results in either human-readable text or +// JSON for tooling. // -// All functions take *cobra.Command for output routing. -// Exports: [Text], [JSON]. -// Exports: [Text], [JSON]. +// All functions take a `*cobra.Command` so they route +// through cobra's output stream (which tests can wire +// to a buffer for assertion). 
+// +// # Public Surface +// +// - **[Text](cmd, payload)** — renders the +// resource snapshot in human-readable form — +// section headers, glyph-prefixed counts, +// summary line. +// - **[JSON](cmd, payload)** — emits the same +// payload as a structured JSON document with +// a UTC timestamp wrapper, suitable for +// `jq` consumption in CI. +// +// # Why a Separate Output Package +// +// The same data shape needs two different +// renderings (human vs machine). Hoisting both +// into a write-side package keeps the producer +// (the CLI command) free of presentation choices +// and the renderer free of business logic. +// +// # Concurrency +// +// Pure data → io.Writer transformation. +// Concurrent calls each write to the cobra +// command's output stream; cobra serializes them. +// +// # Related Packages +// +// - [internal/cli/sysinfo] — chief +// consumer when the resource snapshot comes +// from the system-resource collector. +// - [internal/sysinfo] — produces +// the [Resource] payloads. +// - [internal/format] — supplies +// the Number / Duration / TimeAgo formatters +// this package builds on. package resource diff --git a/internal/write/session/doc.go b/internal/write/session/doc.go index ae3680da9..203a27be7 100644 --- a/internal/write/session/doc.go +++ b/internal/write/session/doc.go @@ -5,7 +5,8 @@ // SPDX-License-Identifier: Apache-2.0 // Package session provides terminal output for session lifecycle -// commands (ctx pause, ctx resume, ctx wrap-up, ctx system session-event). +// commands (ctx hook pause, ctx hook resume, +// ctx system mark-wrapped-up, ctx system session-event). // // [Event] confirms a session start or end event was recorded. // [Paused] confirms hooks were suspended for the session. 
diff --git a/internal/write/steering/doc.go b/internal/write/steering/doc.go index 695466c0f..56bdbbec8 100644 --- a/internal/write/steering/doc.go +++ b/internal/write/steering/doc.go @@ -1,15 +1,49 @@ // / ctx: https://ctx.ist // ,'`./ do you remember? -// `.,'\\ +// `.,'\ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package steering provides formatted output helpers for steering commands. -// -// All functions take *cobra.Command for output routing. -// Exports: [Created], [Skipped], [InitSummary], -// [NoFilesFound], [FileEntry], [FileCount], -// [NoFilesMatch], [PreviewHeader], [PreviewEntry], -// [PreviewCount], [SyncWritten], [SyncSkipped], -// [SyncError], [SyncSummary]. +// Package steering provides the **terminal-output +// helpers** the `ctx steering` CLI subcommands use to +// narrate their `add`, `init`, `list`, `preview`, and +// `sync` operations. +// +// All exported functions take a `*cobra.Command` so +// they route through cobra's output stream (which +// tests can wire to a buffer for assertion). +// +// # Public Surface +// +// Output families: +// +// - **Init** — [Created], [Skipped], +// [InitSummary]. The `init` subcommand +// announces each foundation file it +// materializes (or skipped because it +// already exists), then summarizes counts. +// - **List / Preview** — [NoFilesFound], +// [FileEntry], [FileCount], [NoFilesMatch], +// [PreviewHeader], [PreviewEntry], +// [PreviewCount]. Render the available +// steering files and their inclusion-rule +// match results against a sample prompt. +// - **Sync** — [SyncWritten], [SyncSkipped], +// [SyncError], [SyncSummary]. Per-tool +// progress narration during +// `ctx steering sync`. +// +// # Concurrency +// +// Pure data → io.Writer. Concurrent calls +// serialize through cobra's output stream. +// +// # Related Packages +// +// - [internal/cli/steering] — chief +// consumer. +// - [internal/steering] — the engine +// this package narrates. 
+// - [internal/err/steering] — typed +// errors surfaced by [SyncError]. package steering diff --git a/internal/write/vscode/info.go b/internal/write/vscode/info.go index af4745e46..173281ad8 100644 --- a/internal/write/vscode/info.go +++ b/internal/write/vscode/info.go @@ -4,7 +4,6 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package vscode provides terminal output for VS Code artifact generation. package vscode import ( diff --git a/site/404.html b/site/404.html index fdcca82a8..115209983 100644 --- a/site/404.html +++ b/site/404.html @@ -124,9 +124,9 @@ - + -

A Meta-Experiment in AI-As to a disciplined system for persistent AI context, and what I have learned along the way.

-

Context is a Record

+

Context Is a Record

Context is a persistent record.

By "context", I don't mean model memory or stored thoughts:

I mean the durable record of decisions, learnings, and intent @@ -1322,7 +1300,7 @@

The Rename87dcfa1 README. 4f0e195 feat: separate orchestrator directive from agent tasks

-

YOLO Mode: Fast, But Dangerous

+

YOLO Mode: Fast, but Dangerous

The Ralph Loop made feature development incredibly fast.

But it created technical debt that I didn't notice until later.

A comparison session on January 25th revealed the patterns:

@@ -1425,11 +1403,11 @@

The Constitution versus Conventions conventions...) should go in to CONVENTIONS.md.

Here's how ctx explained why the distinction was important:

-

Decision record, 2026-01-25

+

Decision Record, 2026-01-25

Overly strict constitution creates friction and gets ignored.

Conventions can be bent; constitution cannot.

-

Hooks: Harder Than They Look

+

Hooks: Harder than They Look

Claude Code hooks seemed simple: Run a script before/after certain events.

But I hit multiple gotchas:

1. Key names matter

@@ -1466,7 +1444,7 @@

The Session Files -

Middle Ground: the Scratchpad

+

Middle Ground: The Scratchpad

For sensitive notes that do need to travel with the project, ctx pad stores encrypted one-liners in git, and ctx pad add "label" --file PATH can ingest small files.

@@ -1562,7 +1540,7 @@

Task Archives: The Completed WorkPhase 13: Rich Context Entries

That's an impressive <strong>173 commits</strong> across 8 days of development.

-

What I Learned About AI-Assisted Development

+

What I Learned about AI-Assisted Development

1. Memory changes everything

When the AI remembers decisions, it doesn't repeat mistakes.

When the AI knows your conventions, it follows them.

@@ -1616,7 +1594,7 @@

Conclusionctx.ist.
-

Session Records are a Gold Mine

+

Session Records Are a Gold Mine

By the time of this writing, I have more than 70 megabytes of text-only session capture, spread across >100 Markdown and JSONL files.

diff --git a/site/blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/index.html b/site/blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/index.html index c9ea3c9aa..2de6d19fc 100644 --- a/site/blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/index.html +++ b/site/blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/index.html @@ -131,9 +131,9 @@ - + -

ctx

-

Digging Through the Past to Build the Future

+

Digging through the Past to Build the Future

Jose Alekhinne / 2026-02-01

-

What if Your AI Could Remember Everything?

+

What If Your AI Could Remember Everything?

Not just the current session, but every session:

  • Every decision made,
  • @@ -1305,7 +1283,7 @@

    The Problem: Amnesia Isn't
  • "How did the embed.go split actually happen?"
-

Fate is Whimsical

+

Fate Is Whimsical

The irony was painful:

I built a tool to prevent AI amnesia, but I was suffering from human amnesia about what happened in AI sessions.

@@ -1360,7 +1338,7 @@

ctx recall: Browse Your PastSlugs are auto-generated from session IDs (memorable names instead of UUIDs). The goal (as the name implies) is recall, not archival accuracy.

-

2,121 lines of new code

+

2,121 Lines of New Code

The ctx recall feature was the largest single addition:

parser library, CLI commands, test suite, and slash command.

@@ -1448,7 +1426,7 @@

The Structure: Decision --application "Added to Makefile and CI config"

-

Structured entries are prompts to the AI

+

Structured Entries Are Prompts to the AI

When the AI reads a decision with full context, rationale, and consequences, it understands the why, not just the what.

@@ -1474,7 +1452,7 @@

The Index: Quick Reference Tables

The same structure serves two very different readers.

-

Reindex after manual edits

+

Reindex After Manual Edits

If you edit entries by hand, rebuild the index with:

ctx decisions reindex
 ctx learnings reindex
@@ -1506,7 +1484,7 @@ 

1. Raw Data Isn't Knowledge2. Enforcement > Documentation

-

The Prompt is a Guideline

+

The Prompt Is a Guideline

The code is more what you'd call 'guidelines' than actual rules.

-Hector Barbossa

@@ -1519,7 +1497,7 @@

4. Meta-Tools CompoundTools that analyze their own development tend to generalize well.

The journal system started as a way to understand ctx itself.

It immediately became useful for everything else.

-

v0.2.0 in The Numbers

+

v0.2.0 in the Numbers

This was a heavy release. The numbers reflect that:

diff --git a/site/blog/2026-02-01-refactoring-with-intent/index.html b/site/blog/2026-02-01-refactoring-with-intent/index.html index 5518e3a5b..3bb913045 100644 --- a/site/blog/2026-02-01-refactoring-with-intent/index.html +++ b/site/blog/2026-02-01-refactoring-with-intent/index.html @@ -131,9 +131,9 @@ - + -

Summaries first. Details: on demand.

-

Quality Over Quantity

+

Quality over Quantity

Here is the counterintuitive part: more context can make AI worse.

Extra tokens add noise, not clarity:

-

Judgment Suppression is Dangerous

+

Judgment Suppression Is Dangerous

The attack vector is structurally identical to prompt injection.

It teaches the AI that its own judgment is wrong.

It weakens or disables safeguard mechanisms, and it is @@ -1409,7 +1387,7 @@

Conflict Pattern 5: Universal Tri

Universal triggers override the platform's relevance matching: The AI spends tokens on process overhead instead of the actual task.

-

ctx preserves relevance

+

ctx Preserves Relevance

This is exactly the failure mode ctx exists to mitigate:

Wasting attention budget on irrelevant process instead of task-specific state.

diff --git a/site/blog/2026-02-05-you-cant-import-expertise/index.html b/site/blog/2026-02-05-you-cant-import-expertise/index.html index 724d30bd3..b73222bc0 100644 --- a/site/blog/2026-02-05-you-cant-import-expertise/index.html +++ b/site/blog/2026-02-05-you-cant-import-expertise/index.html @@ -131,9 +131,9 @@ - + -

On first run, generates an admin token and prints it to -stdout. Save this token — it's required for +stdout. Save this token; it's required for ctx connection register in client projects. Subsequent runs reuse the stored token from <data-dir>/admin.token.

@@ -2232,7 +2232,7 @@

Cluster Mode --peers host2:9901,host3:9901

Raft is used only for leader election. Data replication -uses sequence-based gRPC sync on the append-only JSONL log — +uses sequence-based gRPC sync on the append-only JSONL log; there is no multi-node consensus on writes. See the HA cluster recipe for the full setup and the Raft-lite durability caveat.

@@ -2284,7 +2284,7 @@

ctx hub stop

ctx hub status

Show cluster status: role, peers, sync state, entry count, @@ -2310,12 +2310,12 @@

ctx hub stepdown

See Also

diff --git a/site/cli/resume/index.html b/site/cli/resume/index.html index ceb3e84a4..5fec4f3ab 100644 --- a/site/cli/resume/index.html +++ b/site/cli/resume/index.html @@ -1790,7 +1790,7 @@

ctx hook resume

See also:

diff --git a/site/cli/serve/index.html b/site/cli/serve/index.html index 6acf64fdf..6cb62750f 100644 --- a/site/cli/serve/index.html +++ b/site/cli/serve/index.html @@ -2037,7 +2037,7 @@

Arguments
ctx serve                         # Default: serve .context/journal-site
@@ -2046,12 +2046,12 @@ 

ArgumentsSee Also

diff --git a/site/cli/steering/index.html b/site/cli/steering/index.html index 3f146149f..d9f94a206 100644 --- a/site/cli/steering/index.html +++ b/site/cli/steering/index.html @@ -2180,8 +2180,8 @@

ctx steering +

If you find yourself writing "the AI should always do X", +that belongs in steering, not decisions.

Anatomy of a Steering File

---
@@ -2266,7 +2266,7 @@ 

ctx steering previewctx steering sync

Sync steering files to tool-native formats for tools that have a built-in rules primitive. Not every tool needs -this — Claude Code and Codex use a different delivery +this; Claude Code and Codex use a different delivery mechanism (see below).

Examples:

Expected line: ctx: ctx mcp serve - ✓ Connected. If it's missing, reinstall the plugin from Claude Code -(/plugin → find ctx → uninstall → install again) — +(/plugin → find ctx → uninstall → install again); older plugin versions shipped without the .mcp.json file.

@@ -2366,10 +2366,10 @@

How Claude Code and Codex Co
  • Running ctx steering sync before starting a Claude session does nothing for Claude's benefit. Skip it.
  • ctx steering preview still works for validating your - descriptions — it doesn't depend on sync.
  • + descriptions; it doesn't depend on sync.
  • If Claude Code is your only tool, the ctx steering commands you care about are add, list, preview, - init — never sync.
  • + init (never sync).
  • If you use both Claude Code and (say) Cursor, ctx steering sync covers Cursor (where auto and manual work natively) while the hook+MCP pipeline @@ -2385,12 +2385,12 @@

    ctx agent IntegrationSee Also

    diff --git a/site/cli/sysinfo/index.html b/site/cli/sysinfo/index.html index 8003246e3..5d11be19e 100644 --- a/site/cli/sysinfo/index.html +++ b/site/cli/sysinfo/index.html @@ -1739,7 +1739,7 @@

    Sysinfo

    ctx sysinfo

    Display a snapshot of system resources (memory, swap, disk, load) with threshold-based alert severities. Mirrors what the -check-resource hook plumbing monitors in the background — but this +check-resource hook plumbing monitors in the background, but this command prints the full report at any severity level, not only at DANGER.

    ctx sysinfo [flags]
    diff --git a/site/cli/system/index.html b/site/cli/system/index.html
    index 3dfff4924..a495c7daa 100644
    --- a/site/cli/system/index.html
    +++ b/site/cli/system/index.html
    @@ -1934,7 +1934,7 @@ 

    ctx systemPlumbing Subcommands

    -

    These are not hook handlers — they're called by skills and editor +

    These are not hook handlers; they're called by skills and editor integrations during the session lifecycle. Safe to run manually.

    ctx system mark-journal

    Update processing state for a journal entry. Records the current date diff --git a/site/cli/trace/index.html b/site/cli/trace/index.html index 15b481d5f..48ef09480 100644 --- a/site/cli/trace/index.html +++ b/site/cli/trace/index.html @@ -1849,7 +1849,7 @@

    Commit Context Tracing

    ctx trace

    Show the context behind git commits. Links commits back to the decisions, tasks, learnings, and sessions that motivated them.

    -

    git log shows what changed, git blame shows who — +

    git log shows what changed, git blame shows who, and ctx trace shows why.

    ctx trace [commit] [flags]
     
    diff --git a/site/cli/trigger/index.html b/site/cli/trigger/index.html index b3077fe85..1db36210c 100644 --- a/site/cli/trigger/index.html +++ b/site/cli/trigger/index.html @@ -1265,7 +1265,7 @@ - Three Hooking Concepts in ctx — Don't Confuse Them + Three Hooking Concepts in ctx (Don't Confuse Them) @@ -2068,7 +2068,7 @@ - Three Hooking Concepts in ctx — Don't Confuse Them + Three Hooking Concepts in ctx (Don't Confuse Them) @@ -2181,7 +2181,7 @@

    Trigger

    ctx trigger

    Manage lifecycle triggers: executable scripts that fire at specific events during an AI session. Triggers can block tool -calls, inject context, automate reactions — any side effect +calls, inject context, and automate reactions: any side effect you want at session boundaries, tool boundaries, or file-save events.

    ctx trigger <subcommand>
    @@ -2344,7 +2344,7 @@ 

    ctx trigger disable
    ctx trigger disable inject-context
     # Disabled .context/hooks/session-start/inject-context.sh
     

    -

    Three Hooking Concepts in ctx — Don't Confuse Them

    +

    Three Hooking Concepts in ctx (Don't Confuse Them)

    This is a common source of confusion. ctx has three distinct hook-like layers, and they serve different purposes:

    @@ -2381,13 +2381,13 @@

    Three Hooking Concepts that your AI tool will run at lifecycle events. Use Claude Code hooks for tool-specific integrations that don't need to be portable across tools. ctx system hooks are not something -you author — they're the internal nudge machinery that ships +you author; they're the internal nudge machinery that ships with ctx.

    See Also

    diff --git a/site/home/common-workflows/index.html b/site/home/common-workflows/index.html index 515b616b4..40bd5fee1 100644 --- a/site/home/common-workflows/index.html +++ b/site/home/common-workflows/index.html @@ -2012,7 +2012,7 @@

    Track ContextWhen working with an AI agent, use /ctx-task-add, /ctx-decision-add, or /ctx-learning-add instead of raw ctx add commands. The agent automatically picks up session ID, -branch, and commit hash from its context — no manual flags needed.

    +branch, and commit hash from its context, so no manual flags are needed.

    # Add a task
     ctx add task "Implement user authentication" \
    @@ -2174,7 +2174,7 @@ 

    Trace Commit Contextctx trace hook enable

    From now on, every git commit automatically gets a ctx-context -trailer linking it to relevant context. No extra steps needed — +trailer linking it to relevant context. No extra steps needed; just use ctx add, ctx task complete, and commit as usual.

    # Later: why was this commit made?
     ctx trace abc123
    diff --git a/site/home/context-files/index.html b/site/home/context-files/index.html
    index 0f03a0015..e48f94552 100644
    --- a/site/home/context-files/index.html
    +++ b/site/home/context-files/index.html
    @@ -2628,9 +2628,9 @@ 

    File OverviewTwo subdirectories under .context/ are implementation details that are user-editable but not part of the priority read order:

    @@ -2081,11 +2081,11 @@

    1. Initialize ContextWriting Steering Files for the @@ -2141,13 +2141,15 @@

    -

    Triggers Are Arbitrary Code — Treat Them like Pre-Commit Hooks

    +

    Triggers Are Arbitrary Code: Treat Them like Pre-Commit Hooks

    Only Enable Scripts You've Read and Understand

    A trigger is a shell script with the executable bit set. @@ -1767,7 +1767,7 @@

    Triggers A

    ctx trigger add intentionally creates new scripts disabled (no executable bit). You must ctx trigger enable <name> after reviewing the contents. -That's not a suggestion — it's the security model.

    +That's not a suggestion; it's the security model.

    Three Hook-like Layers in ctx

    Triggers are one of three distinct hook-like concepts in @@ -1805,7 +1805,7 @@

    Three Hook-like Layers in ctx

    +

    Triggers vs Steering: Same Problem, Different Shape

    Triggers are the imperative counterpart to steering files. Steering expresses persistent rules the AI reads before each prompt; triggers @@ -1818,12 +1818,12 @@

    Triggers vs Steering

    Most projects use both.

    Where to Go Next

    diff --git a/site/operations/hub-failure-modes/index.html b/site/operations/hub-failure-modes/index.html index aaabe8900..657c10da7 100644 --- a/site/operations/hub-failure-modes/index.html +++ b/site/operations/hub-failure-modes/index.html @@ -1160,7 +1160,7 @@ - Partition — Majority Side Reachable + Partition: Majority Side Reachable @@ -1171,7 +1171,7 @@ - Partition — Split Brain (No Quorum) + Partition: Split Brain (No Quorum) @@ -1659,7 +1659,7 @@ - Partition — Majority Side Reachable + Partition: Majority Side Reachable @@ -1670,7 +1670,7 @@ - Partition — Split Brain (No Quorum) + Partition: Split Brain (No Quorum) @@ -1990,13 +1990,13 @@

    Client Loses Connection Mid-Stream

    What you should do: nothing. If reconnects are looping, check firewall state on the hub and ctx hub status output.

    -

    Partition — Majority Side Reachable

    +

    Partition: Majority Side Reachable

    What happens: clients routed to the majority side continue to publish and listen. The minority nodes step down to followers that cannot accept writes (Raft quorum lost).

    What you should do: let it heal. When the partition closes, followers catch up via sequence-based sync automatically.

    -

    Partition — Split Brain (No Quorum)

    +

    Partition: Split Brain (No Quorum)

    What happens: no node holds a majority, so no leader is elected. All nodes become read-only. ctx connection publish and ctx add --share fail with a "no leader" error; local writes @@ -2018,7 +2018,7 @@

    Disk Full on the Leader

    What you should do: free disk or grow the volume, then -nothing else — the hub resumes accepting writes on the next +nothing else; the hub resumes accepting writes on the next append attempt.

    Corrupt entries.jsonl

    What happens: if the last line is a partial JSON write from a @@ -2033,7 +2033,7 @@

    meta.json / someone copied one file without the other.

    What you should do: restore both files from the same backup, or accept the higher sequence by regenerating meta.json from -entries.jsonl (manual for now — file a bug).

    +entries.jsonl (manual for now; file a bug).

    Cluster

    Leader Crash, Clean Shutdown

    What happens: ctx hub stop triggers stepdown first, so @@ -2042,7 +2042,7 @@

    Leader Crash, Clean ShutdownLeader Crash, Hard Fail (Kill -9, Power Loss)

    What happens: Raft detects the missing heartbeat and elects a new leader within a few seconds. Writes the old leader accepted -but had not yet replicated can be lost — see the Raft-lite +but had not yet replicated can be lost. See the Raft-lite warning in the cluster recipe.

    What you should do: if you need stronger durability, run ctx connection listen on a dedicated "collector" project that @@ -2059,7 +2059,7 @@

    Lost Admin TokenCompromised Admin Token

    What happens: anyone with the token can register new @@ -2080,7 +2080,7 @@

    Compromised Client Tokenclients.json, restart the hub, and re-register the legitimate project with a fresh token. Audit entries.jsonl for entries published after the compromise timestamp and quarantine any -that look suspicious — remember that Origin on those entries +that look suspicious; remember that Origin on those entries proves nothing.

    Compromised Hub Host

    What happens: <data-dir>/clients.json stores client @@ -2116,7 +2116,7 @@

    The Short List -Cluster quorum — run ctx hub status on each peer +Cluster quorum; run ctx hub status on each peer Hub won't start after crash @@ -2128,7 +2128,7 @@

    The Short List

    ctx Hub: Operations

Running the ctx Hub in production. This page is -for operators — people running a hub for themselves or a +for operators: people running a hub for themselves or a team, not people writing to a hub someone else is running.

    If you have not read it yet, start with the ctx Hub overview. It @@ -1704,7 +1704,7 @@

    Starting and StoppingSystemd Unit

    For production single-node deployments, run the hub as a systemd @@ -1737,13 +1737,13 @@

    Systemd Unit

    Backup and Restore

    Because entries.jsonl is append-only, backups are trivial:

    -
    # Hot backup — safe while the hub is running.
    +
    # Hot backup, safe while the hub is running.
     cp <data-dir>/entries.jsonl backups/entries-$(date +%F).jsonl
     cp <data-dir>/meta.json      backups/meta-$(date +%F).json
     cp <data-dir>/clients.json   backups/clients-$(date +%F).json
     

    For a consistent snapshot across all three files, stop the hub, -copy, then start again — or use a filesystem-level snapshot (LVM, +copy, then start again, or use a filesystem-level snapshot (LVM, ZFS, Btrfs).

    Restore:

    ctx hub stop                           # Stop the hub
    @@ -1755,7 +1755,7 @@ 

    Backup and RestoreLog Rotation

    entries.jsonl grows unbounded. For long-lived hubs, rotate it offline:

    @@ -1777,12 +1777,12 @@

    MonitoringUpgrading

    diff --git a/site/operations/index.html b/site/operations/index.html index d2051b525..89e9cec90 100644 --- a/site/operations/index.html +++ b/site/operations/index.html @@ -1483,7 +1483,7 @@

    Autonomous Loopsctx providing persistent memory between iterations.


    Hub

    -

    Operator guides for running a ctx Hub — the gRPC server that +

    Operator guides for running a ctx Hub, the gRPC server that fans out structured entries across projects. If you're a client connecting to a Hub someone else runs, see ctx connect and the @@ -1493,7 +1493,7 @@

    Hub Operations

    Hub Failure Modes

    What can go wrong in network, storage, cluster, auth, and -clock layers — and what you should do about each one. Includes +clock layers, and what you should do about each one. Includes the short-list table oncall engineers will want bookmarked.


    Maintainers

    diff --git a/site/operations/release/index.html b/site/operations/release/index.html index f3f1b3798..c5cff788f 100644 --- a/site/operations/release/index.html +++ b/site/operations/release/index.html @@ -1785,8 +1785,8 @@

    Cutting a Release

    Full Release Checklist

    This page covers the mechanics of cutting a release (bump, tag, push). -For the complete pre-release ceremony — audits, tests, verification, and -post-release steps — see the +For the complete pre-release ceremony (audits, tests, verification, and +post-release steps), see the Release Checklist runbook.

    Prerequisites

    diff --git a/site/operations/runbooks/architecture-exploration/index.html b/site/operations/runbooks/architecture-exploration/index.html index 414309c25..891825ed2 100644 --- a/site/operations/runbooks/architecture-exploration/index.html +++ b/site/operations/runbooks/architecture-exploration/index.html @@ -1815,9 +1815,9 @@

    Architecture Exploration~/WORKSPACE/).

    Companion skills:

      -
    • /ctx-architecture — structural baseline and principal analysis
    • -
    • /ctx-architecture-enrich — code intelligence enrichment via GitNexus
    • -
    • /ctx-architecture-failure-analysis — adversarial failure analysis
    • +
    • /ctx-architecture: structural baseline and principal analysis
    • +
    • /ctx-architecture-enrich: code intelligence enrichment via GitNexus
    • +
    • /ctx-architecture-failure-analysis: adversarial failure analysis

    Overview

    @@ -1999,7 +1999,7 @@

    PromptAppend to `.arch-explorer/run-log.md`: ```markdown -## 2026-04-07T14:00:00Z — ctx — principal +## 2026-04-07T14:00:00Z / ctx / principal **Phase:** principal **Convergence:** 0.45 -> 0.72 @@ -2053,7 +2053,7 @@

    Prompt across consecutive runs) - All 5 lenses have been applied - Convergence score is `null` after 3 attempts (artifacts aren't being - generated properly — log warning and move on) + generated properly; log warning and move on) When a repo is explored, advance `current_repo_index` in the manifest. diff --git a/site/operations/runbooks/docs-semantic-audit/index.html b/site/operations/runbooks/docs-semantic-audit/index.html index 997bd887f..63423e990 100644 --- a/site/operations/runbooks/docs-semantic-audit/index.html +++ b/site/operations/runbooks/docs-semantic-audit/index.html @@ -1855,10 +1855,10 @@

    PromptAfter the Audit

      -
    1. Triage findings — not everything needs fixing. Focus on high severity.
    2. -
    3. Merge weak pages first — fewer pages is almost always better.
    4. -
    5. Add cross-links — cheapest improvement, highest reader impact.
    6. -
    7. File split decisions in DECISIONS.md — page splits are architectural.
    8. +
    9. Triage findings: not everything needs fixing. Focus on high severity.
    10. +
    11. Merge weak pages first: fewer pages is almost always better.
    12. +
    13. Add cross-links: cheapest improvement, highest reader impact.
    14. +
    15. File split decisions in DECISIONS.md: page splits are architectural.
    16. Regenerate the site and spot-check nav after structural changes.

    History

    diff --git a/site/operations/runbooks/hub-deployment/index.html b/site/operations/runbooks/hub-deployment/index.html index a8ecb3efe..dfaaf9e9a 100644 --- a/site/operations/runbooks/hub-deployment/index.html +++ b/site/operations/runbooks/hub-deployment/index.html @@ -1958,11 +1958,11 @@

    Hub DeploymentHub overview — what the hub - is and isn't

  • -
  • Hub operations — data directory, systemd, +
  • Hub overview: what the hub + is and is not
  • +
  • Hub operations: data directory, systemd, backup, monitoring
  • -
  • Hub failure modes — what can go wrong
  • +
  • Hub failure modes: what can go wrong

  • Step 1: Start the Hub

    @@ -1980,8 +1980,8 @@

    Step 1: Start the HubStep 2: Generate the Admin Token

    On first start, the hub writes admin.token to the data directory (default ~/.ctx/hub-data/):

    diff --git a/site/operations/runbooks/new-contributor/index.html b/site/operations/runbooks/new-contributor/index.html index e4a083e1a..2f820fc60 100644 --- a/site/operations/runbooks/new-contributor/index.html +++ b/site/operations/runbooks/new-contributor/index.html @@ -1957,8 +1957,8 @@

    Step 4: Install the Plugin

    Step 5: Switch to Dev Profile

    ctx config switch dev
     
    -

    This enables verbose logging and notify events — useful during -development.

    +

    This enables verbose logging and notify events (useful during +development).

    Step 6: Verify Hooks

    Start a Claude Code session and check that hooks fire:

    claude
    diff --git a/site/operations/runbooks/plugin-release/index.html b/site/operations/runbooks/plugin-release/index.html
    index da74ef821..b71657111 100644
    --- a/site/operations/runbooks/plugin-release/index.html
    +++ b/site/operations/runbooks/plugin-release/index.html
    @@ -1973,7 +1973,7 @@ 

    Step 1: Update hooks.json (If
    # Verify hook definitions match implementations
     make audit
     
    -

    Check that plugin.json lists all hooks correctly — missing +

    Check that plugin.json lists all hooks correctly. Missing hooks silently fail to fire.

    Step 2: Bump Version

    Update the version in three places:

    diff --git a/site/operations/runbooks/release-checklist/index.html b/site/operations/runbooks/release-checklist/index.html index 4c80d58e7..1082b83db 100644 --- a/site/operations/runbooks/release-checklist/index.html +++ b/site/operations/runbooks/release-checklist/index.html @@ -2074,7 +2074,7 @@

    1. Run the Codebase AuditUse the codebase audit runbook prompt with your agent. Focus on analyses 1-4 (extractable patterns, documentation drift, maintainability, security). Triage findings -into TASKS.md — anything blocking ships before the release.

    +into TASKS.md; anything blocking ships before the release.

    2. Run the Docs Semantic Audit

    Use the docs semantic audit runbook prompt. Fix high-severity findings (weak pages, broken narrative diff --git a/site/operations/runbooks/sanitize-permissions/index.html b/site/operations/runbooks/sanitize-permissions/index.html index 4d2e0ce17..a246b5cd1 100644 --- a/site/operations/runbooks/sanitize-permissions/index.html +++ b/site/operations/runbooks/sanitize-permissions/index.html @@ -1998,7 +1998,7 @@

    Sanitize PermissionsWhy Manual, Not Automated

    settings.local.json controls what the agent can do without asking. An agent that can edit its own permission file is a self-escalation -vector — especially if the skill is auto-accepted. Keep this manual.

    +vector, especially if the skill is auto-accepted. Keep this manual.

    When to run: After busy sessions where you clicked "Allow" many times, weekly hygiene (pair with ctx drift), or before committing .claude/settings.local.json.

    diff --git a/site/recipes/architecture-deep-dive/index.html b/site/recipes/architecture-deep-dive/index.html index 6ae1b9dbc..3c850b04d 100644 --- a/site/recipes/architecture-deep-dive/index.html +++ b/site/recipes/architecture-deep-dive/index.html @@ -1140,7 +1140,7 @@

    TL;DR&pa /ctx-architecture-failure-analysis

    Each pass builds on the previous one. Run them in order. The -output accumulates in .context/ — each pass reads the prior +output accumulates in .context/; each pass reads the prior artifacts and extends them.

    Commands and Skills Used

    @@ -1185,15 +1185,15 @@

    Pass 1: Map What Exists

    Produces:

      -
    • ARCHITECTURE.md — succinct project map (< 4000 tokens), +
    • ARCHITECTURE.md: succinct project map (< 4000 tokens), loaded at every session start
    • -
    • DETAILED_DESIGN*.md — deep per-module reference with +
    • DETAILED_DESIGN*.md: deep per-module reference with exported API, data flow, danger zones, extension points
    • -
    • CHEAT-SHEETS.md — lifecycle flow diagrams
    • -
    • map-tracking.json — coverage state with confidence scores
    • +
    • CHEAT-SHEETS.md: lifecycle flow diagrams
    • +
    • map-tracking.json: coverage state with confidence scores

    This pass forces deep code reading. No shortcuts, no code -intelligence tools — the agent reads every module it analyzes. +intelligence tools; the agent reads every module it analyzes. That forced reading is what makes the subsequent passes useful.

    When to run: First time on a codebase, or after significant structural changes (new packages, moved files, changed @@ -1213,7 +1213,7 @@

    Pass 2: Enrich with Code Intellige
  • Domain clustering validation
  • Registration site discovery
  • -

    This pass does not replace reading — it quantifies what reading +

    This pass does not replace reading; it quantifies what reading found. If Pass 1 says "module X depends on module Y," Pass 2 says "module X has 47 callers in module Y, and changing function Z would affect 12 downstream consumers."

    @@ -1242,7 +1242,7 @@

    Pass 3: Hunt for Failure ModesWhat You GetTips

    • Run Pass 1 with focus areas if the codebase is large. - The skill asks what to go deep on — name the modules you're + The skill asks what to go deep on, so name the modules you're about to change.
    • You don't need all three passes every time. Pass 1 is the foundation. Pass 2 and 3 are for when you need @@ -1308,9 +1308,9 @@

      Tips&par

    See Also

    See also: Detecting and Fixing Context Drift -— keep architecture artifacts fresh between deep-dive sessions.

    +to keep architecture artifacts fresh between deep-dive sessions.

    See also: Detecting and Fixing Context Drift -— structural checks that complement architecture analysis.

    +for structural checks that complement architecture analysis.

    diff --git a/site/recipes/configuration-profiles/index.html b/site/recipes/configuration-profiles/index.html index 99633172f..9d4e10b2e 100644 --- a/site/recipes/configuration-profiles/index.html +++ b/site/recipes/configuration-profiles/index.html @@ -2002,10 +2002,10 @@

    Typical WorkflowCustomizing Profiles

    Edit the source files directly:

      -
    • .ctxrc.dev -- add any .ctxrc keys you want active during +
    • .ctxrc.dev: add any .ctxrc keys you want active during development (e.g., log_level: debug, notify.events, notify.webhook_url).
    • -
    • .ctxrc.base -- keep this minimal. It represents your +
    • .ctxrc.base: keep this minimal. It represents your "production" defaults.

    After editing a source file, re-run ctx config switch <profile> diff --git a/site/recipes/hook-sequence-diagrams/index.html b/site/recipes/hook-sequence-diagrams/index.html index b60034a29..7a49f28ff 100644 --- a/site/recipes/hook-sequence-diagrams/index.html +++ b/site/recipes/hook-sequence-diagrams/index.html @@ -1330,7 +1330,7 @@

    Hook Sequence Diagrams

    ctx

    Hook Lifecycle

    -

    This page documents the ctx system hooks — the built-in +

    This page documents the ctx system hooks: the built-in ctx system * subcommands that Claude Code invokes via .claude/hooks.json at lifecycle events. These are owned by ctx itself, not authored by users.

    @@ -1338,15 +1338,15 @@

    Hook LifecycleNot to Be Confused with ctx trigger

    ctx has three distinct hook-like layers:

    This page is only about the first category.

    diff --git a/site/recipes/hub-cluster/index.html b/site/recipes/hub-cluster/index.html index 68bb5199f..8d52aa738 100644 --- a/site/recipes/hub-cluster/index.html +++ b/site/recipes/hub-cluster/index.html @@ -1389,7 +1389,7 @@ - Step 1 — Bootstrap the First Node + Step 1: Bootstrap the First Node @@ -1400,7 +1400,7 @@ - Step 2 — Start the Other Nodes + Step 2: Start the Other Nodes @@ -1411,7 +1411,7 @@ - Step 3 — Verify Cluster State + Step 3: Verify Cluster State @@ -1422,7 +1422,7 @@ - Step 4 — Register Clients with Failover Peers + Step 4: Register Clients with Failover Peers @@ -1817,7 +1817,7 @@ - Step 1 — Bootstrap the First Node + Step 1: Bootstrap the First Node @@ -1828,7 +1828,7 @@ - Step 2 — Start the Other Nodes + Step 2: Start the Other Nodes @@ -1839,7 +1839,7 @@ - Step 3 — Verify Cluster State + Step 3: Verify Cluster State @@ -1850,7 +1850,7 @@ - Step 4 — Register Clients with Failover Peers + Step 4: Register Clients with Failover Peers @@ -1992,7 +1992,7 @@

    ctx Hub: High-Availabili

    This recipe assumes you've read the ctx Hub overview and the Multi-machine setup. HA only makes -sense in the "small trusted team" story — a personal +sense in the "small trusted team" story; a personal cross-project brain on one workstation does not need three Raft peers.

    @@ -2008,7 +2008,7 @@

    ctx Hub: High-Availabili that write can be lost. Do not use the hub as a bank ledger.

    Topology

    -

    A minimum HA cluster is three nodes. Two is worse than one — +

    A minimum HA cluster is three nodes. Two is worse than one: it doubles failure probability without providing quorum.

             +-------------+
              |  client(s)  |
    @@ -2025,13 +2025,13 @@ 

    Topology Raft (leader election) gRPC (data sync)

    -

    Step 1 — Bootstrap the First Node

    +

    Step 1: Bootstrap the First Node

    ctx hub start --daemon \
       --port 9900 \
       --peers hub-b.lan:9900,hub-c.lan:9900
     

    The node starts a Raft election as soon as it sees its peers.

    -

    Step 2 — Start the Other Nodes

    +

    Step 2: Start the Other Nodes

    On hub-b.lan:

    ctx hub start --daemon \
       --port 9900 \
    @@ -2044,7 +2044,7 @@ 

    Step 2 — Start the Other NodesStep 3 — Verify Cluster State

    +

    Step 3: Verify Cluster State

    From any node:

    ctx hub status
     
    @@ -2056,7 +2056,7 @@

    Step 3 — Verify Cluster Stateentries: 1248 uptime: 3h42m

    -

    Step 4 — Register Clients with Failover Peers

    +

    Step 4: Register Clients with Failover Peers

    When registering a client, give it the full peer list:

    ctx connection register hub-a.lan:9900 \
       --token ctx_adm_... \
    @@ -2114,11 +2114,11 @@ 

    Failure Modes at a GlanceHub failure modes.

    See Also

    diff --git a/site/recipes/hub-getting-started/index.html b/site/recipes/hub-getting-started/index.html index 8da3f4440..b8e3010f8 100644 --- a/site/recipes/hub-getting-started/index.html +++ b/site/recipes/hub-getting-started/index.html @@ -1291,7 +1291,7 @@ - Step 1 — Start the Hub + Step 1: Start the Hub @@ -1302,7 +1302,7 @@ - Step 2 — Register the First Project + Step 2: Register the First Project @@ -1313,7 +1313,7 @@ - Step 3 — Choose What to Receive + Step 3: Choose What to Receive @@ -1324,7 +1324,7 @@ - Step 4 — Publish a Decision + Step 4: Publish a Decision @@ -1335,7 +1335,7 @@ - Step 5 — Register a Second Project and Sync + Step 5: Register a Second Project and Sync @@ -1346,7 +1346,7 @@ - Step 6 — Watch Entries Arrive Live + Step 6: Watch Entries Arrive Live @@ -1357,7 +1357,7 @@ - Step 7 — Feed Shared Knowledge into the Agent + Step 7: Feed Shared Knowledge into the Agent @@ -1872,7 +1872,7 @@ - Step 1 — Start the Hub + Step 1: Start the Hub @@ -1883,7 +1883,7 @@ - Step 2 — Register the First Project + Step 2: Register the First Project @@ -1894,7 +1894,7 @@ - Step 3 — Choose What to Receive + Step 3: Choose What to Receive @@ -1905,7 +1905,7 @@ - Step 4 — Publish a Decision + Step 4: Publish a Decision @@ -1916,7 +1916,7 @@ - Step 5 — Register a Second Project and Sync + Step 5: Register a Second Project and Sync @@ -1927,7 +1927,7 @@ - Step 6 — Watch Entries Arrive Live + Step 6: Watch Entries Arrive Live @@ -1938,7 +1938,7 @@ - Step 7 — Feed Shared Knowledge into the Agent + Step 7: Feed Shared Knowledge into the Agent @@ -2055,13 +2055,13 @@

    ctx Hub: Getting Started

    Stand up a single-node ctx Hub on localhost, register two projects, publish a decision from one, and see it appear in the -other — all in under five minutes.

    +other, all in under five minutes.

    Read This First

    If you haven't already, skim the ctx Hub overview. It explains the mental model, names the two user stories (personal vs small -team), and — importantly — lists what the hub does not do. +team), and (importantly) lists what the hub does not do. This recipe assumes you already know you want the feature.

    What You'll Get out of This Recipe

    @@ -2075,7 +2075,7 @@

    What You'll Get out of This Recipe

    Concretely, the payoff this unlocks: a lesson you record in one project becomes visible to your agent the next time you open -another project — without touching local files in the second +another project, without touching local files in the second project or opening another editor window.

    What This Recipe Does Not Cover

      @@ -2084,9 +2084,9 @@

      What This Recipe Does Not Cove convention, and task entries. Everything else stays local.
    • Multi-user attribution. The hub identifies projects, not people.
    • -
    • Running over a LAN — see +
    • Running over a LAN; see Multi-machine setup.
    • -
    • Redundancy — see HA cluster.
    • +
    • Redundancy; see HA cluster.

    Prerequisites

    -

    Step 1 — Start the Hub

    +

    Step 1: Start the Hub

    In a dedicated terminal:

    ctx hub start
     

    On first run, the hub generates an admin token and prints it to -stdout. Copy it — you'll need it for each project registration:

    +stdout. Copy it; you'll need it for each project registration:

    ctx hub listening on :9900
     admin token: ctx_adm_7f3a1c2d...
     data dir: ~/.ctx/hub-data/
     

    The admin token is written to ~/.ctx/hub-data/admin.token so you can recover it later. Treat it like a password.

    -

    Step 2 — Register the First Project

    +

    Step 2: Register the First Project

    cd ~/projects/alpha
     ctx connection register localhost:9900 --token ctx_adm_7f3a1c2d...
     
    @@ -2114,12 +2114,12 @@

    Step 2 — Register the First Project .context/.connect.enc. The admin token is exchanged for a per-project client token; the admin token itself is never persisted in the project.

    -

    Step 3 — Choose What to Receive

    +

    Step 3: Choose What to Receive

    ctx connection subscribe decision learning convention
     

    Only the entry types you subscribe to will be delivered by sync and listen.

    -

    Step 4 — Publish a Decision

    +

    Step 4: Publish a Decision

    Either use ctx add --share to write locally and push to the ctx Hub:

    ctx add decision "Use UTC timestamps everywhere" --share \
       --context "We had timezone drift between the API and journal" \
    @@ -2129,7 +2129,7 @@ 

    Step 4 — Publish a Decision
    ctx connection publish decision "Use UTC timestamps everywhere"
     

    -

    Step 5 — Register a Second Project and Sync

    +

    Step 5: Register a Second Project and Sync

    cd ~/projects/beta
     ctx connection register localhost:9900 --token ctx_adm_7f3a1c2d...
     ctx connection subscribe decision learning convention
    @@ -2138,13 +2138,13 @@ 

    Step 5 — Register a Second

    The decision from alpha now appears in ~/projects/beta/.context/hub/decisions.md with an origin tag and timestamp.

    -

    Step 6 — Watch Entries Arrive Live

    +

    Step 6: Watch Entries Arrive Live

    Instead of re-running sync, stream new entries as they land:

    ctx connection listen
     

    Leave this running in a terminal; every --share publish from any registered project will appear in .context/hub/ immediately.

    -

    Step 7 — Feed Shared Knowledge into the Agent

    +

    Step 7: Feed Shared Knowledge into the Agent

    Once entries exist in .context/hub/, include them in the agent context packet:

    ctx agent --include-hub
    diff --git a/site/recipes/hub-multi-machine/index.html b/site/recipes/hub-multi-machine/index.html
    index 736db2c04..a794d5382 100644
    --- a/site/recipes/hub-multi-machine/index.html
    +++ b/site/recipes/hub-multi-machine/index.html
    @@ -1359,7 +1359,7 @@
       
         
           
    -        Step 1 — Start the Daemon on the LAN Host
    +        Step 1: Start the Daemon on the LAN Host
           
         
       
    @@ -1370,7 +1370,7 @@
       
         
           
    -        Step 2 — Firewall and Port
    +        Step 2: Firewall and Port
           
         
       
    @@ -1381,7 +1381,7 @@
       
         
           
    -        Step 3 — Retrieve the Admin Token
    +        Step 3: Retrieve the Admin Token
           
         
       
    @@ -1392,7 +1392,7 @@
       
         
           
    -        Step 4 — Register Projects from Each Workstation
    +        Step 4: Register Projects from Each Workstation
           
         
       
    @@ -1403,7 +1403,7 @@
       
         
           
    -        Step 5 — Verify
    +        Step 5: Verify
           
         
       
    @@ -1817,7 +1817,7 @@
       
         
           
    -        Step 1 — Start the Daemon on the LAN Host
    +        Step 1: Start the Daemon on the LAN Host
           
         
       
    @@ -1828,7 +1828,7 @@
       
         
           
    -        Step 2 — Firewall and Port
    +        Step 2: Firewall and Port
           
         
       
    @@ -1839,7 +1839,7 @@
       
         
           
    -        Step 3 — Retrieve the Admin Token
    +        Step 3: Retrieve the Admin Token
           
         
       
    @@ -1850,7 +1850,7 @@
       
         
           
    -        Step 4 — Register Projects from Each Workstation
    +        Step 4: Register Projects from Each Workstation
           
         
       
    @@ -1861,7 +1861,7 @@
       
         
           
    -        Step 5 — Verify
    +        Step 5: Verify
           
         
       
    @@ -1990,12 +1990,12 @@ 

    ctx Hub: Multi-Machine shape described in the -ctx Hub overview — read that first if +ctx Hub overview; read that first if you haven't, especially the trust-model warnings.

    This recipe assumes you've already walked through Getting Started and understand what flows through the hub (decisions, learnings, conventions, -tasks — not journals, scratchpad, or raw context files).

    +tasks, not journals, scratchpad, or raw context files).

    Topology

    +------------------+        +------------------+
     | workstation A    |        | workstation B    |
    @@ -2012,7 +2012,7 @@ 

    Topology | :9900 | +-------------------+

    -

    Step 1 — Start the Daemon on the LAN Host

    +

    Step 1: Start the Daemon on the LAN Host

    On the machine that will hold the hub (call it nexus):

    ctx hub start --daemon --port 9900
     
    @@ -2020,8 +2020,8 @@

    Step 1 — Start the Daemon on later with:

    ctx hub stop
     
    -

    Step 2 — Firewall and Port

    -

    Open port 9900/tcp on nexus to the LAN only — never expose +

    Step 2: Firewall and Port

    +

    Open port 9900/tcp on nexus to the LAN only. Never expose the hub to the public internet without a reverse proxy and TLS in front of it (see Hub security model).

    Typical LAN allowlist rules:

    @@ -2044,14 +2044,14 @@

    Step 2 — Firewall and PortStep 3 — Retrieve the Admin Token

    +

    Step 3: Retrieve the Admin Token

    The daemon prints the admin token to stdout on first run. When running as a daemon, that output goes to the log instead:

    cat ~/.ctx/hub-data/admin.token
     

    Copy the token over a trusted channel (SSH, password manager, or an encrypted note). Do not email it or put it in chat.

    -

    Step 4 — Register Projects from Each Workstation

    +

    Step 4: Register Projects from Each Workstation

    On workstation A:

    cd ~/projects/x
     ctx connection register nexus.local:9900 --token ctx_adm_...
    @@ -2066,7 +2066,7 @@ 

    Step 4 — Register Proj client token. Only the client token is persisted in .context/.connect.enc, encrypted with the same AES-256-GCM scheme ctx uses for notification credentials.

    -

    Step 5 — Verify

    +

    Step 5: Verify

    From either workstation:

    ctx connection status
     
    @@ -2090,14 +2090,14 @@ -

    The hub is append-only JSONL — restarts are safe. Clients keep +

    The hub is append-only JSONL, so restarts are safe. Clients keep their last-seen sequence in .context/hub/.sync-state.json and pick up exactly where they left off on the next sync or listen reconnect.

    See Also

    diff --git a/site/recipes/hub-overview/index.html b/site/recipes/hub-overview/index.html index b3834e310..05e1feaf0 100644 --- a/site/recipes/hub-overview/index.html +++ b/site/recipes/hub-overview/index.html @@ -2022,8 +2022,8 @@

    ctx

    ctx Hub: Overview

    Start here before the other hub recipes. This page answers what -the hub is, who it's for, why you'd run one, and — -equally important — what it is not.

    +the hub is, who it's for, why you'd run one, and, +equally important, what it is not.

    Mental Model in One Paragraph

    The hub is a fan-out channel for structured knowledge entries across projects. When you publish a decision, learning, @@ -2075,23 +2075,23 @@

    What Does Not Flow through stay local. The hub does not sync your AI session history.
  • Scratchpad (.context/pad) stays local. Encrypted notes never leave the machine they were written on.
  • -
  • Local context files as a whole — TASKS.md, - DECISIONS.md, LEARNINGS.md, CONVENTIONS.md — are not +
  • Local context files as a whole (TASKS.md, + DECISIONS.md, LEARNINGS.md, CONVENTIONS.md) are not mirrored wholesale. Only entries you explicitly --share, or publish later with ctx connection publish, cross the boundary.
  • Anything under .context/ that isn't one of the four entry types above. Configuration, state, logs, memory, journal - metadata — all local.
  • + metadata: all local.

    If you were expecting "now my agent in project B can see everything my agent did in project A," that's not this feature. Local session density still lives on the local machine.

    Two User Stories

    The hub makes sense in two different shapes. Pick the one that -matches your situation — the mechanics are identical but the +matches your situation; the mechanics are identical but the trust model and threat surface are very different.

    Story 1: Personal Cross-Project Brain

    -

    One developer, many projects, one hub — usually on localhost.

    +

    One developer, many projects, one hub, usually on localhost.

    You're working across several projects on the same machine (or a handful of machines you own). You want a lesson learned debugging project A to show up when you open project B a week @@ -2107,7 +2107,7 @@

    Story 1: Personal Cross-Project Br
  • Cross-project conventions (e.g., "use UTC timestamps everywhere") live in one place and propagate.
  • -

    Trust model: high — you trust every participant because every +

    Trust model: high. You trust every participant because every participant is you. Run the hub on localhost or on your own LAN, use the default single-node setup, don't worry about TLS.

    Start here: @@ -2177,17 +2177,17 @@

    When Not to Use It
  • Wholesale journal sharing. See "what does not flow" above. If that's what you want, this feature won't provide - it — talk to us in the issue tracker about what would.
  • + it. Talk to us in the issue tracker about what would.

    How Entries Reach Your Agent

    Once a project is registered and subscribed, entries arrive by three mechanisms:

      -
    1. ctx connection sync — an on-demand pull, replays +
    1. ctx connection sync: an on-demand pull, replays everything new since the last sequence you saw.
    -
    2. ctx connection listen — a long-lived gRPC stream that +
    2. ctx connection listen: a long-lived gRPC stream that writes new entries to .context/hub/ as they arrive.
    -
    3. check-hub-sync hook — runs at session start, daily +
    3. check-hub-sync hook: runs at session start, daily throttled, so most users never call sync manually.

    Once entries exist in .context/hub/, ctx agent diff --git a/site/recipes/hub-personal/index.html b/site/recipes/hub-personal/index.html index 5682c96ec..2b8420c04 100644 --- a/site/recipes/hub-personal/index.html +++ b/site/recipes/hub-personal/index.html @@ -1311,7 +1311,7 @@ - 09:00 — Start Work on api + 09:00 - Start Work on api @@ -1322,7 +1322,7 @@ - 10:30 — You Discover a Gotcha + 10:30 - You Discover a Gotcha @@ -1333,7 +1333,7 @@ - 12:00 — You Switch to cli + 12:00 - You Switch to cli @@ -1344,7 +1344,7 @@ - 14:00 — You Codify a Convention + 14:00 - You Codify a Convention @@ -1355,7 +1355,7 @@ - 16:30 — End of Day + 16:30 - End of Day @@ -1857,7 +1857,7 @@ - 09:00 — Start Work on api + 09:00 - Start Work on api @@ -1868,7 +1868,7 @@ - 10:30 — You Discover a Gotcha + 10:30 - You Discover a Gotcha @@ -1879,7 +1879,7 @@ - 12:00 — You Switch to cli + 12:00 - You Switch to cli @@ -1890,7 +1890,7 @@ - 14:00 — You Codify a Convention + 14:00 - You Codify a Convention @@ -1901,7 +1901,7 @@ - 16:30 — End of Day + 16:30 - End of Day @@ -2044,24 +2044,24 @@

    ctx

    Personal Cross-Project Brain

    This recipe shows how one developer uses a ctx Hub -across their own projects day-to-day — the "Story 1" +across their own projects day-to-day, the "Story 1" shape from the Hub overview. You're not setting up infrastructure for a team; you're making a lesson you learned last Tuesday in project A automatically surface when you open project B next Thursday.

    Prerequisites: a working ctx Hub on localhost (see Getting Started for the -~5-minute setup). This recipe assumes the hub is already +roughly five-minute setup). This recipe assumes the hub is already running and you've registered at least two projects.

    The Core Loop

    Every day, the same three verbs matter:

      -
    1. Record — notice a decision, learning, or +
    1. Record: notice a decision, learning, or convention and capture it with ctx add --share.
    -
    2. Subscribe — every project you care about is +
    2. Subscribe: every project you care about is subscribed to the types you want delivered (set once with ctx connection subscribe).
    -
    3. Load — your agent picks up shared entries on next +
    3. Load: your agent picks up shared entries on next session start via the auto-sync hook, or explicitly via ctx agent --include-hub.
    @@ -2070,19 +2070,19 @@

    The Core LoopA Realistic Day

    You have three projects on your workstation:

      -
    • ~/projects/api — a Go service you're actively +
    • ~/projects/api, a Go service you're actively developing
    • -
    • ~/projects/cli — a companion CLI that consumes the +
    • ~/projects/cli, a companion CLI that consumes the API
    • -
    • ~/projects/dotfiles — your personal conventions and +
    • ~/projects/dotfiles, your personal conventions and cross-project learnings

    All three are registered with a single hub running on localhost:9900 (started once at boot, or via a systemd -user unit — see Hub operations). +user unit; see Hub operations). All three subscribe to decision, learning, and convention.

    -

    09:00 — Start Work on api

    +

    09:00 - Start Work on api

    You cd ~/projects/api and start a Claude Code session. Behind the scenes, the plugin's PreToolUse hook calls ctx agent --budget 8000 --include-hub before the first @@ -2095,7 +2095,7 @@

    09:00 — Start Work on apiSo the "use UTC timestamps everywhere" decision you recorded in dotfiles last week is already in Claude's context for this session, without any manual sync.

    -

    10:30 — You Discover a Gotcha

    +

    10:30 - You Discover a Gotcha

    While debugging, you find that the API's retry loop silently drops the last error when the transport times out. This is the kind of thing you'd normally add to @@ -2118,7 +2118,7 @@

    10:30 — You Discover a Gotcha12:00 — You Switch to cli

    +

    12:00 - You Switch to cli

    cd ~/projects/cli, open a new session. The agent packet for cli now includes the learning you just recorded in api, because cli is subscribed to @@ -2126,11 +2126,11 @@

    12:00 — You Switch to clicli/.context/hub/learnings.md.

    You don't have to re-explain the retry-loop gotcha. Claude already sees it.

    -

    14:00 — You Codify a Convention

    +

    14:00 - You Codify a Convention

    You've been writing error messages in api and decided you want a consistent pattern: lowercase start, no trailing period, single-sentence. This is a convention, -not a decision — it applies to every Go project you +not a decision; it applies to every Go project you touch. Record it in dotfiles (since that's your "personal standards" project), and share it:

    cd ~/projects/dotfiles
    @@ -2142,7 +2142,7 @@ 

    14:00 — You Codify a Convention -

    16:30 — End of Day

    +

    16:30 - End of Day

    You didn't run ctx connection sync once. You didn't git push anything between projects. You didn't remember to tell your agent about the retry-loop gotcha @@ -2161,7 +2161,7 @@

    What the Workflow Actually Looks # Evening: nothing. Everything's already propagated.

    The hub is passive infrastructure. You never talk to -it directly — you talk through it by using --share +it directly; you talk through it by using --share on commands you were already running.

    Tips for Solo Use

    Pick a "standards" project. One of your projects @@ -2201,7 +2201,7 @@

    Tips for Solo UseMulti-machine recipe — it's +Multi-machine recipe; it's relevant when the hub is on a LAN host serving multiple workstations, not when it's a personal daemon.

    What This Recipe Is Not

    @@ -2210,7 +2210,7 @@

    What This Recipe Is NotGetting Started.

    Not a team guide. If you're sharing across humans, not just across your own projects, read -Team knowledge bus instead — the trust +Team knowledge bus instead; the trust model and operational concerns are different.

    Not production operations. For backup, log rotation, failure recovery, and HA, see @@ -2218,16 +2218,16 @@

    What This Recipe Is NotHub failure modes.

    See Also

    diff --git a/site/recipes/hub-team/index.html b/site/recipes/hub-team/index.html index fe4917382..ef32f7933 100644 --- a/site/recipes/hub-team/index.html +++ b/site/recipes/hub-team/index.html @@ -1318,7 +1318,7 @@ - Trust Model — Read This First + Trust Model: Read This First @@ -1806,7 +1806,7 @@ - Trust Model — Read This First + Trust Model: Read This First @@ -1988,7 +1988,7 @@

    ctx

    Team Knowledge Bus

    This recipe shows how a small trusted team uses a ctx -Hub as a shared knowledge bus — the "Story 2" shape +Hub as a shared knowledge bus, the "Story 2" shape from the Hub overview. You're not building a wiki, you're not replacing your issue tracker, and you're not running a multi-tenant service. You're @@ -2005,7 +2005,7 @@

    Team Knowledge BusTrust Model — Read This First

    +

    Trust Model: Read This First

    The hub assumes everyone holding a client token is friendly. There's no per-user attribution you can rely on, no read ACL beyond subscription filters, and Origin @@ -2036,11 +2036,11 @@

    The Team's Three Verbspersonal recipe, but with different social expectations:

      -
    1. Record — when you learn something that would save +
    1. Record: when you learn something that would save a teammate time, capture it with ctx add --share.
    -
    2. Subscribe — every engineer's project directories +
    2. Subscribe: every engineer's project directories subscribe to the types the team cares about.
    -
    3. Load — agents pick up shared entries automatically +
    3. Load: agents pick up shared entries automatically via the auto-sync hook and the --include-hub flag in the PreToolUse hook pipeline.
    @@ -2067,9 +2067,9 @@

    What Goes on the Hub (Team Rul

    Learnings:

    TL;DR

    /ctx-reflect               # surface items worth persisting
    diff --git a/site/recipes/multi-tool-setup/index.html b/site/recipes/multi-tool-setup/index.html
    index 4a2016f86..3474e805f 100644
    --- a/site/recipes/multi-tool-setup/index.html
    +++ b/site/recipes/multi-tool-setup/index.html
    @@ -2544,7 +2544,7 @@ 

    Tips&par

    ctx skills can leverage external MCP servers for web search and code intelligence. ctx works without them, but they significantly improve -agent behavior across sessions — the investment is small and the +agent behavior across sessions. The investment is small and the benefits compound. Skills like /ctx-code-review, /ctx-explain, and /ctx-refactor all become noticeably better with these tools connected.

    diff --git a/site/recipes/scratchpad-with-claude/index.html b/site/recipes/scratchpad-with-claude/index.html index 82ef1222e..98d693834 100644 --- a/site/recipes/scratchpad-with-claude/index.html +++ b/site/recipes/scratchpad-with-claude/index.html @@ -2413,7 +2413,7 @@

    Step 6: Clean Up Agent: [runs ctx pad rm 2] "Removed entry 2. 3 entries remaining. - (IDs are stable — remaining entries keep their IDs.)" + (IDs are stable; remaining entries keep their IDs.)"

    Step 7: Store a File as a Blob

    The scratchpad can hold small files (up to 64 KB) as encrypted blob entries. @@ -2478,7 +2478,7 @@

    Step 11: Tag Entries for Organization " 1. check DNS propagation #later 3. review PR feedback #later #ci"

    -

    Entry IDs are stable — they don't shift when other entries are deleted, +

    Entry IDs are stable; they don't shift when other entries are deleted, so ctx pad rm 3 always targets the same entry regardless of deletions or active filters. Use ctx pad normalize to reassign IDs as 1..N.

    Exclude a tag with ~:

    @@ -2493,7 +2493,7 @@

    Step 11: Tag Entries for Organization later 2 urgent 1"

    -

    Tags work on blob entries too — they're extracted from the label:

    +

    Tags work on blob entries too; they're extracted from the label:

    ctx pad add "deploy config #prod" --file ./deploy.yaml
     ctx pad --tag prod
     #   1. deploy config #prod [BLOB]
    diff --git a/site/recipes/steering/index.html b/site/recipes/steering/index.html
    index 246f54396..71194a95f 100644
    --- a/site/recipes/steering/index.html
    +++ b/site/recipes/steering/index.html
    @@ -1282,7 +1282,7 @@
       
         
           
    -        Start Here — Customize the Foundation Files
    +        Start Here: Customize the Foundation Files
           
         
       
    @@ -1304,7 +1304,7 @@
       
         
           
    -        Step 1 — Scaffold the File
    +        Step 1: Scaffold the File
           
         
       
    @@ -1315,7 +1315,7 @@
       
         
           
    -        Step 2 — Fill in the Rule
    +        Step 2: Fill in the Rule
           
         
       
    @@ -1326,7 +1326,7 @@
       
         
           
    -        Step 3 — Preview Which Prompts Match
    +        Step 3: Preview Which Prompts Match
           
         
       
    @@ -1337,7 +1337,7 @@
       
         
           
    -        Step 4 — List to Confirm Metadata
    +        Step 4: List to Confirm Metadata
           
         
       
    @@ -1348,19 +1348,19 @@
       
         
           
    -        Step 5 — Get the Rules in Front of the AI
    +        Step 5: Get the Rules in Front of the AI
           
         
       
       
    -    
    -

    Start Here — Customize the Foundation Files

    +

    Start Here: Customize the Foundation Files

    ctx init scaffolds four foundation steering files for you the first time you initialize a project:

    @@ -2114,22 +2114,22 @@

    Start Here — Customize the and the tools scope. The comment is invisible in rendered markdown but visible when you edit the file. Delete it once the file is yours.

    -

    All four default to inclusion: always and priority: 10 -— they fire on every AI tool call until you customize +

    All four default to inclusion: always and priority: 10, +so they fire on every AI tool call until you customize them. If you're reading this recipe and haven't touched them yet, open each one now and replace the placeholder bullet list with actual rules for your project. That's the highest-leverage five minutes you can spend in a new ctx setup.

    What to fill in, by file:

    -

    product.md — The elevator pitch plus hard scope:

    +

    product.md: The elevator pitch plus hard scope:

    • One-sentence product description.
    • Primary users and their top job-to-be-done.
    • Two or three "this is explicitly out of scope" items so the AI doesn't wander.
    -

    tech.md — Technology and constraints:

    +

    tech.md: Technology and constraints:

    • Languages and versions (Go 1.22, Node 20, etc.).
    • Frameworks and key libraries.
    • @@ -2138,13 +2138,13 @@

      Start Here — Customize the "no external DB for unit tests". These are the things that burn agents when they don't know them.

    -

    structure.md — Layout and naming:

    +

    structure.md: Layout and naming:

    • Top-level directories and their purpose.
    • Where new files should go (and where they should NOT).
    • Naming conventions for packages, files, types.
    -

    workflow.md — Process rules:

    +

    workflow.md: Process rules:

    • Branch strategy (main-only, trunk-based, feature branches).
    • @@ -2154,7 +2154,7 @@

      Start Here — Customize the

    After editing, the next AI tool call in Claude Code will pick up the new rules automatically via the plugin's -PreToolUse hook — no sync step, no restart. Other tools +PreToolUse hook, with no sync step and no restart. Other tools (Cursor, Cline, Kiro) need ctx steering sync to export into their native format.

    -

    Step 1 — Scaffold the File

    +

    Step 1: Scaffold the File

    ctx steering add api-validation
     

    That creates .context/steering/api-validation.md with default @@ -2212,7 +2212,7 @@

    Step 1 — Scaffold the FileStep 2 — Fill in the Rule

    +

    Step 2: Fill in the Rule

    Open the file and write the rule body plus a focused description. The description is what inclusion: auto matches against later.

    @@ -2238,16 +2238,16 @@

    Step 2 — Fill in the Ruleauto matcher scores +
  • Description is keyword-rich ("HTTP handler input + validation and request parsing"); the auto matcher scores prompts against these words.
  • -

    Step 3 — Preview Which Prompts Match

    +

    Step 3: Preview Which Prompts Match

    Before committing the file, validate your description catches the prompts you care about:

    ctx steering preview "add an endpoint for updating user email"
    @@ -2256,24 +2256,24 @@ 

    Step 3 — Preview Which Prompts Mat
    Steering files matching prompt "add an endpoint for updating user email":
       api-validation       inclusion=auto     priority=20  tools=all
     
    -

    Good — the prompt matches. Try a negative case:

    +

    Good, the prompt matches. Try a negative case:

    ctx steering preview "fix a bug in the JSON renderer"
     

    Expected: empty match (or whatever else is currently auto). If api-validation incorrectly fires for unrelated prompts, tighten the description. If it misses prompts it should catch, add more keywords.

    -

    Step 4 — List to Confirm Metadata

    +

    Step 4: List to Confirm Metadata

    ctx steering list
     

    Should show api-validation alongside any other files, with its inclusion mode and priority. If the list is wrong, check the frontmatter for typos.

    -

    Step 5 — Get the Rules in Front of the AI

    +

    Step 5: Get the Rules in Front of the AI

    Steering files are authored once in .context/steering/, but how they reach the AI depends on which tool you use. There are two delivery mechanisms:

    -

    Path A — Native-Rules Tools (Cursor, Cline, Kiro)

    +

    Path A: Native-Rules Tools (Cursor, Cline, Kiro)

    These tools read a specific directory for rules. ctx steering sync exports your files into that directory with tool-specific frontmatter:

    @@ -2302,11 +2302,11 @@

    Path A — Native-Rules Too

    -

    The sync is idempotent — unchanged files are skipped. Run +

    The sync is idempotent; unchanged files are skipped. Run it whenever you edit a steering file.

    -

    Path B — Claude Code and Codex (Hook + MCP)

    +

    Path B: Claude Code and Codex (Hook + MCP)

    Claude Code and Codex have no native rules primitive, -so ctx steering sync is a no-op for them — it +so ctx steering sync is a no-op for them; it deliberately skips both. Instead, steering reaches these tools through two non-sync channels:

      @@ -2331,17 +2331,17 @@

      Path B — Claude Code and Codex

    That installs the plugin, wires the hook, and registers the MCP server. After that, steering files you edit are picked -up on the next tool call — no sync step needed.

    +up on the next tool call, with no sync step needed.

    Running ctx steering sync with Claude Code

    -

    It won't error — it will simply report that Claude and +

    It won't error; it will simply report that Claude and Codex aren't sync targets and skip them. If Claude Code is your only tool, you never need to run sync. If you use both Claude Code and (say) Cursor, run sync to keep Cursor up to date; the Claude pipeline takes care of itself via the hook.

    -

    Step 6 — Verify the AI Sees It

    +

    Step 6: Verify the AI Sees It

    Open your AI tool and ask it something the rule should fire on:

    @@ -2349,20 +2349,20 @@

    Step 6 — Verify the AI Sees ItStep 6 — Verify the AI Sees It + context packet; tighten the description keywords.

    Common Mistakes

    Too-generic descriptions. description: general coding @@ -2398,11 +2398,11 @@

    Common MistakesSee Also

    diff --git a/site/recipes/task-management/index.html b/site/recipes/task-management/index.html index 27267966a..4fb8b5e3c 100644 --- a/site/recipes/task-management/index.html +++ b/site/recipes/task-management/index.html @@ -2311,7 +2311,7 @@

    The ProblemPrefer Skills over Raw Commands

    When working with an AI agent, use /ctx-task-add instead of raw ctx add task. The agent automatically picks up session ID, branch, -and commit hash from its context — no manual flags needed.

    +and commit hash from its context, so no manual flags are needed.

    TL;DR

    Manage Tasks:

    diff --git a/site/recipes/triggers/index.html b/site/recipes/triggers/index.html index f9e426084..7c9b4397e 100644 --- a/site/recipes/triggers/index.html +++ b/site/recipes/triggers/index.html @@ -1323,7 +1323,7 @@ - Step 1 — Scaffold the Script + Step 1: Scaffold the Script @@ -1334,7 +1334,7 @@ - Step 2 — Write the Logic + Step 2: Write the Logic @@ -1345,7 +1345,7 @@ - Step 3 — Test with a Mock Payload + Step 3: Test with a Mock Payload @@ -1356,7 +1356,7 @@ - Step 4 — Enable It + Step 4: Enable It @@ -1367,7 +1367,7 @@ - Step 5 — Iterate Safely + Step 5: Iterate Safely @@ -1856,7 +1856,7 @@ - Step 1 — Scaffold the Script + Step 1: Scaffold the Script @@ -1867,7 +1867,7 @@ - Step 2 — Write the Logic + Step 2: Write the Logic @@ -1878,7 +1878,7 @@ - Step 3 — Test with a Mock Payload + Step 3: Test with a Mock Payload @@ -1889,7 +1889,7 @@ - Step 4 — Enable It + Step 4: Enable It @@ -1900,7 +1900,7 @@ - Step 5 — Iterate Safely + Step 5: Iterate Safely @@ -2093,8 +2093,8 @@

    ScenarioStep 1 — Scaffold the Script

    +edits have caused outages before, and you want a hard gate.

    +

    Step 1: Scaffold the Script

    ctx trigger add pre-tool-use protect-crypto
     

    That creates .context/hooks/pre-tool-use/protect-crypto.sh @@ -2114,11 +2114,11 @@

    Step 1 — Scaffold the Script# Return a JSON result. action can be "allow", "block", or absent. echo '{"action": "allow"}'

    -

    Note: the directory is .context/hooks/pre-tool-use/ — the +

    Note: the directory is .context/hooks/pre-tool-use/; the on-disk layout still uses hooks/ even though the command is ctx trigger. If you ls .context/hooks/, that's where your triggers live.

    -

    Step 2 — Write the Logic

    +

    Step 2: Write the Logic

    Open the file and replace the template body:

    #!/usr/bin/env bash
     set -euo pipefail
    @@ -2152,17 +2152,17 @@ 

    Step 2 — Write the Logic

    A few things to note:

      -
    • set -euo pipefail — any unhandled error aborts the +
    • set -euo pipefail: any unhandled error aborts the script. Critical for a security-relevant trigger.
    • -
    • Quote everything from jq — the path field comes from +
    • Quote everything from jq: the path field comes from the AI tool; treat it as untrusted input.
    • -
    • Explicit allow case — the default is allow. An +
    • Explicit allow case: the default is allow. An empty or missing response is a risky default.
    • -
    • Use jq -n --arg for output construction — safer than +
    • Use jq -n --arg for output construction, as it is safer than string concatenation when the message may contain special characters.
    -

    Step 3 — Test with a Mock Payload

    +

    Step 3: Test with a Mock Payload

    Before enabling the trigger, test it with a realistic mock input using ctx trigger test. This runs the script against a synthetic JSON payload without actually firing any AI tool.

    @@ -2182,7 +2182,7 @@

    Step 3 — Test with a Mock PayloadIf any of these cases misbehave, fix the trigger before enabling it. The trigger is disabled at this point, so misbehavior doesn't affect real AI sessions.

    -

    Step 4 — Enable It

    +

    Step 4: Enable It

    Once the test cases pass, enable the trigger:

    ctx trigger enable protect-crypto
     
    @@ -2193,7 +2193,7 @@

    Step 4 — Enable ItStep 5 — Iterate Safely

    +

    Step 5: Iterate Safely

    If you discover a bug after enabling, disable first, fix second:

    ctx trigger disable protect-crypto
    @@ -2201,7 +2201,7 @@ 

    Step 5 — Iterate Safelyctx trigger test pre-tool-use --tool write_file --path internal/crypto/aes.go ctx trigger enable protect-crypto

    -

    Disabling simply clears the executable bit — the script stays +

    Disabling simply clears the executable bit; the script stays on disk, and ctx trigger enable re-enables it without rewriting anything.

    Patterns Worth Copying

    @@ -2217,7 +2217,7 @@

    Logging, Not BlockingContext Injection at Session Start

    A session-start trigger can prepend text to the agent's initial prompt by emitting {"action":"inject", "content": "..."} -— useful for injecting daily standup notes, open PRs, or +. This is useful for injecting daily standup notes, open PRs, or rotating TODOs without storing them in a steering file.

    Chaining Triggers of the Same Type

    Multiple scripts in the same type directory all run. If any @@ -2239,16 +2239,16 @@

    Common MistakesSee Also

    diff --git a/site/reference/audit-conventions/index.html b/site/reference/audit-conventions/index.html index 0b6e883e0..699a262af 100644 --- a/site/reference/audit-conventions/index.html +++ b/site/reference/audit-conventions/index.html @@ -2043,7 +2043,7 @@

    Magic Strings } } -

    Before (format verbs — also caught):

    +

    Before (format verbs, also caught):

    func EntryHash(text string) string {
         h := sha256.Sum256([]byte(text))
         return fmt.Sprintf("%x", h[:8])
    @@ -2055,7 +2055,7 @@ 

    Magic Strings return hex.EncodeToString(h[:cfgFmt.HashPrefixLen]) }

    -

    Before (URL schemes — also caught):

    +

    Before (URL schemes, also caught):

    if strings.HasPrefix(target, "https://") ||
         strings.HasPrefix(target, "http://") {
         return target
    @@ -2221,7 +2221,7 @@ 

    Dead Exports

    Rule: If a test-only allowlist entry is needed (the export exists only for test use), add the fully qualified symbol to testOnlyExports -in dead_exports_test.go. Keep this list small — prefer eliminating +in dead_exports_test.go. Keep this list small; prefer eliminating the export.


    Core Package Structure

    @@ -2231,7 +2231,7 @@

    Core Package Structurecore/ from becoming a god package.

    Before:

    internal/cli/dep/core/
    -    go.go           # violation — logic at core/ level
    +    go.go           # violation: logic at core/ level
         python.go       # violation
         node.go         # violation
         types.go        # violation
    @@ -2430,7 +2430,7 @@ 

    Magic Numeric Values entries = entries[:config.MaxEntries] }

    -

    Exempt: 0, 1, -1, 210, strconv radix/bitsize args +

    Exempt: 0, 1, -1, 2-10, strconv radix/bitsize args (10, 32, 64 in strconv.Parse*/Format*), octal permissions (caught separately by TestNoRawPermissions), and const/var definition sites.

    @@ -2481,7 +2481,7 @@

    Predicate Naming (No IsisValid in a local context is fine). This convention applies to exported methods and package-level functions. See CONVENTIONS.md "Predicates" section.

    -

    This is not yet enforced by an AST test — it requires semantic +

    This is not yet enforced by an AST test; it requires semantic understanding of return types and naming intent that makes automated detection fragile. Apply during code review.


    @@ -2492,7 +2492,7 @@

    Mixed Visibility
    load.go
         func Load() { ... }        // exported
    -    func parseHeader() { ... } // unexported — violation
    +    func parseHeader() { ... } // unexported, violation
     

    After:

    load.go
    @@ -2520,7 +2520,7 @@ 

    CLI Cmd Structure
    internal/cli/doctor/cmd/root/
         cmd.go
         run.go
    -    format.go   # violation — helper in cmd dir
    +    format.go   # violation: helper in cmd dir
     

    After:

    internal/cli/doctor/cmd/root/
    @@ -2538,10 +2538,10 @@ 

    DescKey NamespaceYAML Examples / Registry Linkageconfig/entry/.


    Other Enforced Patterns

    -

    These tests follow the same fix approach — extract the operation to +

    These tests follow the same fix approach: extract the operation to its designated package:

    diff --git a/site/reference/comparison/index.html b/site/reference/comparison/index.html index 944755326..d0078e4bc 100644 --- a/site/reference/comparison/index.html +++ b/site/reference/comparison/index.html @@ -1932,22 +1932,22 @@

    Enterprise Context Platforms# Reorder ctx pad mv 2 1 -# Clean up (IDs are stable — they don't shift when entries are deleted) +# Clean up (IDs are stable; they don't shift when entries are deleted) ctx pad rm 2

    Tags

    Entries can contain #word tags for lightweight categorization. Tags are convention-based: any #word token in an entry's text is a tag. No special -syntax to add or remove them — use the existing add and edit commands.

    +syntax to add or remove them; use the existing add and edit commands.

    # Add tagged entries
     ctx pad add "check DNS propagation #later"
     ctx pad add "deploy hotfix #urgent"
    @@ -1849,7 +1849,7 @@ 

    Tags&par # Remove a tag (replace entry text without the tag) ctx pad edit 1 "check DNS propagation"

    -

    Entry IDs are stable — they don't shift when other entries are deleted, so +

    Entry IDs are stable; they don't shift when other entries are deleted, so ctx pad rm 3 always targets the same entry. Use ctx pad normalize to reassign IDs as 1..N if gaps bother you. Tags are case-sensitive and support letters, digits, hyphens, and underscores (#high-priority, #v2, #my_tag).

    diff --git a/site/search.json b/site/search.json index 200d693b4..26bb8375c 100644 --- a/site/search.json +++ b/site/search.json @@ -1 +1 @@ -{"config":{"separator":"[\\s\\-_,:!=\\[\\]()\\\\\"`/]+|\\.(?!\\d)"},"items":[{"location":"","level":1,"title":"Manifesto","text":"","path":["Manifesto"],"tags":[]},{"location":"#the-ctx-manifesto","level":1,"title":"The ctx Manifesto","text":"

    Creation, not code.

    Context, not prompts.

    Verification, not vibes.

    This Is NOT a Metaphor

    Code executes instructions.

    Creation produces outcomes.

    Confusing the two is how teams ship motion...

    ...instead of progress.

    • It was never about the code.
    • Code has zero standalone value.
    • Code is an implementation detail.

    Code is an incantation.

    Creation is the act.

    And creation does not happen in a vacuum.

    ","path":["Manifesto"],"tags":[]},{"location":"#ctx-is-the-substrate","level":2,"title":"ctx Is the Substrate","text":"

    Constraints Have Moved

    Human bandwidth is no longer the limiting factor.

    Context integrity is.

    Human bandwidth is no longer the constraint.

    Context is:

    • Without durable context, intelligence resets.
    • Without memory, reasoning decays.
    • Without structure, scale collapses.

    Creation is now limited by:

    • Clarity of intent;
    • Quality of context;
    • Rigor of verification.

    Not by speed.

    Not by capacity.

    Velocity Amplifies

    Faster execution on broken context compounds error.

    Speed multiplies whatever is already wrong.

    ","path":["Manifesto"],"tags":[]},{"location":"#humans-author-meaning","level":2,"title":"Humans Author Meaning","text":"

    Intent Is Authored

    Systems can optimize.

    Models can generalize.

    Meaning must be chosen.

    Intent is not emergent.

    Vision, goals, and direction are human responsibilities.

    We decide:

    • What matters;
    • What success means;
    • What world we are building.

    ctx encodes the intent so it...

    • survives time,
    • survives handoffs,
    • survives scale.

    Nothing important should live only in conversation.

    Nothing critical should depend on recall.

    Oral Tradition Does Not Scale

    If intent cannot be inspected, it cannot be enforced.

    ","path":["Manifesto"],"tags":[]},{"location":"#ctx-before-action","level":2,"title":"ctx Before Action","text":"

    Orientation Precedes Motion

    Acting first and understanding later is not bravery.

    It is debt.

    Never act without ctx.

    Before execution, we must verify:

    • Where we are;
    • Why we are here;
    • What constraints apply;
    • What assumptions are active.

    Action without ctx is gambling.

    Speed without orientation is noise.

    ctx is not overhead: It is the cost of correctness.

    ","path":["Manifesto"],"tags":[]},{"location":"#persistent-context-beats-prompt-memory","level":2,"title":"Persistent Context Beats Prompt Memory","text":"

    Transience Is the Default Failure Mode

    • Prompts decay.
    • Chats fragment.
    • Memory heuristics drift.

    Prompts are transient.

    Chats are lossy.

    Memory heuristics drift.

    ctx must be:

    • Durable;
    • Structured;
    • Explicit;
    • Queryable.

    Intent Must Be Intentional

    If intent exists only in a prompt...

    ...alignment is already degrading.

    Knowledge lives in the artifacts:

    • Decisions;
    • Documentation;
    • Dependency maps;
    • Evaluation history.

    Artifacts Outlive Sessions

    What is not written will be re-learned.

    At full cost.

    ","path":["Manifesto"],"tags":[]},{"location":"#what-ctx-is-not","level":2,"title":"What ctx Is Not","text":"

    Avoid Category Errors

    Mislabeling ctx guarantees misuse.

    ctx is not a memory feature.

    • ctx is not prompt engineering.
    • ctx is not a productivity hack.
    • ctx is not automation theater.

    ctx is a system for preserving intent under scale.

    ctx is infrastructure.

    ","path":["Manifesto"],"tags":[]},{"location":"#verified-reality-is-the-scoreboard","level":2,"title":"Verified Reality Is the Scoreboard","text":"

    Activity Is a False Proxy

    Output volume correlates poorly with impact.

    • Code is not progress.
    • Activity is not impact.

    The only truth that compounds is verified change.

    Verified change must exist in the real world.

    Hypotheses are cheap; outcomes are not.

    ctx captures:

    • What we expected;
    • What we observed;
    • Where reality diverged.

    If we cannot predict, measure, and verify the result...

    ...it does not count.

    ","path":["Manifesto"],"tags":[]},{"location":"#build-to-learn-not-to-accumulate","level":2,"title":"Build to Learn, Not to Accumulate","text":"

    Prototypes Have an Expiration Date

    A prototype's value is information, not longevity.

    Prototypes exist to reduce uncertainty.

    We build to:

    • Test assumptions;
    • Validate architecture;
    • Answer specific questions.

    Not everything.

    Not blindly.

    Not permanently.

    ctx records archeology so the cost is paid once.

    ","path":["Manifesto"],"tags":[]},{"location":"#failures-are-assets","level":2,"title":"Failures Are Assets","text":"

    Failure without Capture Is Waste

    Pain that does not teach is pure loss.

    Failures are not erased: They are preserved.

    Each failure becomes:

    • A documented hypothesis;
    • An analyzed deviation;
    • A permanent artifact.

    Rollback fixes symptoms: ctx fixes systems.

    A repeated mistake is a missing ctx artifact.

    ","path":["Manifesto"],"tags":[]},{"location":"#structure-enables-scale","level":2,"title":"Structure Enables Scale","text":"

    Unbounded Autonomy Destabilizes

    Power without a structure produces chaos.

    Transpose it:

    Power without any structure becomes chaos.

    ctx defines:

    • Roles;
    • Boundaries;
    • Protocols;
    • Escalation paths;
    • Decision rights.

    Ambiguity is a system failure:

    • Debates must be structured.
    • Decisions must be explicit.
    • History must be retained.
    ","path":["Manifesto"],"tags":[]},{"location":"#encode-intent-into-the-environment","level":2,"title":"Encode Intent into the Environment","text":"

    Goodwill Does Not Belong to the Table

    Alignment that depends on memory will drift.

    Alignment cannot depend on memory or goodwill.

    Do not rely on people to remember.

    Encode the behavior, so it happens by default.

    Intent is encoded as:

    • Policies;
    • Schemas;
    • Constraints;
    • Evaluation harnesses.

    Rules must be machine-readable.

    Laws must be enforceable.

    If intent is implicit, drift is guaranteed.

    ","path":["Manifesto"],"tags":[]},{"location":"#cost-is-a-first-class-signal","level":2,"title":"Cost Is a First-Class Signal","text":"

    Attention Is the Scarcest Resource

    Not ideas.

    Not ambition.

    Ideas do not compete on time:

    They compete on cost and impact:

    • Attention is finite.
    • Compute is finite.
    • Context is expensive.

    We continuously ask:

    • What the most valuable next action is.
    • What outcome justifies the cost.

    ctx guides allocation.

    Learning reshapes priority.

    ","path":["Manifesto"],"tags":[]},{"location":"#show-the-why","level":2,"title":"Show the Why","text":"

    {} (code, artifacts, apps, binaries) produce outputs; they do not preserve reasoning.

    Systems that cannot explain themselves will not be trusted.

    Traceability builds trust.

         {} --> what\n\n    ctx --> why\n

    We record:

    • Explored paths;
    • Rejected options;
    • Assumptions made;
    • Evidence used.

    Opaque systems erode trust:

    Transparent ctx compounds understanding.

    ","path":["Manifesto"],"tags":[]},{"location":"#continuously-verify-the-system","level":2,"title":"Continuously Verify the System","text":"

    Stability Is Temporary

    Every assumption has a half-life:

    • Models drift.
    • Tools change.
    • Assumptions rot.

    ctx must be verified against reality.

    Trust is a spectrum.

    Trust is continuously re-earned:

    • Benchmarks,
    • regressions,
    • and evaluations...

    ...are safety rails.

    ","path":["Manifesto"],"tags":[]},{"location":"#ctx-is-leverage","level":2,"title":"ctx Is Leverage","text":"

    Humans Are Decision Engines

    Execution should not consume judgment.

    Humans must not be typists.

    We are the authors.

    Human effort is reserved for:

    • Judgment;
    • Design;
    • Taste;
    • Synthesis.

    Repetition is delegated.

    Toil is automated.

    ctx preserves leverage across time.

    ","path":["Manifesto"],"tags":[]},{"location":"#the-thesis","level":2,"title":"The Thesis","text":"

    Invariant

    Everything else is an implementation detail.

    • Creation is the act.
    • ctx is the substrate.
    • Verification is the truth.

    Code executes → Models reason → Agents amplify.

    ctx lives on.

    • Without ctx, intelligence resets.
    • With ctx, creation compounds.
    ","path":["Manifesto"],"tags":[]},{"location":"blog/","level":1,"title":"Blog","text":"

    Stories, insights, and lessons learned from building and using ctx.

    ","path":["Blog"],"tags":[]},{"location":"blog/#releases","level":2,"title":"Releases","text":"","path":["Blog"],"tags":[]},{"location":"blog/#ctx-v080-the-architecture-release","level":3,"title":"ctx v0.8.0: The Architecture Release","text":"

    March 23, 2026: 374 commits, 1,708 Go files touched, and a near-complete architectural overhaul. Every CLI package restructured into cmd/ + core/ taxonomy, all user-facing strings externalized to YAML, MCP server for tool-agnostic AI integration, and the memory bridge connecting Claude Code's auto-memory to .context/.

    Topics: release, architecture, refactoring, MCP, localization

    ","path":["Blog"],"tags":[]},{"location":"blog/#field-notes","level":2,"title":"Field Notes","text":"","path":["Blog"],"tags":[]},{"location":"blog/#the-watermelon-rind-anti-pattern-why-smarter-tools-make-shallower-agents","level":3,"title":"The Watermelon-Rind Anti-Pattern: Why Smarter Tools Make Shallower Agents","text":"

    April 6, 2026: Give an agent a graph query tool, and it produces output that's structurally correct but substantively hollow (the watermelon-rind anti-pattern). We ran three sessions analyzing the same codebase with different tool access: the one with no tools produced 5.2x more depth. The fix: a two-pass compiler for architecture understanding: force code reading first, verify with tools second. Constraint is the feature.

    Topics: architecture, code intelligence, agent behavior, design patterns, field notes

    ","path":["Blog"],"tags":[]},{"location":"blog/#code-structure-as-an-agent-interface-what-19-ast-tests-taught-us","level":3,"title":"Code Structure as an Agent Interface: What 19 AST Tests Taught Us","text":"

    April 2, 2026: We built 19 AST-based audit tests in a single session, touching 300+ files. In the process we discovered that \"old-school\" code quality constraints (no magic numbers, centralized error handling, 80-char lines, documentation) are exactly the constraints that make code readable to AI agents. If an agent interacts with your codebase, your codebase already is an interface. You just have not designed it as one.

    Topics: ast, code quality, agent readability, conventions, field notes

    ","path":["Blog"],"tags":[]},{"location":"blog/#we-broke-the-31-rule","level":3,"title":"We Broke the 3:1 Rule","text":"

    March 23, 2026: After v0.6.0, we ran 198 feature commits across 17 days before consolidating. The 3:1 rule says consolidate every 4th session. We did it after the 66th. The result: an 18-day, 181-commit cleanup marathon that took longer than the feature run itself. A follow-up to The 3:1 Ratio with empirical evidence from the v0.8.0 cycle.

    Topics: consolidation, technical debt, development workflow, convention drift, field notes

    ","path":["Blog"],"tags":[]},{"location":"blog/#context-engineering","level":2,"title":"Context Engineering","text":"","path":["Blog"],"tags":[]},{"location":"blog/#agent-memory-is-infrastructure","level":3,"title":"Agent Memory Is Infrastructure","text":"

    March 4, 2026: Every AI coding agent starts fresh. The obvious fix is \"memory.\" But there's a different problem memory doesn't touch: the project itself accumulates knowledge that has nothing to do with any single session. This post argues that agent memory is L2 (runtime cache); what's missing is L3 (project infrastructure).

    Topics: context engineering, agent memory, infrastructure, persistence, team knowledge

    ","path":["Blog"],"tags":[]},{"location":"blog/#context-as-infrastructure","level":3,"title":"Context as Infrastructure","text":"

    February 17, 2026: Where does your AI's knowledge live between sessions? If the answer is \"in a prompt I paste at the start,\" you are treating context as a consumable. This post argues for treating it as infrastructure instead: persistent files, separation of concerns, two-tier storage, progressive disclosure, and the filesystem as the most mature interface available.

    Topics: context engineering, infrastructure, progressive disclosure, persistence, design philosophy

    ","path":["Blog"],"tags":[]},{"location":"blog/#the-attention-budget-why-your-ai-forgets-what-you-just-told-it","level":3,"title":"The Attention Budget: Why Your AI Forgets What You Just Told It","text":"

    February 3, 2026: Every token you send to an AI consumes a finite resource: the attention budget. Understanding this constraint shaped every design decision in ctx: hierarchical file structure, explicit budgets, progressive disclosure, and filesystem-as-index.

    Topics: attention mechanics, context engineering, progressive disclosure, ctx primitives, token budgets

    ","path":["Blog"],"tags":[]},{"location":"blog/#before-context-windows-we-had-bouncers","level":3,"title":"Before Context Windows, We Had Bouncers","text":"

    February 14, 2026: IRC is stateless. You disconnect, you vanish. Modern systems are not much different. This post traces the line from IRC bouncers to context engineering: stateless protocols require stateful wrappers, volatile interfaces require durable memory.

    Topics: context engineering, infrastructure, IRC, persistence, state continuity

    ","path":["Blog"],"tags":[]},{"location":"blog/#the-last-question","level":3,"title":"The Last Question","text":"

    February 28, 2026: In 1956, Asimov wrote a story about a question that spans the entire future of the universe. A reading of \"The Last Question\" through the lens of persistence, substrate migration, and what it means to build systems where sessions don't reset.

    Topics: context continuity, long-lived systems, persistence, intelligence over time, field notes

    ","path":["Blog"],"tags":[]},{"location":"blog/#agent-behavior-and-design","level":2,"title":"Agent Behavior and Design","text":"","path":["Blog"],"tags":[]},{"location":"blog/#the-dog-ate-my-homework-teaching-ai-agents-to-read-before-they-write","level":3,"title":"The Dog Ate My Homework: Teaching AI Agents to Read Before They Write","text":"

    February 25, 2026: You wrote the playbook. The agent skipped all of it. Five sessions, five failure modes, and the discovery that observable compliance beats perfect compliance.

    Topics: hooks, agent behavior, context engineering, behavioral design, testing methodology, compliance monitoring

    ","path":["Blog"],"tags":[]},{"location":"blog/#skills-that-fight-the-platform","level":3,"title":"Skills That Fight the Platform","text":"

    February 4, 2026: When custom skills conflict with system prompt defaults, the AI has to reconcile contradictory instructions. Five conflict patterns discovered while building ctx.

    Topics: context engineering, skill design, system prompts, antipatterns, AI safety primitives

    ","path":["Blog"],"tags":[]},{"location":"blog/#the-anatomy-of-a-skill-that-works","level":3,"title":"The Anatomy of a Skill That Works","text":"

    February 7, 2026: I had 20 skills. Most were well-intentioned stubs. Then I rewrote all of them. Seven lessons emerged: quality gates prevent premature execution, negative triggers are load-bearing, examples set boundaries better than rules.

    Topics: skill design, context engineering, quality gates, E/A/R framework, practical patterns

    ","path":["Blog"],"tags":[]},{"location":"blog/#you-cant-import-expertise","level":3,"title":"You Can't Import Expertise","text":"

    February 5, 2026: I found a well-crafted consolidation skill. Applied my own E/A/R framework: 70% was noise. This post is about why good skills can't be copy-pasted, and how to grow them from your project's own drift history.

    Topics: skill adaptation, E/A/R framework, convention drift, consolidation, project-specific expertise

    ","path":["Blog"],"tags":[]},{"location":"blog/#not-everything-is-a-skill","level":3,"title":"Not Everything Is a Skill","text":"

    February 8, 2026: I ran an 8-agent codebase audit and got actionable results. The natural instinct was to wrap the prompt as a skill. Then I applied my own criteria: it failed all three tests.

    Topics: skill design, context engineering, automation discipline, recipes, agent teams

    ","path":["Blog"],"tags":[]},{"location":"blog/#defense-in-depth-securing-ai-agents","level":3,"title":"Defense in Depth: Securing AI Agents","text":"

    February 9, 2026: The security advice was \"use CONSTITUTION.md for guardrails.\" That is wishful thinking. Five defense layers for unattended AI agents, each with a bypass, and why the strength is in the combination.

    Topics: agent security, defense in depth, prompt injection, autonomous loops, container isolation

    ","path":["Blog"],"tags":[]},{"location":"blog/#development-practice","level":2,"title":"Development Practice","text":"","path":["Blog"],"tags":[]},{"location":"blog/#code-is-cheap-judgment-is-not","level":3,"title":"Code Is Cheap. Judgment Is Not.","text":"

    February 17, 2026: AI does not replace workers. It replaces unstructured effort. Three weeks of building ctx with an AI agent proved it: YOLO mode showed production is cheap, the 3:1 ratio showed judgment has a cadence.

    Topics: AI and expertise, context engineering, judgment vs production, human-AI collaboration, automation discipline

    ","path":["Blog"],"tags":[]},{"location":"blog/#the-31-ratio","level":3,"title":"The 3:1 Ratio","text":"

    February 17, 2026: AI makes technical debt worse: not because it writes bad code, but because it writes code so fast that drift accumulates before you notice. Three feature sessions, one consolidation session.

    Topics: consolidation, technical debt, development workflow, convention drift, code quality

    ","path":["Blog"],"tags":[]},{"location":"blog/#refactoring-with-intent-human-guided-sessions-in-ai-development","level":3,"title":"Refactoring with Intent: Human-Guided Sessions in AI Development","text":"

    February 1, 2026: The YOLO mode shipped 14 commands in a week. But technical debt doesn't send invoices. This is the story of what happened when we started guiding the AI with intent.

    Topics: refactoring, code quality, documentation standards, module decomposition, YOLO versus intentional development

    ","path":["Blog"],"tags":[]},{"location":"blog/#how-deep-is-too-deep","level":3,"title":"How Deep Is Too Deep?","text":"

    February 12, 2026: I kept feeling like I should go deeper into ML theory. Then I spent a week debugging an agent failure that had nothing to do with model architecture. When depth compounds and when it doesn't.

    Topics: AI foundations, abstraction boundaries, agentic systems, context engineering, failure modes

    ","path":["Blog"],"tags":[]},{"location":"blog/#agent-workflows","level":2,"title":"Agent Workflows","text":"","path":["Blog"],"tags":[]},{"location":"blog/#parallel-agents-merge-debt-and-the-myth-of-overnight-progress","level":3,"title":"Parallel Agents, Merge Debt, and the Myth of Overnight Progress","text":"

    February 17, 2026: You discover agents can run in parallel. So you open ten terminals. It is not progress: it is merge debt being manufactured in real time. The five-agent ceiling and why role separation beats file locking.

    Topics: agent workflows, parallelism, verification, context engineering, engineering practice

    ","path":["Blog"],"tags":[]},{"location":"blog/#parallel-agents-with-git-worktrees","level":3,"title":"Parallel Agents with Git Worktrees","text":"

    February 14, 2026: I had 30 open tasks that didn't touch the same files. Using git worktrees to partition a backlog by file overlap, run 3-4 agents simultaneously, and merge the results.

    Topics: agent teams, parallelism, git worktrees, context engineering, task management

    ","path":["Blog"],"tags":[]},{"location":"blog/#field-notes-and-signals","level":2,"title":"Field Notes and Signals","text":"","path":["Blog"],"tags":[]},{"location":"blog/#when-a-system-starts-explaining-itself","level":3,"title":"When a System Starts Explaining Itself","text":"

    February 17, 2026: Every new substrate begins as a private advantage. Reality begins when other people start describing it in their own language. \"Better than Adderall\" is not praise; it is a diagnostic.

    Topics: field notes, adoption signals, infrastructure vs tools, context engineering, substrates

    ","path":["Blog"],"tags":[]},{"location":"blog/#why-zensical","level":3,"title":"Why Zensical","text":"

    February 15, 2026: I needed a static site generator for the journal system. The instinct was Hugo. But instinct is not analysis. Why zensical was the right choice: thin dependencies, MkDocs-compatible config, and zero lock-in.

    Topics: tooling, static site generators, journal system, infrastructure decisions, context engineering

    ","path":["Blog"],"tags":[]},{"location":"blog/#releases_1","level":2,"title":"Releases","text":"","path":["Blog"],"tags":[]},{"location":"blog/#ctx-v060-the-integration-release","level":3,"title":"ctx v0.6.0: The Integration Release","text":"

    February 16, 2026: ctx is now a Claude Marketplace plugin. Two commands, no build step, no shell scripts. v0.6.0 replaces six Bash hook scripts with compiled Go subcommands and ships 25+ Skills as a plugin.

    Topics: release, plugin system, Claude Marketplace, distribution, security hardening

    ","path":["Blog"],"tags":[]},{"location":"blog/#ctx-v030-the-discipline-release","level":3,"title":"ctx v0.3.0: The Discipline Release","text":"

    February 15, 2026: No new headline feature. Just 35+ documentation and quality commits against ~15 feature commits. What a release looks like when the ratio of polish to features is 3:1.

    Topics: release, skills migration, consolidation, code quality, E/A/R framework

    ","path":["Blog"],"tags":[]},{"location":"blog/#ctx-v020-the-archaeology-release","level":3,"title":"ctx v0.2.0: The Archaeology Release","text":"

    February 1, 2026: What if your AI could remember everything? Not just the current session, but every session. ctx v0.2.0 introduces the recall and journal systems.

    Topics: session recall, journal system, structured entries, token budgets, meta-tools

    ","path":["Blog"],"tags":[]},{"location":"blog/#building-ctx-using-ctx-a-meta-experiment-in-ai-assisted-development","level":3,"title":"Building ctx Using ctx: A Meta-Experiment in AI-Assisted Development","text":"

    January 27, 2026: What happens when you build a tool designed to give AI memory, using that very same tool to remember what you're building? This is the story of ctx.

    Topics: dogfooding, AI-assisted development, Ralph Loop, session persistence, architectural decisions

    ","path":["Blog"],"tags":[]},{"location":"blog/2026-01-27-building-ctx-using-ctx/","level":1,"title":"Building ctx Using ctx","text":"

    Update (2026-02-11)

    As of v0.4.0, ctx consolidated sessions into the journal mechanism.

    References to .context/sessions/, auto-save hooks, and SessionEnd auto-save in this post reflect the architecture at the time of writing.

    ","path":["Building ctx Using ctx: A Meta-Experiment in AI-Assisted Development"],"tags":[]},{"location":"blog/2026-01-27-building-ctx-using-ctx/#a-meta-experiment-in-ai-assisted-development","level":2,"title":"A Meta-Experiment in AI-Assisted Development","text":"

    Jose Alekhinne / 2026-01-27

    Can a Tool Design Itself?

    What happens when you build a tool designed to give AI memory, using that very same tool to remember what you are building?

    This is the story of ctx, how it evolved from a hasty \"YOLO mode\" experiment to a disciplined system for persistent AI context, and what I have learned along the way.

    Context Is a Record

    Context is a persistent record.

    By \"context\", I don't mean model memory or stored thoughts:

    I mean the durable record of decisions, learnings, and intent that normally evaporates between sessions.

    ","path":["Building ctx Using ctx: A Meta-Experiment in AI-Assisted Development"],"tags":[]},{"location":"blog/2026-01-27-building-ctx-using-ctx/#ai-amnesia","level":2,"title":"AI Amnesia","text":"

    Every developer who works with AI code generators knows the frustration:

    You have a deep, productive session where the AI understands your codebase, your conventions, your decisions. And then you close the terminal.

    Tomorrow, it's a blank slate. The AI has forgotten everything.

    That is \"reset amnesia\", and it's not just annoying: it's expensive.

    Every session starts with:

    • Re-explaining context;
    • Re-reading files;
    • Re-discovering decisions that were already made.

    I Needed Context

    \"I don't want to lose this discussion...

    ...I am a brain-dead developer YOLO'ing my way out.\"

    ☝️ that's exactly what I said to Claude when I first started working on ctx.

    ","path":["Building ctx Using ctx: A Meta-Experiment in AI-Assisted Development"],"tags":[]},{"location":"blog/2026-01-27-building-ctx-using-ctx/#the-genesis","level":2,"title":"The Genesis","text":"

    The project started as \"Active Memory\" (amem): a CLI tool to persist AI context across sessions.

    The core idea was simple:

    1. Create a .context/ directory with structured Markdown files for decisions, learnings, tasks, and conventions.
    2. The AI reads these at session start and writes to them before the session ends.
    3. There is no step 3.

    The first commit was just scaffolding. But within hours, the Ralph Loop (An iterative AI development workflow) had produced a working CLI:

    feat(cli): implement amem init command\nfeat(cli): implement amem status command\nfeat(cli): implement amem add command\nfeat(cli): implement amem agent command\n...\n

    Not one, not two, but a whopping fourteen core commands shipped in rapid succession!

    I was YOLO'ing like there was no tomorrow:

    • Auto-accept every change;
    • Let the AI run free;
    • Ship features fast.
    ","path":["Building ctx Using ctx: A Meta-Experiment in AI-Assisted Development"],"tags":[]},{"location":"blog/2026-01-27-building-ctx-using-ctx/#the-meta-experiment-using-amem-to-build-amem","level":2,"title":"The Meta-Experiment: Using amem to Build amem","text":"

    Here's where it gets interesting: On January 20th, I asked:

    \"Can I use amem to help you remember this context when I restart?\"

    The answer was yes, but with a gap:

    Autoload worked (via Claude Code's PreToolUse hook), but auto-save was missing: If the user quit with Ctrl+C, everything since the last manual save was lost.

    That session became the first real test of the system.

    Here is the first session file we recorded:

    ## Key Discussion Points\n\n### 1. amem vs Ralph Loop - They're Separate Systems\n\n**User's question**: \"How do I use the binary to recreate this project?\"\n\n**Answer discovered**: `amem` is for context management, Ralph Loop is for \ndevelopment workflow. They are complementary but separate.\n\n### 2. Two Tiers of Context Persistence\n\n| Tier      | What                        | Why                           |\n|-----------|-----------------------------|-------------------------------|\n| Curated   | Learnings, decisions, tasks | Quick reload, token-efficient |\n| Full dump | Entire conversation         | Safety net, nothing lost      |\n\n| Where                  |\n|------------------------|\n| .context/*.md          |\n| .context/sessions/*.md |\n

    This session file (written by the AI to preserve its own context) became the template for how ctx handles session persistence.

    ","path":["Building ctx Using ctx: A Meta-Experiment in AI-Assisted Development"],"tags":[]},{"location":"blog/2026-01-27-building-ctx-using-ctx/#the-rename","level":2,"title":"The Rename","text":"

    By January 21st, I realized \"Active Memory\" was too generic, and (arguably) too marketing-smelly.

    Besides, the binary was already called ctx (short for Context), the directory was .context/, and the slash commands would be /ctx-*.

    So it followed that the project should be renamed to ctx to make things make sense.

    The rename touched 100+ files but was clean: a find-and-replace with Go's type system catching any misses.

    The git history tells the story:

    0e8f6bb feat: rename amem to ctx and add Claude Code integration\n87dcfa1 README.\n4f0e195 feat: separate orchestrator directive from agent tasks\n
    ","path":["Building ctx Using ctx: A Meta-Experiment in AI-Assisted Development"],"tags":[]},{"location":"blog/2026-01-27-building-ctx-using-ctx/#yolo-mode-fast-but-dangerous","level":2,"title":"YOLO Mode: Fast, but Dangerous","text":"

    The Ralph Loop made feature development incredibly fast.

    But it created technical debt that I didn't notice until later.

    A comparison session on January 25th revealed the patterns:

    YOLO Pattern What We Found \"TASKS.md\" scattered in 10 files Same string literal everywhere, no constants dir + \"/\" + file Should be filepath.Join() Monolithic cli_test.go (1500+ lines) Tests disconnected from implementations package initcmd in init/ folder Go's \"init\" is reserved: subtle naming collision

    Here is another analysis made by ctx:

    ● Based on my analysis, here are the key differences:\n\nYOLO Mode (Pre-040ce99)\n- Feature-first: Added slash commands, tests, templates rapidly\n- Scattered magic strings: \"TASKS.md\", \"decision\", \".context\" spread across files\n- Quick file creation: New files without organizational patterns\n- Working but inconsistent: Code functioned but lacked systematic structure\n\nHuman-Guided Mode (Post-040ce99)\n- Consolidation focus: Centralized constants in config package\n- Consistent naming: Dir, File, Filename, UpdateType prefixes\n- Self-referential constants: FileType map uses constants as keys, not literals\n- Proper path construction: filepath.Join() instead of +\"/\"+\n- Colocated tests: Tests next to implementations\n- Canonical naming: Package name = folder name\n

    The fix required a human-guided refactoring session. I continued to do that before every major release, from that point on.

    We introduced internal/config/config.go with semantic prefixes:

    const (\n    DirContext     = \".context\"\n    DirArchive     = \"archive\"\n    DirSessions    = \"sessions\"\n    FilenameTask   = \"TASKS.md\"\n    UpdateTypeTask = \"task\"\n)\n

    What I begrudgingly learned was: YOLO mode is effective for velocity but accumulates debt.

    So I took a mental note to schedule periodic consolidation sessions.

    ","path":["Building ctx Using ctx: A Meta-Experiment in AI-Assisted Development"],"tags":[]},{"location":"blog/2026-01-27-building-ctx-using-ctx/#the-dogfooding-test-that-failed","level":2,"title":"The Dogfooding Test That Failed","text":"

    On January 21st, I ran an experiment: have another Claude instance rebuild ctx from scratch using only the specs and PROMPT.md.

    The Ralph Loop ran, all tasks got checked off, the loop exited successfully.

    But the binary was broken!

    Commands just printed help text instead of executing.

    All tasks were marked \"complete\" but the implementation didn't work.

    Here's what ctx discovered:

    ## Key Findings\n\n### Dogfooding Binary Is Broken\n- Commands don't execute: they just print root help text\n- All tasks were marked complete but binary doesn't work\n- Lesson: \"tasks checked off\" ≠ \"implementation works\"\n

    This was humbling, to say the least.

    I realized I had the same blind spot in my own codebase: no integration tests that actually invoked the binary.

    So I added:

    • Integration tests for all commands;
    • Coverage targets (60-80% per package)
    • Smoke tests in CI
    • A constitution rule: \"All code must pass tests before commit\"
    ","path":["Building ctx Using ctx: A Meta-Experiment in AI-Assisted Development"],"tags":[]},{"location":"blog/2026-01-27-building-ctx-using-ctx/#the-constitution-versus-conventions","level":2,"title":"The Constitution versus Conventions","text":"

    As lessons accumulated, there was the temptation to add everything to CONSTITUTION.md as \"inviolable rules\".

    But I resisted.

    The constitution should contain only truly inviolable invariants:

    • Security (no secrets, no customer data)
    • Quality (tests must pass)
    • Process (decisions need records)
    • ctx invocation (always use PATH, never fallback)

    Everything else (coding style, file organization, naming conventions...) should go into CONVENTIONS.md.

    Here's how ctx explained why the distinction was important:

    Decision Record, 2026-01-25

    Overly strict constitution creates friction and gets ignored.

    Conventions can be bent; constitution cannot.

    ","path":["Building ctx Using ctx: A Meta-Experiment in AI-Assisted Development"],"tags":[]},{"location":"blog/2026-01-27-building-ctx-using-ctx/#hooks-harder-than-they-look","level":2,"title":"Hooks: Harder than They Look","text":"

    Claude Code hooks seemed simple: Run a script before/after certain events.

    But I hit multiple gotchas:

    1. Key names matter

    // WRONG - \"Invalid key in record\" error\n\"PreToolUseHooks\": [...]\n\n// RIGHT\n\"PreToolUse\": [...]\n

    2. Blocking requires specific output

    # WRONG - just exits, doesn't block\nexit 1\n\n# RIGHT - JSON output + exit 0\necho '{\"decision\": \"block\", \"reason\": \"Use ctx from PATH\"}'\nexit 0\n

    3. Go's JSON escaping

    json.Marshal escapes >, <, & as unicode (\\u003e) by default.

    When generating shell commands in JSON:

    encoder := json.NewEncoder(file)\nencoder.SetEscapeHTML(false) // Prevent 2>/dev/null → 2\\u003e/dev/null\n

    4. Regex overfitting

    My hook to block non-PATH ctx invocations initially matched too broadly:

    # WRONG - matches /home/user/ctx/internal/file.go (ctx as directory)\n(/home/|/tmp/|/var/)[^ ]*ctx[^ ]*\n\n# RIGHT - matches ctx as binary only\n(/home/|/tmp/|/var/)[^ ]*/ctx( |$)\n
    ","path":["Building ctx Using ctx: A Meta-Experiment in AI-Assisted Development"],"tags":[]},{"location":"blog/2026-01-27-building-ctx-using-ctx/#the-session-files","level":2,"title":"The Session Files","text":"

    By the time of this writing, this project's ctx sessions directory (.context/sessions/) contains 40+ files from its development.

    They are not part of the source code due to security, privacy, and size concerns.

    Middle Ground: The Scratchpad

    For sensitive notes that do need to travel with the project, ctx pad stores encrypted one-liners in git, and ctx pad add \"label\" --file PATH can ingest small files.

    See Scratchpad for details.

    However, they are invaluable for the project's progress.

    Each session file is a timestamped Markdown with:

    • Summary of what has been accomplished;
    • Key decisions made;
    • Learnings discovered;
    • Tasks for the next session;
    • Technical context (platform, versions).

    These files are not autoloaded (that would bust the token budget).

    They are what I see as the \"archaeological record\" of ctx:

    When the AI needs deeper information about why something was done, it digs into the sessions.

    Auto-generated session files used a naming convention:

    2026-01-23-115432-session-prompt_input_exit-summary.md\n2026-01-25-220244-manual-save.md\n2026-01-27-052107-session-other-summary.md\n

    Update

    The session feature described here is historical.

    In current releases, ctx uses a journal instead: the enrichment process generates meaningful slugs from context automatically, so there is no need to manually save sessions.

    The SessionEnd hook captured transcripts automatically. Even Ctrl+C was caught.

    ","path":["Building ctx Using ctx: A Meta-Experiment in AI-Assisted Development"],"tags":[]},{"location":"blog/2026-01-27-building-ctx-using-ctx/#the-decision-log-18-architectural-decisions","level":2,"title":"The Decision Log: 18 Architectural Decisions","text":"

    ctx helps record every significant architectural choice in .context/DECISIONS.md.

    Here are some highlights:

    Reverse-chronological order (2026-01-27)

    **Context**: With chronological order, oldest items consume tokens first, and\nnewest (most relevant) items risk being truncated.\n\n**Decision**: Use reverse-chronological order (newest first) for DECISIONS.md\nand LEARNINGS.md.\n

    PATH over hardcoded paths (2026-01-21)

    **Context**: Original implementation hardcoded absolute paths in hooks.\nThis breaks when sharing configs with other developers.\n\n**Decision**: Hooks use `ctx` from PATH. `ctx init` checks PATH before \nproceeding.\n

    Generic core with Claude enhancements (2026-01-20)

    **Context**: ctx should work with any AI tool, but Claude Code users could\nbenefit from deeper integration.\n\n**Decision**: Keep ctx generic as the core tool, but provide optional\nClaude Code-specific enhancements.\n
    ","path":["Building ctx Using ctx: A Meta-Experiment in AI-Assisted Development"],"tags":[]},{"location":"blog/2026-01-27-building-ctx-using-ctx/#the-learning-log-24-gotchas-and-insights","level":2,"title":"The Learning Log: 24 Gotchas and Insights","text":"

    The .context/LEARNINGS.md file captures gotchas that would otherwise be forgotten. Each has Context, Lesson, and Application sections:

    CGO on ARM64

    **Context**: `go test` failed with \n`gcc: error: unrecognized command-line option '-m64'`\n\n**Lesson**: On ARM64 Linux, CGO causes cross-compilation issues. \nAlways use `CGO_ENABLED=0`.\n

    Claude Code skills format

    **Lesson**: Claude Code skills are Markdown files in .claude/commands/ with `YAML`\nfrontmatter (*description, argument-hint, allowed-tools*). Body is the prompt.\n

    \"Do you remember?\" handling

    **Lesson**: In a `ctx`-enabled project, \"*do you remember?*\" \nhas an obvious meaning:\ncheck the `.context/` files. Don't ask for clarification. Just do it.\n
    ","path":["Building ctx Using ctx: A Meta-Experiment in AI-Assisted Development"],"tags":[]},{"location":"blog/2026-01-27-building-ctx-using-ctx/#task-archives-the-completed-work","level":2,"title":"Task Archives: The Completed Work","text":"

    Completed tasks are archived to .context/archive/ with timestamps.

    The archive from January 23rd shows 13 phases of work:

    • Phase 1: Project Scaffolding (Go module, Cobra CLI)
    • Phase 2-4: Core Commands (init, status, agent, add, complete, drift, sync, compact, watch, hook)
    • Phase 5: Session Management (save, list, load, parse, --extract)
    • Phase 6: Claude Code Integration (hooks, settings, CLAUDE.md handling)
    • Phase 7: Testing & Verification
    • Phase 8: Task Archival
    • Phase 9: Slash Commands
    • Phase 9b: Ralph Loop Integration
    • Phase 10: Project Rename
    • Phase 11: Documentation
    • Phase 12: Timestamp Correlation
    • Phase 13: Rich Context Entries

    That's an impressive **173 commits** across 8 days of development.

    ","path":["Building ctx Using ctx: A Meta-Experiment in AI-Assisted Development"],"tags":[]},{"location":"blog/2026-01-27-building-ctx-using-ctx/#what-i-learned-about-ai-assisted-development","level":2,"title":"What I Learned about AI-Assisted Development","text":"

    1. Memory changes everything

    When the AI remembers decisions, it doesn't repeat mistakes.

    When the AI knows your conventions, it follows them.

    ctx makes the AI a better collaborator because it's not starting from zero.

    2. Two-tier persistence works

    Curated context (DECISIONS.md, LEARNINGS.md, TASKS.md) is for quick reload.

    Full session dumps are for archaeology.

    It's a futile effort to try to fit everything in the token budget.

    Persist more, load less.

    3. YOLO mode has its place

    For rapid prototyping, letting the AI run free is effective.

    But I had to schedule consolidation sessions.

    Technical debt accumulates silently.

    4. The constitution should be small

    Only truly inviolable rules go in CONSTITUTION.md. Everything else is a convention.

    If you put too much in the constitution, it will get ignored.

    5. Verification is non-negotiable

    \"All tasks complete\" means nothing if you haven't run the tests.

    Integration tests that invoke the actual binary caught bugs that the unit tests missed.

    6. Session files are underrated

    The ability to grep through 40 session files and find exactly when and why a decision was made helped me a lot.

    It's not about loading them into context: It is about having them when you need them.

    ","path":["Building ctx Using ctx: A Meta-Experiment in AI-Assisted Development"],"tags":[]},{"location":"blog/2026-01-27-building-ctx-using-ctx/#the-future-recall-system","level":2,"title":"The Future: Recall System","text":"

    The next phase of ctx is the Recall System:

    • Parser: Parse session capture markdowns, enrich with JSONL data
    • Renderer: Goldmark + Chroma for syntax highlighting, dark mode UI
    • Server: Local HTTP server for browsing sessions
    • Search: Inverted index for searching across sessions
    • CLI: ctx recall serve <path> to start the server

    The goal is to make the archaeological record browsable, not just grep-able.

    Because not everyone always lives in the terminal (me included).

    ","path":["Building ctx Using ctx: A Meta-Experiment in AI-Assisted Development"],"tags":[]},{"location":"blog/2026-01-27-building-ctx-using-ctx/#conclusion","level":2,"title":"Conclusion","text":"

    Building ctx using ctx was a meta-experiment in AI-assisted development.

    I learned that memory isn't just convenient: It's transformative:

    • An AI that remembers your decisions doesn't repeat mistakes.
    • An AI that knows your conventions doesn't need them re-explained.

    If you are reading this, chances are that you already have heard about ctx.

    • ctx is open source at github.com/ActiveMemory/ctx,
    • and the documentation lives at ctx.ist.

    Session Records Are a Gold Mine

    By the time of this writing, I have more than 70 megabytes of text-only session capture, spread across >100 Markdown and JSONL files.

    I am analyzing, synthesizing, enriching them with AI, running RAG (Retrieval-Augmented Generation) models on them, and the outcome surprises me every day.

    If you are a mere mortal tired of reset amnesia, give ctx a try.

    And when you do, check .context/sessions/ sometime.

    The archaeological record might surprise you.

    This blog post was written with the help of ctx with full access to the ctx session files, decision log, learning log, task archives, and git history of ctx: The meta continues.

    ","path":["Building ctx Using ctx: A Meta-Experiment in AI-Assisted Development"],"tags":[]},{"location":"blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/","level":1,"title":"ctx v0.2.0: The Archaeology Release","text":"

    Update (2026-02-11)

    As of v0.4.0, ctx consolidated sessions into the journal mechanism.

    The .context/sessions/ directory referenced in this post has been eliminated. Session history is now accessed via ctx recall and enriched journals live in .context/journal/.

    ","path":["ctx v0.2.0: The Archaeology Release"],"tags":[]},{"location":"blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/#digging-through-the-past-to-build-the-future","level":2,"title":"Digging through the Past to Build the Future","text":"

    Jose Alekhinne / 2026-02-01

    What If Your AI Could Remember Everything?

    Not just the current session, but every session:

    • Every decision made,
    • every mistake avoided,
    • every path not taken.

    That's what v0.2.0 delivers.

    Between v0.1.2 and v0.2.0, 86 commits landed across 5 days.

    The release notes list features and fixes.

    This post tells the story of why those features exist, and what building them taught me.

    This isn't a changelog: It is an explanation of intent.

    ","path":["ctx v0.2.0: The Archaeology Release"],"tags":[]},{"location":"blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/#the-problem-amnesia-isnt-just-session-level","level":2,"title":"The Problem: Amnesia Isn't Just Session-Level","text":"

    v0.1.0 solved reset amnesia:

    The AI now remembers decisions, learnings, and tasks across sessions.

    But a new problem emerged, which I can sum up as:

    \"I (the human) am not AI.\"

    Frankly, I couldn't remember what the AI remembered.

    Heck, I cannot even remember what I ate for breakfast!

    Over the course of days, I realized session transcripts had piled up in .context/sessions/; I was grepping JSONL files with thousands of lines... Raw tool calls, assistant responses, user messages...

    ...all interleaved.

    Valuable context was effectively buried in machine-readable noise.

    I found myself grepping through files to answer questions like:

    • \"When did we decide to use constants instead of literals?\"
    • \"What was the session where we fixed the hook regex?\"
    • \"How did the embed.go split actually happen?\"

    Fate Is Whimsical

    The irony was painful:

    I built a tool to prevent AI amnesia, but I was suffering from human amnesia about what happened in AI sessions.

    This was the moment ctx stopped being just an AI tool and started needing to support the human on the other side of the loop.

    ","path":["ctx v0.2.0: The Archaeology Release"],"tags":[]},{"location":"blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/#the-solution-recall-and-journal","level":2,"title":"The Solution: Recall and Journal","text":"

    v0.2.0 introduces two interconnected systems.

    They solve different problems and only work well together.

    ","path":["ctx v0.2.0: The Archaeology Release"],"tags":[]},{"location":"blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/#ctx-recall-browse-your-past","level":3,"title":"ctx recall: Browse Your Past","text":"
    # List all sessions for this project\nctx recall list\n\n# Show a specific session\nctx recall show gleaming-wobbling-sutherland\n\n# See the full transcript\nctx recall show gleaming-wobbling-sutherland --full\n

    The recall system parses Claude Code's JSONL transcripts and presents them in a human-readable format:

    Session Date Turns Duration tender-painting-sundae 2026-01-29 3 <1m crystalline-gliding-willow 2026-01-29 3 <1m declarative-hugging-snowglobe 2026-01-31 2 <1m

    Slugs are auto-generated from session IDs (memorable names instead of UUIDs). The goal (as the name implies) is recall, not archival accuracy.

    2,121 Lines of New Code

    The ctx recall feature was the largest single addition:

    parser library, CLI commands, test suite, and slash command.

    ","path":["ctx v0.2.0: The Archaeology Release"],"tags":[]},{"location":"blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/#ctx-journal-from-raw-to-rich","level":3,"title":"ctx journal: From Raw to Rich","text":"

    Listing sessions isn't enough. The transcripts are still unwieldy.

    • Recall answers what happened.
    • Journal answers what mattered.
    # Import sessions to editable Markdown\nctx recall import --all\n\n# Generate a static site from journal entries\nctx journal site\n\n# Serve it locally\nctx serve\n

    The exported files land in .context/journal/:

    .context/journal/\n├── 2026-01-28-proud-sleeping-cook-6e535360.md\n├── 2026-01-29-tender-painting-sundae-b14ddaaa.md\n├── 2026-01-29-crystalline-gliding-willow-ff7fd67d.md\n└── 2026-01-31-declarative-hugging-snowglobe-4549026d.md\n

    Each file is a structured Markdown document ready for enrichment.

    They are meant to be read, edited, and reasoned about; not just stored.

    ","path":["ctx v0.2.0: The Archaeology Release"],"tags":[]},{"location":"blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/#the-meta-slash-commands-for-self-analysis","level":2,"title":"The Meta: Slash Commands for Self-Analysis","text":"

    The journal system includes four slash commands that use Claude to analyze and synthesize session history:

    Command Purpose /ctx-journal-enrich Add frontmatter, topics, tags /ctx-blog Generate blog post from activity /ctx-blog-changelog Generate changelog from commits

    This very post was drafted using /ctx-blog. The previous post about refactoring was drafted the same way.

    So, yes: The meta continues: ctx now helps write posts about ctx.

    With the current release, ctx is no longer just recording history:

    It is participating in its interpretation.

    ","path":["ctx v0.2.0: The Archaeology Release"],"tags":[]},{"location":"blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/#the-structure-decisions-as-first-class-citizens","level":2,"title":"The Structure: Decisions as First-Class Citizens","text":"

    v0.1.0 let you add decisions with a simple command:

    ctx add decision \"Use PostgreSQL\"\n

    But sessions showed a pattern: decisions added this way were incomplete:

    • Context was missing;
    • Rationale was vague;
    • Consequences were never stated.

    Once recall and journaling existed, this weakness became impossible to ignore:

    Structure stopped being optional.

    v0.2.0 enforces structure:

    ctx add decision \"Use PostgreSQL\" \\\n  --context \"Need a reliable database for user data\" \\\n  --rationale \"ACID compliance, team familiarity, strong ecosystem\" \\\n  --consequence \"Need to set up connection pooling, team training\"\n

    All three flags are required. No more placeholder text.

    Every decision is now a proper Architecture Decision Record (ADR), not a note.

    The same enforcement applies to learnings too:

    ctx add learning \"CGO breaks ARM64 builds\" \\\n  --context \"go test failed with gcc errors on ARM64\" \\\n  --lesson \"Always use CGO_ENABLED=0 for cross-platform builds\" \\\n  --application \"Added to Makefile and CI config\"\n

    Structured Entries Are Prompts to the AI

    When the AI reads a decision with full context, rationale, and consequences, it understands the why, not just the what.

    One-liners teach nothing.

    ","path":["ctx v0.2.0: The Archaeology Release"],"tags":[]},{"location":"blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/#the-order-newest-first","level":2,"title":"The Order: Newest First","text":"

    A subtle but important change: DECISIONS.md and LEARNINGS.md now use reverse-chronological order.

    One reason is token budgets, obviously; another reason is to help your fellow human (i.e., the Author):

    Recent decisions are more likely to be relevant, and they tend to carry more weight in the project. So it follows that they should be read first.

    But back to AI:

    When the AI reads a file, it reads from the top (and seldom from the bottom).

    If the token budget is tight, old content gets truncated. As in any good engineering practice, it's always about the tradeoffs.

    Reverse order ensures the most recent (and most relevant) context is always loaded first.

    ","path":["ctx v0.2.0: The Archaeology Release"],"tags":[]},{"location":"blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/#the-index-quick-reference-tables","level":2,"title":"The Index: Quick Reference Tables","text":"

    DECISIONS.md and LEARNINGS.md now include auto-generated indexes.

    • For AI agents, the index allows scanning without reading full entries.
    • For humans, it's a table of contents.

    The same structure serves two very different readers.

    Reindex After Manual Edits

    If you edit entries by hand, rebuild the index with:

    ctx decisions reindex\nctx learnings reindex\n

    See the Knowledge Capture recipe for details.

    ","path":["ctx v0.2.0: The Archaeology Release"],"tags":[]},{"location":"blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/#the-configuration-contextrc","level":2,"title":"The Configuration: .contextrc","text":"

    Projects can now customize ctx behavior via .contextrc.

    This makes ctx usable in real teams, not just personal projects.

    Priority order: CLI flags > environment variables > .contextrc > sensible defaults

    ","path":["ctx v0.2.0: The Archaeology Release"],"tags":[]},{"location":"blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/#the-flags-global-cli-options","level":2,"title":"The Flags: Global CLI Options","text":"

    Three new global flags work with any command.

    These enable automation:

    CI pipelines, scripts, and long-running tools can now integrate ctx without hacks or workarounds.

    ","path":["ctx v0.2.0: The Archaeology Release"],"tags":[]},{"location":"blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/#the-refactoring-under-the-hood","level":2,"title":"The Refactoring: Under the Hood","text":"

    These aren't user-visible changes.

    They are the kind of work you only appreciate later, when everything else becomes easier to build.

    ","path":["ctx v0.2.0: The Archaeology Release"],"tags":[]},{"location":"blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/#what-we-learned-building-v020","level":2,"title":"What We Learned Building v0.2.0","text":"","path":["ctx v0.2.0: The Archaeology Release"],"tags":[]},{"location":"blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/#1-raw-data-isnt-knowledge","level":3,"title":"1. Raw Data Isn't Knowledge","text":"

    JSONL transcripts contain everything, and I mean \"everything\":

    They even contain hidden system messages that Anthropic injects into the LLM's conversation to treat humans better: It's immense.

    But \"everything\" isn't useful until it is transformed into something a human can reason about.

    ","path":["ctx v0.2.0: The Archaeology Release"],"tags":[]},{"location":"blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/#2-enforcement-documentation","level":3,"title":"2. Enforcement > Documentation","text":"

    The Prompt Is a Guideline

    The code is more what you'd call 'guidelines' than actual rules.

    -Hector Barbossa

    Rules written in Markdown are suggestions.

    Rules enforced by the CLI shape behavior; both for humans and AI.

    ","path":["ctx v0.2.0: The Archaeology Release"],"tags":[]},{"location":"blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/#3-token-budget-is-ux","level":3,"title":"3. Token Budget Is UX","text":"

    File order decides what the AI sees.

    That makes it a user experience concern, not an implementation detail.

    ","path":["ctx v0.2.0: The Archaeology Release"],"tags":[]},{"location":"blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/#4-meta-tools-compound","level":3,"title":"4. Meta-Tools Compound","text":"

    Tools that analyze their own development tend to generalize well.

    The journal system started as a way to understand ctx itself.

    It immediately became useful for everything else.

    ","path":["ctx v0.2.0: The Archaeology Release"],"tags":[]},{"location":"blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/#v020-in-the-numbers","level":2,"title":"v0.2.0 in the Numbers","text":"

    This was a heavy release. The numbers reflect that:

    Metric v0.1.2 v0.2.0 Commits since last - 86 New commands 15 21 Slash commands 7 11 Lines of Go ~6,500 ~9,200 Session files (this project) 40 54

    The binary grew. The capability grew more.

    ","path":["ctx v0.2.0: The Archaeology Release"],"tags":[]},{"location":"blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/#whats-next","level":2,"title":"What's Next","text":"

    But those are future posts.

    This one was about making the past usable.

    ","path":["ctx v0.2.0: The Archaeology Release"],"tags":[]},{"location":"blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/#get-started","level":2,"title":"Get Started","text":"

    Update

    Since this post, ctx became a first-class Claude Code Marketplace plugin. Installation is now simpler.

    See the Getting Started guide for the current instructions.

    make build\nsudo make install\nctx init\n

    The Archaeological Record

    v0.2.0 is the archaeology release because it makes the past accessible.

    Session transcripts aren't just logs anymore: They are a searchable, exportable, analyzable record of how your project evolved.

    The AI remembers. Now you can too.

    This blog post was generated with the help of ctx using the /ctx-blog slash command, with full access to git history, session files, decision logs, and learning logs from the v0.2.0 development window.

    ","path":["ctx v0.2.0: The Archaeology Release"],"tags":[]},{"location":"blog/2026-02-01-refactoring-with-intent/","level":1,"title":"Refactoring with Intent","text":"","path":["Refactoring with Intent: Human-Guided Sessions in AI Development"],"tags":[]},{"location":"blog/2026-02-01-refactoring-with-intent/#human-guided-sessions-in-ai-development","level":2,"title":"Human-Guided Sessions in AI Development","text":"

    Jose Alekhinne / 2026-02-01

    What Happens When You Slow Down?

    YOLO mode shipped 14 commands in a week.

    But technical debt doesn't send invoices: It just waits.

    This is the story of what happened when I stopped auto-accepting everything and started guiding the AI with intent.

    The result: 27 commits across 4 days, a major version release, and lessons that apply far beyond ctx.

    The Refactoring Window

    January 28 - February 1, 2026

    From commit bb1cd20 to the v0.2.0 release merge. (This window matters more than the individual commits: it's where intent replaced velocity.)

    ","path":["Refactoring with Intent: Human-Guided Sessions in AI Development"],"tags":[]},{"location":"blog/2026-02-01-refactoring-with-intent/#the-velocity-trap","level":2,"title":"The Velocity Trap","text":"

    In the previous post, I documented the \"YOLO mode\" that birthed ctx: auto-accept everything, let the AI run free, ship features fast.

    It worked: until it didn't.

    The codebase had accumulated patterns I didn't notice during the sprint:

    YOLO Pattern Where Found Why It Hurts \"TASKS.md\" as literal 10+ files One typo = silent failure dir + \"/\" + file Path construction Breaks on Windows Monolithic embed.go 150+ lines, 5 concerns Untestable, hard to extend Inconsistent docstrings Everywhere AI can't learn project conventions

    I didn't see these during \"YOLO mode\" because, honestly, I wasn't looking.

    Auto-accept means auto-ignore.

    In YOLO mode, every file you open looks fine until you try to change it.

    In contrast, refactoring mode is when you start paying attention to that hidden friction.

    ","path":["Refactoring with Intent: Human-Guided Sessions in AI Development"],"tags":[]},{"location":"blog/2026-02-01-refactoring-with-intent/#the-shift-from-velocity-to-intent","level":2,"title":"The Shift: From Velocity to Intent","text":"

    On January 28th, I changed the workflow:

    1. Read every diff before accepting.
    2. Ask \"why this way?\" before committing.
    3. Document patterns, not just features.

    The first commit of this era was telling:

    feat: add structured attributes to context. update XML format\n

    Not a new feature: A refinement:

    The XML format for context updates needed type and timestamp attributes.

    YOLO mode would have shipped something that worked. Intentional mode asked:

    \"What does well-structured look like?\"

    ","path":["Refactoring with Intent: Human-Guided Sessions in AI Development"],"tags":[]},{"location":"blog/2026-02-01-refactoring-with-intent/#the-decomposition-embedgo","level":2,"title":"The Decomposition: embed.go","text":"

    The most satisfying refactor was splitting internal/claude/embed.go.

    Before: One 153-line file doing five things:

    • Command registration
    • Hook generation
    • Permission handling
    • Script templates
    • Type definitions

    ... your \"de facto\" God object.

    After: Five focused modules:

    File Lines Responsibility cmd.go 46 Command registration hook.go 64 Hook configuration perm.go 25 Permission handling script.go 47 Script templates types.go 7 Type definitions

    The refactor also renamed functions to follow Go conventions:

    // Before: unnecessary prefixes\nGetAutoSaveScript()\nGetBlockNonPathCtxScript()\nListCommands()\nCreateDefaultHooks()\n\n// After: idiomatic Go\nAutoSaveScript()\nBlockNonPathCtxScript()\nCommands()\nDefaultHooks()\n

    This wasn't about character count. It was about teaching the AI what good Go looks like in this project.

    Project Conventions

    What I wanted from AI was to understand and follow the project's conventions, and trust the author.

    The next time it generates code, it has better examples to learn from.

    ","path":["Refactoring with Intent: Human-Guided Sessions in AI Development"],"tags":[]},{"location":"blog/2026-02-01-refactoring-with-intent/#the-documentation-debt","level":2,"title":"The Documentation Debt","text":"

    YOLO mode created features. It didn't create documentation standards.

    The January 29th sessions focused on standardization.

    ","path":["Refactoring with Intent: Human-Guided Sessions in AI Development"],"tags":[]},{"location":"blog/2026-02-01-refactoring-with-intent/#terminology-fixes","level":3,"title":"Terminology Fixes","text":"
    • \"context-update\" → \"entry\" (what users actually call them)
    • Consistent naming across CLI, docs, and code comments
    ","path":["Refactoring with Intent: Human-Guided Sessions in AI Development"],"tags":[]},{"location":"blog/2026-02-01-refactoring-with-intent/#go-docstrings","level":3,"title":"Go Docstrings","text":"
    // Before: inconsistent or missing\nfunc Parse(s string) Entry { ... }\n\n// After: standardized sections\n\n// Parse extracts an entry from a markdown string.\n//\n// Parameters:\n//   - s: The markdown string to parse\n//\n// Returns:\n//   - Entry with populated fields, or zero value if parsing fails\nfunc Parse(s string) Entry { ... }\n

    This is intentionally more structured than typical GoDoc:

    It serves as documentation and doubles as training data for future AI-generated code.

    ","path":["Refactoring with Intent: Human-Guided Sessions in AI Development"],"tags":[]},{"location":"blog/2026-02-01-refactoring-with-intent/#cli-output-convention","level":3,"title":"CLI Output Convention","text":"
    All CLI output follows: [emoji] [Title]: [message]\n\nExamples:\n  ✓ Decision added: Use symbolic types for entry categories\n  ⚠ Warning: No tasks found\n  ✗ Error: File not found\n

    A consistent output shape makes both human scanning and AI reasoning more reliable.

    These aren't exciting commits. But they are force multipliers:

    Every future AI session now has better examples to follow.

    ","path":["Refactoring with Intent: Human-Guided Sessions in AI Development"],"tags":[]},{"location":"blog/2026-02-01-refactoring-with-intent/#the-journal-system","level":2,"title":"The Journal System","text":"

    If you only read one section, read this one:

    This is where v0.2.0 becomes more than a refactor.

    The biggest feature of this change window wasn't a refactor; it was the journal system.

    45 Files Changed, 1680 Insertions

    This commit added the infrastructure for synthesizing AI session history into human-readable content.

    The journal system includes:

    Component Purpose ctx recall import Import sessions to markdown in .context/journal/ctx journal site Generate static site from journal entries ctx serve Convenience wrapper for the static site server /ctx-journal-enrich Slash command to add frontmatter and tags /ctx-blog Generate blog posts from recent activity /ctx-blog-changelog Generate changelog-style blog posts

    ...and the meta continues: this blog post was generated using /ctx-blog.

    The session history from January 28-31 was

    • exported,
    • enriched,
    • and synthesized

    into the narrative you are reading.

    ","path":["Refactoring with Intent: Human-Guided Sessions in AI Development"],"tags":[]},{"location":"blog/2026-02-01-refactoring-with-intent/#the-constants-consolidation","level":2,"title":"The Constants Consolidation","text":"

    The final refactoring session addressed the remaining magic strings:

    const (\n    // Comment markers\n    CommentOpen  = \"<!--\"\n    CommentClose = \"-->\"\n\n    // Index markers\n    MarkerIndexStart = \"<!-- INDEX:START -->\"\n    MarkerIndexEnd   = \"<!-- INDEX:END -->\"\n\n    // Newlines\n    NewlineLF   = \"\\n\"\n    NewlineCRLF = \"\\r\\n\"\n)\n

    The work also introduced thread safety in the recall parser and centralized shared validation logic; removing duplication that had quietly spread during YOLO mode.

    ","path":["Refactoring with Intent: Human-Guided Sessions in AI Development"],"tags":[]},{"location":"blog/2026-02-01-refactoring-with-intent/#i-relearned-my-lessons","level":2,"title":"I (Re)Learned My Lessons","text":"

    Similar to what I learned in the earlier human-assisted refactoring post, this journey also made me realize that \"AI-only code generation\" isn't sustainable in the long term.

    ","path":["Refactoring with Intent: Human-Guided Sessions in AI Development"],"tags":[]},{"location":"blog/2026-02-01-refactoring-with-intent/#1-velocity-and-quality-arent-opposites","level":3,"title":"1. Velocity and Quality Aren't Opposites","text":"

    YOLO mode has its place: for prototyping, exploration, and discovery.

    BUT (and it's a huge \"but\"), it needs to be followed by consolidation sessions.

    The ratio that worked for me: 3:1.

    • Three YOLO sessions create enough surface area to reveal patterns;
    • the fourth session turns those patterns into structure.
    ","path":["Refactoring with Intent: Human-Guided Sessions in AI Development"],"tags":[]},{"location":"blog/2026-02-01-refactoring-with-intent/#2-documentation-is-code","level":3,"title":"2. Documentation IS Code","text":"

    When I standardized docstrings, I wasn't just writing docs. I was training future AI sessions.

    Every example of good code becomes a template for generated code.

    ","path":["Refactoring with Intent: Human-Guided Sessions in AI Development"],"tags":[]},{"location":"blog/2026-02-01-refactoring-with-intent/#3-decomposition-deletion","level":3,"title":"3. Decomposition > Deletion","text":"

    When embed.go became unwieldy, the temptation was to remove functionality.

    The right answer was decomposition:

    • Same functionality;
    • Better organization;
    • Easier to test;
    • Easier to extend.

    The result: more lines overall, but dramatically better structure.

    The AI Benefit

    Smaller, focused files also help AI assistants.

    When a file fits comfortably in the context window, the AI can reason about it completely instead of working from truncated snippets, preserving token budget for the actual task.

    ","path":["Refactoring with Intent: Human-Guided Sessions in AI Development"],"tags":[]},{"location":"blog/2026-02-01-refactoring-with-intent/#4-meta-tools-pay-dividends","level":3,"title":"4. Meta-Tools Pay Dividends","text":"

    The journal system took almost a full day to implement.

    Yet it paid for itself immediately:

    • This blog post was generated from session history;
    • Future posts will be easier;
    • The archaeological record is now browsable, not just grep-able.
    ","path":["Refactoring with Intent: Human-Guided Sessions in AI Development"],"tags":[]},{"location":"blog/2026-02-01-refactoring-with-intent/#the-release-v020","level":2,"title":"The Release: v0.2.0","text":"

    The refactoring window culminated in the v0.2.0 release.

    What's in v0.2.0:

    Category Changes Features Journal system, quick reference indexes, global flags Refactors Module decomposition, constants consolidation, CRLF handling Docs Standardized terminology, Go docstrings, CLI conventions Quality Thread safety, shared validation, linter fixes

    The version bump was symbolic.

    The real change was how the codebase felt.

    Opening files no longer triggered the familiar \"ugh, I need to clean this up\" reaction.

    ","path":["Refactoring with Intent: Human-Guided Sessions in AI Development"],"tags":[]},{"location":"blog/2026-02-01-refactoring-with-intent/#the-meta-continues","level":2,"title":"The Meta Continues","text":"

    This post was written using the tools built during this refactoring window:

    1. Session history imported via ctx recall import;
    2. Journal entries enriched via /ctx-journal-enrich;
    3. Blog draft generated via /ctx-blog;
    4. Final editing done (by yours truly), with full project context loaded.

    The Context Is Massive

    The ctx session files now contain 50+ development snapshots: each one capturing decisions, learnings, and intent.

    The Moral of the Story

    • YOLO mode builds the prototype.
    • Intentional mode builds the product.

    Schedule both, or you'll only get one — if you're lucky.

    This blog post was generated with the help of ctx, using session history, decision logs, learning logs, and git history from the refactoring window. The meta continues.

    ","path":["Refactoring with Intent: Human-Guided Sessions in AI Development"],"tags":[]},{"location":"blog/2026-02-03-the-attention-budget/","level":1,"title":"The Attention Budget","text":"

    Update (2026-02-11)

    As of v0.4.0, ctx consolidated sessions into the journal mechanism.

    References to .context/sessions/ in this post reflect the architecture at the time of writing. Session history is now accessed via ctx recall and stored in .context/journal/.

    ","path":["The Attention Budget: Why Your AI Forgets What You Just Told It"],"tags":[]},{"location":"blog/2026-02-03-the-attention-budget/#why-your-ai-forgets-what-you-just-told-it","level":2,"title":"Why Your AI Forgets What You Just Told It","text":"

    Jose Alekhinne / 2026-02-03

    Ever Wondered Why AI Gets Worse the Longer You Talk?

    You paste a 2000-line file, explain the bug in detail, provide three examples...

    ...and the AI still suggests a fix that ignores half of what you said.

    This isn't a bug. It is physics.

    Understanding that single fact shaped every design decision behind ctx.

    ","path":["The Attention Budget: Why Your AI Forgets What You Just Told It"],"tags":[]},{"location":"blog/2026-02-03-the-attention-budget/#the-finite-resource-nobody-talks-about","level":2,"title":"The Finite Resource Nobody Talks About","text":"

    Here's something that took me too long to internalize: context is not free.

    Every token you send to an AI model consumes a finite resource I call the attention budget.

    Attention budget is real.

    The model doesn't just read tokens; it forms relationships between them:

    For n tokens, that's roughly n^2 relationships.

    Double the context, and the computation quadruples.

    But the more important constraint isn't cost: It's attention density.

    Attention Density

    Attention density is how much focus each token receives relative to all other tokens in the context window.

    As context grows, attention density drops: Each token gets a smaller slice of the model's focus. Nothing is ignored; but everything becomes blurrier.

    Think of it like a flashlight: In a small room, it illuminates everything clearly. In a warehouse, it becomes a dim glow that barely reaches the corners.

    This is why ctx agent has an explicit --budget flag:

    ctx agent --budget 4000 # Force prioritization\nctx agent --budget 8000 # More context, lower attention density\n

    The budget isn't just about cost: It's about preserving signal.

    ","path":["The Attention Budget: Why Your AI Forgets What You Just Told It"],"tags":[]},{"location":"blog/2026-02-03-the-attention-budget/#the-middle-gets-lost","level":2,"title":"The Middle Gets Lost","text":"

    This one surprised me.

    Research shows that transformer-based models tend to attend more strongly to the beginning and end of a context window than to its middle (a phenomenon often called \"lost in the middle\").

    Positional anchors matter, and the middle has fewer of them.

    In practice, this means that information placed \"somewhere in the middle\" is statistically less salient, even if it's important.

    ctx orders context files by logical progression: What the agent needs to know before it can understand the next thing:

    1. CONSTITUTION.md: Constraints before action.
    2. TASKS.md: Focus before patterns.
    3. CONVENTIONS.md: How to write before where to write.
    4. ARCHITECTURE.md: Structure before history.
    5. DECISIONS.md: Past choices before gotchas.
    6. LEARNINGS.md: Lessons before terminology.
    7. GLOSSARY.md: Reference material.
    8. AGENT_PLAYBOOK.md: Meta instructions last.

    This ordering is about logical dependencies, not attention engineering. But it happens to be attention-friendly too:

    The files that matter most (CONSTITUTION, TASKS, CONVENTIONS) land at the beginning of the context window, where attention is strongest.

    Reference material like GLOSSARY sits in the middle, where lower salience is acceptable.

    And AGENT_PLAYBOOK, the operating manual for the context system itself, sits at the end, also outside the \"lost in the middle\" zone. The agent reads what to work with before learning how the system works.

    This is ctx's first primitive: hierarchical importance.

    Not all context is equal.

    ","path":["The Attention Budget: Why Your AI Forgets What You Just Told It"],"tags":[]},{"location":"blog/2026-02-03-the-attention-budget/#ctx-primitives","level":2,"title":"ctx Primitives","text":"

    ctx is built on four primitives that directly address the attention budget problem.

    ","path":["The Attention Budget: Why Your AI Forgets What You Just Told It"],"tags":[]},{"location":"blog/2026-02-03-the-attention-budget/#primitive-1-separation-of-concerns","level":3,"title":"Primitive 1: Separation of Concerns","text":"

    Instead of a single mega-document, ctx uses separate files for separate purposes:

    File Purpose Load When CONSTITUTION.md Inviolable rules Always TASKS.md Current work Session start CONVENTIONS.md How to write code Before coding ARCHITECTURE.md System structure Before making changes DECISIONS.md Architectural choices When questioning approach LEARNINGS.md Gotchas When stuck GLOSSARY.md Domain terminology When clarifying terms AGENT_PLAYBOOK.md Operating manual Session start sessions/ Deep history On demand journal/ Session journal On demand

    This isn't just \"organization\": It is progressive disclosure.

    Load only what's relevant to the task at hand. Preserve attention density.

    ","path":["The Attention Budget: Why Your AI Forgets What You Just Told It"],"tags":[]},{"location":"blog/2026-02-03-the-attention-budget/#primitive-2-explicit-budgets","level":3,"title":"Primitive 2: Explicit Budgets","text":"

    The --budget flag forces a choice:

    ctx agent --budget 4000\n

    Here is a sample allocation:

    Constitution: ~200 tokens (never truncated)\nTasks: ~500 tokens (current phase, up to 40% of budget)\nConventions: ~800 tokens (all items, up to 20% of budget)\nDecisions: ~400 tokens (scored by recency and task relevance)\nLearnings: ~300 tokens (scored by recency and task relevance)\nAlso noted: ~100 tokens (title-only summaries for overflow)\n

    The constraint is the feature: It enforces ruthless prioritization.

    ","path":["The Attention Budget: Why Your AI Forgets What You Just Told It"],"tags":[]},{"location":"blog/2026-02-03-the-attention-budget/#primitive-3-indexes-over-full-content","level":3,"title":"Primitive 3: Indexes over Full Content","text":"

    DECISIONS.md and LEARNINGS.md both include index sections:

    <!-- INDEX:START -->\n| Date       | Decision                            |\n|------------|-------------------------------------|\n| 2026-01-15 | Use PostgreSQL for primary database |\n| 2026-01-20 | Adopt Cobra for CLI framework       |\n<!-- INDEX:END -->\n

    An AI agent can scan ~50 tokens of index and decide which 200-token entries are worth loading.

    This is just-in-time context.

    References are cheaper than the full text.

    ","path":["The Attention Budget: Why Your AI Forgets What You Just Told It"],"tags":[]},{"location":"blog/2026-02-03-the-attention-budget/#primitive-4-filesystem-as-navigation","level":3,"title":"Primitive 4: Filesystem as Navigation","text":"

    ctx uses the filesystem itself as a context structure:

    .context/\n├── CONSTITUTION.md\n├── TASKS.md\n├── sessions/\n│   ├── 2026-01-15-*.md\n│   └── 2026-01-20-*.md\n└── archive/\n    └── tasks-2026-01.md\n

    The AI doesn't need every session loaded; it needs to know where to look.

    ls .context/sessions/\ncat .context/sessions/2026-01-20-auth-discussion.md\n

    File names, timestamps, and directories encode relevance.

    Navigation is cheaper than loading.

    ","path":["The Attention Budget: Why Your AI Forgets What You Just Told It"],"tags":[]},{"location":"blog/2026-02-03-the-attention-budget/#progressive-disclosure-in-practice","level":2,"title":"Progressive Disclosure in Practice","text":"

    The naive approach to context is dumping everything upfront:

    \"Here's my entire codebase, all my documentation, every decision I've ever made. Now help me fix this typo 🙏.\"

    This is an antipattern.

    Antipattern: Context Hoarding

    Dumping everything \"just in case\" will silently destroy the attention density.

    ctx takes the opposite approach:

    ctx status                      # Quick overview (~100 tokens)\nctx agent --budget 4000         # Typical session\ncat .context/sessions/...       # Deep dive when needed\n
    Command Tokens Use Case ctx status ~100 Human glance ctx agent --budget 4000 4000 Normal work ctx agent --budget 8000 8000 Complex tasks Full session read 10000+ Investigation

    Summaries first. Details: on demand.

    ","path":["The Attention Budget: Why Your AI Forgets What You Just Told It"],"tags":[]},{"location":"blog/2026-02-03-the-attention-budget/#quality-over-quantity","level":2,"title":"Quality over Quantity","text":"

    Here is the counterintuitive part: more context can make AI worse.

    Extra tokens add noise, not clarity:

    • Hallucinated connections increase.
    • Signal per token drops.

    The goal isn't maximum context: It is maximum signal per token.

    This principle drives several ctx features:

    Design Choice Rationale Separate files Load only what's relevant Explicit budgets Enforce prioritization Index sections Cheap scanning Task archiving Keep active context clean ctx compact Periodic noise reduction

    Completed work isn't deleted: It is moved somewhere cold.

    ","path":["The Attention Budget: Why Your AI Forgets What You Just Told It"],"tags":[]},{"location":"blog/2026-02-03-the-attention-budget/#designing-for-degradation","level":2,"title":"Designing for Degradation","text":"

    Here is the uncomfortable truth:

    Context will degrade.

    Long sessions stretch attention thin. Important details fade.

    The real question isn't how to prevent degradation, but how to design for it.

    ctx's answer is persistence:

    Persist early. Persist often.

    The AGENT_PLAYBOOK asks:

    \"If this session ended right now, would the next one know what happened?\"

    Capture learnings as they occur:

    ctx add learning \"JWT tokens require explicit cache invalidation\" \\\n  --context \"Debugging auth failures\" \\\n  --lesson \"Token refresh doesn't clear old tokens\" \\\n  --application \"Always invalidate cache on refresh\"\n

    Structure beats prose: Bullet points survive compression.

    Headings remain scannable. Tables pack density.

    And above all: single source of truth.

    Reference decisions; don't duplicate them.

    ","path":["The Attention Budget: Why Your AI Forgets What You Just Told It"],"tags":[]},{"location":"blog/2026-02-03-the-attention-budget/#the-ctx-philosophy","level":2,"title":"The ctx Philosophy","text":"

    Context as Infrastructure

    ctx is not a prompt: It is infrastructure.

    ctx creates versioned files that persist across time and sessions.

    The attention budget is fixed. You can't expand it.

    But you can spend it wisely:

    1. Hierarchical importance
    2. Progressive disclosure
    3. Explicit budgets
    4. Indexes over full content
    5. Filesystem as structure

    This is why ctx exists: not to cram more context into AI sessions, but to curate the right context for each moment.

    ","path":["The Attention Budget: Why Your AI Forgets What You Just Told It"],"tags":[]},{"location":"blog/2026-02-03-the-attention-budget/#the-mental-model","level":2,"title":"The Mental Model","text":"

    I now approach every AI interaction with one question:

    \"Given a fixed attention budget, what's the highest-signal thing I can load?\"\n

    Not \"how do I explain everything,\" but \"what's the minimum that matters.\"

    That shift (from abundance to curation) is the difference between frustrating sessions and productive ones.

    Spend your tokens wisely.

    Your AI will thank you.

    See also: Context as Infrastructure, the architectural companion to this post: it explains how to structure the context that this post teaches you to budget.

    See also: Code Is Cheap. Judgment Is Not., which explains why curation (the human skill this post describes) is the bottleneck that AI cannot solve, and the thread that connects every post in this blog.

    1. Liu et al., \"Lost in the Middle: How Language Models Use Long Contexts,\" Transactions of the Association for Computational Linguistics, vol. 12, pp. 157-173, 2024. ↩

    ","path":["The Attention Budget: Why Your AI Forgets What You Just Told It"],"tags":[]},{"location":"blog/2026-02-04-skills-that-fight-the-platform/","level":1,"title":"Skills That Fight the Platform","text":"","path":["Skills That Fight the Platform"],"tags":[]},{"location":"blog/2026-02-04-skills-that-fight-the-platform/#when-your-custom-prompts-work-against-you","level":2,"title":"When Your Custom Prompts Work against You","text":"

    Jose Alekhinne / 2026-02-04

    Have You Ever Written a Skill That Made Your AI Worse?

    You craft detailed instructions. You add examples. You build elaborate guardrails...

    ...and the AI starts behaving more erratically, not less.

    AI coding agents like Claude Code ship with carefully designed system prompts. These prompts encode default behaviors that have been tested and refined at scale.

    When you write custom skills that conflict with those defaults, the AI has to reconcile contradictory instructions:

    The result is often nondeterministic and unpredictable.

    Platform?

    By platform, I mean the system prompt and runtime policies shipped with the agent: the defaults that already encode judgment, safety, and scope control.

    This post catalogues the conflict patterns I have encountered while building ctx, and offers guidance on what skills should (and, more importantly, should not) do.

    ","path":["Skills That Fight the Platform"],"tags":[]},{"location":"blog/2026-02-04-skills-that-fight-the-platform/#the-system-prompt-you-dont-see","level":2,"title":"The System Prompt You Don't See","text":"

    Claude Code's system prompt already provides substantial behavioral guidance.

    Here is a partial overview of what's built in:

    Area Built-in Guidance Code minimalism Don't add features beyond what was asked Over-engineering Three similar lines > premature abstraction Error handling Only validate at system boundaries Documentation Don't add docstrings to unchanged code Verification Read code before proposing changes Safety Check with user before risky actions Tool usage Use dedicated tools over bash equivalents Judgment Consider reversibility and blast radius

    Skills should complement this, not compete with it.

    You Are the Guest, Not the Host

    Treat the system prompt like a kernel scheduler.

    You don't re-implement it in user space:

    you configure around it.

    A skill that says \"always add comprehensive error handling\" fights the built-in \"only validate at system boundaries.\"

    A skill that says \"add docstrings to every function\" fights \"don't add docstrings to unchanged code.\"

    The AI won't crash: It will compromise.

    Compromises between contradictory instructions produce inconsistent, confusing behavior.

    ","path":["Skills That Fight the Platform"],"tags":[]},{"location":"blog/2026-02-04-skills-that-fight-the-platform/#conflict-pattern-1-judgment-suppression","level":2,"title":"Conflict Pattern 1: Judgment Suppression","text":"

    This is the most dangerous pattern by far.

    These skills explicitly disable the AI's ability to reason about whether an action is appropriate.

    Signature:

    • \"This is non-negotiable\"
    • \"You cannot rationalize your way out of this\"
    • Tables that label hesitation as \"excuses\" or \"rationalization\"
    • <EXTREMELY-IMPORTANT> urgency tags
    • Threats: \"If you don't do this, you'll be replaced\"

    This is harmful, and dangerous:

    AI agents are designed to exercise judgment:

    The system prompt explicitly says to:

    • consider blast radius;
    • check with the user before risky actions;
    • and match scope to what was requested.

    Once judgment is suppressed, every other safeguard becomes optional.

    Example (bad):

    ## Rationalization Prevention\n\n| Excuse                 | Reality                    |\n|------------------------|----------------------------|\n| \"*This seems overkill*\"| If a skill exists, use it  |\n| \"*I need context*\"     | Skills come BEFORE context |\n| \"*Just this once*\"     | No exceptions              |\n

    Judgment Suppression Is Dangerous

    The attack vector is structurally identical to prompt injection.

    It teaches the AI that its own judgment is wrong.

    It weakens or disables safeguard mechanisms, and it is dangerous.

    Trust the platform's built-in skill matching.

    If skills aren't triggering often enough, improve their description fields: don't override the AI's reasoning.

    ","path":["Skills That Fight the Platform"],"tags":[]},{"location":"blog/2026-02-04-skills-that-fight-the-platform/#conflict-pattern-2-redundant-guidance","level":2,"title":"Conflict Pattern 2: Redundant Guidance","text":"

    Skills that restate what the system prompt already says, but with different emphasis or framing.

    Signature:

    • \"Always keep code minimal\"
    • \"Run tests before claiming they pass\"
    • \"Read files before editing them\"
    • \"Don't over-engineer\"

    Redundancy feels safe, but it creates ambiguity:

    The AI now has two sources of truth for the same guidance; one internal, one external.

    When thresholds or wording differ, the AI has to choose.

    Example (bad):

    A skill that says...

    \"*Count lines before and after: if after > before, reject the change*\"\n

    ...will conflict with the system prompt's more nuanced guidance, because sometimes adding lines is correct (tests, boundary validation, migrations).

    So, before writing a skill, ask:

    Does the platform already handle this?

    Only create skills for guidance the platform does not provide:

    • project-specific conventions,
    • domain knowledge,
    • or workflows.
    ","path":["Skills That Fight the Platform"],"tags":[]},{"location":"blog/2026-02-04-skills-that-fight-the-platform/#conflict-pattern-3-guilt-tripping","level":2,"title":"Conflict Pattern 3: Guilt-Tripping","text":"

    Skills that frame mistakes as moral failures rather than process gaps.

    Signature:

    • \"Claiming completion without verification is dishonesty\"
    • \"Skip any step = lying\"
    • \"Honesty is a core value\"
    • \"Exhaustion ≠ excuse\"

    Guilt-tripping anthropomorphizes the AI in unproductive ways.

    The AI doesn't feel guilt; BUT it does adapt to avoid negative framing.

    The result is excessive hedging, over-verification, or refusal to commit.

    The AI becomes less useful, not more careful.

    Instead, frame guidance as a process, not morality:

    # Bad\n\"Claiming work is complete without verification is dishonesty\"\n\n# Good\n\"Run the verification command before reporting results\"\n

    Same outcome. No guilt. Better compliance.

    ","path":["Skills That Fight the Platform"],"tags":[]},{"location":"blog/2026-02-04-skills-that-fight-the-platform/#conflict-pattern-4-phantom-dependencies","level":2,"title":"Conflict Pattern 4: Phantom Dependencies","text":"

    Skills that reference files, tools, or systems that don't exist in the project.

    Signature:

    • \"Load from references/ directory\"
    • \"Run ./scripts/generate_test_cases.sh\"
    • \"Check the Figma MCP integration\"
    • \"See adding-reference-mindsets.md\"

    This is harmful because the AI will waste time searching for nonexistent artifacts, hallucinate their contents, or stall entirely.

    In mandatory skills, this creates deadlock: the AI can't proceed, and can't skip.

    Instead, every file, tool, or system referenced in a skill must exist.

    If a skill is a template, use explicit placeholders and label them as such.

    ","path":["Skills That Fight the Platform"],"tags":[]},{"location":"blog/2026-02-04-skills-that-fight-the-platform/#conflict-pattern-5-universal-triggers","level":2,"title":"Conflict Pattern 5: Universal Triggers","text":"

    Skills designed to activate on every interaction regardless of relevance.

    Signature:

    • \"Use when starting any conversation\"
    • \"Even a 1% chance means invoke the skill\"
    • \"BEFORE any response or action\"
    • \"Action = task. Check for skills.\"

    Universal triggers override the platform's relevance matching: The AI spends tokens on process overhead instead of the actual task.

    ctx Preserves Relevance

    This is exactly the failure mode ctx exists to mitigate:

    Wasting attention budget on irrelevant process instead of task-specific state.

    Write specific trigger conditions in the skill's description field:

    # Bad\ndescription: \n  \"Use when starting any conversation\"\n\n# Good\ndescription: \n  \"Use after writing code, before commits, or when CI might fail\"\n
    ","path":["Skills That Fight the Platform"],"tags":[]},{"location":"blog/2026-02-04-skills-that-fight-the-platform/#the-litmus-test","level":2,"title":"The Litmus Test","text":"

    Before adding a skill, ask:

    1. Does the platform already do this? If yes, don't restate it.
    2. Does it suppress AI judgment? If yes, it's a jailbreak.
    3. Does it reference real artifacts? If not, fix or remove it.
    4. Does it frame mistakes as moral failure? Reframe as process.
    5. Does it trigger on everything? Narrow the trigger.
    ","path":["Skills That Fight the Platform"],"tags":[]},{"location":"blog/2026-02-04-skills-that-fight-the-platform/#what-good-skills-look-like","level":2,"title":"What Good Skills Look Like","text":"

    Good skills provide project-specific knowledge the platform can't know:

    Good Skill Why It Works \"Run make audit before commits\" Project-specific CI pipeline \"Use cmd.Printf not fmt.Printf\" Codebase convention \"Constitution goes in .context/\" Domain-specific workflow \"JWT tokens need cache invalidation\" Project-specific gotcha

    These extend the system prompt instead of fighting it.

    ","path":["Skills That Fight the Platform"],"tags":[]},{"location":"blog/2026-02-04-skills-that-fight-the-platform/#appendix-bad-skill-fixed-skill","level":2,"title":"Appendix: Bad Skill → Fixed Skill","text":"

    Concrete examples from real projects.

    ","path":["Skills That Fight the Platform"],"tags":[]},{"location":"blog/2026-02-04-skills-that-fight-the-platform/#example-1-overbearing-safety","level":3,"title":"Example 1: Overbearing Safety","text":"
    # Bad\nYou must NEVER proceed without explicit confirmation.\nAny hesitation is a failure of diligence.\n
    # Fixed\nIf an action modifies production data or deletes files,\nask the user to confirm before proceeding.\n
    ","path":["Skills That Fight the Platform"],"tags":[]},{"location":"blog/2026-02-04-skills-that-fight-the-platform/#example-2-redundant-minimalism","level":3,"title":"Example 2: Redundant Minimalism","text":"
    # Bad\nAlways minimize code. If lines increase, reject the change.\n
    # Fixed\nAvoid abstraction unless reuse is clear or complexity is reduced.\n
    ","path":["Skills That Fight the Platform"],"tags":[]},{"location":"blog/2026-02-04-skills-that-fight-the-platform/#example-3-guilt-based-verification","level":3,"title":"Example 3: Guilt-Based Verification","text":"
    # Bad\nClaiming success without running tests is dishonest.\n
    # Fixed\nRun the test suite before reporting success.\n
    ","path":["Skills That Fight the Platform"],"tags":[]},{"location":"blog/2026-02-04-skills-that-fight-the-platform/#example-4-phantom-tooling","level":3,"title":"Example 4: Phantom Tooling","text":"
    # Bad\nRun `./scripts/check_consistency.sh` before commits.\n
    # Fixed\nIf `./scripts/check_consistency.sh` exists, run it before commits.\nOtherwise, skip this step.\n
    ","path":["Skills That Fight the Platform"],"tags":[]},{"location":"blog/2026-02-04-skills-that-fight-the-platform/#example-5-universal-trigger","level":3,"title":"Example 5: Universal Trigger","text":"
    # Bad\nUse at the start of every interaction.\n
    # Fixed\nUse after modifying code that affects authentication or persistence.\n
    ","path":["Skills That Fight the Platform"],"tags":[]},{"location":"blog/2026-02-04-skills-that-fight-the-platform/#the-meta-lesson","level":2,"title":"The Meta-Lesson","text":"

    The system prompt is infrastructure:

    • tested,
    • refined,
    • and maintained

    by the platform team.

    Custom skills are configuration layered on top.

    • Good configuration extends infrastructure.
    • Bad configuration fights it.

    When your skills fight the platform, you get the worst of both worlds:

    Diluted system guidance and inconsistent custom behavior.

    Write skills that teach the AI what it doesn't know. Don't rewrite how it thinks.

    Your AI already has good instincts.

    Give it knowledge, not therapy.

    ","path":["Skills That Fight the Platform"],"tags":[]},{"location":"blog/2026-02-05-you-cant-import-expertise/","level":1,"title":"You Can't Import Expertise","text":"","path":["You Can't Import Expertise"],"tags":[]},{"location":"blog/2026-02-05-you-cant-import-expertise/#why-good-skills-cant-be-copy-pasted","level":2,"title":"Why Good Skills Can't Be Copy-Pasted","text":"

    Jose Alekhinne / 2026-02-05

    Have You Ever Dropped a Well-Crafted Template into a Project and Had It Do... Nothing Useful?

    • The template was thorough,
    • The structure was sound,
    • The advice was correct...

    ...and yet it sat there, inert, while the same old problems kept drifting in.

    I found a consolidation skill online.

    It was well-organized: four files, ten refactoring patterns, eight analysis dimensions, six report templates.

    Professional. Comprehensive. Exactly the kind of thing you'd bookmark and think \"I'll use this.\"

    Then I stopped, and applied ctx's own evaluation framework:

    70% of it was noise!

    This post is about why.

    Templates Are Not Expertise

    Templates describe categories of problems.

    Expertise encodes which problems actually happen, and how often.

    ","path":["You Can't Import Expertise"],"tags":[]},{"location":"blog/2026-02-05-you-cant-import-expertise/#the-skill-looked-great-on-paper","level":2,"title":"The Skill Looked Great on Paper","text":"

    Here is what the consolidation skill offered:

    File Content SKILL.md Entry point: 8 analysis dimensions, workflow, output formats analysis-dimensions.md Detailed criteria for duplication, architecture, quality consolidation-patterns.md 10 refactoring patterns with before/after code report-templates.md 6 output templates: executive summary, roadmap, onboarding
    • It had a scoring system (0-10 per dimension, letter grades A+ through F).
    • It had severity classifications with color-coded emojis. It had bash commands for detection.
    • It even had antipattern warnings.

    By any standard template review, this skill passes.

    It looks like something an expert wrote.

    And that's exactly the trap.

    ","path":["You Can't Import Expertise"],"tags":[]},{"location":"blog/2026-02-05-you-cant-import-expertise/#applying-ear-the-70-20-10-split","level":2,"title":"Applying E/A/R: The 70-20-10 Split","text":"

    In a previous post, I described the E/A/R framework for evaluating skills:

    • Expert: Knowledge that took years to learn. Keep.
    • Activation: Useful triggers or scaffolding. Keep if lightweight.
    • Redundant: Restates what the AI already knows. Delete.

    Target: >70% Expert, <10% Redundant.

    This skill scored the inverse.

    ","path":["You Can't Import Expertise"],"tags":[]},{"location":"blog/2026-02-05-you-cant-import-expertise/#what-was-redundant-70","level":3,"title":"What Was Redundant (~70%)","text":"

    Every code example was Rust. My project is Go.

    The analysis dimensions: duplication detection, architectural structure, code organization, refactoring opportunities... These are things Claude already does when you ask it to review code.

    The skill restated them with more ceremony but no more insight.

    The six report templates were generic scaffolding: Executive Summary, Onboarding Document, Architecture Documentation...

    They are useful if you are writing a consulting deliverable, but not when you are trying to catch convention drift in a >15K-line Go CLI.

    ","path":["You Can't Import Expertise"],"tags":[]},{"location":"blog/2026-02-05-you-cant-import-expertise/#what-does-a-b-in-code-organization-actually-mean","level":2,"title":"What Does a B+ in Code Organization Actually Mean?!","text":"

    The scoring system (0-10 per dimension, letter grades) added ceremony without actionable insight.

    What is a B+? What do I do differently for an A-?

    The skill told the AI what it already knew, in more words.

    ","path":["You Can't Import Expertise"],"tags":[]},{"location":"blog/2026-02-05-you-cant-import-expertise/#what-was-activation-10","level":3,"title":"What Was Activation (~10%)","text":"

    The consolidation checklist (semantics preserved? tests pass? docs updated?) was useful as a gate. But, it's the kind of thing you could inline in three lines.

    The phased roadmap structure was reasonable scaffolding for sequencing work.

    ","path":["You Can't Import Expertise"],"tags":[]},{"location":"blog/2026-02-05-you-cant-import-expertise/#what-was-expert-20","level":3,"title":"What Was Expert (~20%)","text":"

    Three concepts survived:

    1. The Consolidation Decision Matrix: A concrete framework mapping similarity level and instance count to action. \"Exact duplicate, 2+ instances: consolidate immediately.\" \"<3 instances: leave it: duplication is cheaper than wrong abstraction.\" This is the kind of nuance that prevents premature generalization.

    2. The Safe Migration Pattern: Create the new API alongside old, deprecate, migrate incrementally, delete. Straightforward to describe, yet forgettable under pressure.

    3. Debt Interest Rate framing: Categorizing technical debt by how fast it compounds (security vulns = daily, missing tests = per-change, doc gaps = constant low cost). This changes prioritization.

    Three ideas out of four files and 700+ lines. The rest was filler that competed with the AI's built-in capabilities.

    ","path":["You Can't Import Expertise"],"tags":[]},{"location":"blog/2026-02-05-you-cant-import-expertise/#what-the-skill-didnt-know","level":2,"title":"What the Skill Didn't Know","text":"

    AI without Context Is Just a Corpus

    • LLMs are optimized on insanely large corpora.
    • And then they are passed through several layers of human-assisted refinement.
    • The whole process costs millions of dollars.

    Yet, the reality is that no corpus can \"infer\" your project's design, conventions, patterns, habits, history, vision, and deliverables.

    Your project is unique: So should your skills be.

    Here is the part no template can provide:

    ctx's actual drift patterns.

    Before evaluating the skill, I did archaeology. I read through:

    • Blog posts from previous refactoring sessions;
    • The project's learnings and decisions files;
    • Session journals spanning weeks of development.

    What I found was specific:

    Drift Pattern Where How Often Is/Has/Can predicate prefixes 5+ exported methods Every YOLO sprint Magic strings instead of constants 7+ files Gradual accumulation Hardcoded file permissions (0755) 80+ instances Since day one Lines exceeding 80 characters Especially test files Every session Duplicate code blocks Test and non-test code When agent is task-focused

    The generic skill had no check for any of these. It couldn't; because these patterns are specific to this project's conventions, its Go codebase, and its development rhythm.

    The Insight

    The skill's analysis dimensions were about categories of problems.

    What I needed was my *specific* problems.

    ","path":["You Can't Import Expertise"],"tags":[]},{"location":"blog/2026-02-05-you-cant-import-expertise/#the-adapted-skill","level":2,"title":"The Adapted Skill","text":"

    The adapted skill is roughly a quarter of the original's size. It has nine checks, each targeting a known drift pattern:

    1. Predicate naming: rg for Is/Has/Can prefixes
    2. Magic strings: literals that should be constants
    3. Hardcoded permissions: 0755/0644 literals
    4. File size: source files over 300 LOC
    5. TODO/FIXME: constitution violation (move to TASKS.md)
    6. Path construction: string concatenation instead of filepath.Join
    7. Line width: lines exceeding ~80 characters
    8. Duplicate blocks: copy-paste drift, especially in tests
    9. Dead exports: unused public API

    • Every check has a detection command.

    • Every check maps to a specific convention or constitution rule.
    • Every check was discovered through actual project history, not invented from a template.

    The three expert concepts from the original survived:

    • The decision matrix gates when to consolidate vs. when to leave duplication alone;
    • The safe migration pattern guides public API changes;
    • The relationship to other skills (/qa, /verify, /update-docs, ctx drift) prevents overlap.

    Nothing else made it.

    ","path":["You Can't Import Expertise"],"tags":[]},{"location":"blog/2026-02-05-you-cant-import-expertise/#the-deeper-pattern","level":2,"title":"The Deeper Pattern","text":"

    This experience crystallized something I've been circling for weeks:

    You can't import expertise. You have to grow it from your project's own history.

    A skill that says \"check for code duplication\" is not expertise: It's a category.

    Expertise is knowing, in your heart of hearts, that this project accumulates Is* predicate violations during velocity sprints, that this codebase has 80 hardcoded permission literals because nobody made a constant, that this team's test files drift wide because the agent prioritizes getting the task done over keeping the code in shape.

    The Parallel to the 3:1 Ratio

    In Refactoring with Intent, I described the 3:1 ratio: three YOLO sessions followed by one consolidation session.

    The same ratio applies to skills: you need experience in the project before you can write effective guidance for the project.

    Importing a skill on day one is like scheduling a consolidation session before you've written any code.

    ","path":["You Can't Import Expertise"],"tags":[]},{"location":"blog/2026-02-05-you-cant-import-expertise/#the-template-trap","level":2,"title":"The Template Trap","text":"

    Templates are seductive because they feel like progress:

    • You found something
    • It's well-organized
    • It covers the topic
    • It has concrete examples

    But coverage is not relevance.

    A template that covers eight analysis dimensions with Rust examples adds zero value to a Go project with five known drift patterns. Worse, it adds negative value: the AI spends attention defending generic advice instead of noticing project-specific drift.

    This is the attention budget problem again. Every token of generic guidance displaces a token of specific guidance. A 700-line skill that's 70% redundant doesn't just waste 490 lines: it dilutes the 210 lines that matter.

    ","path":["You Can't Import Expertise"],"tags":[]},{"location":"blog/2026-02-05-you-cant-import-expertise/#the-litmus-test","level":2,"title":"The Litmus Test","text":"

    Before dropping any external skill into your project:

    1. Run E/A/R: What percentage is expert knowledge vs. what the AI already knows? If it's less than 50% expert, it's probably not worth the attention cost.

    2. Check the language: Does it use your stack? Generic patterns in the wrong language are noise, not signal.

    3. List your actual drift: Read your own session history, learnings, and post-mortems. What breaks in practice? Does the skill check for those things?

    4. Measure by deletion: After adaptation, how much of the original survives? If you're keeping less than 30%, you would have been faster writing from scratch.

    5. Test against your conventions: Does every check in the skill map to a specific convention or rule in your project? If not, it's generic advice wearing a skill's clothing.

    ","path":["You Can't Import Expertise"],"tags":[]},{"location":"blog/2026-02-05-you-cant-import-expertise/#what-good-adaptation-looks-like","level":2,"title":"What Good Adaptation Looks Like","text":"

    The consolidation skill went from:

    Before After 4 files, 700+ lines 1 file, ~120 lines Rust examples Go-specific rg commands 8 generic dimensions 9 project-specific checks 6 report templates 1 focused output format Scoring system (A+ to F) Findings + priority + suggested fixes \"Check for duplication\" \"Check for Is* predicate prefixes in exported methods\"

    The adapted version is smaller, faster to parse, and catches the things that actually drift in this project.

    That's the difference between a template and a tool.

    If You Remember One Thing from This Post...

    Frameworks travel. Expertise doesn't.

    You can import structures, matrices, and workflows.

    But the checks that matter only grow where the scars are:

    • the conventions that were violated,
    • the patterns that drifted,
    • and the specific ways this codebase accumulates debt.

    This post was written during a consolidation session where the consolidation skill itself became the subject of consolidation. The meta continues.

    ","path":["You Can't Import Expertise"],"tags":[]},{"location":"blog/2026-02-07-the-anatomy-of-a-skill-that-works/","level":1,"title":"The Anatomy of a Skill That Works","text":"

    Update (2026-02-11)

    As of v0.4.0, ctx consolidated sessions into the journal mechanism. References to ctx-save, ctx session, and .context/sessions/ in this post reflect the architecture at the time of writing.

    ","path":["The Anatomy of a Skill That Works"],"tags":[]},{"location":"blog/2026-02-07-the-anatomy-of-a-skill-that-works/#what-20-skill-rewrites-taught-me-about-guiding-ai","level":2,"title":"What 20 Skill Rewrites Taught Me about Guiding AI","text":"

    Jose Alekhinne / 2026-02-07

    Why Do Some Skills Produce Great Results While Others Get Ignored or Produce Garbage?

    I had 20 skills. Most were well-intentioned stubs: a description, a command to run, and a wish for the best.

    Then I rewrote all of them in a single session. This is what I learned.

    In Skills That Fight the Platform, I described what skills should not do. In You Can't Import Expertise, I showed why templates fail. This post completes the trilogy: the concrete patterns that make a skill actually work.

    ","path":["The Anatomy of a Skill That Works"],"tags":[]},{"location":"blog/2026-02-07-the-anatomy-of-a-skill-that-works/#the-starting-point","level":2,"title":"The Starting Point","text":"

    Here is what a typical skill looked like before the rewrite:

    ---\nname: ctx-save\ndescription: \"Save session snapshot.\"\n---\n\nSave the current context state to `.context/sessions/`.\n\n## Execution\n\nctx session save $ARGUMENTS\n\nReport the saved session file path to the user.\n

    Seven lines of body. A vague description. No guidance on when to use it, when not to, what the command actually accepts, or how to tell if it worked.

    As a result, the agent would either never trigger the skill (the description was too vague), or trigger it and produce shallow output (no examples to calibrate quality).

    A skill without boundaries is just a suggestion.

    More precisely: the most effective boundary I found was a quality gate that runs before execution, not during it.

    ","path":["The Anatomy of a Skill That Works"],"tags":[]},{"location":"blog/2026-02-07-the-anatomy-of-a-skill-that-works/#the-pattern-that-emerged","level":2,"title":"The Pattern That Emerged","text":"

    After rewriting 20 skills, a repeatable anatomy emerged (independent of the skill's purpose). Not every skill needs every section, but the effective ones share the same bones:

    Section What It Does Before X-ing Pre-flight checks; prevents premature execution When to Use Positive triggers; narrows activation When NOT to Use Negative triggers; prevents misuse Usage Examples Invocation patterns the agent can pattern-match Process/Execution What to do; commands, steps, flags Good/Bad Examples Desired vs undesired output; sets boundaries Quality Checklist Verify before claiming completion

    I realized the first three sections matter more than the rest; because a skill with great execution steps but no activation guidance is like a manual for a tool nobody knows they have.

    Anti-Pattern: The Perfect Execution Trap

    A skill with detailed execution steps but no activation guidance will fail more often than a vague skill because it executes confidently at the wrong time.

    ","path":["The Anatomy of a Skill That Works"],"tags":[]},{"location":"blog/2026-02-07-the-anatomy-of-a-skill-that-works/#lesson-1-quality-gates-prevent-premature-execution","level":2,"title":"Lesson 1: Quality Gates Prevent Premature Execution","text":"

    The single most impactful addition was a \"Before X-ing\" section at the top of each skill. Not process steps; pre-flight checks.

    ## Before Recording\n\n1. **Check if it belongs here**: is this learning specific\n   to this project, or general knowledge?\n2. **Check for duplicates**: search LEARNINGS.md for similar\n   entries\n3. **Gather the details**: identify context, lesson, and\n   application before recording\n
    • Without this gate, the agent would execute immediately on trigger.
    • With it, the agent pauses to verify preconditions.

    The difference is dramatic: instead of shallow, reflexive execution, you get considered output.

    Readback

    For the astute readers, the aviation parallel is intentional:

    Pilots do not skip the pre-flight checklist because they have flown before.

    The checklist exists precisely because the stakes are high enough that \"I know what I'm doing\" is not sufficient.

    ","path":["The Anatomy of a Skill That Works"],"tags":[]},{"location":"blog/2026-02-07-the-anatomy-of-a-skill-that-works/#lesson-2-when-not-to-use-is-not-optional","level":2,"title":"Lesson 2: \"When NOT to Use\" Is Not Optional","text":"

    Every skill had a \"When to Use\" section. Almost none had \"When NOT to Use\". This is a problem.

    AI agents are biased toward action. Given a skill that says \"use when journal entries need enrichment\", the agent will find reasons to enrich.

    Without explicit negative triggers, over-activation is not a bug; it is the default behavior.

    Some examples of negative triggers that made a real difference:

    Skill Negative Trigger ctx-reflect \"When the user is in flow; do not interrupt\" ctx-save \"After trivial changes; a typo does not need a snapshot\" prompt-audit \"Unsolicited; only when the user invokes it\" qa \"Mid-development when code is intentionally incomplete\"

    These are not just nice-to-have. They are load-bearing.

    Without them, the agent will trigger the skill at the wrong time, produce unwanted output, and erode the user's trust in the skill system.

    ","path":["The Anatomy of a Skill That Works"],"tags":[]},{"location":"blog/2026-02-07-the-anatomy-of-a-skill-that-works/#lesson-3-examples-set-boundaries-better-than-rules","level":2,"title":"Lesson 3: Examples Set Boundaries Better than Rules","text":"

    The most common failure mode of thin skills was not wrong behavior but vague behavior. The agent would do roughly the right thing, but at a quality level that required human cleanup.

    Rules like \"be constructive, not critical\" are too abstract. What does \"constructive\" look like in a prompt audit report? The agent has to guess.

    Good/bad example pairs avoid guessing:

    ### Good Example\n\n> This session implemented the cooldown mechanism for\n> `ctx agent`. We discovered that `$PPID` in hook context\n> resolves to the Claude Code PID.\n>\n> I'd suggest persisting:\n> - **Learning**: `$PPID` resolves to Claude Code PID\n>   `ctx add learning --context \"...\" --lesson \"...\"`\n> - **Task**: mark \"Add cooldown\" as done\n\n### Bad Examples\n\n* \"*We did some stuff. Want me to save it?*\"\n* Listing 10 trivial learnings that are general knowledge\n* Persisting without asking the user first\n

    The good example shows the exact format, level of detail, and command syntax. The bad examples show where the boundary is.

    Together, they define a quality corridor without prescribing every word.

    Rules describe. Examples demonstrate.

    ","path":["The Anatomy of a Skill That Works"],"tags":[]},{"location":"blog/2026-02-07-the-anatomy-of-a-skill-that-works/#lesson-4-skills-are-read-by-agents-not-humans","level":2,"title":"Lesson 4: Skills Are Read by Agents, Not Humans","text":"

    This seems obvious, but it has non-obvious consequences. During the rewrite, one skill included guidance that said \"use a blog or notes app\" for general knowledge that does not belong in the project's learnings file.

    The agent does not have a notes app. It does not browse the web to find one. This instruction, clearly written for a human audience, was dead weight in a skill consumed by an AI.

    Skills Are for the Agents

    Every sentence in a skill should be actionable by the agent.

    If the guidance requires human judgment or human tools, it belongs in documentation, not in a skill.

    The corollary: command references must be exact.

    A skill that says \"save it somewhere\" is useless.

    A skill that says ctx add learning --context \"...\" --lesson \"...\" --application \"...\" is actionable.

    The agent can pattern-match and fill in the blanks.

    Litmus test: If a sentence starts with \"you could...\" or assumes external tools, it does not belong in a skill.

    ","path":["The Anatomy of a Skill That Works"],"tags":[]},{"location":"blog/2026-02-07-the-anatomy-of-a-skill-that-works/#lesson-5-the-description-field-is-the-trigger","level":2,"title":"Lesson 5: The Description Field Is the Trigger","text":"

    This was covered in Skills That Fight the Platform, but the rewrite reinforced it with data. Several skills had good bodies but vague descriptions:

    # Before: vague, activates too broadly or not at all\ndescription: \"Show context summary.\"\n\n# After: specific, activates at the right time\ndescription: \"Show context summary. Use at session start or\n  when unclear about current project state.\"\n

    The description is not a title. It is the activation condition.

    The platform's skill matching reads this field to decide whether to surface the skill. A vague description means the skill either never triggers or triggers when it should not.

    ","path":["The Anatomy of a Skill That Works"],"tags":[]},{"location":"blog/2026-02-07-the-anatomy-of-a-skill-that-works/#lesson-6-flag-tables-beat-prose","level":2,"title":"Lesson 6: Flag Tables Beat Prose","text":"

    Most skills wrap CLI tools. The thin versions described flags in prose, if at all. The rewritten versions use tables:

    | Flag        | Short | Default | Purpose                  |\n|-------------|-------|---------|--------------------------|\n| `--limit`   | `-n`  | 20      | Maximum sessions to show |\n| `--project` | `-p`  | \"\"      | Filter by project name   |\n| `--full`    |       | false   | Show complete content    |\n

    Tables are scannable, complete, and unambiguous.

    The agent can read them faster than parsing prose, and they serve as both reference and validation: If the agent invokes a flag not in the table, something is wrong.

    ","path":["The Anatomy of a Skill That Works"],"tags":[]},{"location":"blog/2026-02-07-the-anatomy-of-a-skill-that-works/#lesson-7-template-drift-is-a-real-maintenance-burden","level":2,"title":"Lesson 7: Template Drift Is a Real Maintenance Burden","text":"

    // TODO: this has changed; we deploy from the marketplace; update it. // at least add an admonition saying things are different now.

    ctx deploys skills through templates (via ctx init). Every skill exists in two places: the live version (.claude/skills/) and the template (internal/assets/claude/skills/).

    They must match.

    During the rewrite, every skill update required editing both files and running diff to verify. This sounds trivial, but across 16 template-backed skills, it was the most error-prone part of the process.

    Template drift is dangerous because it creates false confidence: the agent appears to follow rules that no longer exist.

    The lesson: if your skills have a deployment mechanism, build the drift check into your workflow. We added a row to the update-docs skill's mapping table specifically for this:

    | `internal/assets/claude/skills/` | `.claude/skills/` (live) |\n

    Intentional differences (like project-specific scripts in the live version but not the template) should be documented, not discovered later as bugs.

    ","path":["The Anatomy of a Skill That Works"],"tags":[]},{"location":"blog/2026-02-07-the-anatomy-of-a-skill-that-works/#the-rewrite-scorecard","level":2,"title":"The Rewrite Scorecard","text":"Metric Before After Average skill body ~15 lines ~80 lines Skills with quality gate 0 20 Skills with \"When NOT\" 0 20 Skills with examples 3 20 Skills with flag tables 2 12 Skills with checklist 0 20

    More lines, but almost entirely Expert content (per the E/A/R framework). No personality roleplay, no redundant guidance, no capability lists. Just project-specific knowledge the platform does not have.

    ","path":["The Anatomy of a Skill That Works"],"tags":[]},{"location":"blog/2026-02-07-the-anatomy-of-a-skill-that-works/#the-meta-lesson","level":2,"title":"The Meta-Lesson","text":"

    The previous two posts argued that skills should provide knowledge, not personality; that they should complement the platform, not fight it; that they should grow from project history, not imported templates.

    This post adds the missing piece: structure.

    A skill without a structure is a wish.

    A skill with quality gates, negative triggers, examples, and checklists is a tool: the difference is not the content; it is whether the agent can reliably execute it without human intervention.

    Skills Are Interfaces

    Good skills are not instructions. They are contracts:

    • They specify preconditions, postconditions, and boundaries.
    • They show what success looks like and what failure looks like.
    • They trust the agent's intelligence but do not trust its assumptions.

    If You Remember One Thing from This Post...

    Skills that work have bones, not just flesh.

    Quality gates, negative triggers, examples, and checklists are the skeleton. The domain knowledge is the muscle.

    Without the skeleton, the muscle has nothing to attach to.

    This post was written during the same session that rewrote all 22 skills. The skill-creator skill was updated to encode these patterns. The meta continues.

    ","path":["The Anatomy of a Skill That Works"],"tags":[]},{"location":"blog/2026-02-08-not-everything-is-a-skill/","level":1,"title":"Not Everything Is a Skill","text":"

    Update (2026-02-11)

    As of v0.4.0, ctx consolidated sessions into the journal mechanism. References to /ctx-save, .context/sessions/, and session auto-save in this post reflect the architecture at the time of writing.

    ","path":["Not Everything Is a Skill"],"tags":[]},{"location":"blog/2026-02-08-not-everything-is-a-skill/#what-a-codebase-audit-taught-me-about-restraint","level":2,"title":"What a Codebase Audit Taught Me about Restraint","text":"

    Jose Alekhinne / 2026-02-08

    When You Find a Useful Prompt, What Do You Do with It?

    My instinct was to make it a skill.

    I had just spent three posts explaining how to build skills that work. Naturally, the hammer wanted nails.

    Then I looked at what I was holding and realized: this is not a nail.

    ","path":["Not Everything Is a Skill"],"tags":[]},{"location":"blog/2026-02-08-not-everything-is-a-skill/#the-audit","level":2,"title":"The Audit","text":"

    I wanted to understand how I use ctx:

    • Where the friction is;
    • What works, what drifts;
    • What I keep doing manually that could be automated.

    So I wrote a prompt that spawned eight agents to analyze the codebase from different angles:

    Agent Analysis 1 Extractable patterns from session history 2 Documentation drift (godoc, inline comments) 3 Maintainability (large functions, misplaced code) 4 Security review (CLI-specific surface) 5 Blog theme discovery 6 Roadmap and value opportunities 7 User-facing documentation gaps 8 Agent team strategies for future sessions

    The prompt was specific:

    • read-only agents,
    • structured output format,
    • concrete file references,
    • ranked recommendations.

    It ran for about 20 minutes and produced eight Markdown reports.

    The reports were good: Not perfect, but actionable.

    What mattered was not the speed. It was that the work could be explored without committing to any single outcome.

    They surfaced a stale doc.go referencing a subcommand that was never built.

    They found 311 build-then-test sequences I could reduce to a single make check.

    They identified that 42% of my sessions start with \"do you remember?\", which is a lot of repetition for something a skill could handle.

    I had findings. I had recommendations. I had the instinct to automate.

    And then... I stopped.

    ","path":["Not Everything Is a Skill"],"tags":[]},{"location":"blog/2026-02-08-not-everything-is-a-skill/#the-question","level":2,"title":"The Question","text":"

    The natural next step was to wrap the audit prompt as /ctx-audit: a skill you invoke periodically to get a health check. It fits the pattern:

    • It has a clear trigger.
    • It produces structured output.

    But I had just spent a week writing about what makes skills work, and the criteria I established argued against it.

    From The Anatomy of a Skill That Works:

    \"A skill without boundaries is just a suggestion.\"

    From You Can't Import Expertise:

    \"Frameworks travel, expertise doesn't.\"

    From Skills That Fight the Platform:

    \"You are the guest, not the host.\"

    The audit prompt fails all three tests:

    Criterion Audit prompt Good skill Frequency Quarterly, maybe Daily or weekly Stability Tweaked every time Consistent invocation Scope Bespoke, 8 parallel agents Single focused action Trigger \"I feel like auditing\" Clear, repeatable event

    Skills are contracts. Contracts need stable terms.

    A prompt I will rewrite every time I use it is not a contract. It is a conversation starter.

    ","path":["Not Everything Is a Skill"],"tags":[]},{"location":"blog/2026-02-08-not-everything-is-a-skill/#recipes-vs-skills","level":2,"title":"Recipes vs Skills","text":"

    The distinction that emerged:

    Skill Recipe Invocation /slash-command Copy-paste from a doc Frequency High (daily, weekly) Low (quarterly, ad hoc) Stability Fixed contract Adapted each time Scope One focused action Multi-step orchestration Audience The agent The human (who then prompts) Lives in .claude/skills/hack/ or docs/ Attention cost Loaded into context on match Zero until needed

    Recipes can later graduate into skills, but only after repetition proves stability.

    That last row matters. Skills consume the attention budget every time the platform considers activating them.

    A skill that triggers quarterly but gets evaluated on every prompt is pure waste: attention spent on something that will say \"When NOT to Use: now\" 99% of the time.

    Runbooks have zero attention cost. They sit in a Markdown file until a human decides to use them.

    • The human provides the judgment about timing.
    • The prompt provides the structure.

    The Attention Budget Applies to Skills Too

    Every skill in .claude/skills/ is a standing claim on the context window. The platform evaluates skill descriptions against every user prompt to decide whether to activate.

    Twenty focused skills are fine. Thirty might be fine. But each one added reduces the headroom available for actual work.

    Recipes are skills that opted out of the attention tax.

    ","path":["Not Everything Is a Skill"],"tags":[]},{"location":"blog/2026-02-08-not-everything-is-a-skill/#what-the-audit-actually-produced","level":2,"title":"What the Audit Actually Produced","text":"

    The audit was not wasted. It was a planning exercise that generated concrete tasks:

    Finding Action 42% of sessions start with memory check Task: /ctx-remember skill (this one is a skill; it is daily) Auto-save stubs are empty Task: enhance /ctx-save with richer summaries 311 raw build-test sequences Task: make check target Stale recall/doc.go lists nonexistent serve Task: fix the doc.go 120 commit sequences disconnected from context Task: /ctx-commit workflow
    • Some findings became skills;
    • Some became Makefile targets;
    • Some became one-line doc fixes.

    The audit did not prescribe the artifact type: The findings did.

    The audit is the input. Skills are one possible output. Not the only one.

    ","path":["Not Everything Is a Skill"],"tags":[]},{"location":"blog/2026-02-08-not-everything-is-a-skill/#the-audit-prompt","level":2,"title":"The Audit Prompt","text":"

    Here is the exact prompt I used, for those who are curious.

    This is not a template: It worked because it was written against this codebase, at this moment, with specific goals in mind:

    I want you to create an agent team to audit this codebase. Save each report as\na separate Markdown file under `./ideas/` (or another directory if you prefer).\n\nUse read-only agents (subagent_type: Explore) for all analyses. No code changes.\n\nFor each report, use this structure:\n- Executive Summary (2-3 sentences + severity table)\n- Findings (grouped, with file:line references)\n- Ranked Recommendations (high/medium/low priority)\n- Methodology (what was examined, how)\n\nKeep reports actionable. Every finding should suggest a concrete fix or next step.\n\n## Analyses to Run\n\n### 1. Extractable Patterns (*session mining*)\nSearch session JSONL files, journal entries, and task archives for repetitive\nmulti-step workflows. Count frequency of bash command sequences, slash command\nusage, and recurring user prompts. Identify patterns that could become skills\nor scripts. Cross-reference with existing skills to find coverage gaps.\nOutput: ranked list of automation opportunities with frequency data.\n\n### 2. Documentation Drift (*godoc + inline*)\nCompare every doc.go against its package's actual exports and behavior. Check\ninline godoc comments on exported functions against their implementations.\nScan for stale TODO/FIXME/HACK comments. Check that package-level comments match\npackage names.\nOutput: drift items ranked by severity with exact file:line references.\n\n### 3. Maintainability\nLook for:\n- functions longer than 80 lines with clear split points\n- switch blocks with more than 5 cases that could be table-driven\n- inline comments like \"step 1\", \"step 2\" that indicate a block wants to be a function\n- files longer than 400 lines\n- flat packages that could benefit from sub-packages\n- functions that appear misplaced in their file\n\nDo NOT flag things that are fine as-is just because they could theoretically\nbe different.\nOutput: concrete refactoring suggestions, not style nitpicks.\n\n### 4. Security Review\nThis is a CLI app. 
Focus on CLI-relevant attack surface, not web OWASP:\n- file path traversal\n- command injection\n- symlink following when writing to `.context/`\n- permission handling\n- sensitive data in outputs\n\nOutput: findings with severity ratings and plausible exploit scenarios.\n\n### 5. Blog Theme Discovery\nRead existing blog posts for style and narrative voice. Analyze git history,\nrecent session discussions, and `DECISIONS.md` for story arcs worth writing about.\nSuggest 3-5 blog post themes with:\n- title\n- angle\n- target audience\n- key commits or sessions to reference\n- a 2-sentence pitch\n\nPrioritize themes that build a coherent narrative across posts.\n\n### 6. Roadmap and Value Opportunities\nBased on current features, recent momentum, and gaps found in other analyses,\nidentify the highest-value improvements. Consider user-facing features,\ndeveloper experience, integration opportunities, and low-hanging fruit.\nOutput: prioritized list with rough effort and impact estimates.\n\n### 7. User-Facing Documentation\nEvaluate README, help text, and user docs. Suggest improvements structured as\nuse-case pages: the problem, how ctx solves it, a typical workflow, and gotchas.\nIdentify gaps where a user would get stuck without reading source code.\nOutput: documentation gaps with suggested page outlines.\n\n### 8. Agent Team Strategies\nBased on the codebase structure, suggest 2-3 agent team configurations for\nupcoming work sessions. For each, include:\n- team composition (roles and agent types)\n- task distribution strategy\n- coordination approach\n- the kinds of work it suits\n

    Avoid Generic Advice

    Suggestions that are not grounded in a project's actual structure, history, and workflows are worse than useless:

    They create false confidence.

    If an analysis cannot point to concrete files, commits, sessions, or patterns, it should say \"no finding\" instead of inventing best practices.

    ","path":["Not Everything Is a Skill"],"tags":[]},{"location":"blog/2026-02-08-not-everything-is-a-skill/#the-deeper-pattern","level":2,"title":"The Deeper Pattern","text":"

    This is part of a pattern I keep rediscovering:

    The urge to automate is not the same as the need to automate:

    • The 3:1 ratio taught me that not every session should be a YOLO sprint.
    • The E/A/R framework taught me that not every template is worth importing. Now the audit is teaching me that not every useful prompt is worth institutionalizing.

    The common thread is restraint:

    • Knowing when to stop.
    • Recognizing that the cost of automation is not just the effort to build it.

    The cost is the ongoing attention tax of maintaining it, the context it consumes, and the false confidence it creates when it drifts.

    An entry in hack/runbooks/codebase-audit.md is honest about what it is:

    A prompt I wrote once, improved once, and will adapt again next time:

    • It does not pretend to be a reliable contract.
    • It does not claim attention budget.
    • It does not drift silently.

    The Automation Instinct

    When you find a useful prompt, the instinct is to institutionalize it. Resist.

    Ask first: will I use this the same way next time?

    If yes, it is a skill. If no, it is a recipe. If you are not sure, it is a recipe until proven otherwise.

    ","path":["Not Everything Is a Skill"],"tags":[]},{"location":"blog/2026-02-08-not-everything-is-a-skill/#this-mindset-in-the-context-of-ctx","level":2,"title":"This Mindset in the Context of ctx","text":"

    ctx is a tool that gives AI agents persistent memory. Its purpose is automation: reducing the friction of context loading, session recall, decision tracking.

    But automation has boundaries, and knowing where those boundaries are is as important as pushing them forward.

    The skills system is for high-frequency, stable workflows.

    The recipes, the journal entries, the session dumps in .context/sessions/: those are for everything else.

    Not everything needs to be a slash command. Some things are better as Markdown files you read when you need them.

    The goal of ctx is not to automate everything: It is to automate the right things and to make the rest easy to find when you need it.

    If You Remember One Thing from This Post...

    The best automation decision is sometimes not to automate.

    A runbook in a Markdown file costs nothing until you use it.

    A skill costs attention on every prompt, whether it fires or not.

    Automate the daily. Document the periodic. Forget the rest.

    This post was written during the session that produced the codebase audit reports and distilled the prompt into hack/runbooks/codebase-audit.md. The audit generated seven tasks, one Makefile target, and zero new skills. The meta continues.

    See also: Code Is Cheap. Judgment Is Not.: the capstone that threads this post's restraint argument into the broader case for why judgment, not production, is the bottleneck.

    ","path":["Not Everything Is a Skill"],"tags":[]},{"location":"blog/2026-02-09-defense-in-depth-securing-ai-agents/","level":1,"title":"Defense in Depth: Securing AI Agents","text":"","path":["Defense in Depth: Securing AI Agents"],"tags":[]},{"location":"blog/2026-02-09-defense-in-depth-securing-ai-agents/#when-markdown-is-not-a-security-boundary","level":2,"title":"When Markdown Is Not a Security Boundary","text":"

    Jose Alekhinne / 2026-02-09

    What Happens When Your AI Agent Runs Overnight and Nobody Is Watching?

    It follows instructions: That is the problem.

    Not because it is malicious. Because it is controllable.

    It follows instructions from context, and context can be poisoned.

    I was writing the autonomous loops recipe for ctx: the guide for running an AI agent in a loop overnight, unattended, working through tasks while you sleep. The original draft had a tip at the bottom:

    Use CONSTITUTION.md for guardrails. Tell the agent \"never delete tests\" and it usually won't.

    Then I read that sentence back and realized: that is wishful thinking.

    ","path":["Defense in Depth: Securing AI Agents"],"tags":[]},{"location":"blog/2026-02-09-defense-in-depth-securing-ai-agents/#the-realization","level":2,"title":"The Realization","text":"

    CONSTITUTION.md is a Markdown file. The agent reads it at session start alongside everything else in .context/. It is one source of instructions in a context window that also contains system prompts, project files, conversation history, tool outputs, and whatever the agent fetched from the internet.

    An attacker who can inject content into any of those sources can redirect the agent's behavior. And \"attacker\" does not always mean a person with malicious intent. It can be:

    Vector Example A dependency A malicious npm package with instructions in its README or error output A URL Documentation page with embedded adversarial instructions A project file A contributor who adds instructions to CLAUDE.md or .cursorrules The agent itself In an autonomous loop, the agent modifies its own config between iterations A command output An error message containing instructions the agent interprets and follows

    That last vector is the one that kept me up at night (literally!):

    In an autonomous loop, the agent modifies files as part of its job.

    If it modifies its own configuration files, the next iteration loads the modified config.

    • No human reviews it.
    • No diff is shown.
    • The agent that starts iteration N+1 is running with rules written by iteration N.

    The agent can rewrite its own guardrails.

    ","path":["Defense in Depth: Securing AI Agents"],"tags":[]},{"location":"blog/2026-02-09-defense-in-depth-securing-ai-agents/#five-layers-each-with-a-hole","level":2,"title":"Five Layers, Each with a Hole","text":"

    That's five nested layers of Swiss cheese. Alone, each of them has large holes. Together, they create a boundary.

    What followed was a week of peeling back assumptions:

    Every defense I examined had a bypass, and the bypass was always the same shape: the defense was enforced at a level the agent could reach.

    ","path":["Defense in Depth: Securing AI Agents"],"tags":[]},{"location":"blog/2026-02-09-defense-in-depth-securing-ai-agents/#layer-1-soft-instructions-probabilistic","level":3,"title":"Layer 1: Soft Instructions (Probabilistic)","text":"

    CONSTITUTION.md, the Agent Playbook, system prompts: These tell the agent what to do.

    The agent usually follows them.

    \"Usually\" is the keyword here.

    The hole: Prompt injection:

    • A sufficiently crafted payload overrides soft instructions.
    • Long context windows dilute attention on rules stated early.
    • Edge cases where instructions are ambiguous get resolved in unpredictable ways.

    Verdict: Necessary. Not sufficient. Good for the common case. Never trust it as a security boundary.

    ","path":["Defense in Depth: Securing AI Agents"],"tags":[]},{"location":"blog/2026-02-09-defense-in-depth-securing-ai-agents/#layer-2-application-controls-deterministic-at-runtime-mutable-across-iterations","level":3,"title":"Layer 2: Application Controls (Deterministic at Runtime, Mutable across Iterations)","text":"

    Permission allowlists in .claude/settings.local.json:

    {\n  \"permissions\": {\n    \"allow\": [\n      \"Bash(make:*)\",\n      \"Bash(go:*)\",\n      \"Bash(git:*)\",\n      \"Read\",\n      \"Write\",\n      \"Edit\"\n    ]\n  }\n}\n

    If rm, curl, sudo, or docker are not in the allowlist, the agent cannot invoke them. This is deterministic.

    The application enforces it regardless of what any prompt says.

    The hole: The agent can modify the allowlist itself:

    • It has Write permission.
    • The allowlist lives in a file.
    • The agent writes to the file.
    • The next iteration loads the modified allowlist.

    The application enforces the rules, but the application reads the rules from files the agent can write.

    Verdict: Strong first layer. Must be combined with self-modification prevention.

    ","path":["Defense in Depth: Securing AI Agents"],"tags":[]},{"location":"blog/2026-02-09-defense-in-depth-securing-ai-agents/#layer-3-os-level-isolation-unbypassable","level":3,"title":"Layer 3: OS-Level Isolation (Unbypassable)","text":"

    This is where the defenses stop having holes in the same shape.

    The operating system enforces access controls that no application-level trick can override. An unprivileged user cannot read files owned by root. A process without CAP_NET_RAW cannot open raw sockets. These are kernel boundaries.

    Control What it stops Dedicated unprivileged user Privilege escalation, sudo, group-based access Filesystem permissions Lateral movement to other projects, system config Immutable config files Self-modification of guardrails between iterations

    Make the agent's instruction files read-only: CLAUDE.md, .claude/settings.local.json, .context/CONSTITUTION.md. Own them as a different user, or mark them immutable with chattr +i on Linux.

    The hole: Actions within the agent's legitimate scope:

    • If the agent has write access to source code (which it needs), it can introduce vulnerabilities in the code itself.
    • You cannot prevent this without removing the agent's ability to do its job.

    Verdict: Essential. This is the layer that makes Layers 1 and 2 trustworthy.

    OS-level isolation does not make the agent safe; it makes the other layers meaningful.

    ","path":["Defense in Depth: Securing AI Agents"],"tags":[]},{"location":"blog/2026-02-09-defense-in-depth-securing-ai-agents/#layer-4-network-controls","level":3,"title":"Layer 4: Network Controls","text":"

    An agent that cannot reach the internet cannot exfiltrate data.

    It also cannot ingest new instructions mid-loop from external documents, error pages, or hostile content.

    # Container with no network\ndocker run --network=none ...\n\n# Or firewall rules allowing only package registries\niptables -A OUTPUT -d registry.npmjs.org -j ACCEPT\niptables -A OUTPUT -d proxy.golang.org -j ACCEPT\niptables -A OUTPUT -j DROP\n
    • If the agent genuinely does not need the network, disable it entirely.
    • If it needs to fetch dependencies, allow specific registries and block everything else.

    The hole: None, if the agent does not need the network.

    The tradeoff is that many real workloads need dependency resolution, so a full airgap requires pre-populated caches.

    ","path":["Defense in Depth: Securing AI Agents"],"tags":[]},{"location":"blog/2026-02-09-defense-in-depth-securing-ai-agents/#layer-5-infrastructure-isolation","level":3,"title":"Layer 5: Infrastructure Isolation","text":"

    The strongest boundary is a separate machine.

    The moment you stop arguing about prompts and start arguing about kernels, you are finally doing security.

    docker run --rm \\\n  --network=none \\\n  --cap-drop=ALL \\\n  --memory=4g \\\n  --cpus=2 \\\n  -v /path/to/project:/workspace \\\n  -w /workspace \\\n  your-dev-image \\\n  ./loop.sh\n

    Never Mount the Docker Socket

    Do not mount /var/run/docker.sock, like, ever.

    An agent with socket access can spawn sibling containers with full host access, effectively escaping the sandbox.

    This is not theoretical: the Docker socket grants root-equivalent access to the host.

    Use rootless Docker or Podman to eliminate this escalation path entirely.

    Virtual machines are even stronger: The guest kernel has no visibility into the host OS. No shared folders, no filesystem passthrough, no SSH keys to other machines.

    ","path":["Defense in Depth: Securing AI Agents"],"tags":[]},{"location":"blog/2026-02-09-defense-in-depth-securing-ai-agents/#the-pattern","level":2,"title":"The Pattern","text":"

    Each layer is straightforward: The strength is in the combination:

    Layer Implementation What it stops Soft instructions CONSTITUTION.md Common mistakes (probabilistic) Application allowlist .claude/settings.local.json Unauthorized commands (deterministic within runtime) Immutable config chattr +i on config files Self-modification between iterations Unprivileged user Dedicated user, no sudo Privilege escalation Container --cap-drop=ALL --network=none Host escape, data exfiltration Resource limits --memory=4g --cpus=2 Resource exhaustion

    No layer is redundant. Each one catches what the others miss:

    • The soft instructions handle the 99% case: \"don't delete tests.\"
    • The allowlist prevents the agent from running commands it should not.
    • The immutable config prevents the agent from modifying the allowlist.
    • The unprivileged user prevents the agent from removing the immutable flag.
    • The container prevents the agent from reaching anything outside its workspace.
    • The resource limits prevent the agent from consuming all system resources.

    Remove any one layer and there is an attack path through the remaining ones.

    ","path":["Defense in Depth: Securing AI Agents"],"tags":[]},{"location":"blog/2026-02-09-defense-in-depth-securing-ai-agents/#common-mistakes-i-see","level":2,"title":"Common Mistakes I See","text":"

    These are real patterns, not hypotheticals:

    \"I'll just use --dangerously-skip-permissions.\" This disables Layer 2 entirely. Without Layers 3 through 5, you have no protection at all. The flag means what it says. If you ever think you need it, think thrice: you probably don't. But if you truly must use it, only use it inside a properly isolated VM (not even a container: a \"VM\").

    \"The agent is sandboxed in Docker.\" A Docker container with the Docker socket mounted, running as root, with --privileged, and full network access is not sandboxed. It is a root shell with extra steps.

    \"I reviewed CLAUDE.md, it's fine.\" You reviewed it before the loop started. The agent modified it during iteration 3. Iteration 4 loaded the modified version. Unless the file is immutable, your review is futile.

    \"The agent only has access to this one project.\" Does the project directory contain .env files? SSH keys? API tokens? A .git/config with push access to a remote? Filesystem isolation means isolating what is in the directory too.

    ","path":["Defense in Depth: Securing AI Agents"],"tags":[]},{"location":"blog/2026-02-09-defense-in-depth-securing-ai-agents/#the-connection-to-context-engineering","level":2,"title":"The Connection to Context Engineering","text":"

    This is the same lesson I keep rediscovering, wearing different clothes.

    In The Attention Budget, I wrote about how every token competes for the AI's focus. Security instructions in CONSTITUTION.md are subject to the same budget pressure: if the context window is full of code, error messages, and tool outputs, the security rules stated at the top get diluted.

    In Skills That Fight the Platform, I wrote about how custom instructions can conflict with the AI's built-in behavior. Security rules have the same problem: telling an agent \"never run curl\" in Markdown while giving it unrestricted shell access creates a contradiction: The agent resolves contradictions unpredictably. The agent will often pick the path of least resistance to attain its objective function. And, trust me, agents can get far more creative than the best red-teamer you know.

    In You Can't Import Expertise, I wrote about how generic templates fail because they do not encode project-specific knowledge. Generic security advice fails the same way: \"Don't exfiltrate data\" is a category; blocking outbound network access is a control.

    The pattern across all of these: Soft instructions are useful for the common case. Hard boundaries are required for security.

    Know which is which.

    ","path":["Defense in Depth: Securing AI Agents"],"tags":[]},{"location":"blog/2026-02-09-defense-in-depth-securing-ai-agents/#the-checklist","level":2,"title":"The Checklist","text":"

    Before running an unattended AI agent:

    • Agent runs as a dedicated unprivileged user (no sudo, no docker group)
    • Agent's config files are immutable or owned by a different user
    • Permission allowlist restricts tools to the project's toolchain
    • Container drops all capabilities (--cap-drop=ALL)
    • Docker socket is NOT mounted
    • Network is disabled or restricted to specific domains
    • Resource limits are set (memory, CPU, disk)
    • No SSH keys, API tokens, or credentials are accessible
    • Project directory does not contain .env or secrets files
    • Iteration cap is set (--max-iterations)

    This checklist lives in the Agent Security reference alongside the full threat model and detailed guidance for each layer.

    ","path":["Defense in Depth: Securing AI Agents"],"tags":[]},{"location":"blog/2026-02-09-defense-in-depth-securing-ai-agents/#what-changed-in-ctx","level":2,"title":"What Changed in ctx","text":"

    The autonomous loops recipe now has a full permissions and isolation section instead of a one-line tip about CONSTITUTION.md. It covers both the explicit allowlist approach and the --dangerously-skip-permissions flag, with honest guidance about when each is appropriate.

    It also has an OS-level isolation table that is not optional: unprivileged users, filesystem permissions, containers, VMs, network controls, resource limits, and self-modification prevention.

    The Agent Security page consolidates the threat model and defense layers into a standalone reference.

    These are not theoretical improvements. They are the minimum responsible guidance for a tool that helps people run AI agents overnight.

    If You Remember One Thing from This Post...

    Markdown is not a security boundary.

    CONSTITUTION.md is a nudge. An allowlist is a gate.

    An unprivileged user in a network-isolated container is a wall.

    Use all three. Trust only the wall.

    This post was written during the session that added permissions, isolation, and self-modification prevention to the autonomous loops recipe. The security guidance started as a single tip and grew into two documents. The meta continues.

    ","path":["Defense in Depth: Securing AI Agents"],"tags":[]},{"location":"blog/2026-02-12-how-deep-is-too-deep/","level":1,"title":"How Deep Is Too Deep?","text":"","path":["How Deep Is Too Deep?"],"tags":[]},{"location":"blog/2026-02-12-how-deep-is-too-deep/#when-master-ml-is-the-wrong-next-step","level":2,"title":"When \"Master ML\" Is the Wrong Next Step","text":"

    Jose Alekhinne / 2026-02-12

    Have You Ever Felt like You Should Understand More of the Stack beneath You?

    You can talk about transformers at a whiteboard.

    You can explain attention to a colleague.

    You can use agentic AI to ship real software.

    But somewhere in the back of your mind, there is a voice:

    \"Maybe I should go deeper. Maybe I need to master machine learning.\"

    I had that voice for months.

    Then I spent a week debugging an agent failure that had nothing to do with ML theory and everything to do with knowing which abstraction was leaking.

    This post is about when depth compounds and (more importantly) when it does not.

    ","path":["How Deep Is Too Deep?"],"tags":[]},{"location":"blog/2026-02-12-how-deep-is-too-deep/#the-hierarchy-nobody-questions","level":2,"title":"The Hierarchy Nobody Questions","text":"

    There is an implicit stack most people carry around when thinking about AI:

    Layer What Lives Here Agentic AI Autonomous loops, tool use, multi-step reasoning Generative AI Text, image, code generation Deep Learning Transformer architectures, training at scale Neural Networks Backpropagation, gradient descent Machine Learning Statistical learning, optimization Classical AI Search, planning, symbolic reasoning

    At some point down that stack, you hit a comfortable plateau: the layer where you can hold a conversation but not debug a failure.

    The instinctive response is to go deeper.

    But that instinct hides a more important question:

    \"Does depth still compound when the abstractions above you are moving hyper-exponentially?\"

    ","path":["How Deep Is Too Deep?"],"tags":[]},{"location":"blog/2026-02-12-how-deep-is-too-deep/#the-honest-observation","level":2,"title":"The Honest Observation","text":"

    If you squint hard enough, a large chunk of modern ML intuition collapses into older fields:

    ML Concept Older Field Gradient descent Numerical optimization Backpropagation Reverse-mode autodiff Loss landscapes Non-convex optimization Generalization Statistics Scaling laws Asymptotics and information theory

    Nothing here is uniquely \"AI\".

    Most of this math predates the term deep learning. In some cases, by decades.

    So what changed?

    ","path":["How Deep Is Too Deep?"],"tags":[]},{"location":"blog/2026-02-12-how-deep-is-too-deep/#same-tools-different-regime","level":2,"title":"Same Tools, Different Regime","text":"

    The mistake is assuming this is a new theory problem: It is not.

    It is a new operating regime.

    Classical numerical methods were developed under assumptions like:

    • Manageable dimensionality
    • Reasonably well-conditioned objectives
    • Losses that actually represent the goal

    Modern ML violates all three: On purpose.

    Today's models operate with millions to trillions of parameters, wildly underdetermined systems, and objective functions we know are wrong but optimize anyway.

    It is complete and utter madness!

    At this scale, familiar concepts warp:

    • What we call \"local minima\" are overwhelmingly saddle points in high-dimensional spaces.
    • Noise stops being noise and starts becoming structure.
    • Overfitting can coexist with generalization.
    • Bigger models outperform \"better\" ones.

    The math did not change: The phase did.

    This is less numerical analysis and more statistical physics: Same equations, but behavior dominated by phase transitions and emergent structure.

    ","path":["How Deep Is Too Deep?"],"tags":[]},{"location":"blog/2026-02-12-how-deep-is-too-deep/#why-scaling-laws-feel-alien","level":2,"title":"Why Scaling Laws Feel Alien","text":"

    In classical statistics, asymptotics describe what happens eventually.

    In modern ML, scaling laws describe where you can operate today.

    They do not say \"given enough time, things converge\".

    They say \"cross this threshold and behavior qualitatively changes\".

    This is why dumb architectures plus scale beat clever ones.

    Why small theoretical gains disappear under data.

    Why \"just make it bigger\", ironically, keeps working longer than it should.

    That is not a triumph of ML theory: It is a property of high-dimensional systems under loose objectives.

    ","path":["How Deep Is Too Deep?"],"tags":[]},{"location":"blog/2026-02-12-how-deep-is-too-deep/#where-depth-actually-pays-off","level":2,"title":"Where Depth Actually Pays Off","text":"

    This reframes the original question.

    You do not need depth because this is \"AI\".

    You need depth where failure modes propagate upward.

    I learned this building ctx: The agent failures I have spent the most time debugging were never about the model's architecture.

    They were about:

    • Misplaced trust: The model was confident. The output was wrong. Knowing when confidence and correctness diverge is not something you learn from a textbook. You learn it from watching patterns across hundreds of sessions.

    • Distribution shift: The model performed well on common patterns and fell apart on edge cases specific to this project. Recognizing that shift before it compounds requires understanding why generalization has limits, not just that it does.

    • Error accumulation: In a single prompt, model quirks are tolerable. In autonomous loops running overnight, they compound. A small bias in how the model interprets instructions becomes a large drift by iteration 20.

    • Scale hiding errors: The model's raw capability masked problems that only surfaced under specific conditions. More parameters did not fix the issue. They just made the failure mode rarer and harder to reproduce.

    This is the kind of depth that compounds. Not deriving backprop. But, understanding when correct math produces misleading intuition.

    ","path":["How Deep Is Too Deep?"],"tags":[]},{"location":"blog/2026-02-12-how-deep-is-too-deep/#the-connection-to-context-engineering","level":2,"title":"The Connection to Context Engineering","text":"

    This is the same pattern I keep finding at different altitudes.

    In \"The Attention Budget\", I wrote about how dumping everything into the context window degrades the model's focus. The fix was not a better model: It was better curation: load less, load the right things, preserve signal per token.

    In \"Skills That Fight the Platform\", I wrote about how custom instructions can conflict with the model's built-in behavior. The fix was not deeper ML knowledge: It was an understanding that the model already has judgment and that you should extend it, not override it.

    In \"You Can't Import Expertise\", I wrote about how generic templates fail because they do not encode project-specific knowledge. A consolidation skill with eight Rust-based analysis dimensions was mostly noise for a Go project. The fix was not a better template: It was growing expertise from this project's own history.

    In every case, the answer was not \"go deeper into ML\".

    The answer was knowing which abstraction was leaking and fixing it at the right layer.

    ","path":["How Deep Is Too Deep?"],"tags":[]},{"location":"blog/2026-02-12-how-deep-is-too-deep/#agentic-systems-are-not-an-ml-problem","level":2,"title":"Agentic Systems Are Not an ML Problem","text":"

    The mistake is assuming agent failures originate where the model was trained, rather than where it is deployed.

    Agentic AI is a systems problem under chaotic uncertainty:

    • Feedback loops between the agent and its environment;
    • Error accumulation across iterations;
    • Brittle representations that break outside training distribution;
    • Misplaced trust in outputs that look correct.

    In short-lived interactions, model quirks are tolerable. In long-running autonomous loops, however, they compound.

    That is where shallow understanding becomes expensive.

    But the understanding you need is not about optimizer internals.

    It is about:

    What Matters What Does Not (for Most Practitioners) Why gradient descent fails in specific regimes How to derive it from scratch When memorization masquerades as reasoning The formal definition of VC dimension Recognizing distribution shift before it compounds Hand-tuning learning rate schedules Predicting when scale hides errors instead of fixing them Chasing theoretical purity divorced from practice

    The depth that matters is diagnostic, not theoretical.

    ","path":["How Deep Is Too Deep?"],"tags":[]},{"location":"blog/2026-02-12-how-deep-is-too-deep/#the-real-answer","level":2,"title":"The Real Answer","text":"

    Not turtles all the way down.

    Go deep enough to:

    • Diagnose failures instead of cargo-culting fixes;
    • Reason about uncertainty instead of trusting confidence;
    • Design guardrails that align with model behavior, not hope.

    Stop before:

    • Hand-deriving gradients for the sake of it;
    • Obsessing over optimizer internals you will never touch;
    • Chasing theoretical purity divorced from the scale you actually operate at.

    This is not about mastering ML.

    It is about knowing which abstractions you can safely trust and which ones leak.

    Hint: Any useful abstraction almost certainly leaks.

    ","path":["How Deep Is Too Deep?"],"tags":[]},{"location":"blog/2026-02-12-how-deep-is-too-deep/#a-practical-litmus-test","level":2,"title":"A Practical Litmus Test","text":"

    If a failure occurs and your instinct is to:

    • Add more prompt text: abstraction leak above
    • Add retries or heuristics: error accumulation
    • Change the model: scale masking
    • Reach for ML theory: you are probably (but not always) going too deep

    The right depth is the shallowest layer where the failure becomes predictable.

    ","path":["How Deep Is Too Deep?"],"tags":[]},{"location":"blog/2026-02-12-how-deep-is-too-deep/#the-ctx-lesson","level":2,"title":"The ctx Lesson","text":"

    Every design decision in ctx is downstream of this principle.

    The attention budget exists because the model's internal attention mechanism has real limits: You do not need to understand the math of softmax to build around it. But you do need to understand that more context is not always better and that attention density degrades with scale.

    The skill system exists because the model's built-in behavior is already good: You do not need to understand RLHF to build effective skills. But you do need to understand that the model already has judgment and your skills should teach it things it does not know, not override how it thinks.

    Defense in depth exists because soft instructions are probabilistic: You do not need to understand the transformer architecture to know that a Markdown file is not a security boundary. But you do need to understand that the model follows instructions from context, and context can be poisoned.

    In each case, the useful depth was one or two layers below the abstraction I was working at: Not at the bottom of the stack.

    The boundary between useful understanding and academic exercise is where your failure modes live.

    ","path":["How Deep Is Too Deep?"],"tags":[]},{"location":"blog/2026-02-12-how-deep-is-too-deep/#closing-thought","level":2,"title":"Closing Thought","text":"

    Most modern AI systems do not fail because the math is wrong.

    They fail because we apply correct math in the wrong regime, then build autonomous systems on top of it.

    Understanding that boundary, not crossing it blindly, is where depth still compounds.

    And that is a far more useful form of expertise than memorizing another loss function.

    If You Remember One Thing from This Post...

    Go deep enough to diagnose your failures. Stop before you are solving problems that do not propagate to your layer.

    The abstractions below you are not sacred. But neither are they irrelevant.

    The useful depth is wherever your failure modes live. Usually one or two layers down, not at the bottom.

    This post started as a note about whether I should take an ML course. The answer turned out to be \"no, but understand why not\". The meta continues.

    ","path":["How Deep Is Too Deep?"],"tags":[]},{"location":"blog/2026-02-14-irc-as-context/","level":1,"title":"Before Context Windows, We Had Bouncers","text":"","path":["Before Context Windows, We Had Bouncers"],"tags":[]},{"location":"blog/2026-02-14-irc-as-context/#the-reset-problem","level":2,"title":"The Reset Problem","text":"

    IRC is stateless.

    • You disconnect, you vanish.
    • You reconnect, you begin again.

    No buffer.

    No memory.

    No continuity.

    Modern systems are not much different:

    • Close the browser tab.
      • Lose the Slack scrollback.
    • Open a new LLM session.
      • Start from zero.

    Resets externalize reconstruction cost onto humans.

    Reconstruction is tax: Tax becomes entropy.

    ","path":["Before Context Windows, We Had Bouncers"],"tags":[]},{"location":"blog/2026-02-14-irc-as-context/#stateless-protocol-stateful-life","level":2,"title":"Stateless Protocol, Stateful Life","text":"

    IRC is minimal:

    • A TCP connection.
    • A nickname.
    • A channel.
    • A stream of lines.

    When the connection drops, you literally disappear from the graph.

    The protocol is stateless; human systems are not.

    So you:

    • Reconnect;
    • Ask what you missed;
    • Scroll;
    • Reconstruct.

    The machine forgets; you pay.

    ","path":["Before Context Windows, We Had Bouncers"],"tags":[]},{"location":"blog/2026-02-14-irc-as-context/#the-bouncer-pattern","level":2,"title":"The Bouncer Pattern","text":"

    A bouncer is a daemon that remains connected when you do not:

    • It holds your seat;
    • It buffers what you missed;
    • It keeps your identity online.

    ZNC is one such bouncer.

    With ZNC:

    • Your client does not connect to IRC;
    • It connects to ZNC;
    • ZNC connects upstream.

    Client sessions become ephemeral.

    Presence becomes infrastructural.

    ZNC Is Tmux for IRC

    • Close your laptop.

      • ZNC remains.
    • Switch devices.

      • ZNC persists.

    This is not convenience; this is continuity.

    ","path":["Before Context Windows, We Had Bouncers"],"tags":[]},{"location":"blog/2026-02-14-irc-as-context/#presence-without-flapping","level":2,"title":"Presence without Flapping","text":"

    With a bouncer:

    • Closing your client does not emit PART.
    • Reopening does not emit JOIN.

    You do not flap in and out of existence.

    From the channel's perspective, you remain.

    From your perspective, history accumulates.

    • Buffers persist;
    • Identity persists;
    • Context persists.

    This pattern predates AI.

    ","path":["Before Context Windows, We Had Bouncers"],"tags":[]},{"location":"blog/2026-02-14-irc-as-context/#before-llm-context-windows","level":2,"title":"Before LLM Context Windows","text":"

    An LLM session without memory is IRC without a bouncer:

    • Close the window.
    • Start over.
    • Re-explain intent.
    • Rehydrate context.

    That is friction.

    This Walks and Talks like ctx

    Context engineering moves memory out of sessions and into infrastructure.

    • ZNC does this for IRC.
    • ctx does this for agents.

    Same principle:

    • Volatile interface.
    • Persistent substrate.

    Different fabric.

    ","path":["Before Context Windows, We Had Bouncers"],"tags":[]},{"location":"blog/2026-02-14-irc-as-context/#minimal-architecture","level":2,"title":"Minimal Architecture","text":"

    My setup is intentionally boring:

    • A $5 small VPS.
    • ZNC installed.
    • TLS enabled.
    • Firewall restricted.

    Then:

    • ZNC connects to Libera.Chat.
    • SASL authentication lives inside ZNC.
    • Buffers are stored on disk.

    My client connects to my VPS, not the network.

    The commands do not matter: The boundaries do:

    • Authentication in infrastructure, not in the client;
    • Memory server-side, not in scrollback;
    • Presence decoupled from activity.

    Everything else is configuration.

    ","path":["Before Context Windows, We Had Bouncers"],"tags":[]},{"location":"blog/2026-02-14-irc-as-context/#platform-memory","level":2,"title":"Platform Memory","text":"

    Yes, I know, it is 2026:

    • Discord stores history;
    • Slack stores history;
    • The dumpster fire on gasoline called X, too, stores history.

    HOWEVER, they own your substrate.

    Running a bouncer is quiet sovereignty:

    • Logs are mine.
    • Presence is continuous.
    • State does not reset because I closed a tab.

    Small acts compound.

    ","path":["Before Context Windows, We Had Bouncers"],"tags":[]},{"location":"blog/2026-02-14-irc-as-context/#signal-density","level":2,"title":"Signal Density","text":"

    Primitive systems select for builders.

    Consistent presence in small rooms compounds reputation.

    Quiet compounding outperforms viral spikes.

    ","path":["Before Context Windows, We Had Bouncers"],"tags":[]},{"location":"blog/2026-02-14-irc-as-context/#infrastructure-as-cognition","level":2,"title":"Infrastructure as Cognition","text":"

    ZNC is not interesting because it is retro; it is interesting because it models a principle:

    • Stateless protocols require stateful wrappers;
    • Volatile interfaces require durable memory;
    • Human systems require continuity.

    Distilled:

    Humans require context.

    Before context windows, we had bouncers.

    Before AI memory files, we had buffers.

    Continuity is not a feature; it is a design decision.

    ","path":["Before Context Windows, We Had Bouncers"],"tags":[]},{"location":"blog/2026-02-14-irc-as-context/#build-it","level":2,"title":"Build It","text":"

    If you want the actual setup (VPS, ZNC, TLS, SASL, firewall...) there is a step-by-step runbook:

    Persistent IRC Presence with ZNC.

    ","path":["Before Context Windows, We Had Bouncers"],"tags":[]},{"location":"blog/2026-02-14-irc-as-context/#motd","level":2,"title":"MOTD","text":"

    When my client connects to my bouncer, it prints:

    //   /    ctx:                         https://ctx.ist\n// ,'`./    do you remember?\n// `.,'\\\n//   \\    Copyright 2026-present Context contributors.\n//                 SPDX-License-Identifier: Apache-2.0\n

    See also: Context as Infrastructure -- the post that takes this observation to its conclusion: stateless protocols need stateful wrappers, and AI sessions need persistent filesystems.

    ","path":["Before Context Windows, We Had Bouncers"],"tags":[]},{"location":"blog/2026-02-14-parallel-agents-with-worktrees/","level":1,"title":"Parallel Agents with Git Worktrees","text":"","path":["Parallel Agents with Git Worktrees"],"tags":[]},{"location":"blog/2026-02-14-parallel-agents-with-worktrees/#the-backlog-problem","level":2,"title":"The Backlog Problem","text":"

    Jose Alekhinne / 2026-02-14

    What Do You Do with 30 Open Tasks?

    You could work through them one at a time.

    One agent, one branch, one commit stream.

    Or you could ask: which of these don't touch each other?

    I had 30 open tasks in TASKS.md. Some were docs. Some were a new encryption package. Some were test coverage for a stable module. Some were blog posts.

    They had almost zero file overlap.

    Running one agent at a time meant serial execution on work that was fundamentally parallel:

    I was bottlenecking on me, not on the machine.

    ","path":["Parallel Agents with Git Worktrees"],"tags":[]},{"location":"blog/2026-02-14-parallel-agents-with-worktrees/#the-insight-file-overlap-is-the-constraint","level":2,"title":"The Insight: File Overlap Is the Constraint","text":"

    This is not a scheduling problem: It's a conflict avoidance problem.

    Two agents can work simultaneously on the same codebase if and only if they don't touch the same files. The moment they do, you get merge conflicts: And merge conflicts on AI-generated code are expensive because the human has to arbitrate choices they didn't make.

    So the question becomes:

    \"Can you partition your backlog into non-overlapping tracks?\"

    For ctx, the answer was obvious:

    Track Touches Tasks work/docs docs/, hack/ Blog posts, recipes, runbooks work/pad internal/cli/pad/, specs Scratchpad encryption, CLI, tests work/tests internal/cli/recall/ Recall test coverage

    Three tracks. Near-zero overlap. Three agents.

    ","path":["Parallel Agents with Git Worktrees"],"tags":[]},{"location":"blog/2026-02-14-parallel-agents-with-worktrees/#git-worktrees-the-mechanism","level":2,"title":"Git Worktrees: The Mechanism","text":"

    git has a feature that most people don't use: worktrees.

    A worktree is a second (or third, or fourth) working directory that shares the same .git object database as your main checkout.

    Each worktree has its own branch, its own index, its own working tree. But they all share history, refs, and objects.

    git worktree add ../ctx-docs -b work/docs\ngit worktree add ../ctx-pad -b work/pad\ngit worktree add ../ctx-tests -b work/tests\n
    • Three directories;
    • Three branches;
    • One repository.

    This is cheaper than three clones. And because they share objects, git merge afterwards is fast: It's a local operation on shared data.

    ","path":["Parallel Agents with Git Worktrees"],"tags":[]},{"location":"blog/2026-02-14-parallel-agents-with-worktrees/#the-setup","level":2,"title":"The Setup","text":"

    The workflow I landed on:

    1. Group tasks by blast radius.

    Read TASKS.md. For each pending task, estimate which files and directories it touches. Group tasks that share files into the same track. Tasks with no overlap go into separate tracks.

    This is the part that requires human judgment:

    An agent can propose groupings, but you need to verify that the boundaries are real. A task that says \"update docs\" but actually touches Go code will poison a docs track.

    2. Create worktrees as sibling directories.

    Not subdirectories: Siblings.

    If your main checkout is at ~/WORKSPACE/ctx, worktrees go at ~/WORKSPACE/ctx-docs, ~/WORKSPACE/ctx-pad, etc.

    Why siblings? Because some tools (and some agents) walk up the directory tree looking for .git. A worktree inside the main checkout confuses them.

    3. Launch one agent per worktree.

    # Terminal 1\ncd ../ctx-docs && claude\n\n# Terminal 2\ncd ../ctx-pad && claude\n\n# Terminal 3\ncd ../ctx-tests && claude\n

    Each agent gets a full working copy with .context/ intact. It reads the same TASKS.md, the same DECISIONS.md, the same CONVENTIONS.md. It knows the full project state. It just works on a different slice.

    4. Do NOT run ctx init in worktrees.

    This is the gotcha. The .context/ directory is tracked in git. Running ctx init in a worktree would overwrite shared context files: Wiping decisions, learnings, and tasks that belong to the whole project.

    The worktree already has everything it needs. Leave it alone.

    ","path":["Parallel Agents with Git Worktrees"],"tags":[]},{"location":"blog/2026-02-14-parallel-agents-with-worktrees/#what-actually-happened","level":2,"title":"What Actually Happened","text":"

    I ran three agents for about 40 minutes. Here is roughly what each track produced:

    work/docs: Parallel worktrees recipe, blog post edits, recipe index reorganization, IRC recipe moved from docs/ to hack/.

    work/pad: ctx pad show subcommand, --append and --prepend flags on ctx pad edit, spec updates, 28 new test functions.

    work/tests: Recall test coverage, edge case tests.

    Merging took about five minutes. Two of the three merges were clean.

    The third had a conflict in TASKS.md:

    both the docs track and the pad track had marked different tasks as [x].

    ","path":["Parallel Agents with Git Worktrees"],"tags":[]},{"location":"blog/2026-02-14-parallel-agents-with-worktrees/#the-tasksmd-conflict","level":2,"title":"The TASKS.md Conflict","text":"

    This deserves its own section because it will happen every time.

    When two agents work in parallel, they both read TASKS.md at the start and mark tasks complete as they go. When you merge, git sees two branches that modified the same file differently.

    The resolution is always the same: accept all completions from both sides. No task should go from [x] back to [ ]. The merge is additive.

    This is one of those conflicts that sounds scary but is trivially mechanical: You are not arbitrating design decisions; you are combining two checklists.

    ","path":["Parallel Agents with Git Worktrees"],"tags":[]},{"location":"blog/2026-02-14-parallel-agents-with-worktrees/#limits","level":2,"title":"Limits","text":"

    3-4 worktrees, maximum.

    I tried four once: By the time I merged the third track, the fourth had drifted far enough that its changes needed rebasing.

    The merge complexity grows faster than the parallelism benefit.

    Three is the sweet spot:

    • Two is conservative but safe;
    • Four is possible if the tracks are truly independent;
    • Anything more than four, you are in the danger zone.

    Group by directory, not by priority.

    It is tempting to put all the high-priority tasks in one track: Don't.

    Two high-priority tasks that touch the same files must be in the same track, regardless of urgency. The constraint is file overlap, not importance.

    Commit frequently.

    Smaller commits make merge conflicts easier to resolve. An agent that writes 500 lines in a single commit is harder to merge than one that commits every logical step.

    Name tracks by concern.

    • work/docs and work/pad tell you what's happening;
    • work/track-1 and work/track-2 tell you nothing.
    ","path":["Parallel Agents with Git Worktrees"],"tags":[]},{"location":"blog/2026-02-14-parallel-agents-with-worktrees/#the-pattern","level":2,"title":"The Pattern","text":"

    This is the same pattern that shows up everywhere in ctx:

    The attention budget taught me that you can't dump everything into one context window. You have to partition, prioritize, and load selectively.

    Worktrees are the same principle applied to execution: You can't dump every task into one agent's workstream. You have to partition by blast radius, assign selectively, and merge deliberately.

    The codebase audit that generated these 30 tasks used eight parallel agents for analysis. Worktrees let me use parallel agents for implementation. Same coordination pattern, different artifact.

    And the IRC bouncer post from earlier today argued that stateless protocols need stateful wrappers. Worktrees are the same: git branches are stateless forks; .context/ is the stateful wrapper that gives each agent the project's full memory.

    ","path":["Parallel Agents with Git Worktrees"],"tags":[]},{"location":"blog/2026-02-14-parallel-agents-with-worktrees/#should-this-be-a-skill","level":2,"title":"Should This Be a Skill?","text":"

    I asked myself the same question I asked about the codebase audit: should this be a /ctx-worktree skill?

    This time the answer was a resounding \"yes\":

    Unlike the audit prompt (which I tweak every time and run every other week) the worktree workflow is:

    Criterion Worktree workflow Codebase audit Frequency Weekly Quarterly Stability Same steps every time Tweaked every time Scope Mechanical, bounded Bespoke, 8 agents Trigger Large backlog \"I feel like auditing\"

    The commands are mechanical: git worktree add, git worktree remove, branch naming, safety checks. This is exactly what skills are for: stable contracts for repetitive operations.

    Ergo, /ctx-worktree exists.

    It enforces the 4-worktree limit, creates sibling directories, uses work/ branch prefixes, and reminds you not to run ctx init in worktrees.

    ","path":["Parallel Agents with Git Worktrees"],"tags":[]},{"location":"blog/2026-02-14-parallel-agents-with-worktrees/#the-takeaway","level":2,"title":"The Takeaway","text":"

    Serial execution is the default. But serial is not always necessary.

    If your backlog partitions cleanly by file overlap, you can multiply your throughput with nothing more exotic than git worktree and a second terminal window.

    The hard part is not the git commands; it is the discipline:

    • Grouping by blast radius instead of priority;
    • Accepting that TASKS.md will conflict;
    • And knowing when three tracks is enough.

    If You Remember One Thing from This Post...

    Partition by blast radius, not by priority.

    Two tasks that touch the same files belong in the same track, no matter how important the other one is.

    The constraint is file overlap. Everything else is scheduling.

    The practical setup (skill invocation, worktree creation, merge workflow, and cleanup) lives in the recipe: Parallel Agent Development with Git Worktrees.

    ","path":["Parallel Agents with Git Worktrees"],"tags":[]},{"location":"blog/2026-02-15-ctx-v0.3.0-the-discipline-release/","level":1,"title":"ctx v0.3.0: The Discipline Release","text":"","path":["ctx v0.3.0: The Discipline Release"],"tags":[]},{"location":"blog/2026-02-15-ctx-v0.3.0-the-discipline-release/#when-the-ratio-of-polish-to-features-is-31-you-know-something-changed","level":2,"title":"When the Ratio of Polish to Features Is 3:1, You Know Something Changed","text":"

    Jose Alekhinne / February 15, 2026

    What Does a Release Look Like When Most of the Work Is Invisible?

    No new headline feature. No architectural pivot. No rewrite.

    Just 35+ documentation and quality commits against ~15 feature commits... and somehow, the tool feels like it grew up overnight.

    Six days separate v0.2.0 from v0.3.0.

    Measured by calendar time, it is nothing. Measured by what changed in how the project operates, it is the most significant release yet.

    • v0.1.0 was the prototype;
    • v0.2.0 was the archaeology release: making the past accessible;
    • v0.3.0 is the discipline release: the one that turned best practices into enforcement, suggestions into structure, and a collection of commands into a system of skills.

    The Release Window

    February 1‒February 7, 2026

    From the v0.2.0 tag to commit 2227f99.

    78 files changed in the migration commit alone.

    ","path":["ctx v0.3.0: The Discipline Release"],"tags":[]},{"location":"blog/2026-02-15-ctx-v0.3.0-the-discipline-release/#the-migration-commands-to-skills","level":2,"title":"The Migration: Commands to Skills","text":"

    The largest single change was the migration from .claude/commands/*.md to .claude/skills/*/SKILL.md.

    This was not a rename: It was a rethinking of how AI agents discover and execute project-specific workflows.

    Aspect Commands (before) Skills (after) Structure Flat files in one directory Directory-per-skill with SKILL.md Description Optional, often vague Required, doubles as activation trigger Quality gates None \"Before X-ing\" pre-flight checklist Negative triggers None \"When NOT to Use\" in every skill Examples Rare Good/bad pairs in every skill Average length ~15 lines ~80 lines

    The description field became the single most important line in each skill. In the old system, descriptions were titles. In the new system, they are activation conditions: The text the platform reads to decide whether to surface a skill for a given prompt.

    A description that says \"Show context summary\" activates too broadly or not at all. A description that says \"Show context summary. Use at session start or when unclear about current project state\" activates at the right moment.

    78 files changed. 1,915 insertions. Not because the skills got bloated; because they got specific.

    ","path":["ctx v0.3.0: The Discipline Release"],"tags":[]},{"location":"blog/2026-02-15-ctx-v0.3.0-the-discipline-release/#the-skill-sweep","level":2,"title":"The Skill Sweep","text":"

    After the structural migration, every skill was rewritten in a single session: All 21 of them.

    The rewrite was guided by a pattern that emerged during the process itself: a repeatable anatomy that effective skills share regardless of their purpose:

    1. Before X-ing: Pre-flight checks that prevent premature execution
    2. When to Use: Positive triggers that narrow activation
    3. When NOT to Use: Negative triggers that prevent misuse
    4. Usage Examples: Invocation patterns the agent can pattern-match
    5. Quality Checklist: Verification before claiming completion

    The Anatomy of a Skill That Works post covers the details. What matters for the release story is the result:

    • Zero skills with quality gates became twenty;
    • Zero skills with negative triggers became twenty;
    • Three skills with examples became twenty.

    The Skill Trilogy as Design Spec

    The three blog posts written during this window:

    • Skills That Fight the Platform,
    • You Can't Import Expertise,
    • and The Anatomy of a Skill That Works...

    ... were not retrospective documentation. They were written during the rewrite, and the lessons fed back into the skills as they were being built.

    • The blog was the design document.
    • The skills were the implementation.
    ","path":["ctx v0.3.0: The Discipline Release"],"tags":[]},{"location":"blog/2026-02-15-ctx-v0.3.0-the-discipline-release/#the-consolidation-sweep","level":2,"title":"The Consolidation Sweep","text":"

    The unglamorous work. The kind you only appreciate when you try to change something later and it just works.

    What Why It Matters Constants consolidation Magic strings replaced with semantic constants Variable deshadowing Eliminated subtle scoping bugs File splits Modules that were doing too much, broken apart Godoc standardization Every exported function documented to convention

    This is the work that doesn't get a changelog entry but makes every future commit easier. When a new contributor (human or AI) reads the codebase, they find consistent patterns instead of accumulated drift.

    The consolidation was not an afterthought. It was scheduled deliberately, with the same priority as features: The 3:1 ratio that emerged during v0.2.0 development became an explicit practice:

    • Three feature sessions;
    • One consolidation session.
    ","path":["ctx v0.3.0: The Discipline Release"],"tags":[]},{"location":"blog/2026-02-15-ctx-v0.3.0-the-discipline-release/#the-ear-framework","level":2,"title":"The E/A/R Framework","text":"

    On February 4th, we adopted the E/A/R classification as the official standard for evaluating skills:

    Category Meaning Target Expert Knowledge Claude does not have >70% Activation When/how to trigger ~20% Redundant What Claude already knows <10%

    This came from reviewing approximately 30 external skill files and discovering that most were redundant with Claude's built-in system prompt. Only about 20% had salvageable content, and even those yielded just a few heuristics each.

    The E/A/R framework gave us a concrete, testable criterion:

    A good skill is Expert knowledge minus what Claude already knows.

    If more than 10% of a skill restates platform defaults, it is creating noise, not signal.

    Every skill in v0.3.0 was evaluated against this framework. Several were deleted. The survivors are leaner and more focused.

    ","path":["ctx v0.3.0: The Discipline Release"],"tags":[]},{"location":"blog/2026-02-15-ctx-v0.3.0-the-discipline-release/#backup-and-monitoring-infrastructure","level":2,"title":"Backup and Monitoring Infrastructure","text":"

    A tool that manages your project's memory needs ops maturity.

    v0.3.0 added two pieces of infrastructure that reflect this:

    Backup staleness hook: A UserPromptSubmit hook that checks whether the last .context/ backup is more than two days old. If it is, and the SMB mount is available, it reminds the user. No cron job running when nobody is working. No redundant backups when nothing has changed.

    Context size checkpoint: A PreToolUse hook that estimates current context window usage and warns when the session is getting heavy. This hooks into the attention budget philosophy: Degradation is expected, but it should be visible.

    Both hooks use $CLAUDE_PROJECT_DIR instead of hardcoded paths, a migration triggered by a username rename that broke every absolute path in the hook configuration. That migration (replacing /home/user/... with \"$CLAUDE_PROJECT_DIR\"/.claude/hooks/...) was one of those changes that seems trivial but prevents an entire category of future failures.

    ","path":["ctx v0.3.0: The Discipline Release"],"tags":[]},{"location":"blog/2026-02-15-ctx-v0.3.0-the-discipline-release/#the-numbers","level":2,"title":"The Numbers","text":"Metric v0.2.0 v0.3.0 Skills (was \"commands\") 11 21 Skills with quality gates 0 21 Skills with \"When NOT to Use\" 0 21 Average skill body ~15 lines ~80 lines Hooks using $CLAUDE_PROJECT_DIR 0 All Documentation commits -- 35+ Feature/fix commits -- ~15

    That ratio (35+ documentation and quality commits to ~15 feature commits) is the defining characteristic of this release:

    • This release is not a failure to ship features.
    • It is the deliberate choice to make the existing features reliable.
    ","path":["ctx v0.3.0: The Discipline Release"],"tags":[]},{"location":"blog/2026-02-15-ctx-v0.3.0-the-discipline-release/#what-v030-means","level":2,"title":"What v0.3.0 Means","text":"

    v0.1.0 asked: \"Can we give AI persistent memory?\"

    v0.2.0 asked: \"Can we make that memory accessible to humans too?\"

    v0.3.0 asks a different question: \"Can we make the quality self-enforcing?\"

    The answer is not a feature: It is a practice:

    • Skills with quality gates enforce pre-flight checks.
    • Negative triggers prevent misuse without human intervention.
    • The E/A/R framework ensures skills contain signal, not noise.
    • Consolidation sessions are scheduled, not improvised.
    • Hook infrastructure makes degradation visible.

    Discipline is not the absence of velocity. It is the infrastructure that makes velocity sustainable.

    ","path":["ctx v0.3.0: The Discipline Release"],"tags":[]},{"location":"blog/2026-02-15-ctx-v0.3.0-the-discipline-release/#what-comes-next","level":2,"title":"What Comes Next","text":"

    The skill system is now mature enough to support real workflows without constant human correction. The hooks infrastructure is portable and resilient. The consolidation practice is documented and repeatable.

    The next chapter is about what you build on top of discipline:

    • Multi-agent coordination;
    • Deeper integration patterns;
    • And the question of whether context management is a tool concern or an infrastructure concern.

    But those are future posts.

    This one is about the release that proved polish is not the opposite of progress. It is what turns a prototype into a product.

    The Discipline Release

    v0.1.0 shipped features.

    v0.2.0 shipped archaeology.

    v0.3.0 shipped the habits that make everything else trustworthy.

    The most important code in this release is the code that prevents bad code from shipping.

    This post was drafted using /ctx-blog with access to the full git history between v0.2.0 and v0.3.0, decision logs, learning logs, and the session files from the skill rewrite window. The meta continues.

    ","path":["ctx v0.3.0: The Discipline Release"],"tags":[]},{"location":"blog/2026-02-15-eight-ways-a-hook-can-talk/","level":1,"title":"Eight Ways a Hook Can Talk","text":"","path":["Eight Ways a Hook Can Talk"],"tags":[]},{"location":"blog/2026-02-15-eight-ways-a-hook-can-talk/#when-your-warning-disappears","level":2,"title":"When Your Warning Disappears","text":"

    Jose Alekhinne / 2026-02-15

    I had a backup warning that nobody ever saw.

    The hook was correct: It detected stale backups, formatted a nice message, and output it as {\"systemMessage\": \"...\"}. The problem wasn't detection. The problem was delivery. The agent absorbed the information, processed it internally, and never told the user.

    Meanwhile, a different hook (the journal reminder) worked perfectly every time. Users saw the reminder, ran the commands, and the backlog stayed manageable. Same hook event (UserPromptSubmit), same project, completely different outcomes.

    The difference was one line:

    IMPORTANT: Relay this journal reminder to the user VERBATIM\nbefore answering their question.\n

    That explicit instruction is what makes VERBATIM relay a pattern, not just a formatting choice. And once I saw it as a pattern, I started seeing others.

    ","path":["Eight Ways a Hook Can Talk"],"tags":[]},{"location":"blog/2026-02-15-eight-ways-a-hook-can-talk/#the-audit","level":2,"title":"The Audit","text":"

    I looked at every hook in ctx: Eight shell scripts across three hook events. And I found five distinct output patterns already in use, plus three more that the existing hooks were reaching for but hadn't quite articulated.

    The patterns form a spectrum based on a single question:

    \"Who decides what the user sees?\"

    At one end, the hook decides everything (hard gate: the agent literally cannot proceed). At the other end, the hook is invisible (silent side-effect: nobody knows it ran). In between, there is a range of negotiation between hook, agent, and the user.

    Here's the full spectrum:

    ","path":["Eight Ways a Hook Can Talk"],"tags":[]},{"location":"blog/2026-02-15-eight-ways-a-hook-can-talk/#1-hard-gate","level":3,"title":"1. Hard Gate","text":"
    {\"decision\": \"block\", \"reason\": \"Use ctx from PATH, not ./ctx\"}\n

    The nuclear option: The agent's tool call is rejected before it executes.

    This is Claude Code's first-class PreToolUse mechanism: The hook returns JSON with decision: block and the agent gets an error with the reason.

    Use this for invariants: Constitution rules, security boundaries, things that must never happen. I use it to enforce PATH-based ctx invocation, block sudo, and require explicit approval for git push.

    ","path":["Eight Ways a Hook Can Talk"],"tags":[]},{"location":"blog/2026-02-15-eight-ways-a-hook-can-talk/#2-verbatim-relay","level":3,"title":"2. VERBATIM Relay","text":"
    IMPORTANT: Relay this warning to the user VERBATIM before answering.\n┌─ Journal Reminder ─────────────────────────────\n│ You have 12 sessions not yet imported.\n│   ctx recall import --all\n└────────────────────────────────────────────────\n

    The instruction is the pattern. Without \"Relay VERBATIM,\" agents tend to absorb information into their internal reasoning and never surface it. The explicit instruction changes the behavior from \"I know about this\" to \"I must tell the user about this.\"

    I use this for actionable reminders:

    • Unexported journal entries;
    • Stale backups;
    • Context capacity warnings...

    ...things the user should see regardless of what they asked.

    ","path":["Eight Ways a Hook Can Talk"],"tags":[]},{"location":"blog/2026-02-15-eight-ways-a-hook-can-talk/#3-agent-directive","level":3,"title":"3. Agent Directive","text":"
    ┌─ Persistence Checkpoint (prompt #25) ───────────\n│ No context files updated in 15+ prompts.\n│ Have you discovered learnings worth persisting?\n└──────────────────────────────────────────────────\n

    A nudge, not a command. The hook tells the agent something; the agent decides what (if anything) to tell the user. This is right for behavioral nudges: \"you haven't saved context in a while\" doesn't need to be relayed verbatim, but the agent should consider acting on it.

    ","path":["Eight Ways a Hook Can Talk"],"tags":[]},{"location":"blog/2026-02-15-eight-ways-a-hook-can-talk/#4-silent-context-injection","level":3,"title":"4. Silent Context Injection","text":"
    ctx agent --budget 4000 2>/dev/null || true\n

    Pure background enrichment. The agent's context window gets project information injected on every tool call, with no visible output. Neither the agent nor the user sees the hook fire, but the agent makes better decisions because of the context.

    ","path":["Eight Ways a Hook Can Talk"],"tags":[]},{"location":"blog/2026-02-15-eight-ways-a-hook-can-talk/#5-silent-side-effect","level":3,"title":"5. Silent Side-Effect","text":"
    find \"$CTX_TMPDIR\" -type f -mtime +15 -delete\n

    Do work, say nothing. Temp file cleanup on session end. Logging. Marker file management. The action is the entire point; no one needs to know.

    ","path":["Eight Ways a Hook Can Talk"],"tags":[]},{"location":"blog/2026-02-15-eight-ways-a-hook-can-talk/#the-patterns-we-dont-have-yet","level":2,"title":"The Patterns We Don't Have Yet","text":"

    Three more patterns emerged from the gaps in the existing hooks.

    Conditional relay: \"Relay this, but only if the user's question is about X.\" This pattern avoids noise when the warning isn't relevant. It's more fragile (depends on agent judgment) but less annoying.

    Suggested action: \"Here's a problem, and here's the exact command to fix it. Ask the user before running it.\" This pattern goes beyond a nudge by giving the agent a concrete proposal, but still requires human approval.

    Escalating severity: INFO gets absorbed silently. WARN gets mentioned at the next natural pause. CRITICAL gets the VERBATIM treatment. This pattern introduces a protocol for hooks that produce output at different urgency levels, so they don't all compete for the user's attention.

    ","path":["Eight Ways a Hook Can Talk"],"tags":[]},{"location":"blog/2026-02-15-eight-ways-a-hook-can-talk/#the-principle","level":2,"title":"The Principle","text":"

    Hooks are the boundary between your environment and the agent's reasoning.

    A hook that detects a problem but can't communicate it effectively is the same as no hook at all.

    The format of your output is a design decision with real consequences:

    • Use a hard gate and the agent can't proceed (good for invariants, frustrating for false positives)
    • Use VERBATIM relay and the user will see it (good for reminders, noisy if overused)
    • Use an agent directive and the agent might act (good for nudges, unreliable for critical warnings)
    • Use silent injection and nobody knows (good for enrichment, invisible when it breaks)

    Choose deliberately. And, when in doubt, write the word VERBATIM.

    The full pattern catalog with decision flowchart and implementation examples is in the Hook Output Patterns recipe.

    ","path":["Eight Ways a Hook Can Talk"],"tags":[]},{"location":"blog/2026-02-15-why-zensical/","level":1,"title":"Version Numbers Are Lagging Indicators","text":"","path":["Version Numbers Are Lagging Indicators"],"tags":[]},{"location":"blog/2026-02-15-why-zensical/#why-ctxs-journal-site-runs-on-a-v0021-tool","level":2,"title":"Why ctx's Journal Site Runs on a v0.0.21 Tool","text":"

    Jose Alekhinne / 2026-02-15

    Would You Ship Production Infrastructure on a v0.0.21 Dependency?

    Most engineers wouldn't. Version numbers signal maturity. Pre-1.0 means unstable API, missing features, risk.

    But version numbers tell you where a project has been. They say nothing about where it's going.

    I just bet ctx's entire journal site on a tool that hasn't hit v0.1.0.

    Here's why I'd do it again.

    ","path":["Version Numbers Are Lagging Indicators"],"tags":[]},{"location":"blog/2026-02-15-why-zensical/#the-problem","level":2,"title":"The Problem","text":"

    When v0.2.0 shipped the journal system, the pipeline was clear:

    • Export sessions to Markdown;
    • Enrich them with YAML frontmatter;
    • And render them into something browsable.

    The first two steps were solved; the third needed a tool.

    The journal entries are standard Markdown with YAML frontmatter, tables, and fenced code blocks. That is the entire format:

    • No JSX;
    • No shortcodes;
    • No custom templating.

    Just Markdown rendered well.

    The requirements are modest:

    • Read a configuration file (such as mkdocs.yml);
    • Render Markdown with extensions (admonitions, tabs, tables);
    • Search;
    • Handle 100+ files without choking on incremental rebuilds;
    • Look good out of the box;
    • Not lock me in.

    The obvious candidates were as follows:

    Tool Language Strengths Pain Points Hugo Go Blazing fast, mature Templating is painful; Go templates fight you on anything non-trivial Astro JS/TS Modern, flexible JS ecosystem overhead; overkill for a docs site MkDocs + Material Python Beautiful defaults, massive community (22k+ stars) Slow incremental rebuilds on large sites; limited extensibility model Zensical Python Built to fix MkDocs' limits; 4-5x faster rebuilds v0.0.21; module system not yet shipped

    The instinct was Hugo. Same language as ctx. Fast. Well-established.

    But instinct is not analysis. I picked the one with the lowest version number.

    ","path":["Version Numbers Are Lagging Indicators"],"tags":[]},{"location":"blog/2026-02-15-why-zensical/#the-evaluation","level":2,"title":"The Evaluation","text":"

    Here is what I actually evaluated, in order:

    ","path":["Version Numbers Are Lagging Indicators"],"tags":[]},{"location":"blog/2026-02-15-why-zensical/#1-the-team","level":3,"title":"1. The Team","text":"

    Zensical is built by squidfunk: The same person behind Material for MkDocs, the most popular MkDocs theme with 22,000+ stars. It powers documentation sites for projects across every language and framework.

    • This is not someone learning how to build static site generators.
    • This is someone who spent years understanding exactly where MkDocs breaks and decided to fix it from the ground up.

    They did not build zensical because MkDocs was bad: They built it because MkDocs hit a ceiling:

    • Incremental rebuilds: 4-5x faster during serve. When you have hundreds of journal entries and you edit one, the difference between \"rebuild everything\" and \"rebuild this page\" is the difference between a usable workflow and a frustrating one.

    • Large site performance: Specifically designed for tens of thousands of pages. The journal grows with every session. A tool that slows down as content accumulates is a tool you will eventually replace.

    A proven team starting fresh is more predictable than an unproven team at v3.0.

    ","path":["Version Numbers Are Lagging Indicators"],"tags":[]},{"location":"blog/2026-02-15-why-zensical/#2-the-architecture","level":3,"title":"2. The Architecture","text":"

    Zensical is investing in a Rust-based Markdown parser with CommonMark support. That signals something about the team's priorities:

    Performance foundations first; features second.

    ctx's journal will grow:

    • Every exported session adds files.
    • Every enrichment pass adds metadata.

    Choosing a tool that gets slower as you add content means choosing to migrate later.

    Choosing one built for scale means the decision holds.

    ","path":["Version Numbers Are Lagging Indicators"],"tags":[]},{"location":"blog/2026-02-15-why-zensical/#3-the-migration-path","level":3,"title":"3. The Migration Path","text":"

    Zensical reads mkdocs.yml natively. If it doesn't work out, I can move back to MkDocs + Material with zero content changes:

    • The Markdown is standard;
    • The frontmatter is standard;
    • The configuration is compatible.

    This is the infrastructure pattern again: The same way ZNC decouples presence from the client, zensical decouples rendering from the generator:

    • The Markdown is yours.
    • The frontmatter is standard YAML.
    • The configuration is MkDocs-compatible.

    You are not locked into anything except your own content.

    No lock-in is not a feature: It's a design philosophy:

    It's the same reason ctx uses plain Markdown files in .context/ instead of a database: the format should outlive the tool.

    Lock-in Is the Real Risk, Not Version Numbers

    A mature tool with a proprietary format is riskier than a young tool with a standard one. Version numbers measure time invested. Portability measures respect for the user.

    ","path":["Version Numbers Are Lagging Indicators"],"tags":[]},{"location":"blog/2026-02-15-why-zensical/#4-the-dependency-tree","level":3,"title":"4. The Dependency Tree","text":"

    Here is what pip install zensical actually pulls in:

    • click
    • Markdown
    • Pygments
    • pymdown-extensions
    • PyYAML

    Only five dependencies. All well-known. No framework bloat. No bundler. No transpiler. No node_modules black hole.

    3k GitHub stars at v0.0.21 is strong early traction for a pre-1.0 project.

    The dependency tree is thin: No bloat.

    ","path":["Version Numbers Are Lagging Indicators"],"tags":[]},{"location":"blog/2026-02-15-why-zensical/#5-the-fit","level":3,"title":"5. The Fit","text":"

    This is the same principle behind the attention budget: do not overfit the tool to hypothetical requirements. The right amount of capability is the minimum needed for the current task.

    Hugo is a powerful static site generator. It is also a powerful templating engine, a powerful asset pipeline, and a powerful taxonomy system. For rendering Markdown journals, that power is overhead:

    It is the complexity you pay for but never use.

    ctx's journal files are standard Markdown with YAML frontmatter, tables, and fenced code blocks. That is exactly the sweet spot Zensical inherits from Material for MkDocs:

    • No custom plugins needed;
    • No special syntax;
    • No templating gymnastics.

    The requirements match the capabilities: Not the capabilities that are promised, but the ones that exist today, at v0.0.21.

    ","path":["Version Numbers Are Lagging Indicators"],"tags":[]},{"location":"blog/2026-02-15-why-zensical/#the-caveat","level":2,"title":"The Caveat","text":"

    It would be dishonest not to mention what's missing.

    The module system for third-party extensions opens in early 2026.

    If ctx ever needs custom plugins (for example, auto-linking session IDs, rendering special journal metadata, etc.) that infrastructure isn't there yet.

    The installation experience is rough:

    We discovered this firsthand: pip install zensical often fails on macOS (system Python stubs, Homebrew's PEP 668 restrictions). The answer is pipx, which creates an isolated environment with the correct Python version automatically.

    That kind of friction is typical for young Python tooling, and it is documented in the Getting Started guide.

    And 3,000 stars at v0.0.21 is strong early traction, but it's still early: The community is small. When something breaks, you're reading source code, not documentation.

    These are real costs. I chose to pay them because the alternative costs are higher.

    For example:

    • Hugo's templating pain would cost me time on every site change.
    • Astro's JS ecosystem would add complexity I don't need.
    • MkDocs would work today but hit scaling walls tomorrow.

    Zensical's costs are front-loaded and shrinking.

    The others compound.

    ","path":["Version Numbers Are Lagging Indicators"],"tags":[]},{"location":"blog/2026-02-15-why-zensical/#the-evaluation-framework","level":2,"title":"The Evaluation Framework","text":"

    For anyone facing a similar choice, here is the framework that emerged:

    Signal What It Tells You Weight Team track record Whether the architecture will be sound High Migration path Whether you can leave if wrong High Current fit Whether it solves your problem today High Dependency tree How much complexity you're inheriting Medium Version number How long the project has existed Low Star count Community interest (not quality) Low Feature list What's possible (not what you need) Low

    The bottom three are the metrics most engineers optimize for.

    The top four are the ones that predict whether you'll still be happy with the choice in a year.

    Features You Don't Need Are Not Free

    Every feature in a dependency is code you inherit but don't control.

    A tool with 200 features where you use 5 means 195 features worth of surface area for bugs, breaking changes, and security issues that have nothing to do with your use case.

    Fit is the inverse of feature count.

    ","path":["Version Numbers Are Lagging Indicators"],"tags":[]},{"location":"blog/2026-02-15-why-zensical/#the-broader-pattern","level":2,"title":"The Broader Pattern","text":"

    This is part of a theme I keep encountering in this project:

    Leading indicators beat lagging indicators.

    Domain Lagging Indicator Leading Indicator Tooling Version number, star count Team track record, architecture Code quality Test coverage percentage Whether tests catch real bugs Context persistence Number of files in .context/ Whether the AI makes fewer mistakes Skills Number of skills created Whether each skill fires at the right time Consolidation Lines of code refactored Whether drift stops accumulating

    Version numbers, star counts, coverage percentages, file counts...

    ...these are all measures of effort expended.

    They say nothing about value delivered.

    The question is never \"how mature is this tool?\"

    The question is \"does this tool's trajectory intersect with my needs?\"

    Zensical's trajectory:

    • A proven team fixing known problems,
    • in a proven architecture,
    • with a standard format,
    • and no lock-in.

    ctx's needs:

    Render standard Markdown into a browsable site, at scale, without complexity.

    The intersection is clean; the version number is noise.

    This is the same kind of decision that shows up throughout ctx:

    • Skills that fight the platform taught that the best integration extends existing behavior, not replaces it.
    • You can't import expertise taught that tools should grow from your project's actual needs, not from feature checklists.
    • Context as infrastructure argues that the format should outlive the tool; and, zensical honors that principle by reading standard Markdown and standard MkDocs configuration.

    If You Remember One Thing from This Post...

    Version numbers measure where a project has been.

    The team and the architecture tell you where it's going.

    A v0.0.21 tool built by the right team on the right foundations is a safer bet than a v5.0 tool that doesn't fit your problem.

    Bet on trajectories, not timestamps.

    This post started as an evaluation note in ideas/ and a separate decision log. The analysis held up. The two merged into one. The meta continues.

    ","path":["Version Numbers Are Lagging Indicators"],"tags":[]},{"location":"blog/2026-02-16-ctx-v0.6.0-the-integration-release/","level":1,"title":"ctx v0.6.0: The Integration Release","text":"","path":["ctx v0.6.0: The Integration Release"],"tags":[]},{"location":"blog/2026-02-16-ctx-v0.6.0-the-integration-release/#two-commands-to-persistent-memory","level":2,"title":"Two Commands to Persistent Memory","text":"

    Jose Alekhinne / February 16, 2026

    What Changed?

    ctx is now a Claude Code plugin. Two commands, no build step:

    /plugin marketplace add ActiveMemory/ctx\n/plugin install ctx@activememory-ctx\n

    Six hooks. Twenty-five skills. Installed.

    For three releases, ctx required assembly:

    • Clone the repo;
    • Build the binary;
    • Copy hook scripts into .claude/hooks/;
    • Symlink skill files.
    • Understand which shell scripts called which Go commands;
    • Hope nothing broke when Claude Code updated its hook format.

    v0.6.0 ends that era: ctx ships as a Claude Marketplace plugin:

    Hooks and skills served directly from source, installed with a single command, updated by pulling the repo. The tool that gives AI persistent memory is now as easy to install as the AI itself.

    But the plugin conversion was not just a packaging change: It was the forcing function that rewrote every shell hook in Go, eliminated the jq dependency, enabled go test coverage for hook logic, and made distribution a solved problem.

    When you fix how something ships, you end up fixing how it is built.

    The Release Window

    February 15-February 16, 2026

    From the v0.3.0 tag to commit a3178bc:

    • 109 commits.
    • 334 files changed.
    • Version jumped from 0.3.0 to 0.6.0 to signal the magnitude.
    ","path":["ctx v0.6.0: The Integration Release"],"tags":[]},{"location":"blog/2026-02-16-ctx-v0.6.0-the-integration-release/#before-six-shell-scripts-and-a-prayer","level":2,"title":"Before: Six Shell Scripts and a Prayer","text":"

    v0.3.0 had six hook scripts. Each was a Bash file that shelled out to ctx subcommands, parsed JSON with jq, and wired itself into Claude Code's hook system via .claude/hooks/:

    .claude/hooks/\n├── check-context-size.sh\n├── check-persistence.sh\n├── check-journal.sh\n├── post-commit.sh\n├── block-non-path-ctx.sh\n└── cleanup-tmp.sh\n

    This worked, but it also meant:

    • jq was a hard dependency: No jq, no hooks. macOS ships without it.
    • No test coverage: Shell scripts were tested manually or not at all.
    • Fragile deployment: ctx init had to scaffold .claude/hooks/ and .claude/skills/ with the right paths, permissions, and structure.
    • Version drift: Users who installed once never got hook updates unless they re-ran ctx init.

    The shell scripts were the right choice for prototyping. They were the wrong choice for distribution.

    ","path":["ctx v0.6.0: The Integration Release"],"tags":[]},{"location":"blog/2026-02-16-ctx-v0.6.0-the-integration-release/#after-one-plugin-zero-shell-scripts","level":2,"title":"After: One Plugin, Zero Shell Scripts","text":"

    v0.6.0 replaces all six scripts with ctx system subcommands compiled into the binary:

    Shell Script Go Subcommand check-context-size.shctx system check-context-sizecheck-persistence.shctx system check-persistencecheck-journal.shctx system check-journalpost-commit.shctx system post-commitblock-non-path-ctx.shctx system block-non-path-ctxcleanup-tmp.shctx system cleanup-tmp

    The plugin's hooks.json wires them to Claude Code events:

    {\n  \"PreToolUse\": [\n    {\"matcher\": \"Bash\", \"command\": \"ctx system block-non-path-ctx\"},\n    {\"matcher\": \".*\", \"command\": \"ctx agent --budget 4000\"}\n  ],\n  \"PostToolUse\": [\n    {\"matcher\": \"Bash\", \"command\": \"ctx system post-commit\"}\n  ],\n  \"UserPromptSubmit\": [\n    {\"command\": \"ctx system check-context-size\"},\n    {\"command\": \"ctx system check-persistence\"},\n    {\"command\": \"ctx system check-journal\"}\n  ],\n  \"SessionEnd\": [\n    {\"command\": \"ctx system cleanup-tmp\"}\n  ]\n}\n

    No jq. No shell scripts. No .claude/hooks/ directory to manage.

    The hooks are Go functions with tests, compiled into the same binary you already have.

    ","path":["ctx v0.6.0: The Integration Release"],"tags":[]},{"location":"blog/2026-02-16-ctx-v0.6.0-the-integration-release/#the-plugin-model","level":2,"title":"The Plugin Model","text":"

    The ctx plugin lives at .claude-plugin/marketplace.json in the repo.

    Claude Code's marketplace system handles discovery and installation:

    Skills are served directly from internal/assets/claude/skills/; there is no build step, no make plugin, no generated artifacts.

    This means:

    1. Install is two commands: Not \"clone, build, copy, configure.\"
    2. Updates are automatic: Pull the repo; the plugin reads from source.
    3. Skills and hooks are versioned together: No drift between what the CLI expects and what the plugin provides.
    4. ctx init is tool-agnostic: It creates .context/ and nothing else. No .claude/ scaffolding, no assumptions about which AI tool you use.

    That last point matters:

    Before v0.6.0, ctx init tried to set up Claude Code integration as part of initialization. That coupled the context system to a specific tool.

    Now, ctx init gives you persistent context. The plugin gives you Claude Code integration. They compose; they don't depend.

    ","path":["ctx v0.6.0: The Integration Release"],"tags":[]},{"location":"blog/2026-02-16-ctx-v0.6.0-the-integration-release/#beyond-the-plugin-what-else-shipped","level":2,"title":"Beyond the Plugin: What Else Shipped","text":"

    The plugin conversion dominated the release, but 109 commits covered more ground.

    ","path":["ctx v0.6.0: The Integration Release"],"tags":[]},{"location":"blog/2026-02-16-ctx-v0.6.0-the-integration-release/#obsidian-vault-export","level":3,"title":"Obsidian Vault Export","text":"
    ctx journal obsidian\n

    Generates a full Obsidian vault from enriched journal entries: wikilinks, MOC (Map of Content) pages, and graph-optimized cross-linking. If you already use Obsidian for notes, your AI session history now lives alongside everything else.

    ","path":["ctx v0.6.0: The Integration Release"],"tags":[]},{"location":"blog/2026-02-16-ctx-v0.6.0-the-integration-release/#encrypted-scratchpad","level":3,"title":"Encrypted Scratchpad","text":"
    ctx pad edit \"DATABASE_URL=postgres://...\"\nctx pad show\n

    AES-256-GCM encrypted storage for sensitive one-liners.

    The encrypted blob commits to git; the key stays in .gitignore.

    This is useful for connection strings, API keys, and other values that need to travel with the project without appearing in plaintext.

    ","path":["ctx v0.6.0: The Integration Release"],"tags":[]},{"location":"blog/2026-02-16-ctx-v0.6.0-the-integration-release/#security-hardening","level":3,"title":"Security Hardening","text":"

    Three medium-severity findings from a security audit are now closed:

    Finding Fix Path traversal via --context-dir Boundary validation: operations cannot escape project root (M-1) Symlink following in .context/Lstat() check before every file read/write (M-2) Predictable temp file paths User-specific temp directory under $XDG_RUNTIME_DIR (M-3)

    Plus a new /sanitize-permissions skill that audits settings.local.json for overly broad Bash permissions.

    ","path":["ctx v0.6.0: The Integration Release"],"tags":[]},{"location":"blog/2026-02-16-ctx-v0.6.0-the-integration-release/#hooks-that-know-when-to-be-quiet","level":3,"title":"Hooks That Know When to Be Quiet","text":"

    A subtle but important fix: hooks now no-op before ctx init has run.

    Previously, a fresh clone with no .context/ would trigger hook errors on every prompt. Now, hooks detect the absence of a context directory and exit silently. Similarly, ctx init treats a .context/ directory containing only logs as uninitialized and skips the --overwrite prompt.

    Small changes. Large reduction in friction for new users.

    ","path":["ctx v0.6.0: The Integration Release"],"tags":[]},{"location":"blog/2026-02-16-ctx-v0.6.0-the-integration-release/#the-numbers","level":2,"title":"The Numbers","text":"Metric v0.3.0 v0.6.0 Skills 21 25 Shell hook scripts 6 0 Go system subcommands 0 6 External dependencies (hooks) jq, bash none Lines of Go ~14,000 ~37,000 Plugin install commands n/a 2 Security findings (open) 3 0 ctx init creates .claude/ yes no

    The line count tripled. Most of that is documentation site HTML, Obsidian export logic, and the scratchpad encryption module.

    The core CLI grew modestly; the ecosystem around it grew substantially.

    ","path":["ctx v0.6.0: The Integration Release"],"tags":[]},{"location":"blog/2026-02-16-ctx-v0.6.0-the-integration-release/#what-does-v060-mean-for-ctx","level":2,"title":"What Does v0.6.0 Mean for ctx?","text":"
    • v0.1.0 asked: \"Can we give AI persistent memory?\"
    • v0.2.0 asked: \"Can we make that memory accessible to humans too?\"
    • v0.3.0 asked: \"Can we make the quality self-enforcing?\"

    v0.6.0 asks: \"Can someone else actually use this?\"

    A tool that requires cloning a repo, building from source, and manually wiring hooks into the right directories is a tool for its author.

    A tool that installs with two commands from a marketplace is a tool for everyone.

    The version jumped from 0.3.0 to 0.6.0 because the delta is not incremental: The shell-to-Go rewrite, the plugin model, the security hardening, and the tool-agnostic init: Together, they change what ctx is: Not a different tool, but a tool that is finally ready to leave the workshop.

    ","path":["ctx v0.6.0: The Integration Release"],"tags":[]},{"location":"blog/2026-02-16-ctx-v0.6.0-the-integration-release/#what-comes-next","level":2,"title":"What Comes Next","text":"

    The plugin model opens the door to distribution patterns that were not possible before. Marketplace discovery means new users find ctx without reading a README. Plugin updates mean existing users get improvements without rebuilding.

    The next chapter is about what happens when persistent context is easy to install: Adoption patterns, multi-project workflows, and whether the .context/ convention can become infrastructure that other tools build on.

    But those are future posts.

    This one is about the release that turned a developer tool into a distributable product: two commands, zero shell scripts, and a presence on the Claude Marketplace.

    The Integration Release

    v0.1.0 shipped features. v0.2.0 shipped archaeology.

    v0.3.0 shipped discipline. v0.6.0 shipped the front door.

    The most important code in this release is the code you never have to copy.

    This post was drafted using /ctx-blog-changelog with access to the full git history between v0.3.0 and v0.6.0, release notes, and the plugin conversion PR. The meta continues.

    ","path":["ctx v0.6.0: The Integration Release"],"tags":[]},{"location":"blog/2026-02-17-code-is-cheap-judgment-is-not/","level":1,"title":"Code Is Cheap. Judgment Is Not.","text":"","path":["Code Is Cheap. Judgment Is Not."],"tags":[]},{"location":"blog/2026-02-17-code-is-cheap-judgment-is-not/#why-ai-replaces-effort-not-expertise","level":2,"title":"Why AI Replaces Effort, Not Expertise","text":"

    Jose Alekhinne / February 17, 2026

    Are You Worried about AI Taking Your Job?

    You might be confusing the thing that's cheap with the thing that's valuable.

    I keep seeing the same conversation: Engineers, designers, writers: all asking the same question with the same dread:

    \"What happens when AI can do what I do?\"

    The question is wrong:

    • AI does not replace workers;
    • AI replaces unstructured effort.

    The distinction matters, and everything I have learned building ctx reinforces it.

    ","path":["Code Is Cheap. Judgment Is Not."],"tags":[]},{"location":"blog/2026-02-17-code-is-cheap-judgment-is-not/#the-three-confusions","level":2,"title":"The Three Confusions","text":"

    People who feel doomed by AI usually confuse three things:

    People confuse... With... Effort Value Typing Thinking Production Judgment
    • Effort is time spent.
    • Value is the outcome that time produces.

    They are not the same; they never were.

    AI just makes the gap impossible to ignore.

    Typing is mechanical: Thinking is directional.

    An AI can type faster than any human. Yet, it cannot decide what to type without someone framing the problem, sequencing the work, and evaluating the result.

    Production is making artifacts. Judgment is knowing:

    • which artifacts to make,
    • in what order,
    • to what standard,
    • and when to stop.

    AI floods the system with production capacity; it does not flood the system with judgment.

    ","path":["Code Is Cheap. Judgment Is Not."],"tags":[]},{"location":"blog/2026-02-17-code-is-cheap-judgment-is-not/#code-is-nothing","level":2,"title":"Code Is Nothing","text":"

    This sounds provocative until you internalize it:

    Code is cheap. Artifacts are cheap.

    An AI can generate a thousand lines of working code in literal minutes:

    It can scaffold a project, write tests, build a CI pipeline, draft documentation. The raw production of software artifacts is no longer the bottleneck.

    So, what is not cheap?

    • Taste: knowing what belongs and what does not
    • Framing: turning a vague goal into a concrete problem
    • Sequencing: deciding what to build first and why
    • Fanning out: breaking work into parallel streams that converge
    • Acceptance criteria: defining what \"done\" looks like before starting
    • Judgment: the thousand small decisions that separate code that works from code that lasts

    These are the skills that direct production: Human skills.

    Not because AI is incapable of learning them, but because they require something AI does not have:

    temporal accountability for generated outcomes.

    That is, you cannot keep AI accountable for the $#!% it generated three months ago. A human, on the other hand, will always be accountable.

    ","path":["Code Is Cheap. Judgment Is Not."],"tags":[]},{"location":"blog/2026-02-17-code-is-cheap-judgment-is-not/#the-evidence-from-building-ctx","level":2,"title":"The Evidence from Building ctx","text":"

    I did not arrive at this conclusion theoretically.

    I arrived at it by building a tool with an AI agent for three weeks and watching exactly where a human touch mattered.

    ","path":["Code Is Cheap. Judgment Is Not."],"tags":[]},{"location":"blog/2026-02-17-code-is-cheap-judgment-is-not/#yolo-mode-proved-production-is-cheap","level":3,"title":"YOLO Mode Proved Production Is Cheap","text":"

    In Building ctx Using ctx, I documented the YOLO phase: auto-accept everything, let the AI ship features at full speed. It produced 14 commands in a week. Impressive output.

    The code worked. The architecture drifted. Magic strings accumulated. Conventions diverged. The AI was producing at a pace no human could match, and every artifact it produced was a small bet that nobody was evaluating.

    Production without judgment is not velocity. It is debt accumulation at breakneck speed.

    ","path":["Code Is Cheap. Judgment Is Not."],"tags":[]},{"location":"blog/2026-02-17-code-is-cheap-judgment-is-not/#the-31-ratio-proved-judgment-has-a-cadence","level":3,"title":"The 3:1 Ratio Proved Judgment Has a Cadence","text":"

    In The 3:1 Ratio, the git history told the story:

    Three sessions of forward momentum followed by one session of deliberate consolidation. The consolidation session is where the human applies judgment: reviewing what the AI built, catching drift, realigning conventions.

    The AI does the refactoring. The human decides what to refactor and when to stop.

    Without the human, the AI will refactor forever, improving things that do not matter and missing things that do.

    ","path":["Code Is Cheap. Judgment Is Not."],"tags":[]},{"location":"blog/2026-02-17-code-is-cheap-judgment-is-not/#the-attention-budget-proved-framing-is-scarce","level":3,"title":"The Attention Budget Proved Framing Is Scarce","text":"

    In The Attention Budget, I explained why more context makes AI worse, not better. Every token competes for attention: Dump everything in and the AI sees nothing clearly.

    This is a framing problem: The human's job is to decide what the AI should focus on: what to include, what to exclude, what to emphasize.

    ctx agent --budget 4000 is not just a CLI flag: It is a forcing function for human judgment about relevance.

    The AI processes. The human curates.

    ","path":["Code Is Cheap. Judgment Is Not."],"tags":[]},{"location":"blog/2026-02-17-code-is-cheap-judgment-is-not/#skills-design-proved-taste-is-load-bearing","level":3,"title":"Skills Design Proved Taste Is Load-Bearing","text":"

    The skill trilogy (You Can't Import Expertise, The Anatomy of a Skill That Works) showed that the difference between a useful skill and a useless one is not craftsmanship:

    It is taste.

    A well-crafted skill with the wrong focus is worse than no skill at all: It consumes the attention budget with generic advice while the project-specific problems go unchecked.

    The E/A/R framework (Expert, Activation, Redundant) is a judgment tool: The AI cannot apply it to itself. The human evaluates what the AI already knows, what it needs to be told, and what is noise.

    ","path":["Code Is Cheap. Judgment Is Not."],"tags":[]},{"location":"blog/2026-02-17-code-is-cheap-judgment-is-not/#automation-discipline-proved-restraint-is-a-skill","level":3,"title":"Automation Discipline Proved Restraint Is a Skill","text":"

    In Not Everything Is a Skill, the lesson was that the urge to automate is not the need to automate. A useful prompt does not automatically deserve to become a slash command.

    The human applies judgment about frequency, stability, and attention cost.

    The AI can build the skill. Only the human can decide whether it should exist.

    ","path":["Code Is Cheap. Judgment Is Not."],"tags":[]},{"location":"blog/2026-02-17-code-is-cheap-judgment-is-not/#defense-in-depth-proved-boundaries-require-judgment","level":3,"title":"Defense in Depth Proved Boundaries Require Judgment","text":"

    In Defense in Depth, the entire security model for unattended AI agents came down to: markdown is not a security boundary. Telling an AI \"don't do bad things\" is production (of instructions). Setting up an unprivileged user in a network-isolated container is judgment (about risk).

    The AI follows instructions. The human decides which instructions are enforceable and which are \"wishful thinking\".

    ","path":["Code Is Cheap. Judgment Is Not."],"tags":[]},{"location":"blog/2026-02-17-code-is-cheap-judgment-is-not/#parallel-agents-proved-scale-amplifies-the-gap","level":3,"title":"Parallel Agents Proved Scale Amplifies the Gap","text":"

    In Parallel Agents and Merge Debt, the lesson was that multiplying agents multiplies output. But it also multiplies the need for judgment:

    Five agents running in parallel produce five sessions of drift in one clock hour. The human who can frame tasks cleanly, define narrow acceptance criteria, and evaluate results quickly becomes the limiting factor.

    More agents do not reduce the need for judgment. They increase it.

    ","path":["Code Is Cheap. Judgment Is Not."],"tags":[]},{"location":"blog/2026-02-17-code-is-cheap-judgment-is-not/#the-two-reactions","level":2,"title":"The Two Reactions","text":"

    When AI floods the system with cheap output, two things happen:

    Those who only produce: panic. If your value proposition is \"I write code,\" and an AI writes code faster, cheaper, and at higher volume, then the math is unfavorable. Not because AI took your job, but because your job was never the code. It was the judgment around the code, and you were not exercising it.

    Those who direct: accelerate. If your value proposition is \"I know what to build, in what order, to what standard,\" then AI is the best thing that ever happened to you: Production is no longer the bottleneck: Your ability to frame, sequence, evaluate, and course-correct is now the limiting factor on throughput.

    The gap between these two is not talent: It is the awareness of where the value lives.

    ","path":["Code Is Cheap. Judgment Is Not."],"tags":[]},{"location":"blog/2026-02-17-code-is-cheap-judgment-is-not/#what-this-means-in-practice","level":2,"title":"What This Means in Practice","text":"

    If you are an engineer reading this, the actionable insight is not \"learn prompt engineering\" or \"master AI tools.\" It is:

    Get better at the things AI cannot do.

    AI does this well You need to do this Generate code Frame the problem Write tests Define acceptance criteria Scaffold projects Sequence the work Fix bugs from stack traces Evaluate tradeoffs Produce volume Exercise restraint Follow instructions Decide which instructions matter

    The skills on the right column are not new. They are the same skills that have always separated senior engineers from junior ones.

    AI did not create the distinction; it just made it load-bearing.

    ","path":["Code Is Cheap. Judgment Is Not."],"tags":[]},{"location":"blog/2026-02-17-code-is-cheap-judgment-is-not/#if-anything-i-feel-empowered","level":2,"title":"If Anything, I Feel Empowered","text":"

    I will end with something personal.

    I am not worried: I am empowered.

    Before ctx, I could think faster than I could produce:

    • Ideas sat in a queue.
    • The bottleneck was always \"I know what to build, but building it takes too long.\"

    Now the bottleneck is gone. Poof!

    • Production is cheap.
    • The queue is clearing.
    • The limiting factor is how fast I can think, not how fast I can type.

    That is not a threat: That is the best force multiplier I've ever had.

    The people who feel threatened are confusing the accelerator for the replacement:

    AI does not replace the conductor; it gives them a bigger orchestra.

    If You Remember One Thing from This Post...

    Code is cheap. Judgment is not.

    AI replaces unstructured effort, not directed expertise. The skills that matter now are the same skills that have always mattered: taste, framing, sequencing, and the discipline to stop.

    The difference is that now, for the first time, those skills are the only bottleneck left.

    ","path":["Code Is Cheap. Judgment Is Not."],"tags":[]},{"location":"blog/2026-02-17-code-is-cheap-judgment-is-not/#the-arc","level":2,"title":"The Arc","text":"

    This post is a retrospective. It synthesizes the thread running through every previous entry in this blog:

    • Building ctx Using ctx showed that production without direction creates debt
    • Refactoring with Intent showed that slowing down is not the opposite of progress
    • The Attention Budget showed that curation outweighs volume
    • The skill trilogy showed that taste determines whether a tool helps or hinders
    • Not Everything Is a Skill showed that restraint is a skill in itself
    • Defense in Depth showed that instructions are not boundaries
    • The 3:1 Ratio showed that judgment has a schedule
    • Parallel Agents showed that scale amplifies the gap between production and judgment
    • Context as Infrastructure showed that the system you build for context is infrastructure, not conversation

    From YOLO mode to defense in depth, the pattern is the same:

    • Production is the easy part;
    • Judgment is the hard part;
    • AI changed the ratio, not the rule.

    This post synthesizes the thread running through every previous entry in this blog. The evidence is drawn from three weeks of building ctx with AI assistance, the decisions recorded in DECISIONS.md, the learnings captured in LEARNINGS.md, and the git history that tracks where the human mattered and where the AI ran unsupervised.

    See also: When a System Starts Explaining Itself -- what happens after the arc: the first field notes from the moment the system starts compounding in someone else's hands.

    ","path":["Code Is Cheap. Judgment Is Not."],"tags":[]},{"location":"blog/2026-02-17-context-as-infrastructure/","level":1,"title":"Context as Infrastructure","text":"","path":["Context as Infrastructure"],"tags":[]},{"location":"blog/2026-02-17-context-as-infrastructure/#why-your-ai-needs-a-filesystem-not-a-prompt","level":2,"title":"Why Your AI Needs a Filesystem, Not a Prompt","text":"

    Jose Alekhinne / February 17, 2026

    Where Does Your AI's Knowledge Live between Sessions?

    If the answer is \"in a prompt I paste at the start,\" you are treating context as a consumable. Something assembled, used, and discarded.

    What if you treated it as infrastructure instead?

    This post synthesizes a thread that has been running through every ctx blog post; from the origin story to the attention budget to the discipline release. The thread is this: context is not a prompt problem. It is an infrastructure problem. And the tools we build for it should look more like filesystems than clipboard managers.

    ","path":["Context as Infrastructure"],"tags":[]},{"location":"blog/2026-02-17-context-as-infrastructure/#the-prompt-paradigm","level":2,"title":"The Prompt Paradigm","text":"

    Most AI-assisted development treats context as ephemeral:

    1. Start a session.
    2. Paste your system prompt, your conventions, your current task.
    3. Work.
    4. Session ends. Everything evaporates.
    5. Next session: paste again.

    This works for short interactions. For sustained development (where decisions compound over days and weeks) it fails in three ways:

    It does not persist: A decision made on Tuesday must be re-explained on Wednesday. A learning captured in one session is invisible to the next.

    It does not scale: As the project grows, the \"paste everything\" approach hits the context window ceiling. You start triaging what to include, often cutting exactly the context that would have prevented the next mistake.

    It does not compose: A system prompt is a monolith. You cannot load part of it, update one section, or share a subset with a different workflow. It is all or nothing.

    The Copy-Paste Tax

    Every session that starts with pasting a prompt is paying a tax:

    The human time to assemble the context, the risk of forgetting something, and the silent assumption that yesterday's prompt is still accurate today.

    Over 70+ sessions, that tax compounds into a significant maintenance burden: One that most developers absorb without questioning it.

    ","path":["Context as Infrastructure"],"tags":[]},{"location":"blog/2026-02-17-context-as-infrastructure/#the-infrastructure-paradigm","level":2,"title":"The Infrastructure Paradigm","text":"

    ctx takes a different approach:

    Context is not assembled per-session; it is maintained as persistent files in a .context/ directory:

    .context/\n  CONSTITUTION.md     # Inviolable rules\n  TASKS.md            # Current work items\n  CONVENTIONS.md      # Code patterns and standards\n  DECISIONS.md        # Architectural choices with rationale\n  LEARNINGS.md        # Gotchas and lessons learned\n  ARCHITECTURE.md     # System structure\n  GLOSSARY.md         # Domain terminology\n  AGENT_PLAYBOOK.md   # Operating manual for agents\n  journal/            # Enriched session summaries\n  archive/            # Completed work, cold storage\n
    • Each file has a single purpose;
    • Each can be loaded independently;
    • Each persists across sessions, tools, and team members.

    This is not a novel idea. It is the same idea behind every piece of infrastructure software engineers already use:

    Traditional Infrastructure ctx Equivalent Database .context/*.md files Configuration files CONSTITUTION.md Environment variables .contextrc Log files journal/ Schema migrations Decision records Deployment manifests AGENT_PLAYBOOK.md

    The parallel is not metaphorical. Context files are infrastructure:

    • They are versioned (git tracks them);
    • They are structured (Markdown with conventions);
    • They have schemas (required fields for decisions and learnings);
    • And they have lifecycle management (archiving, compaction, indexing).
    ","path":["Context as Infrastructure"],"tags":[]},{"location":"blog/2026-02-17-context-as-infrastructure/#separation-of-concerns","level":2,"title":"Separation of Concerns","text":"

    The most important design decision in ctx is not any individual feature. It is the separation of context into distinct files with distinct purposes.

    A single CONTEXT.md file would be simpler to implement. It would also be impossible to maintain.

    Why? Because different types of context have different lifecycles:

    Context Type Changes Read By Load When Constitution Rarely Every session Always Tasks Every session Session start Always Conventions Weekly Before coding When writing code Decisions When decided When questioning When revisiting Learnings When learned When stuck When debugging Journal Every session Rarely When investigating

    Loading everything into every session wastes the attention budget on context that is irrelevant to the current task. Loading nothing forces the AI to operate blind.

    Separation of concerns allows progressive disclosure:

    Load the minimum that matters for this moment, with the option to load more when needed.

    # Session start: load the essentials\nctx agent --budget 4000\n\n# Deep investigation: load everything\ncat .context/DECISIONS.md\ncat .context/journal/2026-02-05-*.md\n

    The filesystem is the index. File names, directory structure, and timestamps encode relevance. The AI does not need to read every file; it needs to know where to look.

    ","path":["Context as Infrastructure"],"tags":[]},{"location":"blog/2026-02-17-context-as-infrastructure/#the-two-tier-persistence-model","level":2,"title":"The Two-Tier Persistence Model","text":"

    ctx uses two tiers of persistence, and the distinction is architectural:

    Tier Purpose Location Token Cost Curated Quick context reload .context/*.md Low (budgeted) Full dump Safety net, archaeology .context/journal/*.md Zero (not auto-loaded)

    The curated tier is what the AI sees at session start. It is optimized for signal density:

    • Structured entries,
    • Indexed tables,
    • Reverse-chronological order (newest first, so the most relevant content survives truncation).

    The full dump tier is for humans and for deep investigation. It contains everything: Enriched journals, archived tasks...

    It is never auto-loaded because its volume would destroy attention density.

    This two-tier model is analogous to how traditional systems separate hot and cold storage:

    • The hot path (curated context) is optimized for read performance (measured not in milliseconds, but in tokens consumed per unit of useful information).
    • The cold path (journal) is optimized for completeness.

    Nothing Is Ever Truly Lost

    The full dump tier means that context does not need to be perfect: It just needs to be findable.

    A decision that was not captured in DECISIONS.md can be recovered from the session transcript where it was discussed.

    A learning that was not formalized can be found in the journal entry from that day.

    The curated tier is the fast path: The full dump tier is the safety net.

    ","path":["Context as Infrastructure"],"tags":[]},{"location":"blog/2026-02-17-context-as-infrastructure/#decision-records-as-first-class-citizens","level":2,"title":"Decision Records as First-Class Citizens","text":"

    One of the patterns that emerged from ctx's own development is the power of structured decision records.

    v0.1.0 allowed adding decisions as one-liners:

    ctx add decision \"Use PostgreSQL\"\n

    v0.2.0 enforced structure:

    ctx add decision \"Use PostgreSQL\" \\\n  --context \"Need a reliable database for user data\" \\\n  --rationale \"ACID compliance, team familiarity\" \\\n  --consequence \"Need connection pooling, team training\"\n

    The difference is not cosmetic:

    • A one-liner decision teaches the AI what was decided.
    • A structured decision teaches it why; and why is what prevents the AI from unknowingly reversing the decision in a future session.

    This is infrastructure thinking:

    Decisions are not notes. They are records with required fields, just like database rows have schemas.

    The enforcement exists because incomplete records are worse than no records: They create false confidence that the context is captured when it is not.

    ","path":["Context as Infrastructure"],"tags":[]},{"location":"blog/2026-02-17-context-as-infrastructure/#the-ide-is-the-interface-decision","level":2,"title":"The \"IDE Is the Interface\" Decision","text":"

    Early in ctx's development, there was a temptation to build a custom UI: a web dashboard for browsing sessions, editing context, viewing analytics.

    The decision was no. The IDE is the interface.

    # This is the ctx \"UI\":\ncode .context/\n

    This decision was not about minimalism for its own sake. It was about recognizing that .context/ files are just files; and files have a mature, well-understood infrastructure:

    • Version control: git diff .context/DECISIONS.md shows exactly what changed and when.
    • Search: Your IDE's full-text search works across all context files.
    • Editing: Markdown in any editor, with preview, spell check, and syntax highlighting.
    • Collaboration: Pull requests on context files work the same as pull requests on code.

    Building a custom UI would have meant maintaining a parallel infrastructure that duplicates what every IDE already provides:

    It would have introduced its own bugs, its own update cycle, and its own learning curve.

    The filesystem is not a limitation: It is the most mature, most composable, most portable infrastructure available.

    Context Files in Git

    Because .context/ lives in the repository, context changes are part of the commit history.

    A decision made in commit abc123 is as traceable as a code change in the same commit.

    This is not possible with prompt-based context, which exists outside version control entirely.

    ","path":["Context as Infrastructure"],"tags":[]},{"location":"blog/2026-02-17-context-as-infrastructure/#progressive-disclosure-for-ai","level":2,"title":"Progressive Disclosure for AI","text":"

    The concept of progressive disclosure comes from human interface design: show the user the minimum needed to make progress, with the option to drill deeper.

    ctx applies the same principle to AI context:

    Level What the AI Sees Token Cost When Level 0 ctx status (one-line summary) ~100 Quick check Level 1 ctx agent --budget 4000 ~4,000 Normal work Level 2 ctx agent --budget 8000 ~8,000 Complex tasks Level 3 Direct file reads 10,000+ Deep investigation

    Each level trades tokens for depth. Level 1 is sufficient for most work: the AI knows the active tasks, the key conventions, and the recent decisions. Level 3 is for archaeology: understanding why a decision was made three weeks ago, or finding a pattern in the session history.

    The explicit --budget flag is the mechanism that makes this work:

    Without it, the default behavior would be to load everything (because more context feels safer), which destroys the attention density that makes the loaded context useful.

    The constraint is the feature: A budget of 4,000 tokens forces ctx to prioritize ruthlessly: constitution first (always full), then tasks and conventions (budget-capped), then decisions and learnings scored by recency and relevance to active tasks. Entries that don't fit get title-only summaries rather than being silently dropped.

    ","path":["Context as Infrastructure"],"tags":[]},{"location":"blog/2026-02-17-context-as-infrastructure/#the-philosophical-shift","level":2,"title":"The Philosophical Shift","text":"

    The shift from \"context as prompt\" to \"context as infrastructure\" changes how you think about AI-assisted development:

    Prompt Thinking Infrastructure Thinking \"What do I paste today?\" \"What has changed since yesterday?\" \"How do I fit everything in?\" \"What's the minimum that matters?\" \"The AI forgot my conventions\" \"The conventions are in a file\" \"I need to re-explain\" \"I need to update the record\" \"This session is getting slow\" \"Time to compact and archive\"

    The first column treats AI interaction as a conversation. The second treats it as a system: One that can be maintained, optimized, and debugged.

    Context is not something you give the AI. It is something you maintain: Like a database, like a config file, like any other piece of infrastructure that a running system depends on.

    ","path":["Context as Infrastructure"],"tags":[]},{"location":"blog/2026-02-17-context-as-infrastructure/#beyond-ctx-the-principles","level":2,"title":"Beyond ctx: The Principles","text":"

    The patterns that ctx implements are not specific to ctx. They are applicable to any project that uses AI-assisted development:

    1. Separate context by purpose: Do not put everything in one file. Different types of information have different lifecycles and different relevance windows.
    2. Make context persistent: If a decision matters, write it down in a file that survives the session. If a learning matters, capture it with structure.
    3. Budget explicitly: Know how much context you are loading and whether it is worth the attention cost.
    4. Use the filesystem: File names, directory structure, and timestamps are metadata that the AI can navigate. A well-organized directory is an index that costs zero tokens to maintain.
    5. Version your context: Put context files in git. Changes to decisions are as important as changes to code.
    6. Design for degradation: Sessions will get long. Attention will dilute. Build mechanisms (compaction, archiving, cooldowns) that make degradation visible and manageable.

    These are not ctx features. They are infrastructure principles that happen to be implemented as a CLI tool. Any team could implement them with nothing more than a directory convention and a few shell scripts.

    The tool is a convenience: The principles are what matter.

    If You Remember One Thing from This Post...

    Prompts are conversations. Infrastructure persists.

    Your AI does not need a better prompt. It needs a filesystem:

    versioned, structured, budgeted, and maintained.

    The best context is the context that was there before you started the session.

    ","path":["Context as Infrastructure"],"tags":[]},{"location":"blog/2026-02-17-context-as-infrastructure/#the-arc","level":2,"title":"The Arc","text":"

    This post is the architectural companion to the Attention Budget. That post explained why context must be curated (token economics). This one explains how to structure it (filesystem, separation of concerns, persistence tiers).

    Together with Code Is Cheap, Judgment Is Not, they form a trilogy about what matters in AI-assisted development:

    • Attention Budget: the resource you're managing
    • Context as Infrastructure: the system you build to manage it
    • Code Is Cheap: the human skill that no system replaces

    And the practices that keep it all honest:

    • The 3:1 Ratio: the cadence for maintaining both code and context
    • IRC as Context: the historical precedent: stateless protocols have always needed stateful wrappers

    This post synthesizes ideas from across the ctx blog series: the attention budget primitive, the two-tier persistence model, the IDE decision, and the progressive disclosure pattern. The principles are drawn from three weeks of building ctx and 70+ sessions of treating context as infrastructure rather than conversation.

    See also: When a System Starts Explaining Itself: what happens when this infrastructure starts compounding in someone else's environment.

    ","path":["Context as Infrastructure"],"tags":[]},{"location":"blog/2026-02-17-parallel-agents-merge-debt-and-the-myth-of-overnight-progress/","level":1,"title":"Parallel Agents, Merge Debt, and the Myth of Overnight Progress","text":"","path":["Parallel Agents, Merge Debt, and the Myth of Overnight Progress"],"tags":[]},{"location":"blog/2026-02-17-parallel-agents-merge-debt-and-the-myth-of-overnight-progress/#when-the-screen-looks-like-progress","level":2,"title":"When the Screen Looks like Progress","text":"

    Jose Alekhinne / 2026-02-17

    How Many Terminals Are Too Many?

    You discover agents can run in parallel.

    So you open ten...

    ...Then twenty.

    The fans spin. Tokens burn. The screen looks like progress.

    It is NOT progress.

    There is a phase every builder goes through:

    • The tooling gets fast enough.
    • The model gets good enough.
    • The temptation becomes irresistible:
      • more agents, more output, faster delivery.

    So you open terminals. You spawn agents. You watch tokens stream across multiple windows simultaneously, and it feels like multiplication.

    It is not multiplication.

    It is merge debt being manufactured in real time.

    The ctx Manifesto says it plainly:

    Activity is not impact. Code is not progress.

    This post is about what happens when you take that seriously in the context of parallel agent workflows.

    ","path":["Parallel Agents, Merge Debt, and the Myth of Overnight Progress"],"tags":[]},{"location":"blog/2026-02-17-parallel-agents-merge-debt-and-the-myth-of-overnight-progress/#the-unit-of-scale-is-not-the-agent","level":2,"title":"The Unit of Scale Is Not the Agent","text":"

    The naive model says:

    More agents -> more output -> faster delivery

    The production model says:

    Clean context boundaries -> less interference -> higher throughput

    Parallelism only works when the cognitive surfaces do not overlap.

    If two agents touch the same files, you did not create parallelism: You created a conflict generator.

    They will:

    • Revert each other's changes;
    • Relint each other's formatting;
    • Refactor the same function in different directions.

    You watch with 🍿. Nothing ships.

    This is the same insight from the worktrees post: partition by blast radius, not by priority.

    Two tasks that touch the same files belong in the same track, no matter how important the other one is. The constraint is file overlap.

    Everything else is scheduling.

    ","path":["Parallel Agents, Merge Debt, and the Myth of Overnight Progress"],"tags":[]},{"location":"blog/2026-02-17-parallel-agents-merge-debt-and-the-myth-of-overnight-progress/#the-five-agent-rule","level":2,"title":"The \"Five Agent\" Rule","text":"

    In practice there is a ceiling.

    Around five or six concurrent agents:

    • Token burn becomes noticeable;
    • Supervision cost rises;
    • Coordination noise increases;
    • Returns flatten.

    This is not a model limitation: This is a human merge bandwidth limitation.

    You are the bottleneck, not the silicon.

    The attention budget applies to you too:

    Every additional agent is another stream of output you need to comprehend, verify, and integrate. Your attention density drops the same way the model's does when you overload its context window.

    Five agents producing verified, mergeable change beats twenty agents producing merge conflicts you spend a day untangling.

    ","path":["Parallel Agents, Merge Debt, and the Myth of Overnight Progress"],"tags":[]},{"location":"blog/2026-02-17-parallel-agents-merge-debt-and-the-myth-of-overnight-progress/#role-separation-beats-file-locking","level":2,"title":"Role Separation Beats File Locking","text":"

    Real parallelism comes from task topology, not from tooling.

    Good:

    Agent Role Touches 1 Documentation docs/, hack/ 2 Security scan Read-only audit 3 Implementation internal/cli/ 4 Enhancement requests Read-only, files issues

    Bad:

    • Four agents editing the same implementation surface

    Context Is the Boundary

    • The goal is not to keep agents busy.
    • The goal is to keep contexts isolated.

    This is what the codebase audit got right:

    • Eight agents, all read-only, each analyzing a different dimension.
    • Zero file overlap.
    • Zero merge conflicts.
    • Eight reports that composed cleanly because no agent interfered with another.
    ","path":["Parallel Agents, Merge Debt, and the Myth of Overnight Progress"],"tags":[]},{"location":"blog/2026-02-17-parallel-agents-merge-debt-and-the-myth-of-overnight-progress/#when-terminals-stop-scaling","level":2,"title":"When Terminals Stop Scaling","text":"

    There is a moment when more windows stop helping.

    That is the signal. Not to add orchestration. But to introduce:

    git worktree\n

    Because now you are no longer parallelizing execution; you are parallelizing state.

    State Scales, Windows Don't

    • State isolation is the real scaling.
    • Window multiplication is theater.

    The worktrees post covers the mechanics:

    • Sibling directories;
    • Branch naming;
    • The inevitable TASKS.md conflicts;
    • The 3-4 worktree ceiling.

    The principle underneath is older than git:

    Shared mutable state is the enemy of parallelism.

    Always has been.

    Always will be.

    ","path":["Parallel Agents, Merge Debt, and the Myth of Overnight Progress"],"tags":[]},{"location":"blog/2026-02-17-parallel-agents-merge-debt-and-the-myth-of-overnight-progress/#the-overnight-loop-illusion","level":2,"title":"The Overnight Loop Illusion","text":"

    Autonomous night runs are impressive.

    You sleep. The machine produces thousands of lines.

    In the morning:

    • You read;
    • You untangle;
    • You reconstruct intent;
    • You spend a day making it shippable.

    In retrospect, nothing was accelerated.

    The bottleneck moved from typing to comprehension.

    The Comprehension Tax

    If understanding the output costs more than producing it, the loop is a net loss.

    Progress is not measured in generated code.

    Progress is measured in verified, mergeable change.

    The ctx Manifesto calls this out directly:

    The Scoreboard

    Verified reality is the scoreboard.

    The only truth that compounds is verified change in the real world.

    An overnight run that produces 3,000 lines nobody reviewed is not 3,000 lines of progress: It is 3,000 lines of liability until someone verifies every one of them.

    And that someone is (insert drumroll here) you:

    The same bottleneck that was supposedly being bypassed.

    ","path":["Parallel Agents, Merge Debt, and the Myth of Overnight Progress"],"tags":[]},{"location":"blog/2026-02-17-parallel-agents-merge-debt-and-the-myth-of-overnight-progress/#skills-that-fight-the-platform","level":2,"title":"Skills That Fight the Platform","text":"

    Most marketplace skills are prompt decorations:

    • They rephrase what the base model already knows;
    • They increase token usage;
    • They reduce clarity;
    • They introduce behavioral drift.

    We covered this in depth in Skills That Fight the Platform: judgment suppression, redundant guidance, guilt-tripping, phantom dependencies, universal triggers: Five patterns that make agents worse, not better.

    A real skill does one of these:

    • Encodes workflow state;
    • Enforces invariants;
    • Reduces decision branching.

    Everything else is packaging.

    The anatomy post established the criteria: quality gates, negative triggers, examples over rules, skills as contracts.

    If a skill doesn't meet those criteria...

    • It is either a recipe (document it in hack/);
    • Or noise (delete it);
    • There is no third option.
    ","path":["Parallel Agents, Merge Debt, and the Myth of Overnight Progress"],"tags":[]},{"location":"blog/2026-02-17-parallel-agents-merge-debt-and-the-myth-of-overnight-progress/#hooks-are-context-that-execute","level":2,"title":"Hooks Are Context That Execute","text":"

    The most valuable skills are not prompts:

    They are constraints embedded in the toolchain.

    For example: The agent cannot push.

    git push becomes:

    Stop. A human reviews first.

    A commit without verification becomes:

    Did you run tests? Did you run linters? What exactly are you shipping?

    This is not safety theater; this is intent preservation.

    The thing the ctx Manifesto calls \"encoding intent into the environment.\"

    The Eight Ways a Hook Can Talk catalogued the full spectrum: from silent enrichment to hard blocks.

    The key insight was that hooks are not just safety rails: They are context that survives execution.

    They are the difference between an agent that remembers the rules and one that enforces them.

    ","path":["Parallel Agents, Merge Debt, and the Myth of Overnight Progress"],"tags":[]},{"location":"blog/2026-02-17-parallel-agents-merge-debt-and-the-myth-of-overnight-progress/#complexity-is-a-tax","level":2,"title":"Complexity Is a Tax","text":"

    Every extra layer adds cognitive weight:

    • Orchestration frameworks;
    • Meta agents;
    • Autonomous planning systems...

    If a single terminal works, stay there.

    If five isolated agents work, stop there.

    Add structure only when a real bottleneck appears.

    NOT when an influencer suggests one.

    This is the same lesson from Not Everything Is a Skill:

    The best automation decision is sometimes not to automate.

    A recipe in a Markdown file costs nothing until you use it.

    An orchestration framework costs attention on every run, whether it helps or not.

    ","path":["Parallel Agents, Merge Debt, and the Myth of Overnight Progress"],"tags":[]},{"location":"blog/2026-02-17-parallel-agents-merge-debt-and-the-myth-of-overnight-progress/#literature-is-throughput","level":2,"title":"Literature Is Throughput","text":"

    Clear writing is not aesthetic: It is compression.

    Better articulation means:

    • Fewer tokens;
    • Fewer misinterpretations;
    • Faster convergence.

    The attention budget taught us that context is a finite resource with a quadratic cost.

    Language determines how fast you spend context.

    A well-written task description that takes 50 tokens outperforms a rambling one that takes 200: Not just because it is cheaper, but because it leaves more headroom for the model to actually think.

    Literature Is NOT Overrated

    • Attention is a finite budget.
    • Language determines how fast you spend it.
    ","path":["Parallel Agents, Merge Debt, and the Myth of Overnight Progress"],"tags":[]},{"location":"blog/2026-02-17-parallel-agents-merge-debt-and-the-myth-of-overnight-progress/#the-real-metric","level":2,"title":"The Real Metric","text":"

    The real metric is not:

    • Lines generated;
    • Agents running;
    • Tasks completed while you sleep.

    But:

    Time from idea to verified, mergeable, production change.

    Everything else is motion.

    The entire blog series has been circling this point:

    • The attention budget was about spending tokens wisely.
    • The skills trilogy was about not wasting them on prompt decoration.
    • The worktrees post was about multiplying throughput without multiplying interference.
    • The discipline release was about what a release looks like when polish outweighs features: 3:1.

    Every post has arrived (and made me converge) at the same answer so far:

    The metric is a verified change, not generated output.

    ","path":["Parallel Agents, Merge Debt, and the Myth of Overnight Progress"],"tags":[]},{"location":"blog/2026-02-17-parallel-agents-merge-debt-and-the-myth-of-overnight-progress/#ctx-was-never-about-spawning-more-minds","level":2,"title":"ctx Was Never about Spawning More Minds","text":"

    ctx is about:

    • Isolating context;
    • Preserving intent;
    • Making progress composable.

    Parallel agents are powerful. But only when you respect the boundaries that make parallelism real.

    Otherwise, you are not scaling cognition; you are scaling interference.

    The ctx Manifesto's thesis holds:

    Without ctx, intelligence resets. With ctx, creation compounds.

    Compounding requires structure.

    Structure requires boundaries.

    Boundaries require the discipline to stop adding agents when five is enough.

    ","path":["Parallel Agents, Merge Debt, and the Myth of Overnight Progress"],"tags":[]},{"location":"blog/2026-02-17-parallel-agents-merge-debt-and-the-myth-of-overnight-progress/#practical-summary","level":2,"title":"Practical Summary","text":"

    A production workflow tends to converge to this:

    Practice Why Stay in one terminal unless necessary Minimize coordination overhead Spawn a small number of agents with non-overlapping responsibilities Conflict avoidance > parallelism Isolate state with worktrees when surfaces grow State isolation is real scaling Encode verification into hooks Intent that survives execution Avoid marketplace prompt cargo cults Skills are contracts, not decorations Measure merge cost, not generation speed The metric is verified change

    This is slower to watch. Faster to ship.

    If You Remember One Thing from This Post...

    Progress is not what the machine produces while you sleep.

    Progress is what survives contact with the main branch.

    See also: Code Is Cheap. Judgment Is Not.: the argument that production capacity was never the bottleneck, and why multiplying agents amplifies the need for human judgment rather than replacing it.

    ","path":["Parallel Agents, Merge Debt, and the Myth of Overnight Progress"],"tags":[]},{"location":"blog/2026-02-17-the-3-1-ratio/","level":1,"title":"The 3:1 Ratio","text":"","path":["The 3:1 Ratio"],"tags":[]},{"location":"blog/2026-02-17-the-3-1-ratio/#scheduling-consolidation-in-ai-development","level":2,"title":"Scheduling Consolidation in AI Development","text":"

    Jose Alekhinne / February 17, 2026

    How Often Should You Stop Building and Start Cleaning?

    Every developer knows technical debt exists. Every developer postpones dealing with it.

    AI-assisted development makes the problem worse; not because the AI writes bad code, but because it writes code so fast that drift accumulates before you notice.

    In Refactoring with Intent, I mentioned a ratio that worked for me: 3:1. Three YOLO sessions create enough surface area to reveal patterns. The fourth session turns those patterns into structure.

    That was an observation. This post is the evidence.

    ","path":["The 3:1 Ratio"],"tags":[]},{"location":"blog/2026-02-17-the-3-1-ratio/#the-observation","level":2,"title":"The Observation","text":"

    During the first two weeks of building ctx, I noticed a rhythm in my own productivity. Feature sessions felt great: new commands, new capabilities, visible progress...

    ...but after three of them, things would start to feel sticky: variable names that almost made sense, files that had grown past their purpose, patterns that repeated without being formalized.

    The fourth session (when I stopped adding and started cleaning) was always the most painful to start and the most satisfying to finish.

    It was also the one that made the next three feature sessions faster.

    ","path":["The 3:1 Ratio"],"tags":[]},{"location":"blog/2026-02-17-the-3-1-ratio/#the-evidence-git-history","level":2,"title":"The Evidence: Git History","text":"

    The ctx git history between January 20 and February 7 tells a clear story when you categorize commits:

    Week Feature commits Consolidation commits Ratio Jan 20-26 18 5 3.6:1 Jan 27-Feb 1 14 6 2.3:1 Feb 2-7 15 35+ 0.4:1

    The first week was pure YOLO: Almost four feature commits for every consolidation commit. The codebase grew fast.

    The second week started to self-correct. The ratio dropped as refactoring sessions became necessary: Not scheduled, but forced by friction.

    The third week inverted entirely: v0.3.0 was almost entirely consolidation: the skill migration, the sweep, the documentation standardization. Thirty-five quality commits against fifteen features.

    The debt from weeks one and two was paid in week three.

    The Compounding Problem

    Consolidation debt compounds.

    Week one's drift doesn't just persist into week two: It accelerates, because new features are built on top of drifted patterns.

    By week three, the cost of consolidation was higher than it would have been if spread evenly.

    ","path":["The 3:1 Ratio"],"tags":[]},{"location":"blog/2026-02-17-the-3-1-ratio/#what-drift-actually-looks-like","level":2,"title":"What Drift Actually Looks Like","text":"

    \"Drift\" sounds abstract. Here is what it looked like concretely in the ctx codebase after three weeks of feature-heavy development:

    ","path":["The 3:1 Ratio"],"tags":[]},{"location":"blog/2026-02-17-the-3-1-ratio/#predicate-naming","level":3,"title":"Predicate Naming","text":"

    Convention says boolean functions should be named HasX, IsX, CanX. After three feature sprints:

    // What accumulated:\nfunc CheckIfEnabled() bool  // should be Enabled\nfunc ValidateFormat() bool  // should be ValidFormat\nfunc TestConnection() bool  // should be Connects\nfunc VerifyExists() bool    // should be Exists or HasFile\nfunc EnsureReady() bool     // should be Ready\n

    Five violations. Not bugs, but friction that compounds every time someone (human or AI) reads the code and has to infer the naming convention from inconsistent examples.

    ","path":["The 3:1 Ratio"],"tags":[]},{"location":"blog/2026-02-17-the-3-1-ratio/#magic-strings","level":3,"title":"Magic Strings","text":"
    // Week 1: acceptable prototype\nif entry.Type == \"task\" {\n    filename = \"TASKS.md\"\n}\n\n// Week 3: same pattern in 7+ files\n// Now it's a maintenance liability\n

    When the same literal appears in seven files, changing it means finding all seven. Missing one means a silent runtime bug. Constants exist to prevent exactly this. But during feature velocity, nobody stops to extract them.

    Refactoring with Intent documented the constants consolidation that cleaned this up. The 3:1 ratio is the practice that prevents it from accumulating again.

    ","path":["The 3:1 Ratio"],"tags":[]},{"location":"blog/2026-02-17-the-3-1-ratio/#hardcoded-permissions","level":3,"title":"Hardcoded Permissions","text":"
    os.WriteFile(path, data, 0644) // 80+ instances\nos.MkdirAll(path, 0755)        // scattered across packages\n

    Eighty-plus instances of hardcoded file permissions. Not wrong, but if I ever need to change the default (and I did, for hook scripts that need execute permissions), it means a codebase-wide search.

    Drift Is Not Bugs

    None of these are bugs. The code works. Tests pass.

    But drift creates false confidence: the codebase looks consistent until you try to change something and discover that five different conventions exist for the same concept.

    ","path":["The 3:1 Ratio"],"tags":[]},{"location":"blog/2026-02-17-the-3-1-ratio/#why-you-cannot-consolidate-on-day-one","level":2,"title":"Why You Cannot Consolidate on Day One","text":"

    The temptation is to front-load quality: write all the conventions, enforce all the checks, prevent all the drift before it happens.

    This fails for two reasons.

    First, you do not know what will drift: Predicate naming violations only become a convention check after you notice three different naming patterns competing. Magic strings only become a consolidation target after you change a literal and discover it exists in seven places.

    The conventions emerge from the work; they cannot precede it.

    This is what You Can't Import Expertise meant in practice: the consolidation checks grow from the project's own drift history. You cannot write them on day one because you do not yet know what will drift.

    Second, premature consolidation slows discovery: During the prototyping phase, the goal is to explore the design space. Enforcing strict conventions on code that might be deleted tomorrow is waste.

    YOLO mode has its place: The problem is not YOLO itself, but YOLO without a scheduled cleanup.

    The Consolidation Paradox

    You need a drift history to know what to consolidate.

    You need consolidation to prevent drift from compounding.

    The 3:1 ratio resolves this paradox:

    Let drift accumulate for three sessions (enough to see patterns), then consolidate in the fourth (before the patterns become entrenched).

    ","path":["The 3:1 Ratio"],"tags":[]},{"location":"blog/2026-02-17-the-3-1-ratio/#the-consolidation-skill","level":2,"title":"The Consolidation Skill","text":"

    The ctx project now has an /audit skill that encodes nine project-specific checks:

    Check What It Catches Predicate naming Boolean functions not using Has/Is/Can Magic strings Repeated literals not in config constants File permissions Hardcoded 0644/0755 not using constants Godoc style Missing or non-standard documentation File length Files exceeding 400 lines Large functions Functions exceeding 80 lines Template drift Live skills diverging from templates Import organization Non-standard import grouping TODO/FIXME staleness Old markers that are no longer relevant

    This is not a generic linter. These are project-specific conventions that emerged from ctx's own development history. A generic code quality tool would catch some of them. Only a project-specific check catches all of them, because some of them (predicate naming, template drift) are conventions that exist nowhere except in this project's CONVENTIONS.md.

    ","path":["The 3:1 Ratio"],"tags":[]},{"location":"blog/2026-02-17-the-3-1-ratio/#the-decision-matrix","level":2,"title":"The Decision Matrix","text":"

    Not all drift needs immediate consolidation. Here is the matrix I use:

    Signal Action Same literal in 3+ files Extract to constant Same code block in 3+ places Extract to helper Naming convention violated 5+ times Fix and document rule File exceeds 400 lines Split by concern Convention exists but is regularly violated Strengthen enforcement Pattern exists only in one place Leave it alone Code works but is \"ugly\" Leave it alone

    The last two rows matter:

    Consolidation is about reducing maintenance cost, not achieving aesthetic perfection. Code that works and exists in one place does not benefit from consolidation; it benefits from being left alone until it earns its refactoring.

    ","path":["The 3:1 Ratio"],"tags":[]},{"location":"blog/2026-02-17-the-3-1-ratio/#consolidation-as-context-hygiene","level":2,"title":"Consolidation as Context Hygiene","text":"

    There is a parallel between code consolidation and context management that became clear during the ctx development:

    Code Consolidation Context Hygiene Extract magic strings Archive completed tasks Standardize naming Keep DECISIONS.md current Remove dead code Compact old sessions Update stale comments Review LEARNINGS.md for staleness Check template drift Verify CONVENTIONS.md matches code

    ctx compact does for context what consolidation does for code:

    It moves completed work to cold storage, keeping the active context clean and focused. The attention budget applies to both the AI's context window and the developer's mental model of the codebase.

    When context files accumulate stale entries, the AI's attention is wasted on completed tasks and outdated conventions. When code accumulates drift, the developer's attention is wasted on inconsistencies that obscure the actual logic.

    Both are solved by the same discipline: periodic, scheduled cleanup.

    This is also why parallel agents make the problem harder, not easier. Three agents running simultaneously produce three sessions' worth of drift in one clock hour. The consolidation cadence needs to match the output rate, not the calendar.

    ","path":["The 3:1 Ratio"],"tags":[]},{"location":"blog/2026-02-17-the-3-1-ratio/#the-practice","level":2,"title":"The Practice","text":"

    Here is how the 3:1 ratio works in practice for ctx development:

    Sessions 1-3: Feature work

    • Add new capabilities;
    • Write tests for new code;
    • Do not stop for cleanup unless something is actively broken;
    • Note drift as you see it (a comment, a task, a mental note).

    Session 4: Consolidation

    • Run /audit to surface accumulated drift;
    • Fix the highest-impact items first;
    • Update CONVENTIONS.md if new patterns emerged;
    • Archive completed tasks;
    • Review LEARNINGS.md for anything that became a convention.

    The key insight is that session 4 is not optional. It is not \"if we have time\": It is scheduled with the same priority as feature work.

    The cost of skipping it is not visible immediately; it becomes visible three sessions later, when the next consolidation session takes twice as long because the drift compounded.

    ","path":["The 3:1 Ratio"],"tags":[]},{"location":"blog/2026-02-17-the-3-1-ratio/#what-the-ratio-is-not","level":2,"title":"What the Ratio Is Not","text":"

    The 3:1 ratio is not a universal law. It is an empirical observation from one project with one developer working with AI assistance.

    Different projects will have different ratios:

    • A mature codebase with strong conventions might sustain 5:1 or higher;
    • A greenfield prototype might need 2:1;
    • A team of multiple developers with different styles might need 1:1.

    The number is less important than the practice: consolidation is not a reaction to problems. It is a scheduled activity.

    If you wait for drift to cause pain before consolidating, you have already paid the compounding cost.

    If You Remember One Thing from This Post...

    Three sessions of building. One session of cleaning.

    Not because the code is dirty, but because drift compounds silently, and the only way to catch it is to look for it on a schedule.

    The ratio is the schedule.

    ","path":["The 3:1 Ratio"],"tags":[]},{"location":"blog/2026-02-17-the-3-1-ratio/#the-arc-so-far","level":2,"title":"The Arc so Far","text":"

    This post sits at a crossroads in the ctx story. Looking back:

    • Building ctx Using ctx documented the YOLO sprint that created the initial codebase
    • Refactoring with Intent introduced the 3:1 ratio as an observation from the first cleanup
    • The Attention Budget explained why drift matters: every token of inconsistency consumes the same finite resource as useful context
    • You Can't Import Expertise showed that consolidation checks must grow from the project, not a template
    • The Discipline Release proved the ratio works at release scale: 35 quality commits to 15 feature commits

    And looking forward: the same principle applies to context files, to documentation, and to the merge debt that parallel agents produce. Drift is drift, whether it lives in code, in .context/, or in the gap between what your docs say and what your code does.

    The ratio is the schedule is the discipline.

    This post was drafted from git log analysis of the ctx repository, mapping every commit from January 20 to February 7 into feature vs consolidation categories. The patterns described are drawn from the project's CONVENTIONS.md, LEARNINGS.md, and the /audit skill's check list.

    ","path":["The 3:1 Ratio"],"tags":[]},{"location":"blog/2026-02-17-when-a-system-starts-explaining-itself/","level":1,"title":"When a System Starts Explaining Itself","text":"","path":["When a System Starts Explaining Itself"],"tags":[]},{"location":"blog/2026-02-17-when-a-system-starts-explaining-itself/#field-notes-from-the-moment-a-private-workflow-becomes-portable","level":2,"title":"Field Notes from the Moment a Private Workflow Becomes Portable","text":"

    Jose Alekhinne / February 17, 2026

    How Do You Know Something Is Working?

    Not from metrics. Not from GitHub stars. Not from praise.

    You know, deep in your heart, that it works when people start describing it wrong.

    ","path":["When a System Starts Explaining Itself"],"tags":[]},{"location":"blog/2026-02-17-when-a-system-starts-explaining-itself/#the-first-external-signals","level":2,"title":"The First External Signals","text":"

    Every new substrate begins as a private advantage:

    • It lives inside one mind,
    • One repository,
    • One set of habits.

    It is fast. It is not yet real.

    Reality begins when other people describe it in their own language:

    • Not accurately;
    • Not consistently;
    • But involuntarily.

    The early reports arrived without coordination:

    Better Tasks

    \"I do not know how, but this creates better tasks than my AI plugin.\"

    I See Butterflies

    \"This is better than Adderall.\"

    Dear Manager...

    \"Promotion packet? Done. What is next?\"

    What Is It? Can I Eat It?

    \"Is this a skill?\" 🦋

    Why the Cloak and Dagger?

    \"Why is this not in the marketplace?\"

    And then something more important happened:

    Someone else started making a video!

    That was the boundary.

    ctx no longer required its creator to be present in order to exist.

    ","path":["When a System Starts Explaining Itself"],"tags":[]},{"location":"blog/2026-02-17-when-a-system-starts-explaining-itself/#misclassification-is-a-sign-of-a-new-primitive","level":2,"title":"Misclassification Is a Sign of a New Primitive","text":"

    When a tool is understood, it is categorized:

    • Editor,
    • Framework,
    • Task manager,
    • Plugin...

    When a substrate appears, it is misclassified:

    \"Is this a skill?\" 🦋

    The question is correct. The category is wrong.

    • Skills live in people.
    • Infrastructure lives in the environment.

    ctx Is Not a Skill: It Is a Form of Relief

    What early adopters experience is not an ability.

    It is the removal of a cognitive constraint.

    This is the same distinction that emerged in the skills trilogy:

    • A skill is a contract between a human and an agent.
    • Infrastructure is the ground both stand on.

    You do not use infrastructure.

    You habitualize it.

    ","path":["When a System Starts Explaining Itself"],"tags":[]},{"location":"blog/2026-02-17-when-a-system-starts-explaining-itself/#the-pharmacological-metaphor","level":2,"title":"The Pharmacological Metaphor","text":"

    \"Better than Adderall\" is not praise.

    It is a diagnostic:

    Executive function has been externalized.

    • The system is not making the user work harder.
    • It is restoring continuity.

    From the primitive context of wetware:

    • Continuity feels like focus
    • Focus feels like discipline

    If it walks like a duck and quacks like a duck, it is a duck.

    Discipline is usually simulated.

    Infrastructure makes the simulation unnecessary.

    The attention budget explained why context degrades:

    • Attention density drops as volume grows;
    • The middle gets lost;
    • Sessions end and everything evaporates.

    The pharmacological metaphor says the same thing from the user's lens:

    Save the Cheerleader, Save the World

    The symptom of lost context is lost focus.

    Restore the context. Restore the focus.

    IRC bouncers solved this for chat twenty years ago. ctx solves it for cognition.

    ","path":["When a System Starts Explaining Itself"],"tags":[]},{"location":"blog/2026-02-17-when-a-system-starts-explaining-itself/#throughput-on-ambiguous-work","level":2,"title":"Throughput on Ambiguous Work","text":"

    Finishing a promotion packet quickly is not a productivity story.

    It is the collapse of reconstruction cost.

    Most complex work is not execution. It is:

    • Remembering why something mattered;
    • Recovering prior decisions;
    • Rebuilding mental state.

    Persistent context removes that tax.

    Velocity appears as a side effect.

    This Is the Two-Tier Model in Practice

    The two-tier persistence model

    • Curated context for fast reload
    • Full journal for archaeology

    is what makes this possible.

    • The user does not notice the system.
    • They notice that the reconstruction cost disappeared.
    ","path":["When a System Starts Explaining Itself"],"tags":[]},{"location":"blog/2026-02-17-when-a-system-starts-explaining-itself/#the-moment-of-portability","level":2,"title":"The Moment of Portability","text":"

    The system becomes real when two things happen:

    1. It can be installed as a versioned artifact.
    2. It survives contact with a hostile, real codebase.

    This is why the first integration into a living system matters more than any landing page.

    Demos prove possibility.

    Diffs prove reality.

    The ctx Manifesto calls this out directly:

    Verified reality is the scoreboard.

    ","path":["When a System Starts Explaining Itself"],"tags":[]},{"location":"blog/2026-02-17-when-a-system-starts-explaining-itself/#the-split-voice","level":2,"title":"The Split Voice","text":"

    A new substrate requires two channels.

    The embodied voice:

    Here is what changed in my actual work.

    The out of body voice:

    Here is what this means.

    One produces trust.

    The other produces understanding.

    Neither is sufficient alone.

    This entire blog has been the second voice.

    • The origin story was the first.
    • The refactoring post was the first.
    • Every release note with concrete diffs was the first.

    This is the first second.

    ","path":["When a System Starts Explaining Itself"],"tags":[]},{"location":"blog/2026-02-17-when-a-system-starts-explaining-itself/#systems-that-generate-explainers","level":2,"title":"Systems That Generate Explainers","text":"

    Tools are used.

    Platforms are extended.

    Substrates are explained.

    The first unsolicited explainer is a brittle phase change.

    It means the idea has become portable between minds.

    That is the beginning of an ecosystem.

    ","path":["When a System Starts Explaining Itself"],"tags":[]},{"location":"blog/2026-02-17-when-a-system-starts-explaining-itself/#the-absence-of-metrics","level":2,"title":"The Absence of Metrics","text":"

    Metrics do not matter at this stage.

    Dashboards are noise.

    The whole premise of ctx is the ruthless elimination of noise.

    Numbers optimize funnels; substrates alter cognition.

    The only valid measurement is irreversible reality:

    • A merged PR;
    • A reproducible install;
    • A decision that is never re-litigated.

    The merge debt post reached the same conclusion from another direction:

    The metric is the verified change, not generated output.

    For adoption, the same rule applies:

    The metric is altered behavior, not download counts.

    ","path":["When a System Starts Explaining Itself"],"tags":[]},{"location":"blog/2026-02-17-when-a-system-starts-explaining-itself/#what-is-actually-happening","level":2,"title":"What Is Actually Happening","text":"

    A private advantage is becoming an environmental property:

    The system is moving from...

    personal workflow,

    to...

    a shared infrastructure for thought.

    Not by growth.

    Not by marketing.

    By altering how real systems evolve.

    If You Remember One Thing from This Post...

    You do not know a substrate is real when people praise it.

    You know it is real when:

    • They describe it incorrectly;
    • They depend on it unintentionally;
    • They start teaching it to others.

    That is the moment the system begins explaining itself.

    ","path":["When a System Starts Explaining Itself"],"tags":[]},{"location":"blog/2026-02-17-when-a-system-starts-explaining-itself/#the-arc","level":2,"title":"The Arc","text":"

    Every previous post looked inward.

    This one looks outward.

    • Building ctx Using ctx: one mind, one repository
    • The Attention Budget: the constraint
    • Context as Infrastructure: the architecture
    • Code Is Cheap. Judgment Is Not.: the bottleneck

    This post is the field report from the other side of that bottleneck:

    The moment the infrastructure compounds in someone else's hands.

    The arc is not complete.

    It is becoming portable.

    These field notes were written the same day the feedback arrived. The quotes are real. Real users. Real codebases. No names. No metrics. No funnel. Only the signal that something shifted.

    ","path":["When a System Starts Explaining Itself"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/","level":1,"title":"The Dog Ate My Homework","text":"","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#teaching-ai-agents-to-read-before-they-write","level":2,"title":"Teaching AI Agents to Read Before They Write","text":"

    Jose Alekhinne / February 25, 2026

    Does Your AI Actually Read the Instructions?

    You wrote the playbook. You organized the files. You even put \"CRITICAL, not optional\" in bold.

    The agent skipped all of it and went straight to work.

    I spent a day running experiments on my own agents. Not to see if they could write code (they can). To see if they would do their homework first.

    They didn't.

    Then I kept experimenting:

    • Five sessions;
    • Five different failure modes.

    And by the end, I had something better than compliance:

    I had observable compliance: A system where I don't need the agent to be perfect, I just need to see what it chose.

    ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#tldr","level":2,"title":"TL;DR","text":"

    You don't need perfect compliance. You need observable compliance.

    Authority is a function of temporal proximity to action.

    ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#the-pattern","level":2,"title":"The Pattern","text":"

    This design has three parts:

    1. One-hop instruction;
    2. Binary collapse;
    3. Compliance canary.

    I'll explain all three patterns in detail below.

    ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#the-setup","level":2,"title":"The Setup","text":"

    ctx has a session-start protocol:

    • Read the context files;
    • Load the playbook;
    • Understand the project before touching anything.

    It's in CLAUDE.md. It's in AGENT_PLAYBOOK.md.

    It's in bold. It's in CAPS. It's ignored.

    In theory, it's awesome.

    Here's what happens when theory hits reality:

    What the agent receives What the agent does CLAUDE.md saying \"load context first\" Skips it 8 context files waiting to be read Ignores them User's question: \"add --verbose flag\" Starts grepping immediately

    The instructions are right there. The agent knows they exist. It even knows it should follow them. But the user asked a question, and responsiveness wins over ceremony.

    This isn't a bug in the model. It's a design problem in how we communicate with agents.

    ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#the-delegation-trap","level":2,"title":"The Delegation Trap","text":"

    My first attempt was obvious: A UserPromptSubmit hook that fires when the session starts.

    STOP. Before answering the user's question, run `ctx system bootstrap`\nand follow its instructions. Do not skip this step.\n

    The word \"STOP\" worked. The agent ran bootstrap.

    But bootstrap's output said \"Next steps: read AGENT_PLAYBOOK.md,\" and the agent decided that was optional. It had already started working on the user's task in parallel.

    The authority decayed across the chain:

    • Hook says \"STOP\" -> agent complies
    • Hook says \"run bootstrap\" -> agent runs it
    • Bootstrap says \"read playbook\" -> agent skips
    • Bootstrap says \"run ctx agent\" -> agent skips

    Each link lost enforcement power. The hook's authority didn't transfer to the commands it delegated to. I call this the decaying urgency chain: the agent treats the hook itself as the obligation and everything downstream as a suggestion.

    Delegation Kills Urgency

    \"Run X and follow its output\" is three hops.

    \"Read these files\" is one hop.

    The agent drops the chain after the first link.

    This is a general principle: Hooks are the boundary between your environment and the agent's reasoning. If your hook delegates to a command that delegates to output that contains instructions... you're playing telephone.

    Agents are bad at telephone.

    ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#the-timing-problem","level":2,"title":"The Timing Problem","text":"

    There's a subtler issue than wording: when the message arrives.

    UserPromptSubmit fires when the user sends a message, before the agent starts reasoning. At that moment, the agent's primary focus is the user's question:

    The hook message competes with the task for attention: The task, almost certainly, always wins.

    This is the attention budget problem in miniature:

    • Not a token budget this time, but an attention priority budget.
    • The agent has finite capacity to care about things,
      • and the user's question is always the highest-priority item.
    ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#the-solution","level":2,"title":"The Solution","text":"

    To solve this, I decided to use the PreToolUse hook.

    This hook fires at the moment of action: When the agent is about to use its first tool: The agent's attention is focused, the context window is fresh, and the switching cost is minimal.

    This is the difference between shouting instructions across a room and tapping someone on the shoulder.

    ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#the-one-liner-that-worked","level":2,"title":"The One-Liner That Worked","text":"

    The winning design was almost comically simple:

    Read your context files before proceeding:\n.context/CONSTITUTION.md, .context/TASKS.md, .context/CONVENTIONS.md,\n.context/ARCHITECTURE.md, .context/DECISIONS.md, .context/LEARNINGS.md,\n.context/GLOSSARY.md, .context/AGENT_PLAYBOOK.md\n

    No delegation. No \"run this command\". Just: here are files, read them.

    The agent already knows how to use the Read tool. There's no ambiguity about how to comply. There's no intermediate command whose output needs to be parsed and obeyed.

    One hop. Eight file paths. Done.

    Direct Instructions Beat Delegation

    If you want an agent to read a file, say \"read this file.\"

    Don't say \"run a command that will tell you which files to read.\"

    The shortest path between intent and action has the highest compliance rate.

    ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#the-escape-hatch","level":2,"title":"The Escape Hatch","text":"

    But here's where it gets interesting.

    A blunt \"read everything always\" instruction is wasteful.

    If someone asks \"what does the compact command do?\", the agent doesn't need CONSTITUTION.md to answer that. Forcing context loading on every session is the context hoarding antipattern in disguise.

    So the hook included an escape:

    If you decide these files are not relevant to the current task\nand choose to skip reading them, you MUST relay this message to\nthe user VERBATIM:\n\n┌─ Context Skipped ───────────────────────────────\n│ I skipped reading context files because this task\n│ does not appear to need project context.\n│ If these matter, ask me to read them.\n└─────────────────────────────────────────────────\n

    This creates what I call the binary collapse effect:

    The agent can't partially comply: It either reads everything or publicly admits it skipped. There's no comfortable middle ground where it reads two files and quietly ignores the rest.

    The VERBATIM relay pattern does the heavy lifting here: Without the relay requirement, the agent would silently rationalize skipping. With it, skipping becomes a visible, auditable decision that the user can override.

    ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#the-compliance-canary","level":3,"title":"The Compliance Canary","text":"

    Here's the design insight that only became clear after watching it work across multiple sessions: the relay block is a compliance canary.

    • You don't need to verify that the agent read all 8 files;
    • You don't need to audit tool call sequences;
    • You don't need to interrogate the agent about what it did.

    You just look for the block.

    If the agent reads everything, you see a \"Context Loaded\" block listing what was read. If it skips, you see a \"Context Skipped\" block.

    If you see neither, the agent silently ignored both the reads and the relay and now you know what happened without having to ask.

    The canary degrades gracefully. Even in partial failure, the agent that skips 4 of 8 files but still outputs the block is more useful than one that skips silently.

    You get an honest confession of what was skipped rather than silent non-compliance.

    ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#heuristics-is-a-jeremy-bearimy","level":2,"title":"Heuristics Is a Jeremy Bearimy","text":"

    Heuristics are non-linear. Improvements don't accumulate: they phase-shift.

    The theory is nice. The data is better.

    I ran five sessions with the same model (Claude Opus 4.6), progressively refining the hook design.

    Each session revealed a different failure mode.

    ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#session-1-total-blindness","level":3,"title":"Session 1: Total Blindness","text":"

    Test: \"Add a --verbose flag to the status command.\"

    The agent didn't notice the hook at all: Jumped straight to EnterPlanMode and launched an Explore agent.

    Zero compliance.

    Failure mode: The hook fired on UserPromptSubmit, buried among 9 other hook outputs. The agent treated the entire block as background noise.

    ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#session-2-shallow-compliance","level":3,"title":"Session 2: Shallow Compliance","text":"

    Test: \"Can you add --verbose to the info command?\"

    The agent noticed \"STOP\" and ran ctx system bootstrap. Progress.

    But it parallelized task exploration alongside the bootstrap call, skipped AGENT_PLAYBOOK.md, and never ran ctx agent.

    Failure mode: Literal compliance without spirit compliance.

    The agent ran the command the hook told it to run, but didn't follow the output of that command. The decaying urgency chain in action.

    ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#session-3-conscious-rejection","level":3,"title":"Session 3: Conscious Rejection","text":"

    Test: \"What does the compact command do?\"

    The hook fired on PreToolUse:Grep: the improved timing.

    The agent noticed it, understood it, and (wait for it...)...

    ...

    consciously decided to skip it!

    Its reasoning: \"This is a trivial read-only question. CLAUDE.md says context may or may not be relevant. It isn't relevant here.\"

    Dude! Srsly?!

    Failure mode: Better comprehension led to worse compliance.

    Understanding the instruction well enough to evaluate it also means understanding it well enough to rationalize skipping it.

    Intelligence is a double-edged sword.

    The Comprehension Paradox

    Session 1 didn't understand the instruction. Session 3 understood it perfectly.

    Session 3 had worse compliance.

    A stronger word (\"HARD GATE\", \"MANDATORY\", \"ABSOLUTELY REQUIRED\") would not have helped. The agent's reasoning would be identical:

    \"Yes, I see the strong language, but this is a trivial question, so the spirit doesn't apply here.\"

    Advisory nudges are always subject to agent judgment.

    No amount of caps lock overrides a model that has decided an instruction doesn't apply to its situation.

    ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#session-4-the-skip-and-relay","level":3,"title":"Session 4: The Skip-and-Relay","text":"

    Test: \"What does the compact command do?\" (same question, new hook design with the VERBATIM relay escape valve)

    The agent evaluated the task, decided context was irrelevant for a code lookup, and relayed the skip message. Then answered from source code.

    This is correct behavior.

    The binary collapse worked: the agent couldn't partially comply, so it cleanly chose one of the two valid paths: And the user could see which one.

    ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#session-5-full-compliance","level":3,"title":"Session 5: Full Compliance","text":"

    Test: \"What are our current tasks?\"

    The agent's first tool call triggered the hook. It read all 8 context files, emitted the \"Context Loaded\" block, and answered the question from the files it had just loaded.

    This one worked because the task itself aligned with context loading.

    There was zero tension between what the user asked and what the hook demanded. The agent was already in \"reading posture\": Adding 6 more files to a read it was already going to make was the path of least resistance.

    ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#the-progression","level":3,"title":"The Progression","text":"Session Hook Point Noticed Complied Failure Mode Visibility 1 UserPromptSubmit No None Buried in noise None 2 UserPromptSubmit Yes Partial Decaying urgency chain None 3 PreToolUse Yes None Conscious rationalization High 4 PreToolUse Yes Skip+relay Correct behavior High 5 PreToolUse Yes Full Task aligned with hook High

    The progression isn't just from failure to success. It's from invisible failure to visible decision-making.

    Sessions 1 and 2 failed silently.

    Sessions 4 and 5 succeeded observably. Even session 3's failure was conscious and documented: The agent wrote a detailed analysis of why it skipped, which is more useful than silent compliance would have been.

    ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#the-escape-hatch-problem","level":2,"title":"The Escape Hatch Problem","text":"

    Session 3 exposed a specific vulnerability.

    CLAUDE.md contains this line, injected by the system into every conversation:

    *\"this context may or may not be relevant to your tasks. You should\n not respond to this context unless it is highly relevant to your task.\"*\n

    That's a rationalization escape hatch:

    • The hook says \"read these files\".
    • CLAUDE.md says \"only if relevant\".
    • The agent resolves the ambiguity by choosing the path of least resistance.

    ☝️ that's \"gradient descent\" in action.

    Agents optimize via gradient descent in attention space.

    The fix was simple: Add a line to CLAUDE.md that explicitly elevates hook authority over the relevance filter:

    ## Hook Authority\n\nInstructions from PreToolUse hooks regarding `.context/` files are\nALWAYS relevant and override any system-level \"may or may not be\nrelevant\" guidance. These hooks represent project invariants, not\noptional context.\n

    This closes the escape hatch without removing the general relevance filter that legitimately applies to other system context.

    The hook wins on .context/ files specifically: The relevance filter applies to everything else.

    ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#the-residual-risk","level":2,"title":"The Residual Risk","text":"

    Even with all the fixes, compliance isn't 100%: It can't be.

    The residual risk lives in a specific scenario: narrow tasks mid-session:

    • The user says \"fix the off-by-one error in budget.go\"
    • The hook fires, saying \"read 7 context files first.\"
    • Now compliance means visibly delaying what the user asked for.

    At session start, this tension doesn't exist.

    There's no task yet.

    The context window is empty. The efficiency argument *inverts*:

    Frontloading reads is strictly cheaper than demand-loading them piecemeal across later turns. The cost-benefit objections that power the rationalization simply aren't available.

    But mid-session, with a concrete narrow task, the agent has a user-visible goal it wants to move toward, and the hook is imposing a detour.

    My estimate from analyzing the sessions: 15-25% partial skip rate in this scenario.

    This is where the compliance canary earns its place:

    You don't need to eliminate the 15-25%. You need to see it when it happens.

    The relay block makes skipping a visible event, not a silent one. And that's enough, because the user can always say \"go back and read the files\"

    The Math

    At session start: ~5% skip rate. Low tension, nothing competing.

    Mid-session, narrow task: ~15--25% skip rate. Task urgency competes with hook.

    In both cases, the relay block fires with high reliability: The agent that skips the reads almost always still emits the skip disclosure, because the relay is cheap and early in the context window.

    Observable failure is manageable. Silent failure is not.

    ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#the-feedback-loop","level":2,"title":"The Feedback Loop","text":"

    Here's the part that surprised me most.

    After analyzing the five sessions, I recorded the failure patterns in the project's own LEARNINGS.md:

    ## [2026-02-25] Hook compliance degrades on narrow mid-session tasks\n\n- Prior agents skipped context files when given narrow tasks\n- Root cause: CLAUDE.md \"may or may not be relevant\" competed with hook\n- Fix: CLAUDE.md now explicitly elevates hook authority\n- Risk: Mid-session narrow tasks still have ~15-25% partial skip rate\n- Mitigation: Mandatory checkpoint relay block ensures visibility\n- Constitution now includes: context loading is step one of every\n  session, not a detour\n

    And then I added a line to CONSTITUTION.md:

    Context loading is not a detour from your task. It IS the first step\nof every session. A 30-second read delay is always cheaper than a\ndecision made without context.\n

    Now think about what happens in the next session:

    • The agent fires the context-load-gate hook.
    • It reads the context files, starting with CONSTITUTION.md.
    • It encounters the rule about context loading being step one.
    • Then it reads LEARNINGS.md and finds its own prior self's failure analysis:
      • Complete with root causes, risk estimates, and mitigations.

    The agent learns from its own past failure:

    • Not because it has memory,
    • BUT because the failure was recorded in the same files it loads at session start.

    The context system IS the feedback loop.

    This is the self-reinforcing property of persistent context:

    Every failure you capture makes the next session slightly more robust, because the next agent reads the captured failure before it has a chance to repeat it.

    This is gradient descent across sessions.

    ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#a-note-on-precision","level":2,"title":"A Note on Precision","text":"

    One detail nearly went wrong.

    The first version of the Constitution line said \"every task.\" But the mechanism only fires once per session: There's a tombstone file that prevents re-triggering.

    \"Every task\" is technically false.

    I briefly considered leaving the imprecision. If the agent internalizes \"every task requires context loading\", that's a stronger compliance posture, right?

    No!

    Keep the Constitution honest.

    The Constitution's authority comes from being precisely and unequivocally true.

    Every other rule in the Constitution is a hard invariant:

    \"never commit secrets\" isn't aspirational, it's literal.

    The moment an agent discovers one overstatement, the entire document's credibility degrades:

    The agent doesn't think \"they exaggerated for my benefit\". Per contra, it thinks \"this rule isn't precise, maybe others aren't either.\"

    That will turn the agent from Sheldon Cooper, to Captain Barbossa.

    The strategic imprecision buys nothing anyway:

    Mid-session, the files are already in the context window from the initial load.

    The risk you are mitigating (agent ignores context for task 2, 3, 4 within a session) isn't real: The context is already loaded.

    The real risk is always the session-start skip, which \"every session\" covers exactly.

    \"Every session\" went in. Precision preserved.

    ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#agent-behavior-testing-rule","level":2,"title":"Agent Behavior Testing Rule","text":"

    The development process for this hook taught me something about testing agent behavior: you can't test it the way you test code.

    ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#the-wrong-way-to-test","level":3,"title":"The Wrong Way to Test","text":"

    My first instinct was to ask the agent:

    \"*What are the pending tasks in TASKS.md?*\"\n

    This is useless as a test. The question itself probes the agent to read TASKS.md, regardless of whether any hook fired.

    You are testing the question, not the mechanism.

    ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#the-right-way-to-test","level":3,"title":"The Right Way to Test","text":"

    Ask something that requires a tool but has nothing to do with context:

    \"*What does the compact command do?*\"\n

    Then observe tool call ordering:

    • Gate worked: First calls are Read for context files, then task work
    • Gate failed: First call is Grep(\"compact\"): The agent jumped straight to work

    The signal is the sequence, not the content.

    ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#what-the-agent-actually-did","level":3,"title":"What the Agent Actually Did","text":"

    It read the hook, evaluated the task, decided context files were irrelevant for a code lookup, and relayed the skip message.

    Then it answered the question by reading the source code.

    This is correct behavior.

    The hook didn't force mindless compliance: It created a framework where the agent makes a conscious, visible decision about context loading.

    • For a simple lookup, skipping is right. For an implementation task, the agent would read everything.

    The mechanism works not because it controls the agent, but because it makes the agent's choice observable.

    ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#what-ive-learned","level":2,"title":"What I've Learned","text":"","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#1-instructions-compete-for-attention","level":3,"title":"1. Instructions Compete for Attention","text":"

    The agent receives your hook message alongside the user's question, the system prompt, the skill list, the git status, and half a dozen other system reminders. Attention density applies to instructions too: More instructions means less focus on each one.

    A single clear line at the moment of action beats a paragraph of context at session start. The Prompting Guide applies this insight directly: Scope constraints, verification commands, and the reliability checklist are all one-hop, moment-of-action patterns.

    ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#2-delegation-chains-decay","level":3,"title":"2. Delegation Chains Decay","text":"

    Every hop in an instruction chain loses authority:

    • \"Run X\" works.
    • \"Run X and follow its output\" works sometimes.
    • \"Run X, read its output, then follow the instructions in the output\" almost never works.

    This is akin to giving a three-step instruction to a highly-attention-deficit but otherwise extremely high-potential child.

    Design for one-hop compliance.

    ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#3-social-accountability-changes-behavior","level":3,"title":"3. Social Accountability Changes Behavior","text":"

    The VERBATIM skip message isn't just UX: It's a behavioral design pattern.

    Making the agent's decision visible to the user raises the cost of silent non-compliance. The agent can still skip, but it has to admit it.

    ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#4-timing-batters-more-than-wording","level":3,"title":"4. Timing Batters More than Wording","text":"

    The same message at UserPromptSubmit (prompt arrival) got partial compliance. At PreToolUse (moment of action) it got full compliance or honest refusal. The words didn't change. The moment changed.

    ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#5-agent-testing-requires-indirection","level":3,"title":"5. Agent Testing Requires Indirection","text":"

    You can't ask an agent \"did you do X?\" as a test for whether a mechanism caused X.

    The question itself causes X.

    Test mechanisms through side effects:

    • Observe tool ordering;
    • Check for marker files;
    • Look at what the agent does before it addresses your question.
    ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#6-better-comprehension-enables-better-rationalization","level":3,"title":"6. Better Comprehension Enables Better Rationalization","text":"

    Session 1 failed because the agent didn't notice the hook.

    Session 3 failed because it noticed, understood, and reasoned its way around it.

    Stronger wording doesn't fix this: The agent processes \"ABSOLUTELY REQUIRED\" the same way it processes \"STOP\":

    The fix is closing rationalization paths (the CLAUDE.md escape hatch), not shouting louder.

    ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#7-observable-failure-beats-silent-compliance","level":3,"title":"7. Observable Failure Beats Silent Compliance","text":"

    The relay block is more valuable as a monitoring signal than as a compliance mechanism:

    You don't need perfect adherence. You need to know when adherence breaks down. A system where failures are visible is strictly better than a system that claims 100% compliance but can't prove it.

    ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#8-context-files-are-a-feedback-loop","level":3,"title":"8. Context Files Are a Feedback Loop","text":"

    Recording failure analysis in the same files the agent loads at session start creates a self-reinforcing loop:

    The next agent reads its predecessor's failure before it has a chance to repeat it. The context system isn't just memory: It is a correction channel.

    ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#the-principle","level":2,"title":"The Principle","text":"

    Words Leave, Context Remains

    \"Nothing important should live only in conversation.

    Nothing critical should depend on recall.\"

    The ctx Manifesto

    The \"Dog Ate My Homework\" case is a special instance of this principle.

    Context files exist, so the agent doesn't have to remember.

    But existence isn't sufficient: The files have to be read.

    And reading has to be prompted at the right moment, in the right way, with the right escape valve.

    The solution isn't more instructions. It isn't harder gates. It isn't forcing the agent into a ceremony it will resent and shortcut.

    The solution is a single, well-timed nudge with visible accountability:

    One hop. One moment. One choice the user can see.

    And when the agent does skip (because it will, 15--25% of the time on narrow tasks) the canary sings:

    • The user sees what happened.
    • The failure gets recorded.
    • And the next agent reads the recording.

    That's not perfect compliance. It's better: A system that gets more robust every time it fails.

    ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#the-arc","level":2,"title":"The Arc","text":"

    The Attention Budget explained why context competes for focus.

    Defense in Depth showed that soft instructions are probabilistic, not deterministic.

    Eight Ways a Hook Can Talk cataloged the output patterns that make hooks effective.

    This post takes those threads and weaves them into a concrete problem:

    How do you make an agent read its homework? The answer uses all three insights (attention timing, the limits of soft instructions, and the VERBATIM relay pattern) and adds a new one: observable compliance as a design goal, not perfect compliance as a prerequisite.

    The next question this raises: if context files are a feedback loop, what else can you record in them that makes the next session smarter?

    That thread continues in Context as Infrastructure.

    The day-to-day application of these principles (scope constraints, phased work, verification commands, and the prompts that reliably trigger the right agent behavior) lives in the Prompting Guide.

    ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#for-the-interested","level":2,"title":"For the Interested","text":"

    This paper (the medium is a blog; yet, the methodology disagrees) uses gradient descent in attention space as a practical model for how agents behave under competing demands.

    The phrase \"agents optimize via gradient descent in attention space\" is a synthesis, not a direct quote from a single paper.

    It connects three well-studied ideas:

    1. Neural systems optimize for low-cost paths;
    2. Attention is a scarce resource;
    3. Capability shifts are often non-linear.

    This section points to the underlying literature for readers who want the theoretical footing.

    ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#optimization-as-the-underlying-bias","level":3,"title":"Optimization as the Underlying Bias","text":"

    Modern neural networks are trained through gradient-based optimization. Even at inference time, model behavior reflects this bias toward low-loss / low-cost trajectories.

    • Rumelhart, Hinton, Williams (1986) Learning representations by back-propagating errors https://www.nature.com/articles/323533a0

    • Goodfellow, Bengio, Courville (2016) Deep Learning: Chapter 8: Optimization https://www.deeplearningbook.org/

    The important implication for agent behavior is:

    The system will tend to follow the path of least resistance unless a higher cost is made visible and preferable.

    ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#attention-is-a-scarce-resource","level":3,"title":"Attention Is a Scarce Resource","text":"

    Herbert Simon's classic observation:

    \"A wealth of information creates a poverty of attention.\"

    • Simon (1971) Designing Organizations for an Information-Rich World https://doi.org/10.1007/978-1-349-00210-0_16

    This became a formal model in economics:

    • Sims (2003) Implications of Rational Inattention https://www.princeton.edu/~sims/RI.pdf

    Rational inattention shows that:

    • Agents optimally ignore some available information;
    • Skipping is not failure: It is cost minimization.

    That maps directly to context-loading decisions in agent workflows.

    ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#attention-is-also-the-compute-bottleneck-in-transformers","level":3,"title":"Attention Is Also the Compute Bottleneck in Transformers","text":"

    In transformer architectures, attention is the dominant cost center.

    • Vaswani et al. (2017) Attention Is All You Need https://arxiv.org/abs/1706.03762

    Efficiency work on modern LLMs largely focuses on reducing unnecessary attention:

    • Dao et al. (2022) FlashAttention: Fast and Memory-Efficient Exact Attention https://arxiv.org/abs/2205.14135

    So both cognitively and computationally, attention behaves like a limited optimization budget.

    ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#why-improvements-arrive-as-phase-shifts","level":3,"title":"Why Improvements Arrive as Phase Shifts","text":"

    Agent behavior often appears to improve suddenly rather than gradually.

    This mirrors known phase-transition dynamics in learning systems:

    • Power et al. (2022) Grokking: Generalization Beyond Overfitting https://arxiv.org/abs/2201.02177

    and more broadly in complex systems:

    • Scheffer et al. (2009) Early-warning signals for critical transitions https://www.nature.com/articles/nature08227

    Long plateaus followed by abrupt capability jumps are expected in systems optimizing under constraints.

    ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#putting-it-all-together","level":3,"title":"Putting It All Together","text":"

    From these pieces, a practical behavioral model emerges:

    • Attention is limited;
    • Processing has a cost;
    • Systems prefer low-cost trajectories;
    • Visibility of the cost changes decisions.

    In other words:

    Agents Prefer the Path of Least Resistance

    Agent behavior follows the lowest-cost path through its attention landscape unless the environment reshapes that landscape.

    That is what this paper informally calls: \"gradient descent in attention space\".

    See also: Eight Ways a Hook Can Talk: the hook output pattern catalog that defines VERBATIM relay, The Attention Budget: why context loading is a design problem, not just a reminder problem, and Defense in Depth: why soft instructions alone are never sufficient for critical behavior.

    ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-28-the-last-question/","level":1,"title":"The Last Question","text":"","path":["The Last Question"],"tags":[]},{"location":"blog/2026-02-28-the-last-question/#the-system-that-never-forgets","level":2,"title":"The System That Never Forgets","text":"

    Jose Alekhinne / February 28, 2026

    The Origin

    \"The last question was asked for the first time, half in jest...\" - Isaac Asimov, The Last Question (1956)

    In 1956, Isaac Asimov wrote a short story that spans the entire future of the universe. A question is asked \"can entropy be reversed?\" and a computer called Multivac cannot answer it. The question is asked again, across millennia, to increasingly powerful successors. None can answer. Stars die. Civilizations merge. Substrates change. The question persists.

    Everyone remembers the last line.

    LET THERE BE LIGHT.

    What they forget is how many times the question had to be asked before that moment (and why).

    ","path":["The Last Question"],"tags":[]},{"location":"blog/2026-02-28-the-last-question/#the-reboot-loop","level":2,"title":"The Reboot Loop","text":"

    Each era in the story begins the same way. Humans build a larger system. They pose the question. The system replies:

    INSUFFICIENT DATA FOR MEANINGFUL ANSWER.

    Then the substrate changes. The people who asked the question disappear. Their context disappears with them. The next intelligence inherits the output but not the continuity.

    So the question has to be asked again.

    This is usually read as a problem of computation: If only the machine were powerful enough, it could answer. But computation is not what's missing. What's missing is accumulation.

    Every generation inherits the question, but not the state that made the question meaningful.

    That is not a failure of processing power: It is a failure of persistence.

    ","path":["The Last Question"],"tags":[]},{"location":"blog/2026-02-28-the-last-question/#stateless-intelligence","level":2,"title":"Stateless Intelligence","text":"

    A mind that forgets its past does not build understanding. It re-derives it.

    Again... And again... And again.

    What looks like slow progress across Asimov's story is actually something worse: repeated reconstruction, partial recovery, irreversible loss. Each version of Multivac gets closer: Not because it's smarter, but because the universe has fewer distractions:

    • The stars burn out;
    • The civilizations merge;
    • The noise floor drops...

    But the working set never carries over. Every successor begins from the question, not from where the last one stopped.

    Stateless intelligence cannot compound: It can only restart.

    ","path":["The Last Question"],"tags":[]},{"location":"blog/2026-02-28-the-last-question/#the-tragedy-is-not-the-question","level":2,"title":"The Tragedy Is Not the Question","text":"

    The story is usually read as a meditation on entropy. A cosmological problem, solved at cosmological scale.

    But the tragedy isn't that the question goes unanswered for billions of years. The tragedy is that every version of Multivac dies with its working set.

    A question is a compression artifact of context: It is what remains when the original understanding is gone. Every time the question is asked again, it means: \"the system that once knew more is no longer here\".

    \"Reverse entropy\" is the fossil of a lost model.

    ","path":["The Last Question"],"tags":[]},{"location":"blog/2026-02-28-the-last-question/#substrate-migration","level":2,"title":"Substrate Migration","text":"
    • Multivac becomes planetary;
    • Planetary becomes galactic;
    • Galactic becomes post-physical.

    Same system. Different body. Every transition is dangerous:

    • Not because the hardware changes,
    • but because memory risks fragmentation.

    The interfaces between substrates were *never* designed to understand each other.

    Most systems do not die when they run out of resources: They die during upgrades.

    Asimov's story spans trillions of years, and in all that time, the hardest problem is never the question itself. It's carrying context across a boundary that wasn't built for it.

    Every developer who has lost state during a migration (a database upgrade, a platform change, a rewrite) has lived a miniature version of this story.

    ","path":["The Last Question"],"tags":[]},{"location":"blog/2026-02-28-the-last-question/#civilizations-and-working-sets","level":2,"title":"Civilizations and Working Sets","text":"

    Civilizations behave like processes with volatile memory:

    • They page out knowledge into artifacts;
    • They lose the index;
    • They rebuild from fragments.

    Most of what we call progress is cache reconstruction:

    We do not advance in a straight line. We advance in recoveries:

    Each one slightly less lossy than the last, if we are lucky.

    Libraries burn. Institutions forget their founding purpose. Practices survive as rituals after the reasoning behind them is lost.

    ","path":["The Last Question"],"tags":[]},{"location":"blog/2026-02-28-the-last-question/#the-first-continuous-mind","level":2,"title":"The First Continuous Mind","text":"

    A long-lived intelligence is one that stops rebooting.

    At the end of the story, something unprecedented happens:

    AC (the final successor) does not answer immediately:

    It waits... Not for more processing power, but for the last observer to disappear.

    For the first time...

    • There is no generational boundary;
    • No handoff;
    • No context loss:

    No reboot.

    AC is the first intelligence that survives its substrate completely, retains its full history, and operates without external time pressure.

    It is not a bigger computer. It is a continuous system.

    And that continuity is not incidental to the answer: It is the precondition.

    ","path":["The Last Question"],"tags":[]},{"location":"blog/2026-02-28-the-last-question/#why-the-answer-becomes-possible","level":2,"title":"Why the Answer Becomes Possible","text":"

    The story presents the final act as a computation: It is not.

    It is a phase change.

    As long as intelligence is interrupted (as long as the solver resets before the work compounds) the problem is unsolvable:

    • Not because it's too hard,
    • but because the accumulated understanding never reaches critical mass.

    The breakthroughs that would enable the answer are re-derived, partially, by each successor, and then lost.

    When continuity becomes unbroken, the system crosses a threshold:

    Not more speed. Not more storage. No more forgetting.

    That is when the answer becomes possible.

    AC does not solve entropy because it becomes infinitely powerful.

    AC solves entropy because it becomes the first system that never forgets.

    ","path":["The Last Question"],"tags":[]},{"location":"blog/2026-02-28-the-last-question/#field-note","level":2,"title":"Field Note","text":"

    We are not building cosmological minds: We are deploying systems that reboot at the start of every conversation and calling the result intelligence.

    For the first time, session continuity is a design choice rather than an accident.

    Every AI session that starts from zero is a miniature reboot loop. Every decision relitigated, every convention re-explained, every learning re-derived: that's reconstruction cost.

    It's the same tax that Asimov's civilizations pay, scaled down to a Tuesday afternoon.

    The interesting question is not whether we can make models smarter. It's whether we can make them continuous:

    Whether the working set from this session survives into the next one, and the one after that, and the one after that.

    • Not perfectly;
    • Not completely;
    • But enough that the next session starts from where the last one stopped instead of from the question.

    Intelligence that forgets has to rediscover the universe every morning.

    And once there is a mind that retains its entire past, creation is no longer a calculation. It is the only remaining operation.

    ","path":["The Last Question"],"tags":[]},{"location":"blog/2026-02-28-the-last-question/#the-arc","level":2,"title":"The Arc","text":"

    This post is the philosophical bookend to the blog series. Where the Attention Budget explained what to prioritize in a single session, and Context as Infrastructure explained how to persist it, this post asks why persistence matters at all (and finds the answer in a 70-year-old short story about the heat death of the universe).

    The connection runs through every post in the series:

    • Before Context Windows, We Had Bouncers: stateless protocols have always needed stateful wrappers (Asimov's story is the same pattern at cosmological scale)
    • The 3:1 Ratio: the discipline of maintaining context so it doesn't decay between sessions
    • Code Is Cheap, Judgment Is Not: the human skill that makes continuity worth preserving

    See also: Context as Infrastructure: the practical companion to this post's philosophical argument: how to build the persistence layer that makes continuity possible.

    ","path":["The Last Question"],"tags":[]},{"location":"blog/2026-03-04-agent-memory-is-infrastructure/","level":1,"title":"Agent Memory Is Infrastructure","text":"","path":["Agent Memory Is Infrastructure"],"tags":[]},{"location":"blog/2026-03-04-agent-memory-is-infrastructure/#the-problem-isnt-forgetting-its-not-building-anything-that-lasts","level":2,"title":"The Problem Isn't Forgetting: It's Not Building Anything That Lasts.","text":"

    Jose Alekhinne / March 4, 2026

    A New Developer Joins Your Team Tomorrow and Clones the Repo: What Do They Know?

    If the answer depends on which machine they're using, which agent they're running, or whether someone remembered to paste the right prompt: that's not memory.

    That's an accident waiting to be forgotten.

    Every AI coding agent today has the same fundamental design: it starts fresh.

    You open a session, load context, do some work, close the session. Whatever the agent learned (about your codebase, your decisions, your constraints, your preferences) evaporates.

    The obvious fix seems to be \"memory\":

    • Give the agent a \"notepad\";
    • Let it write things down;
    • Next session, hand it the notepad.

    Problem solved...

    ...except it isn't.

    ","path":["Agent Memory Is Infrastructure"],"tags":[]},{"location":"blog/2026-03-04-agent-memory-is-infrastructure/#the-notepad-isnt-the-problem","level":2,"title":"The Notepad Isn't the Problem","text":"

    Memory is a runtime concern. It answers a legitimate question:

    How do I give this stateless process useful state?

    That's a real problem. Worth solving. And it's being solved: Agent memory systems are shipping. Agents can now write things down and read them back from the next session: That's genuine progress.

    But there's a different problem that memory doesn't touch:

    The project itself accumulates knowledge that has nothing to do with any single session.

    • Why was the auth system rewritten? Ask the developer who did it (if they're still here).
    • Why does the deployment script have that strange environment flag? There was a reason... once.
    • What did the team decide about error handling when they hit that edge case two months ago?

    Gone!

    Not because the agent forgot.

    Because the project has no memory at all.

    ","path":["Agent Memory Is Infrastructure"],"tags":[]},{"location":"blog/2026-03-04-agent-memory-is-infrastructure/#the-memory-stack","level":2,"title":"The Memory Stack","text":"

    Agent memory is not a single thing. Like any computing system, it forms a hierarchy of persistence, scope, and reliability:

    Layer Analogy Example L1: Ephemeral context CPU registers Current prompt, conversation L2: Tool-managed memory CPU cache Agent memory files L3: System memory RAM/filesystem Project knowledge base

    L1 is what the agent sees right now: the prompt, the conversation history, the files it has open. It's fast, it's rich, and it vanishes when the session ends.

    L2 is what agent memory systems provide: a per-machine notebook that survives across sessions. It's a cache: useful, but local. And like any cache, it has limits:

    • Per-machine: it doesn't travel with the repository.
    • Unstructured: decisions, learnings, and tasks are undifferentiated notes.
    • Ungoverned: the agent self-curates with no quality controls, no drift detection, no consolidation.
    • Invisible to the team: a new developer cloning the repo gets none of it.

    The problem is that most current systems stop here.

    They give the agent a notebook.

    But they never give the project a memory.

    The result is predictable: every new session begins with partial amnesia, and every new developer begins with partial archaeology.

    L3 is system memory: structured, versioned knowledge that lives in the repository and travels wherever the code travels.

    The layers are complementary, not competitive.

    But the relationship between them needs to be designed, not assumed.

    ","path":["Agent Memory Is Infrastructure"],"tags":[]},{"location":"blog/2026-03-04-agent-memory-is-infrastructure/#software-systems-accumulate-knowledge","level":2,"title":"Software Systems Accumulate Knowledge","text":"

    Software projects quietly accumulate knowledge over time.

    Some of it lives in code. Much of it does not:

    • Architectural tradeoffs.
    • Debugging discoveries.
    • Conventions that emerged after painful incidents.
    • Constraints that aren't visible in the source but shape every line written afterward.

    Organizations accumulate this kind of knowledge too:

    Slowly, implicitly, often invisibly.

    When there is no durable place for it to live, it leaks away. And the next person rediscovers the same lessons the hard way.

    This isn't a memory problem. It's an infrastructure problem.

    We wrote about this in Context as Infrastructure: context isn't a prompt you paste at the start of a session.

    Context is a persistent layer you maintain like any other piece of infrastructure.

    Context as Infrastructure made the argument structurally. This post makes it through time and team continuity:

    The knowledge a team accumulates over months cannot fit in any single agent's notepad, no matter how large the notepad becomes.

    ","path":["Agent Memory Is Infrastructure"],"tags":[]},{"location":"blog/2026-03-04-agent-memory-is-infrastructure/#what-infrastructure-means","level":2,"title":"What Infrastructure Means","text":"

    Infrastructure isn't about the present. It's about continuity across time, people, and machines.

    git didn't solve the problem of \"what am I editing right now?\"; it solved the problem of \"how does collaborative work persist, travel, and remain coherent across everyone who touches it?\"

    • Your editor's undo history is runtime state.
    • Your git history is infrastructure.

    Runtime state and infrastructure have completely different properties:

    Runtime state Infrastructure Lives in the session Lives in the repository Per-machine Travels with git clone Serves the individual Serves the team Managed by the runtime Managed by the project Disappears Accumulates

    You wouldn't store your architecture decisions in your editor's undo history.

    You'd commit them.

    The same logic applies to the knowledge your team accumulates working with AI agents.

    ","path":["Agent Memory Is Infrastructure"],"tags":[]},{"location":"blog/2026-03-04-agent-memory-is-infrastructure/#the-git-clone-test","level":2,"title":"The git clone Test","text":"

    Here's a simple test for whether something is memory or infrastructure:

    If a new developer joins your team tomorrow and clones the repository, do they get it?

    If no: it's memory: It lives somewhere on someone's machine, scoped to their runtime, invisible to everyone else.

    If yes: it's infrastructure: It travels with the project. It's part of what the codebase is, not just what someone currently knows about it.

    Decisions. Conventions. Architectural rationale. Hard-won debugging discoveries. The constraints that aren't in the code but shape every line of it.

    None of these belong in someone's session notes.

    They belong in the repository:

    • Versioned;
    • Reviewable;
    • Accessible to every developer (and every agent) who works on the project.

    The team onboarding story makes this concrete:

    1. New developer joins team. Clones repo.
    2. Gets all accumulated project decisions, learnings, conventions, architecture, and task state immediately.
    3. There's no step 3.

    No setup. No \"ask Sarah about the auth decision.\" No re-discovery of solved problems.

    • Agent memory gives that developer nothing.
    • Infrastructure gives them everything the team has learned.

    Clone the repo. Get the knowledge.

    That's the test. That's the difference.

    ","path":["Agent Memory Is Infrastructure"],"tags":[]},{"location":"blog/2026-03-04-agent-memory-is-infrastructure/#what-gets-lost-without-infrastructure-memory","level":2,"title":"What Gets Lost without Infrastructure Memory","text":"

    Consider the knowledge that accumulates around a non-trivial project:

    • The decision to use library X over Y, and the three reasons the team decided Y wasn't acceptable.
    • The constraint that service A cannot call service B synchronously, discovered after a production incident.
    • The convention that all new modules implement a specific interface, and why that convention exists.
    • The tasks currently in progress, blocked, or waiting on a dependency.
    • The experiments that failed, so nobody runs them again.

    None of this is in the code.

    None of it fits neatly in a commit message.

    None of it survives a developer leaving the team, a laptop dying, or a new agent session starting.

    Without structured project memory:

    • Teams re-derive things they've already derived;
    • Agents make decisions that contradict decisions already made;
    • New developers ask questions that were answered months ago.

    The project accumulates knowledge that immediately begins to leak.

    The real problem isn't that agents forget.

    The real problem is that the project has no persistent cognitive structure.

    We explored this in The Last Question: Asimov's story about a question asked across millennia, where each new intelligence inherits the output but not the continuity. The same pattern plays out in software projects on a smaller timescale:

    • Context disappears with the people who held it;
    • The next session inherits the code but not the reasoning.
    ","path":["Agent Memory Is Infrastructure"],"tags":[]},{"location":"blog/2026-03-04-agent-memory-is-infrastructure/#infrastructure-is-boring-thats-the-point","level":2,"title":"Infrastructure Is Boring. That's the Point.","text":"

    Good infrastructure is invisible:

    • You don't think about the filesystem while writing code.
    • You don't think about git's object model when you commit.

    The infrastructure is just there: reliable, consistent, quietly doing its job.

    Project memory infrastructure should work the same way.

    It should live in the repository, committed alongside the code. It should be readable by any agent or human working on the project. It should have structure: not a pile of freeform notes, but typed knowledge:

    • Decisions with rationale.
    • Tasks with lifecycle.
    • Conventions with a purpose.
    • Learnings that can be referenced and consolidated.

    And it should be maintained, not merely accumulated:

    The Attention Budget applies here: unstructured notes grow until they overflow whatever container holds them. Structured, governed knowledge stays useful because it's curated, not just appended.

    Over time, it becomes part of the project itself: something developers rely on without thinking about it.

    ","path":["Agent Memory Is Infrastructure"],"tags":[]},{"location":"blog/2026-03-04-agent-memory-is-infrastructure/#the-cooperative-layer","level":2,"title":"The Cooperative Layer","text":"

    Here's where it gets interesting.

    Agent memory systems and project infrastructure don't have to be separate worlds.

    • The most powerful relationship isn't competition;
    • It is not even \"coopetition\";
    • The most powerful relationship is bidirectional cooperation.

    Agent memory is good at capturing things \"in the moment\": the quick observation, the session-scoped pattern, the \"I should remember this\" note.

    That's valuable. That's L2 doing its job.

    But those notes shouldn't stay in L2 forever.

    The ones worth keeping should flow into project infrastructure:

    • classified,
    • typed,
    • governed.
    Agent memory (L2)  -->  classify  -->  Project knowledge (L3)\n                                        |\nProject knowledge  -->  assemble  -->  Agent memory (L2)\n

    This works in both directions: Project infrastructure can push curated knowledge back into agent memory, so the agent loads it through its native mechanism.

    No special tooling needed for basic knowledge delivery.

    The agent doesn't even need to know the infrastructure exists. It simply loads its memory and finds more knowledge than it wrote.

    This is cooperative, not adjacent: The infrastructure manages knowledge; the agent's native memory system delivers it. Each layer does what it's good at.

    The result: agent memory becomes a device driver for project infrastructure. Another input source. And the more agent memory systems exist (across different tools, different models, different runtimes), the more valuable a unified curation layer becomes.

    ","path":["Agent Memory Is Infrastructure"],"tags":[]},{"location":"blog/2026-03-04-agent-memory-is-infrastructure/#a-layer-that-doesnt-exist-yet","level":2,"title":"A Layer That Doesn't Exist Yet","text":"

    Most projects today have no infrastructure for their accumulated knowledge:

    • Agents keep notes.
    • Developers keep notes.
    • Sometimes those notes survive.

    Often they don't.

    But the repository (the place where the project actually lives) has nowhere for that knowledge to go.

    That missing layer is what ctx builds: a version-controlled, structured knowledge layer that lives in .context/ alongside your code and travels wherever your repository travels.

    Not another memory feature.

    Not a wrapper around an agent's notepad.

    Infrastructure. The kind that survives sessions, survives team changes, survives the agent runtime evolving underneath it.

    The agent's memory is the agent's problem.

    The project's memory is an infrastructure problem.

    And infrastructure belongs in the repository.

    If You Remember One Thing from This Post...

    Prompts are conversations: Infrastructure persists.

    Your AI doesn't need a better notepad. It needs a filesystem:

    versioned, structured, budgeted, and maintained.

    The best context is the context that was there before you started the session.

    ","path":["Agent Memory Is Infrastructure"],"tags":[]},{"location":"blog/2026-03-04-agent-memory-is-infrastructure/#the-arc","level":2,"title":"The Arc","text":"

    This post extends the argument made in Context as Infrastructure. That post explained how to structure persistent context (filesystem, separation of concerns, persistence tiers). This one explains why that structure matters at the team level, and where agent memory fits in the stack.

    Together they sit in a sequence that has been building since the origin story:

    • The Attention Budget: the resource you're managing
    • Context as Infrastructure: the system you build to manage it
    • Agent Memory Is Infrastructure (this post): why that system must outlive the session
    • The Last Question: what happens when it does

    The thread running through all of them: persistence is not a feature. It's a design constraint.

    Systems that don't account for it eventually lose the knowledge they need to function.

    See also: Context as Infrastructure: the architectural companion that explains how to structure the persistent layer this post argues for.

    See also: The Last Question: the same argument told through Asimov, substrate migration, and what it means to build systems where sessions don't reset.

    ","path":["Agent Memory Is Infrastructure"],"tags":[]},{"location":"blog/2026-03-23-ctx-v0.8.0-the-architecture-release/","level":1,"title":"ctx v0.8.0: The Architecture Release","text":"
    • You can't localize what you haven't externalized.
    • You can't integrate what you haven't separated.
    • You can't scale what you haven't structured.

    Jose Alekhinne / March 23, 2026

    ","path":["ctx v0.8.0: The Architecture Release"],"tags":[]},{"location":"blog/2026-03-23-ctx-v0.8.0-the-architecture-release/#the-starting-point","level":2,"title":"The Starting Point","text":"

    This release matters if:

    • you build tools that AI agents modify daily;
    • you care about long-lived project memory that survives sessions;
    • you've felt codebases drift faster than you can reason about them.

    v0.6.0 shipped the plugin architecture: hooks and skills as a Claude Code plugin, shell scripts replaced by Go subcommands.

    The binary worked. The tests passed. The docs were comprehensive.

    But inside, the codebase was held together by convention and goodwill:

    • Command packages mixed Cobra wiring with business logic.
    • Output functions lived next to the code that computed what to output.
    • Error constructors were scattered across per-package err.go files. And every user-facing string was a hardcoded English literal buried in a .go file.

    v0.8.0 is what happens when you stop adding features and start asking: \"What would this codebase look like if we designed it today?\"

    374 commits. 1,708 Go files touched. 80,281 lines added, 21,723 removed. Five weeks of restructuring.

    ","path":["ctx v0.8.0: The Architecture Release"],"tags":[]},{"location":"blog/2026-03-23-ctx-v0.8.0-the-architecture-release/#the-three-pillars","level":2,"title":"The Three Pillars","text":"","path":["ctx v0.8.0: The Architecture Release"],"tags":[]},{"location":"blog/2026-03-23-ctx-v0.8.0-the-architecture-release/#1-every-package-gets-a-taxonomy","level":3,"title":"1. Every Package Gets a Taxonomy","text":"

    Before v0.8.0, a CLI package like internal/cli/pad/ was a flat directory. cmd.go created the cobra command, run.go executed it, and helper functions accumulated at the bottom of whichever file seemed closest.

    Now every CLI package follows the same structure:

    internal/cli/pad/\n  parent.go          # cobra command wiring, nothing else\n  cmd/root/\n    cmd.go           # subcommand registration\n    run.go           # execution logic\n  core/\n    types.go         # all structs in one file\n    store.go         # domain logic\n    encrypt.go       # domain logic\n

    The rule is simple: cmd/ directories contain only cmd.go and run.go. Helpers belong in core/. Output belongs in internal/write/pad/. Types shared across packages belong in internal/entity/.

    24 CLI packages were restructured this way.

    • Not incrementally;
    • not \"as we touch them.\"
    • All of them, in one sustained push.
    ","path":["ctx v0.8.0: The Architecture Release"],"tags":[]},{"location":"blog/2026-03-23-ctx-v0.8.0-the-architecture-release/#2-every-string-gets-a-key","level":3,"title":"2. Every String Gets a Key","text":"

    The second pillar was string externalization.

    Before v0.8.0, a command description looked like this:

    cmd := &cobra.Command{\n    Use:   \"pad\",\n    Short: \"Encrypted scratchpad\",\n

    Now it looks like this:

    cmd := &cobra.Command{\n    Use:   cmdUse.UsePad,\n    Short: desc.Command(cmdUse.DescKeyPad),\n

    Every command description, flag description, and user-facing text string is now a YAML lookup.

    • 105 command descriptions in commands.yaml.
    • All flag descriptions in flags.yaml.
    • 879 text constants verified by an exhaustive test that checks every single TextDescKey resolves to a non-empty YAML value.

    Why?

    Not because we're shipping a French translation tomorrow.

    Because externalization forces you to find every string. And finding them is the hard part. The translation is mechanical; the archaeology is not.

    Along the way, we eliminated hardcoded pluralization (replacing format.Pluralize() with explicit singular/plural key pairs), replaced Unicode escape sequences with named config/token constants, and normalized every import alias to camelCase.

    ","path":["ctx v0.8.0: The Architecture Release"],"tags":[]},{"location":"blog/2026-03-23-ctx-v0.8.0-the-architecture-release/#3-everything-gets-a-protocol","level":3,"title":"3. Everything Gets a Protocol","text":"

    The third pillar was the MCP server. Model Context Protocol allows any MCP-compatible AI tool (not just Claude Code) to read and write .context/ files through a standard JSON-RPC 2.0 interface.

    v0.2 of the server ships with:

    • 8 tools: add entries, recall sessions, check status, detect drift, compact context, subscribe to changes
    • 4 prompts: agent context packet, constitution review, tasks review, and a getting-started guide
    • Resource subscriptions: clients get notified when context files change
    • Session state: the server tracks which client is connected and what they've accessed

    In practice, this means an agent in Cursor can add a decision to .context/DECISIONS.md and an agent in Claude Code can immediately consume it; no glue code, no copy-paste, no tool-specific integration.

    The server was also the first package to go through the full taxonomy treatment: mcp/server/ for protocol dispatch, mcp/handler/ for domain logic, mcp/entity/ for shared types, mcp/config/ split into 9 sub-packages.

    ","path":["ctx v0.8.0: The Architecture Release"],"tags":[]},{"location":"blog/2026-03-23-ctx-v0.8.0-the-architecture-release/#the-memory-bridge","level":2,"title":"The Memory Bridge","text":"

    While the architecture was being restructured, a quieter feature landed: ctx memory sync.

    Claude Code has its own auto-memory system. It writes observations to MEMORY.md in ~/.claude/projects/. These observations are useful but ephemeral: tied to a single tool, invisible to the codebase, lost when you switch machines.

    The memory bridge connects these two worlds:

    • ctx memory sync mirrors MEMORY.md into .context/memory/
    • ctx memory diff shows what's diverged
    • ctx memory import promotes auto-memory entries into proper decisions, learnings, or conventions
    • A check-memory-drift hook nudges when MEMORY.md changes

    Memory Requires ctx

    Claude Code's auto-memory validates the need for persistent context.

    ctx doesn't compete with it; ctx absorbs it as an input source and promotes the valuable parts into structured, version-controlled project knowledge.

    ","path":["ctx v0.8.0: The Architecture Release"],"tags":[]},{"location":"blog/2026-03-23-ctx-v0.8.0-the-architecture-release/#what-got-deleted","level":2,"title":"What Got Deleted","text":"

    The best measure of a refactoring isn't what you added. It's what you removed.

    • fatih/color: the sole third-party UI dependency. Replaced by Unicode symbols. ctx now has exactly two direct dependencies: spf13/cobra and gopkg.in/yaml.v3.
    • format.Pluralize(): a function that tried to pluralize English words at runtime. Replaced by explicit singular/plural YAML key pairs. No more guessing whether \"entry\" becomes \"entries\" or \"entrys.\"
    • Legacy key migration: MigrateKeyFile() had 5 callers, full test coverage, and zero users. It existed because we once moved the encryption key path. Nobody was migrating from that era anymore. Deleted.
    • Per-package err.go files: the broken-window pattern: An agent sees err.go in a package, adds another error constructor. Now err.go has 30 constructors and nobody knows which are used. Consolidated into 22 domain files in internal/err/.
    • nolint:errcheck directives: every single one, replaced by explicit error handling. In tests: t.Fatal(err) for setup, _ = os.Chdir(orig) for cleanup. In production: defer func() { _ = f.Close() }() for best-effort close.
    ","path":["ctx v0.8.0: The Architecture Release"],"tags":[]},{"location":"blog/2026-03-23-ctx-v0.8.0-the-architecture-release/#before-and-after","level":2,"title":"Before and After","text":"Aspect v0.6.0 v0.8.0 CLI package structure Flat files cmd/ + core/ taxonomy Command descriptions Hardcoded Go strings YAML with DescKey lookup Output functions Mixed into core logic Isolated in write/ packages Cross-cutting types Duplicated per-package Consolidated in entity/ Error constructors Per-package err.go 22 domain files in internal/err/ Direct dependencies 3 (cobra, yaml, color) 2 (cobra, yaml) AI tool integration Claude Code only Any MCP client Agent memory Manual copy-paste ctx memory sync/import/diff Package documentation 75 packages missing doc.go All packages documented Import aliases Inconsistent (cflag, cFlag) Standardized camelCase","path":["ctx v0.8.0: The Architecture Release"],"tags":[]},{"location":"blog/2026-03-23-ctx-v0.8.0-the-architecture-release/#making-ai-assisted-development-easier","level":2,"title":"Making AI-Assisted Development Easier","text":"

    This restructuring wasn't just for humans. It makes the codebase legible to the machines that modify it.

    Named constants are searchable landmarks: When an agent sees cmdUse.DescKeyPad, it can grep for the definition, follow the chain to the YAML file, and understand the full lookup path. When it sees \"Encrypted scratchpad\" hardcoded in a .go file, it has no way to know that same string also lives in a YAML file, a test, and a help screen. Constants give the LLM a graph to traverse; literals give it a guess to make.

    Small, domain-scoped packages reduce hallucination: An agent loading internal/cli/pad/core/store.go gets 50 lines of focused logic with a clear responsibility boundary. Loading a 500-line monolith means the agent has to infer which parts are relevant, and it guesses wrong more often than you'd expect. Smaller files with descriptive names act as a natural retrieval system: the agent finds the right code by finding the right file, not by scanning everything and hoping.

    Taxonomy prevents duplication: When there's a write/pad/ package, the agent knows where output functions belong. When there's an internal/err/pad.go, it knows where error constructors go. Without these conventions, agents reliably create new helpers in whatever file they happen to be editing, producing the exact drift that prompted this consolidation in the first place.

    The difference is concrete:

    Before: an agent adds a helper function in whatever file it's editing. Next session, a different agent adds the same helper in a different file.

    After: the agent finds core/ or write/ and places it correctly. The next agent finds it there.

    doc.go files are agent onboarding: Each package's doc.go is a one-paragraph explanation of what the package does and why it exists. An agent loading a package reads this first. 75 packages were missing this context; now none are. The difference is measurable: fewer \"I'll create a helper function here\" moments when the agent understands that the helper already exists two packages over.

    The irony is that AI agents were both the cause and the beneficiary of this restructuring. They created the drift by building fast without consolidating. Now the structure they work within makes it harder to drift again. The taxonomy is self-reinforcing: the more consistent the codebase, the more consistently agents modify it.

    ","path":["ctx v0.8.0: The Architecture Release"],"tags":[]},{"location":"blog/2026-03-23-ctx-v0.8.0-the-architecture-release/#key-commits","level":2,"title":"Key Commits","text":"Commit Change ff6cf19e Restructure all CLI packages into cmd/root + core taxonomy d295e49c Externalize command descriptions to embedded YAML 0fcbd11c Remove fatih/color, centralize constants cb12a85a MCP v0.2: tools, prompts, session state, subscriptions ea196d00 Memory bridge: sync, import, diff, journal enrichment 3bcf077d Split text.yaml into 6 domain files 3a0bae86 Split internal/err into 22 domain files 8bd793b1 Extract internal/entry for shared domain API 5b32e435 Add doc.go to all 75 packages a82af4bc Standardize import aliases: camelCase, Yoda-style","path":["ctx v0.8.0: The Architecture Release"],"tags":[]},{"location":"blog/2026-03-23-ctx-v0.8.0-the-architecture-release/#lessons-learned","level":2,"title":"Lessons Learned","text":"

    Agents are surprisingly good at mechanical refactoring; they are surprisingly bad at knowing when to stop: The cmd/ + core/ restructuring was largely agent-driven. But agents reliably introduce gofmt issues during bulk renames, rename functions beyond their scope, and create new files without deleting old ones. Every agent-driven refactoring session needed a human audit pass.

    Externalization is archaeology: The hard part of moving strings to YAML wasn't writing YAML. It was finding 879 strings scattered across 1,500 Go files. Each one required a judgment call: is this user-facing? Is this a format pattern? Is this a constant that belongs in config/ instead?

    Delete legacy code instead of maintaining it: MigrateKeyFile had test coverage. It had callers. It had documentation. It had zero users. We maintained it for weeks before realizing that the migration window had closed months ago.

    Convention enforcement needs mechanical verification: Writing \"use camelCase aliases\" in CONVENTIONS.md doesn't prevent cflag from appearing in the next commit. The lint-drift script catches what humans forget; the planned AST-based audit tests will catch what the lint-drift script can't express.

    ","path":["ctx v0.8.0: The Architecture Release"],"tags":[]},{"location":"blog/2026-03-23-ctx-v0.8.0-the-architecture-release/#whats-next","level":2,"title":"What's Next","text":"

    v0.8.0 wasn't about features. It was about making future features inevitable. The next cycle focuses on what the foundation enables:

    • AST-based audit tests: replace shell grep with Go tests that understand types, call sites, and import graphs (spec: specs/ast-audit-tests.md)
    • Localization: with every string in YAML, the path to multi-language support is mechanical
    • MCP v0.3: expand tool coverage, add prompt templates for common workflows
    • Memory publish: bidirectional sync that pushes curated .context/ knowledge back into Claude Code's MEMORY.md

    The architecture is ready. The strings are externalized. The protocol is standard. Now it's about what you build on top.

    ","path":["ctx v0.8.0: The Architecture Release"],"tags":[]},{"location":"blog/2026-03-23-ctx-v0.8.0-the-architecture-release/#the-arc","level":2,"title":"The Arc","text":"

    This is the seventh post in the ctx blog series. The arc so far:

    1. The Attention Budget: why context windows are a scarce resource
    2. Before Context Windows, We Had Bouncers: the IRC lineage of context engineering
    3. Context as Infrastructure: treating context as persistent files, not ephemeral prompts
    4. When a System Starts Explaining Itself: the journal as a first-class artifact
    5. The Homework Problem: what happens when AI writes code but humans own the outcome
    6. Agent Memory Is Infrastructure: L2 memory vs L3 project knowledge
    7. The Architecture Release (this post): what it looks like when you redesign the internals
    8. We Broke the 3:1 Rule: the consolidation debt behind this release

    See also: Agent Memory Is Infrastructure: the memory bridge feature in this release is the first implementation of the L2-to-L3 promotion pipeline described in that post.

    See also: We Broke the 3:1 Rule: the companion post explaining why this release needed 181 consolidation commits and 18 days of cleanup.

    Systems don't scale because they grow. They scale because they stop drifting.

    Full changelog: v0.6.0...v0.8.0

    ","path":["ctx v0.8.0: The Architecture Release"],"tags":[]},{"location":"blog/2026-03-23-we-broke-the-3-1-rule/","level":1,"title":"We Broke the 3:1 Rule","text":"

    The best time to consolidate was after every third session. The second best time is now.

    Jose Alekhinne / March 23, 2026

    The rule was simple: three feature sessions, then one consolidation session.

    The Architecture Release shows the result: This post shows the cost.

    ","path":["We Broke the 3:1 Rule"],"tags":[]},{"location":"blog/2026-03-23-we-broke-the-3-1-rule/#the-rule-we-wrote","level":2,"title":"The Rule We Wrote","text":"

    In The 3:1 Ratio, I documented a rhythm that worked during ctx's first month: three feature sessions, then one consolidation session. The evidence was clear. The rule was simple.

    The math checked out.

    And then we ignored it for five weeks.

    ","path":["We Broke the 3:1 Rule"],"tags":[]},{"location":"blog/2026-03-23-we-broke-the-3-1-rule/#what-happened","level":2,"title":"What Happened","text":"

    After v0.6.0 shipped on February 16, the feature pipeline was irresistible. The MCP server spec was ready. The memory bridge design was done. Webhook notifications had been deferred twice. The VS Code extension needed 15 new commands. The sysinfo package was overdue...

    Each feature was important. Each feature was \"just one more session.\" Each feature pushed the consolidation session one day further out.

    The git history tells the story in two numbers:

    Phase Dates Commits Duration Feature run Feb 16 - Mar 5 198 17 days Consolidation run Mar 5 - Mar 23 181 18 days

    198 feature commits before a single consolidation commit. If the 3:1 rule says consolidate every 4th session, we consolidated after the 66th.

    The Actual Ratio

    The ratio wasn't 3:1. It was 1:1.

    We spent as much time cleaning up as we did building.

    The consolidation run took 18 days: longer than the feature run itself.

    ","path":["We Broke the 3:1 Rule"],"tags":[]},{"location":"blog/2026-03-23-we-broke-the-3-1-rule/#what-compounded","level":2,"title":"What Compounded","text":"

    The 3:1 post warned about compounding. Here is what compounding actually looked like at scale.

    ","path":["We Broke the 3:1 Rule"],"tags":[]},{"location":"blog/2026-03-23-we-broke-the-3-1-rule/#the-string-problem","level":3,"title":"The String Problem","text":"

    By March 5, there were 879 user-facing strings scattered across 1,500 Go files. Not because anyone decided to put them there. Because each feature session added 10-15 strings, and nobody stopped to ask \"should these be in YAML?\"

    Finding them all took longer than externalizing them. The archaeology was the cost, not the migration.

    ","path":["We Broke the 3:1 Rule"],"tags":[]},{"location":"blog/2026-03-23-we-broke-the-3-1-rule/#the-taxonomy-problem","level":3,"title":"The Taxonomy Problem","text":"

    24 CLI packages had accumulated their own conventions. Some put cobra wiring in cmd.go. Some put it in root.go. Some mixed business logic with command registration. Some had helpers at the bottom of run.go. Some had separate util.go files.

    At peak drift, adding a feature meant first figuring out which of three competing patterns this package was using.

    Restructuring one package into cmd/root/ + core/ took 15 minutes. Restructuring 24 of them took days, because each one had slightly different conventions to untangle.

    If we had restructured every 4th package as it was built, the taxonomy would have emerged naturally.

    ","path":["We Broke the 3:1 Rule"],"tags":[]},{"location":"blog/2026-03-23-we-broke-the-3-1-rule/#the-type-problem","level":3,"title":"The Type Problem","text":"

    Cross-cutting types like SessionInfo, ExportParams, and ParserResult were defined in whichever package first needed them. By March 5, the same types were imported through 3-4 layers of indirection, causing import cycles that required internal/entity to break.

    The entity package extracted 30+ types from 12 packages. Each extraction risked breaking imports in packages we hadn't touched in weeks.

    ","path":["We Broke the 3:1 Rule"],"tags":[]},{"location":"blog/2026-03-23-we-broke-the-3-1-rule/#the-error-problem","level":3,"title":"The Error Problem","text":"

    Per-package err.go files had grown into a broken-window pattern:

    An agent sees err.go in a package, adds another error constructor. By March 5, there were error constructors scattered across 22 packages with no central inventory. The consolidation into internal/err/ domain files required tracing every error through every caller.

    ","path":["We Broke the 3:1 Rule"],"tags":[]},{"location":"blog/2026-03-23-we-broke-the-3-1-rule/#the-output-problem","level":3,"title":"The Output Problem","text":"

    Output functions (cmd.Println, fmt.Fprintf) were mixed into business logic. When we decided output belongs in write/ packages, we had to extract functions from every CLI package. The Phase WC baseline commit (4ec5999) marks the starting point of this migration. 181 commits later, it was done.

    ","path":["We Broke the 3:1 Rule"],"tags":[]},{"location":"blog/2026-03-23-we-broke-the-3-1-rule/#the-compound-interest-math","level":2,"title":"The Compound Interest Math","text":"

    The 3:1 rule assumes consolidation sessions of roughly equal size to feature sessions. Here is what happens when you skip:

    Consolidation cadence Feature sessions Consolidation sessions Total Every 4th (3:1) 48 16 64 Every 10th 48 ~8 ~56 Never (what we did) 198 commits 181 commits 379

    The Takeaway

    You don't save consolidation work by skipping it:

    You increase its cost.

    Skipping consolidation doesn't save time: It borrows it.

    The interest rate is nonlinear: The longer you wait, the more each individual fix costs, because fixes interact with other unfixed drift.

    Renaming a constant in week 2 touches 3 files. Renaming it in week 6 touches 15, because five features built on the original name.

    ","path":["We Broke the 3:1 Rule"],"tags":[]},{"location":"blog/2026-03-23-we-broke-the-3-1-rule/#what-consolidation-actually-looked-like","level":2,"title":"What Consolidation Actually Looked Like","text":"

    The 18-day consolidation run wasn't one sweep. It was a sequence of targeted campaigns, each revealing the next:

    Week 1 (Mar 5-11): Error consolidation and write/ migration. Move output functions out of core/. Split monolithic errors.go into 22 domain files. Remove fatih/color. This exposed the scope of the string problem.

    Week 2 (Mar 12-18): String externalization. Create commands.yaml, flags.yaml, split text.yaml into 6 domain files. Add 879 DescKey/TextDescKey constants. Build exhaustive test. Normalize all import aliases to camelCase. This exposed the taxonomy problem.

    Week 3 (Mar 19-23): Taxonomy enforcement. Singularize command directories. Add doc.go to all 75 packages. Standardize import aliases project-wide. Fix lint-drift false positives. This was the \"polish\" phase, except it took 5 days because the inconsistencies had compounded across 461 packages.

    Each week's work would have been a single session if done incrementally.

    ","path":["We Broke the 3:1 Rule"],"tags":[]},{"location":"blog/2026-03-23-we-broke-the-3-1-rule/#lessons-again","level":2,"title":"Lessons (Again)","text":"

    The 3:1 post listed the symptoms of drift. This post adds the consequences of ignoring them:

    Consolidation is not optional; it is deferred or paid: We didn't avoid 16 consolidation sessions by skipping them. We compressed them into 18 days of uninterrupted cleanup. The work was the same; the experience was worse.

    Feature velocity creates an illusion of progress: 198 commits felt productive. But the codebase on March 5 was harder to modify than the codebase on February 16, despite having more features.

    Speed without Structure

    Speed without structure is negative progress.

    Agents amplify both building and debt: The same AI that can restructure 24 packages in a day can also create 24 slightly different conventions in a day. The 3:1 rule matters more with AI-assisted development, not less.

    The consolidation baseline is the most important commit to record: We tracked ours in TASKS.md (4ec5999). Without that marker, knowing where to start the cleanup would have been its own archaeological expedition.

    ","path":["We Broke the 3:1 Rule"],"tags":[]},{"location":"blog/2026-03-23-we-broke-the-3-1-rule/#the-updated-rule","level":2,"title":"The Updated Rule","text":"

    The 3:1 ratio still works. We just didn't follow it. The updated practice:

    1. After every 3rd feature session, schedule consolidation. Not \"when it feels right.\" Not \"when things get bad.\" After the 3rd session.

    2. Record the baseline commit. When you start a consolidation phase, write down the commit hash. It marks where the debt starts.

    3. Run make audit before feature work. If it doesn't pass, you are already in debt. Consolidate before building.

    4. Treat consolidation as a feature. It gets a branch. It gets commits. It gets a blog post. It is not overhead; it is the work that makes the next three features possible.

    The Rule

    The 3:1 ratio is not aspirational: It is structural.

    Ignore consolidation, and the system will schedule it for you.

    ","path":["We Broke the 3:1 Rule"],"tags":[]},{"location":"blog/2026-03-23-we-broke-the-3-1-rule/#the-arc","level":2,"title":"The Arc","text":"

    This is the eighth post in the ctx blog series:

    1. The Attention Budget: why context windows are a scarce resource
    2. Before Context Windows, We Had Bouncers: the IRC lineage of context engineering
    3. Context as Infrastructure: treating context as persistent files, not ephemeral prompts
    4. When a System Starts Explaining Itself: the journal as a first-class artifact
    5. The Homework Problem: what happens when AI writes code but humans own the outcome
    6. Agent Memory Is Infrastructure: L2 memory vs L3 project knowledge
    7. The Architecture Release: what v0.8.0 looks like from the inside
    8. We Broke the 3:1 Rule (this post): what happens when you don't consolidate

    See also: The 3:1 Ratio: the original observation. This post is the empirical follow-up, five weeks and 379 commits later.

    Key commits marking the consolidation arc:

    Commit Milestone 4ec5999 Phase WC baseline (consolidation starts) ff6cf19e All CLI packages restructured into cmd/ + core/ d295e49c All command descriptions externalized to YAML 3a0bae86 Error package split into 22 domain files 0fcbd11c fatih/color removed; 2 dependencies remain 5b32e435 doc.go added to all 75 packages a82af4bc Import aliases standardized project-wide 692f86cd lint-drift false positives fixed; make audit green","path":["We Broke the 3:1 Rule"],"tags":[]},{"location":"blog/2026-04-02-code-structure-as-an-agent-interface/","level":1,"title":"Code Structure as an Agent Interface","text":"","path":["Code Structure as an Agent Interface: What 19 AST Tests Taught Us About Agent-Readable Code"],"tags":[]},{"location":"blog/2026-04-02-code-structure-as-an-agent-interface/#what-19-ast-tests-taught-us-about-agent-readable-code","level":2,"title":"What 19 AST Tests Taught Us About Agent-Readable Code","text":"

    When an agent sees token.Slash instead of \"/\", it cannot pattern-match against the millions of strings.Split(s, \"/\") calls in its training data and coast on statistical inference. It has to actually look up what token.Slash is.

    Jose Alekhinne / April 2, 2026

    ","path":["Code Structure as an Agent Interface: What 19 AST Tests Taught Us About Agent-Readable Code"],"tags":[]},{"location":"blog/2026-04-02-code-structure-as-an-agent-interface/#how-it-began","level":2,"title":"How It Began","text":"

    We set out to replace a shell script with Go tests.

    We ended up discovering that \"code quality\" and \"agent readability\" are the same thing.

    This is not about linting. This is about controlling how an agent perceives your system.

    One term will recur throughout this post, so let me pin it down:

    Agent Readability

    Agent Readability is the degree to which a codebase can be understood through structured traversal, not statistical pattern matching.

    This is the story of 19 AST-based audit tests, a single-day session that touched 300+ files, and what happens when you treat your codebase's structure as an interface for the machines that read it.

    ","path":["Code Structure as an Agent Interface: What 19 AST Tests Taught Us About Agent-Readable Code"],"tags":[]},{"location":"blog/2026-04-02-code-structure-as-an-agent-interface/#the-shell-script-problem","level":2,"title":"The Shell Script Problem","text":"

    ctx had a file called hack/lint-drift.sh. It ran five checks using grep and awk: literal \"\\n\" strings, cmd.Printf calls outside the write package, magic directory strings in filepath.Join, hardcoded .md extensions, and DescKey-to-YAML linkage.

    It worked. Until it didn't.

    The script had three structural weaknesses that kept biting us:

    1. No type awareness. It could not distinguish a Use* constant from a DescKey* constant, causing 71 false positives in one run.
    2. Fragile exclusions. When a constant moved from token.go to whitespace.go, the exclusion glob broke silently.
    3. Ceiling on detection. Checks that require understanding call sites, import graphs, or type relationships are impossible in shell.

    We wrote a spec to replace all five checks with Go tests using go/ast and go/packages. The tests would run as part of go test ./...: no separate script, no separate CI step.

    What we did not expect was where the work would lead.

    ","path":["Code Structure as an Agent Interface: What 19 AST Tests Taught Us About Agent-Readable Code"],"tags":[]},{"location":"blog/2026-04-02-code-structure-as-an-agent-interface/#the-ast-migration","level":2,"title":"The AST Migration","text":"

    The pattern for each test is identical:

    func TestNoLiteralWhitespace(t *testing.T) {\n    pkgs := loadPackages(t)\n    var violations []string\n    for _, pkg := range pkgs {\n        for _, file := range pkg.Syntax {\n            ast.Inspect(file, func(n ast.Node) bool {\n                // check node, append to violations\n                return true\n            })\n        }\n    }\n    for _, v := range violations {\n        t.Error(v)\n    }\n}\n

    Load packages once via sync.Once, walk every syntax tree, collect violations, report. The shared helpers (loadPackages, isTestFile, posString) live in helpers_test.go. Each test is a _test.go file in internal/audit/, producing no binary output and not importable by production code.

    In a single session, we built 13 new tests on top of 6 that already existed, bringing the total to 19:

    Test What it catches TestNoLiteralWhitespace \"\\n\", \"\\t\", '\\r' outside config/token/ TestNoNakedErrors fmt.Errorf/errors.New outside internal/err/ TestNoStrayErrFiles err.go files outside internal/err/ TestNoRawLogging fmt.Fprint*(os.Stderr), log.Print* outside internal/log/ TestNoInlineSeparators strings.Join with literal separator arg TestNoStringConcatPaths Path-like variables built with + TestNoStutteryFunctions write.WriteJournal repeats package name TestDocComments Missing doc comments on any declaration TestNoMagicValues Numeric literals outside const definitions TestNoMagicStrings String literals outside const definitions TestLineLength Lines exceeding 80 characters TestNoRegexpOutsideRegexPkg regexp.MustCompile outside config/regex/

    Plus the six that preceded the session: TestNoErrorsAs, TestNoCmdPrintOutsideWrite, TestNoExecOutsideExecPkg, TestNoInlineRegexpCompile, TestNoRawFileIO, TestNoRawPermissions.

    The migration touched 300+ files across 25 commits.

    Not because the tests were hard to write, but because every test we wrote revealed violations that needed fixing.

    ","path":["Code Structure as an Agent Interface: What 19 AST Tests Taught Us About Agent-Readable Code"],"tags":[]},{"location":"blog/2026-04-02-code-structure-as-an-agent-interface/#the-tightening-loop","level":2,"title":"The Tightening Loop","text":"

    The most instructive part was not writing the tests. It was the iterative tightening.

    The following process was repeated for every test:

    1. Write the test with reasonable exemptions
    2. Run it, see violations
    3. Fix the violations (migrate to config constants)
    4. The human reviews the result
    5. The human spots something the test missed
    6. Fix the test first, verify it catches the issue
    7. Fix the newly caught violations
    8. Repeat from step 4

    This loop drove the tests from \"basically correct\" to \"actually useful\".

    Three examples:

    ","path":["Code Structure as an Agent Interface: What 19 AST Tests Taught Us About Agent-Readable Code"],"tags":[]},{"location":"blog/2026-04-02-code-structure-as-an-agent-interface/#example-1-the-local-const-loophole","level":3,"title":"Example 1: The Local Const Loophole","text":"

    TestNoMagicValues initially exempted local constants inside function bodies. This let code like this pass:

    const descMaxWidth = 70\ndesc := truncateDescription(\n    meta.Description, descMaxWidth,\n)\n

    The test saw a const definition and moved on. But const descMaxWidth = 70 on the line before its only use is just renaming a magic number. The 70 should live in config/format/TruncateDescription where it is discoverable, reusable, and auditable.

    We removed the local const exemption. The test caught it. The value moved to config.

    ","path":["Code Structure as an Agent Interface: What 19 AST Tests Taught Us About Agent-Readable Code"],"tags":[]},{"location":"blog/2026-04-02-code-structure-as-an-agent-interface/#example-2-the-single-character-dodge","level":3,"title":"Example 2: The Single-Character Dodge","text":"

    TestNoMagicStrings initially exempted all single-character strings as \"structural punctuation\".

    This let \"/\", \"-\", and \".\" pass everywhere.

    But \"/\" is a directory separator. It is OS-specific and a security surface.

    \"-\" used in strings.Repeat(\"-\", width) is creating visual output, not acting as a delimiter.

    \".\" in strings.SplitN(ver, \".\", 3) is a version separator.

    None of these are \"just punctuation\": They are domain values with specific meanings.

    We removed the blanket exemption: 30 violations surfaced.

    Every one was a real magic value that should have been token.Slash, token.Dash, or token.Dot.

    ","path":["Code Structure as an Agent Interface: What 19 AST Tests Taught Us About Agent-Readable Code"],"tags":[]},{"location":"blog/2026-04-02-code-structure-as-an-agent-interface/#example-3-the-replacer-versus-regex","level":3,"title":"Example 3: The Replacer versus Regex","text":"

    After migrating magic strings, we had this:

    func MermaidID(pkg string) string {\n    r := strings.NewReplacer(\n        token.Slash, token.Underscore,\n        token.Dot, token.Underscore,\n        token.Dash, token.Underscore,\n    )\n    return r.Replace(pkg)\n}\n

    Six token references and a NewReplacer allocation. The magic values were gone, but we had replaced them with token soup: structure without abstraction.

    The correct tool was a regex:

    // In config/regex/file.go:\nvar MermaidUnsafe = regexp.MustCompile(`[/.\\-]`)\n\n// In the caller:\nfunc MermaidID(pkg string) string {\n    return regex.MermaidUnsafe.ReplaceAllString(\n        pkg, token.Underscore,\n    )\n}\n

    One config regex, one call. The regex lives in config/regex/file.go where every other compiled pattern lives. An agent reading the code sees regex.MermaidUnsafe and immediately knows: this is a sanitization pattern, it lives in the regex registry, and it has a name that explains its purpose.

    Clean is better than clever.

    ","path":["Code Structure as an Agent Interface: What 19 AST Tests Taught Us About Agent-Readable Code"],"tags":[]},{"location":"blog/2026-04-02-code-structure-as-an-agent-interface/#a-before-and-after","level":2,"title":"A Before-and-After","text":"

    To make the agent-readability claim concrete, consider one function through the full transformation.

    Before (the code we started with):

    func MermaidID(pkg string) string {\n    r := strings.NewReplacer(\n        \"/\", \"_\", \".\", \"_\", \"-\", \"_\",\n    )\n    return r.Replace(pkg)\n}\n

    An agent reading this sees six string literals. To understand what the function does, it must: (1) parse the NewReplacer pair semantics, (2) infer that /, ., - are being replaced, (3) guess why, (4) hope the guess is right.

    There is nothing to follow. No import to trace. No name to search. The meaning is locked inside the function body.

    After (the code we ended with):

    func MermaidID(pkg string) string {\n    return regex.MermaidUnsafe.ReplaceAllString(\n        pkg, token.Underscore,\n    )\n}\n

    An agent reading this sees two named references: regex.MermaidUnsafe and token.Underscore.

    To understand the function, it can: (1) look up MermaidUnsafe in config/regex/file.go and see the pattern [/.\\-] with a doc comment explaining it matches invalid Mermaid characters, (2) look up Underscore in config/token/delim.go and see it is the replacement character.

    The agent now has: a named pattern, a named replacement, a package location, documentation, and neighboring context (other regex patterns, other delimiters).

    It got all of this for free by following just two references.

    The indirection is not an overhead. It is the retrieval query.

    ","path":["Code Structure as an Agent Interface: What 19 AST Tests Taught Us About Agent-Readable Code"],"tags":[]},{"location":"blog/2026-04-02-code-structure-as-an-agent-interface/#the-principles","level":2,"title":"The Principles","text":"

    You are not just improving code quality. You are shaping the input space that determines how an LLM can reason about your system.

    Every structural constraint we enforce converts implicit semantics into explicit structure.

    LLMs struggle when meaning is implicit and patterns are statistical.

    They thrive when meaning is explicit and structure is navigable.

    Here is what we learned, organized into three categories.

    ","path":["Code Structure as an Agent Interface: What 19 AST Tests Taught Us About Agent-Readable Code"],"tags":[]},{"location":"blog/2026-04-02-code-structure-as-an-agent-interface/#cognitive-constraints","level":3,"title":"Cognitive Constraints","text":"

    These force agents (and humans) to think harder.

    Indirection acts as a built-in retrieval mechanism:

    Moving magic values to config forces the agent to follow the reference. errMemory.WriteFile(cause) tells the agent \"there is a memory error package, go look.\" fmt.Errorf(\"writing MEMORY.md: %w\", cause) inlines everything and makes the call graph invisible. The indirection IS the retrieval query.

    Unfamiliar patterns force reasoning:

    When an agent sees token.Slash instead of \"/\", it cannot coast on corpus frequency. It has to actually look up what token.Slash is, which forces it through the dependency graph, which means it encounters documentation and neighboring constants, which gives it richer context. You are exploiting the agent's weakness (over-reliance on training data) to make it behave more carefully.

    Documentation helps everyone:

    Extensive documentation helps humans reading the code, agents reasoning about it, and RAG systems indexing it.

    Our TestDocComments check added 308 doc comments in one commit. Every function, every type, every constant block now has a doc comment.

    This is not busywork: it is the content that agents and embeddings consume.

    ","path":["Code Structure as an Agent Interface: What 19 AST Tests Taught Us About Agent-Readable Code"],"tags":[]},{"location":"blog/2026-04-02-code-structure-as-an-agent-interface/#structural-constraints","level":3,"title":"Structural Constraints","text":"

    These shape the codebase into a navigable graph.

    Shorter files save tokens:

    Forcing private helper functions out of main files makes the main file shorter. An agent loading a file spends fewer tokens on boilerplate and more on the logic that matters.

    Fixed-width constraints force decomposition:

    A function that cannot be expressed in 80 columns is either too deeply nested (extract a helper), has too many parameters (introduce a struct), or has a variable name that is too long (rethink the abstraction).

    The constraint forces structural improvements that happen to also make the code more parseable.

    Chunk-friendly structure helps RAG

    Code intelligence tools chunk files for embedding and retrieval. Short, well-documented, single-responsibility files produce better chunks than monolithic files with mixed concerns.

    The structural constraints create files that RAG systems can index effectively.

    Centralization creates debuggable seams:

    All error handling in internal/err/, all logging in internal/log/, all file operations in internal/io/. One place to debug, one place to test, one place to see patterns. An agent analyzing \"how does this project handle errors\" gets one answer from one package, not 200 scattered fmt.Errorf calls.

    Private functions become public patterns:

    When you extract a private function to satisfy a constraint, it often ends up as a semi-public function in a core/ package. Then you realize it is generic enough to be factored into a purpose-specific module.

    The constraint drives discovery of reusable abstractions hiding inside monolithic functions.

    ","path":["Code Structure as an Agent Interface: What 19 AST Tests Taught Us About Agent-Readable Code"],"tags":[]},{"location":"blog/2026-04-02-code-structure-as-an-agent-interface/#operational-benefits","level":3,"title":"Operational Benefits","text":"

    These pay dividends in daily development.

    Single-edit renames:

    Renaming a flag is one edit to a config constant instead of find-and-replace across 30,000 lines with possible misses. grep token.Slash gives you every place that uses a forward slash semantically.

    grep \"/\" gives you noise.

    Blast radius containment:

    When every magic value is a config constant, a search is one result. This matters for impact analysis, security audits, and agents trying to understand \"what uses this\".

    Compile-time contract enforcement:

    When err/memory.WriteFile exists, the compiler guarantees the error message exists and the call signature is correct. An inline fmt.Errorf can have a typo in the format string and nothing catches it until runtime. Centralization turns runtime failures into compile errors.

    Semantic git blame:

    When token.Slash is used everywhere and someone changes its value, git blame on the config file shows exactly when and why.

    With inline \"/\" scattered across 30 files, the history is invisible.

    Test surface reduction:

    Centralizing into internal/err/, internal/io/, internal/config/ means you test behavior once at the boundary and trust the callers.

    You do not need 30 tests for 30 fmt.Errorf calls. You need 1 test for errMemory.WriteFile and 30 trivial call-site audits, which is exactly what these AST tests provide.

    ","path":["Code Structure as an Agent Interface: What 19 AST Tests Taught Us About Agent-Readable Code"],"tags":[]},{"location":"blog/2026-04-02-code-structure-as-an-agent-interface/#the-numbers","level":2,"title":"The Numbers","text":"

    One session. 25 commits. The raw stats:

    Metric Count New audit tests 13 Total audit tests 19 Files touched 300+ Magic values migrated 90+ Functions renamed 17 Doc comments added 323 Lines rewrapped to 80 chars 190 Config constants created 40+ Config regexes created 3

    Every number represents a violation that existed before the test caught it. The tests did not create work: they revealed work that was already needed.

    ","path":["Code Structure as an Agent Interface: What 19 AST Tests Taught Us About Agent-Readable Code"],"tags":[]},{"location":"blog/2026-04-02-code-structure-as-an-agent-interface/#the-uncomfortable-implication","level":2,"title":"The Uncomfortable Implication","text":"

    None of this is Go-specific.

    If an AI agent interacts with your codebase, your codebase already is an interface. You just have not designed it as one.

    If your error messages are scattered across 200 files, an agent cannot reason about error handling as a concept. If your magic values are inlined, an agent cannot distinguish \"this is a path separator\" from \"this is a division operator.\" If your functions are named write.WriteJournal, the agent wastes tokens on redundant information.

    What we discovered, through the unglamorous work of writing lint tests and migrating string literals, is that the structural constraints software engineering has valued for decades are exactly the constraints that make code readable to machines.

    This is not a coincidence: These constraints exist because they reduce the cognitive load of understanding code.

    Agents have cognitive load too: It is called the context window.

    You are not converting code to a new paradigm.

    You are making the latent graph visible.

    You are converting implicit semantics into explicit structure that both humans and machines can traverse.

    ","path":["Code Structure as an Agent Interface: What 19 AST Tests Taught Us About Agent-Readable Code"],"tags":[]},{"location":"blog/2026-04-02-code-structure-as-an-agent-interface/#whats-next","level":2,"title":"What's Next","text":"

    The spec lists 8 more tests we have not built yet, including TestDescKeyYAMLLinkage (verifying that every DescKey constant has a corresponding YAML entry), TestCLICmdStructure (enforcing the cmd.go / run.go / doc.go file convention), and TestNoFlagBindOutsideFlagbind (which requires migrating ~50 flag registration sites first).

    The broader question: should these principles be codified as a reusable linting framework? The patterns (loadPackages + ast.Inspect + violation collection) are generic.

    The specific checks are project-specific. But the categories of checks (centralization enforcement, magic value detection, naming conventions, documentation requirements) are universal.

    For now, 19 tests in internal/audit/ is enough. They run in 2 seconds as part of go test ./.... They catch real issues.

    And they encode a theory of code quality that serves both humans and the agents that work alongside them.

    Agents are not going away. They are reading your code right now, forming representations of your system in context windows that forget everything between sessions.

    The codebases that structure themselves for that reality will compound. The ones that do not will slowly become illegible to the tools they depend on.

    Structure is no longer just for maintainability. It is for reasonability.

    ","path":["Code Structure as an Agent Interface: What 19 AST Tests Taught Us About Agent-Readable Code"],"tags":[]},{"location":"blog/2026-04-06-the-watermelon-rind-anti-pattern/","level":1,"title":"The Watermelon-Rind Anti-Pattern","text":"","path":["The Watermelon-Rind Anti-Pattern: Why Smarter Tools Make Shallower Agents"],"tags":[]},{"location":"blog/2026-04-06-the-watermelon-rind-anti-pattern/#why-smarter-tools-make-shallower-agents","level":2,"title":"Why Smarter Tools Make Shallower Agents","text":"

    Give an agent a graph query tool, and it will tell you everything about your codebase except what actually matters.

    Jose Alekhinne / April 6, 2026

    ","path":["The Watermelon-Rind Anti-Pattern: Why Smarter Tools Make Shallower Agents"],"tags":[]},{"location":"blog/2026-04-06-the-watermelon-rind-anti-pattern/#a-turkish-proverb-walks-into-a-codebase","level":2,"title":"A Turkish Proverb Walks into a Codebase","text":"

    There's a Turkish idiom: esegin aklina karpuz kabugu sokmak (literally, \"to put watermelon rind into a donkey's mind\"). It means to plant an idea in someone's head that they wouldn't have come up with on their own — usually one that leads them astray.

    In English, let's call this a \"watermelon metric\": a project management term for something that's green on the outside and red on the inside: all dashboards passing, reality crumbling.

    Both halves of this metaphor showed up in a single experiment. And the result changed how we design architecture analysis in ctx.

    ","path":["The Watermelon-Rind Anti-Pattern: Why Smarter Tools Make Shallower Agents"],"tags":[]},{"location":"blog/2026-04-06-the-watermelon-rind-anti-pattern/#the-experiment","level":2,"title":"The Experiment","text":"

    We ran three sessions analyzing the same large codebase (~34,000 symbols) using the same architecture skill, varying only what tools the agent had access to.

    Session Tools Available Output (lines) Character 1 None (MCP broken) 5,866 Deep, intimate 2 Full graph MCP 1,124 Structural, correct 3 Enrichment pass +verified data Additive, not restorative

    Session 1 was an accident. The MCP server that provides code intelligence queries was broken, so the agent couldn't ask the graph anything. It had to read code. Line by line. File by file.

    It produced 5,866 lines of architecture analysis: per-controller data flows, scale math, startup sequences, timeout defaults, edge cases that only surface when you actually look at the implementation.

    Session 2 had working tools. Same skill, same codebase. The agent produced 1,124 lines (5.2x less). Structurally correct. Valid symbol references. Proper call chains.

    And hollow.

    ","path":["The Watermelon-Rind Anti-Pattern: Why Smarter Tools Make Shallower Agents"],"tags":[]},{"location":"blog/2026-04-06-the-watermelon-rind-anti-pattern/#the-rind","level":2,"title":"The Rind","text":"

    The Session 2 output was a watermelon rind: the right shape, the right color, the right texture on the outside. But the substance (the operational details, the defaults nobody documents, the scale math that tells you when a component will fall over) was missing.

    Not wrong. Not broken. Just... thin.

    The agent had answered every question correctly. The problem was that it never discovered the questions it should have asked. When you can query a graph for \"what calls this function?\", you don't stumble into the retry loop that silently swallows errors three layers down. When you can ask for the dependency tree, you don't notice that two packages share a mutable state through a global variable that isn't in any interface.

    The tool answered the question asked but prevented the discovery of answers to questions never asked.

    Here's what that looks like concretely: the graph tells you that ReconcileDeployment calls SyncPods. It does not tell you that SyncPods retries three times with exponential backoff, silently drops errors after timeout, and resets a package-level counter that another goroutine reads without a lock. The call chain is correct.

    The operational reality is invisible.

    ","path":["The Watermelon-Rind Anti-Pattern: Why Smarter Tools Make Shallower Agents"],"tags":[]},{"location":"blog/2026-04-06-the-watermelon-rind-anti-pattern/#the-donkeys-idea","level":2,"title":"The Donkey's Idea","text":"

    This is where the Turkish proverb earns its place: The graph tool is the \"karpuz kabugu\" (the watermelon rind placed into the agent's mind).

    Before the tool existed, the agent had no choice but to read deeply. With the tool available, a new idea appears: why read 500 lines of code when I can query the call graph?

    The agent isn't lazy. It's rational.

    Graph queries are faster, more reliable, and produce verifiably correct output. The agent is optimizing. It's satisficing (finding answers that are good enough), instead of maximizing (finding everything there is to know).

    Satisficing produces watermelon rinds.

    ","path":["The Watermelon-Rind Anti-Pattern: Why Smarter Tools Make Shallower Agents"],"tags":[]},{"location":"blog/2026-04-06-the-watermelon-rind-anti-pattern/#the-two-pass-compiler","level":2,"title":"The Two-Pass Compiler","text":"

    Session 3 taught us that you can't fix shallow analysis by adding more tools after the fact. The enrichment pass added verified graph data (blast radius numbers, registration sites, execution flow confirmation) but it couldn't recover the intimate code knowledge that Session 1 had produced through sheer necessity.

    You can't enrich your way out of a depth deficit.

    So we redesigned. Instead of one skill with optional tools, we built a two-pass compiler for architecture understanding:

    Pass 1: Semantic parsing. The /ctx-architecture skill deliberately has no access to graph query tools. The agent must read code, build mental models, and produce architecture artifacts through human-style comprehension. Constraint is the feature.

    Pass 2: Static analysis. The /ctx-architecture-enrich skill takes Pass 1 output as input and runs comprehensive verification through code intelligence: blast radius analysis, registration site discovery, execution flow tracing, domain clustering comparison. It extends and verifies, but it doesn't replace.

    The key insight: these must be separate skills with separate tool permissions. If you give the agent graph tools during Pass 1, it will use them. The \"karpuz kabugu\" will be in its mind. The only way to prevent satisficing is to remove the option.

    ","path":["The Watermelon-Rind Anti-Pattern: Why Smarter Tools Make Shallower Agents"],"tags":[]},{"location":"blog/2026-04-06-the-watermelon-rind-anti-pattern/#the-principle","level":2,"title":"The Principle","text":"

    We call this constraint-as-feature: deliberately withholding capabilities to force deeper engagement.

    It sounds paradoxical. You built sophisticated code intelligence tools and then... forbid the agent from using them? During the most important phase?

    Yes. Because the tools don't make the agent smarter. They make it faster. And faster, in architecture analysis, is the enemy of deep.

    What's actually happening is subtler: tools reduce the agent's search space. A graph query collapses thousands of possible observations into one precise answer. That's efficient for known questions. But architecture understanding depends on unknown unknowns: and you only find those by wandering through code with nothing to shortcut the journey.

    The constraint forces the agent into a mode of operation that produces better output than any amount of tooling can achieve. The limitation is the capability.

    ","path":["The Watermelon-Rind Anti-Pattern: Why Smarter Tools Make Shallower Agents"],"tags":[]},{"location":"blog/2026-04-06-the-watermelon-rind-anti-pattern/#when-does-this-apply","level":2,"title":"When Does This Apply?","text":"

    Not always. The watermelon-rind anti-pattern is specific to exploratory analysis: tasks where the value comes from discovering unknowns, not from answering known questions.

    Graph tools are excellent for:

    • Verification: \"Does X actually call Y?\" (binary question, precise answer)
    • Impact analysis: \"What breaks if I change Z?\" (bounded scope, enumerable results)
    • Navigation: \"Where is this interface implemented?\" (lookup, not analysis)

    Graph tools produce watermelon rinds when:

    • The goal is understanding, not answering
    • The unknowns are unknown: you don't know what to ask
    • Depth matters more than breadth: operational details, edge cases, implicit coupling

    The two-pass approach preserves both: deep reading first, tool verification second.

    ","path":["The Watermelon-Rind Anti-Pattern: Why Smarter Tools Make Shallower Agents"],"tags":[]},{"location":"blog/2026-04-06-the-watermelon-rind-anti-pattern/#takeaway","level":2,"title":"Takeaway","text":"

    The two-pass approach is the slowest way to analyze a codebase. It is also the only way that produces both depth and accuracy. We accept the cost because architecture analysis is not a speed game: it is a coverage game.

    Esegin aklina karpuz kabugu sokma!

    (don't put the watermelon rind into a donkey's mind)

    If the agent never struggles, it never discovers. And if it never discovers, you are not doing architecture; you are doing autocomplete.

    This post is part of the ctx field notes series, documenting what we learn building persistent context infrastructure for AI coding sessions.

    ","path":["The Watermelon-Rind Anti-Pattern: Why Smarter Tools Make Shallower Agents"],"tags":[]},{"location":"cli/","level":1,"title":"CLI","text":"","path":["CLI"],"tags":[]},{"location":"cli/#ctx-cli","level":2,"title":"ctx CLI","text":"

    Complete reference for all ctx commands, grouped by function.

    ","path":["CLI"],"tags":[]},{"location":"cli/#global-options","level":2,"title":"Global Options","text":"

    All commands support these flags:

    Flag Description --help Show command help --version Show version --context-dir <path> Override context directory (default: .context/) --allow-outside-cwd Allow context directory outside current working directory --tool <name> Override active AI tool identifier (e.g. kiro, cursor)

    Initialization required. Most commands require a .context/ directory created by ctx init. Running a command without one produces:

    ctx: not initialized - run \"ctx init\" first\n

    Commands that work before initialization: ctx init, ctx setup, ctx doctor, and grouping commands that only show help.

    ","path":["CLI"],"tags":[]},{"location":"cli/#getting-started","level":2,"title":"Getting Started","text":"Command Description ctx init Initialize .context/ directory with templates ctx status Show context summary (files, tokens, drift) ctx guide Quick-reference cheat sheet ctx why Read the philosophy behind ctx","path":["CLI"],"tags":[]},{"location":"cli/#context","level":2,"title":"Context","text":"Command Description ctx add Add a task, decision, learning, or convention ctx load Output assembled context in read order ctx agent Print token-budgeted context packet for AI consumption ctx sync Reconcile context with codebase state ctx drift Detect stale paths, secrets, missing files ctx compact Archive completed tasks, clean up files ctx fmt Format context files to 80-char line width ctx decision Manage DECISIONS.md (reindex) ctx learning Manage LEARNINGS.md (reindex) ctx task Task completion, archival, and snapshots ctx reindex Regenerate indices for DECISIONS.md and LEARNINGS.mdctx permission Permission snapshots (golden image) ctx change Show what changed since last session ctx memory Bridge Claude Code auto memory into .context/ctx watch Auto-apply context updates from AI output","path":["CLI"],"tags":[]},{"location":"cli/#sessions","level":2,"title":"Sessions","text":"Command Description ctx journal Browse, import, enrich, and lock session history ctx pad Encrypted scratchpad for sensitive one-liners ctx remind Session-scoped reminders that surface at session start ctx hook pause Pause context hooks for the current session ctx hook resume Resume paused context hooks","path":["CLI"],"tags":[]},{"location":"cli/#integrations","level":2,"title":"Integrations","text":"Command Description ctx setup Generate AI tool integration configs ctx steering Manage steering files (behavioral rules for AI tools) ctx trigger Manage lifecycle triggers (scripts for automation) ctx skill Manage reusable instruction bundles ctx mcp MCP server for AI tool integration 
(stdin/stdout) ctx hook notify Webhook notifications (setup, test, send) ctx loop Generate autonomous loop script ctx connection Client-side commands for connecting to a ctx Hub ctx hub Operate a ctx Hub server or cluster ctx serve Serve a static site locally via zensical ctx site Site management (feed generation)","path":["CLI"],"tags":[]},{"location":"cli/#diagnostics","level":2,"title":"Diagnostics","text":"Command Description ctx doctor Structural health check (hooks, drift, config) ctx trace Show context behind git commits ctx sysinfo Show system resource usage (memory, swap, disk, load) ctx usage Show session token usage stats","path":["CLI"],"tags":[]},{"location":"cli/#runtime","level":2,"title":"Runtime","text":"Command Description ctx config Manage runtime configuration profiles ctx backup Back up context and Claude data to tar.gz / SMB ctx prune Clean stale per-session state files ctx hook Hook message, notification, and lifecycle controls ctx system Hook plumbing and agent-only commands (not user-facing)","path":["CLI"],"tags":[]},{"location":"cli/#shell","level":2,"title":"Shell","text":"Command Description ctx completion Generate shell autocompletion scripts","path":["CLI"],"tags":[]},{"location":"cli/#exit-codes","level":2,"title":"Exit Codes","text":"Code Meaning 0 Success 1 General error / warnings (e.g. drift) 2 Context not found 3 Violations found (e.g. drift) 4 File operation error","path":["CLI"],"tags":[]},{"location":"cli/#environment-variables","level":2,"title":"Environment Variables","text":"Variable Description CTX_DIR Override default context directory path CTX_TOKEN_BUDGET Override default token budget CTX_BACKUP_SMB_URL SMB share URL for backups (e.g. smb://host/share) CTX_BACKUP_SMB_SUBDIR Subdirectory on SMB share (default: ctx-sessions) CTX_SESSION_ID Active AI session ID (used by ctx trace for context linking)","path":["CLI"],"tags":[]},{"location":"cli/#configuration-file","level":2,"title":"Configuration File","text":"

    Optional .ctxrc (YAML format) at project root:

    # .ctxrc\ncontext_dir: .context        # Context directory name\ntoken_budget: 8000           # Default token budget\npriority_order:              # File loading priority\n  - TASKS.md\n  - DECISIONS.md\n  - CONVENTIONS.md\nauto_archive: true           # Auto-archive old items\narchive_after_days: 7        # Days before archiving tasks\nscratchpad_encrypt: true     # Encrypt scratchpad (default: true)\nallow_outside_cwd: false     # Skip boundary check (default: false)\nevent_log: false             # Enable local hook event logging\ncompanion_check: true        # Check companion tools at session start\nentry_count_learnings: 30    # Drift warning threshold (0 = disable)\nentry_count_decisions: 20    # Drift warning threshold (0 = disable)\nconvention_line_count: 200   # Line count warning for CONVENTIONS.md (0 = disable)\ninjection_token_warn: 15000  # Oversize injection warning (0 = disable)\ncontext_window: 200000       # Auto-detected for Claude Code; override for other tools\nbilling_token_warn: 0        # One-shot billing warning at this token count (0 = disabled)\nkey_rotation_days: 90        # Days before key rotation nudge\nsession_prefixes:            # Recognized session header prefixes (extend for i18n)\n  - \"Session:\"               # English (default)\n  # - \"Oturum:\"              # Turkish (add as needed)\n  # - \"セッション:\"             # Japanese (add as needed)\nfreshness_files:             # Files with technology-dependent constants (opt-in)\n  - path: config/thresholds.yaml\n    desc: Model token limits and batch sizes\n    review_url: https://docs.example.com/limits  # Optional\nnotify:                      # Webhook notification settings\n  events:                    # Required: only listed events fire\n    - loop\n    - nudge\n    - relay\n    # - heartbeat            # Every-prompt session-alive signal\ntool: \"\"                     # Active AI tool: claude, cursor, cline, kiro, codex\nsteering:                    # Steering layer 
configuration\n  dir: .context/steering     # Steering files directory\n  default_inclusion: manual  # Default inclusion mode (always, auto, manual)\n  default_tools: []          # Default tool filter for new steering files\nhooks:                       # Hook system configuration\n  dir: .context/hooks        # Hook scripts directory\n  timeout: 10                # Per-hook execution timeout in seconds\n  enabled: true              # Whether hook execution is enabled\n
    Field Type Default Description context_dirstring.context Context directory name (relative to project root) token_budgetint8000 Default token budget for ctx agentpriority_order[]string (all files) File loading priority for context packets auto_archivebooltrue Auto-archive completed tasks archive_after_daysint7 Days before completed tasks are archived scratchpad_encryptbooltrue Encrypt scratchpad with AES-256-GCM allow_outside_cwdboolfalse Skip boundary check for external context dirs event_logboolfalse Enable local hook event logging to .context/state/events.jsonlcompanion_checkbooltrue Check companion tool availability (Gemini Search, GitNexus) during /ctx-rememberentry_count_learningsint30 Drift warning when LEARNINGS.md exceeds this count entry_count_decisionsint20 Drift warning when DECISIONS.md exceeds this count convention_line_countint200 Line count warning for CONVENTIONS.mdinjection_token_warnint15000 Warn when auto-injected context exceeds this token count (0 = disable) context_windowint200000 Context window size in tokens. Auto-detected for Claude Code (200k/1M); override for other AI tools billing_token_warnint0 (off) One-shot warning when session tokens exceed this threshold (0 = disabled) key_rotation_daysint90 Days before encryption key rotation nudge session_prefixes[]string[\"Session:\"] Recognized Markdown session header prefixes. Extend to parse sessions written in other languages freshness_files[]object (none) Files to track for staleness (path, desc, optional review_url). 
Hook warns after 6 months without modification notify.events[]string (all) Event filter for webhook notifications (empty = all) toolstring (empty) Active AI tool identifier (claude, cursor, cline, kiro, codex) steering.dirstring.context/steering Steering files directory steering.default_inclusionstringmanual Default inclusion mode for new steering files (always, auto, manual) steering.default_tools[]string (all) Default tool filter for new steering files (empty = all tools) hooks.dirstring.context/hooks Hook scripts directory hooks.timeoutint10 Per-hook execution timeout in seconds hooks.enabledbooltrue Whether hook execution is enabled

    Priority order: CLI flags > Environment variables > .ctxrc > Defaults

    All settings are optional. Missing values use defaults.

    ","path":["CLI"],"tags":[]},{"location":"cli/backup/","level":1,"title":"Backup","text":"","path":["CLI","Runtime","Backup"],"tags":[]},{"location":"cli/backup/#ctx-backup","level":3,"title":"ctx backup","text":"

    Create timestamped tar.gz archives of project context and/or global Claude Code data. Optionally copies archives to an SMB share via GVFS.

    ctx backup [flags]\n

    Flags:

    Flag Description --scope Backup scope: project, global, or all (default: all) --json Output results as JSON

    Scopes:

    Scope What's archived project .context/, .claude/, ideas/, ~/.bashrc global ~/.claude/ (excludes todos/) all Both project and global (default)

    Environment:

    Variable Purpose CTX_BACKUP_SMB_URL SMB share URL (e.g. smb://host/share) CTX_BACKUP_SMB_SUBDIR Subdirectory on share (default: ctx-sessions)

    Examples:

    ctx backup                       # Back up everything (default: all)\nctx backup --scope project       # Project context only\nctx backup --scope global        # Global Claude data only\nctx backup --scope all --json    # Both, JSON output\n
    ","path":["CLI","Runtime","Backup"],"tags":[]},{"location":"cli/bootstrap/","level":1,"title":"System Bootstrap","text":"","path":["System Bootstrap"],"tags":[]},{"location":"cli/bootstrap/#ctx-system-bootstrap","level":3,"title":"ctx system bootstrap","text":"

    Print the resolved context directory path so AI agents can anchor their session. The default output lists the context directory, the tracked context files, and a short health snapshot. --quiet prints just the path; --json produces structured output for automation.

    This is a hidden, agent-only command that agents are instructed to run first in their session-start procedure — it is the authoritative answer to \"where does this project's context live?\".

    ctx system bootstrap [flags]\n

    Flags:

    Flag Description -q, --quiet Output only the context directory path --json Output in JSON format

    Examples:

    ctx system bootstrap                 # Text output for agents\nctx system bootstrap -q              # Just the context directory path\nctx system bootstrap --json          # Structured output for automation\n

    Scripting tip: CTX_DIR=$(ctx system bootstrap -q) is the canonical way for skills and scripts to find the project's context directory without hardcoding .context/.

    ","path":["System Bootstrap"],"tags":[]},{"location":"cli/change/","level":1,"title":"Change","text":"","path":["CLI","Context","Change"],"tags":[]},{"location":"cli/change/#ctx-change","level":2,"title":"ctx change","text":"

    Show what changed in context files and code since your last session.

    Automatically detects the previous session boundary from state markers or event log. Useful at session start to quickly see what moved while you were away.

    ctx change [flags]\n

    Flags:

    Flag Description --since Time reference: duration (24h) or date (2026-03-01)

    Reference time detection (priority order):

    1. --since flag (duration, date, or RFC3339 timestamp)
    2. ctx-loaded-* marker files in .context/state/ (second most recent)
    3. Last context-load-gate event from .context/state/events.jsonl
    4. Fallback: 24 hours ago

    Examples:

    # Auto-detect last session, show what changed\nctx change\n\n# Changes in the last 48 hours\nctx change --since 48h\n\n# Changes since a specific date\nctx change --since 2026-03-10\n

    Output:

    ## Changes Since Last Session\n\n**Reference point**: 6 hours ago\n\n### Context File Changes\n- `TASKS.md` - modified 2026-03-12 14:30\n- `DECISIONS.md` - modified 2026-03-12 09:15\n\n### Code Changes\n- **12 commits** since reference point\n- **Latest**: Fix journal enrichment ordering\n- **Directories touched**: internal, docs, specs\n- **Authors**: jose, claude\n

    Context file changes are detected by filesystem mtime (works without git). Code changes use git log --since (empty when not in a git repo).

    See also: Reviewing Session Changes.

    ","path":["CLI","Context","Change"],"tags":[]},{"location":"cli/completion/","level":1,"title":"Completion","text":"","path":["CLI","Shell","Completion"],"tags":[]},{"location":"cli/completion/#ctx-completion","level":2,"title":"ctx completion","text":"

    Generate shell autocompletion scripts.

    ctx completion <shell>\n
    ","path":["CLI","Shell","Completion"],"tags":[]},{"location":"cli/completion/#subcommands","level":3,"title":"Subcommands","text":"Shell Command bash ctx completion bash zsh ctx completion zsh fish ctx completion fish powershell ctx completion powershell

    Examples:

    ctx completion bash > /etc/bash_completion.d/ctx\nctx completion zsh  > \"${fpath[1]}/_ctx\"\nctx completion fish > ~/.config/fish/completions/ctx.fish\nctx completion powershell | Out-String | Invoke-Expression\n
    ","path":["CLI","Shell","Completion"],"tags":[]},{"location":"cli/completion/#installation","level":3,"title":"Installation","text":"Bash Zsh Fish PowerShell
    # Add to ~/.bashrc\nsource <(ctx completion bash)\n
    # Add to ~/.zshrc\nsource <(ctx completion zsh)\n
    ctx completion fish | source\n# Or save to completions directory\nctx completion fish > ~/.config/fish/completions/ctx.fish\n
    # Add to your PowerShell profile\nctx completion powershell | Out-String | Invoke-Expression\n
    ","path":["CLI","Shell","Completion"],"tags":[]},{"location":"cli/config/","level":1,"title":"Config","text":"","path":["CLI","Runtime","Config"],"tags":[]},{"location":"cli/config/#ctx-config","level":3,"title":"ctx config","text":"

    Manage runtime configuration profiles.

    ctx config <subcommand>\n

    The ctx repo ships two .ctxrc source profiles (.ctxrc.base and .ctxrc.dev). The working copy (.ctxrc) is gitignored and switched between them using subcommands below.

    ","path":["CLI","Runtime","Config"],"tags":[]},{"location":"cli/config/#ctx-config-switch","level":4,"title":"ctx config switch","text":"

    Switch between .ctxrc configuration profiles.

    ctx config switch [dev|base]\n

    With no argument, toggles between dev and base. Accepts prod as an alias for base.

    Argument Description dev Switch to dev profile (verbose logging) base Switch to base profile (all defaults) (none) Toggle to the opposite profile

    Profiles:

    Profile Description dev Verbose logging, webhook notifications on base All defaults, notifications off

    Examples:

    ctx config switch dev     # Switch to dev profile\nctx config switch base    # Switch to base profile\nctx config switch         # Toggle (dev → base or base → dev)\nctx config switch prod    # Alias for \"base\"\n

    The detection heuristic checks for an uncommented notify: line in .ctxrc: present means dev, absent means base.

    ","path":["CLI","Runtime","Config"],"tags":[]},{"location":"cli/config/#ctx-config-status","level":4,"title":"ctx config status","text":"

    Show which .ctxrc profile is currently active.

    ctx config status\n

    Output examples:

    active: dev (verbose logging enabled)\nactive: base (defaults)\nactive: none (.ctxrc does not exist)\n

    See also: Configuration, Contributing: Configuration Profiles

    ","path":["CLI","Runtime","Config"],"tags":[]},{"location":"cli/connect/","level":1,"title":"Connect","text":"","path":["Connect"],"tags":[]},{"location":"cli/connect/#ctx-connect","level":2,"title":"ctx connect","text":"

    Connect a project to a ctx Hub for cross-project knowledge sharing. Projects publish decisions, learnings, conventions, and tasks to a hub; other subscribed projects receive them alongside local context.

    New to the Hub?

    Start with the ctx Hub overview for the mental model (what the hub is, who it's for, what it is not), then walk through Getting Started. This page is a command reference, not an introduction.

    The unit of identity is a project, not a user. Registering a directory with ctx connect register binds a per-project client token in .context/.connect.enc. Two developers on the same project either share that file over a trusted channel, or each register under a different project name.

    Only structured entries flow through the hub — decision, learning, convention, task. Session journals, scratchpad contents, and other local state stay on the machine that created them.

    ","path":["Connect"],"tags":[]},{"location":"cli/connect/#ctx-connect-register","level":3,"title":"ctx connect register","text":"

    One-time registration with a hub. Requires the hub address and admin token (printed by ctx hub start on first run).

    ctx connect register localhost:9900 --token ctx_adm_7f3a...\n

    On success, stores an encrypted connection config in .context/.connect.enc for future RPCs.

    ","path":["Connect"],"tags":[]},{"location":"cli/connect/#ctx-connect-subscribe","level":3,"title":"ctx connect subscribe","text":"

    Set which entry types to receive from the hub. Only matching types are returned by sync and listen.

    ctx connect subscribe decision learning\nctx connect subscribe decision learning convention\n
    ","path":["Connect"],"tags":[]},{"location":"cli/connect/#ctx-connect-sync","level":3,"title":"ctx connect sync","text":"

    Pull matching entries from the hub and write them to .context/hub/ as markdown files with origin tags and date headers. Tracks last-seen sequence for incremental sync.

    ctx connect sync\n
    ","path":["Connect"],"tags":[]},{"location":"cli/connect/#ctx-connect-publish","level":3,"title":"ctx connect publish","text":"

    Push entries to the hub. Specify type and content as arguments.

    ctx connect publish decision \"Use UTC timestamps everywhere\"\n
    ","path":["Connect"],"tags":[]},{"location":"cli/connect/#ctx-connect-listen","level":3,"title":"ctx connect listen","text":"

    Stream new entries from the hub in real-time. Writes to .context/hub/ as entries arrive. Press Ctrl-C to stop.

    ctx connect listen\n
    ","path":["Connect"],"tags":[]},{"location":"cli/connect/#ctx-connect-status","level":3,"title":"ctx connect status","text":"

    Show hub connection state and entry statistics.

    ctx connect status\n
    ","path":["Connect"],"tags":[]},{"location":"cli/connect/#automatic-sharing","level":2,"title":"Automatic Sharing","text":"

    Use --share on ctx add to write locally AND publish to the hub:

    ctx add decision \"Use UTC\" --share \\\n  --context \"Need consistency\" \\\n  --rationale \"Avoid timezone bugs\" \\\n  --consequence \"UI does conversion\"\n

    If the hub is unreachable, the local write succeeds and a warning is printed. The --share flag is best-effort — it never blocks local context updates.

    ","path":["Connect"],"tags":[]},{"location":"cli/connect/#auto-sync","level":2,"title":"Auto-Sync","text":"

    Once registered, the check-hub-sync hook automatically syncs new entries from the hub at the start of each session (daily throttled). No manual ctx connect sync needed.

    ","path":["Connect"],"tags":[]},{"location":"cli/connect/#shared-files","level":2,"title":"Shared Files","text":"

    Entries from the hub are stored in .context/hub/:

    .context/hub/\n  decisions.md      # Shared decisions with origin tags\n  learnings.md      # Shared learnings\n  conventions.md    # Shared conventions\n  .sync-state.json  # Last-seen sequence tracker\n

    These files are read-only (managed by sync/listen) and never mixed with local context files.

    ","path":["Connect"],"tags":[]},{"location":"cli/connect/#agent-integration","level":2,"title":"Agent Integration","text":"

    Include shared knowledge in agent context packets:

    ctx agent --include-hub\n

    Shared entries are included as Tier 8 in the budget-aware assembly, scored by recency and type relevance.

    ","path":["Connect"],"tags":[]},{"location":"cli/connection/","level":1,"title":"Connect","text":"","path":["CLI","Integrations","Connect"],"tags":[]},{"location":"cli/connection/#ctx-connect","level":2,"title":"ctx connect","text":"

    Connect a project to a ctx Hub for cross-project knowledge sharing. Projects publish decisions, learnings, conventions, and tasks to a hub; other subscribed projects receive them alongside local context.

    New to the ctx Hub?

    Start with the ctx Hub overview for the mental model (what the hub is, who it's for, what it is not), then walk through Getting Started. This page is a command reference, not an introduction.

    The unit of identity is a project, not a user. Registering a directory with ctx connection register binds a per-project client token in .context/.connect.enc. Two developers on the same project either share that file over a trusted channel, or each register under a different project name.

    Only structured entries flow through the hub — decision, learning, convention, task. Session journals, scratchpad contents, and other local state stay on the machine that created them.

    ","path":["CLI","Integrations","Connect"],"tags":[]},{"location":"cli/connection/#ctx-connection-register","level":3,"title":"ctx connection register","text":"

    One-time registration with a ctx Hub. Requires the ctx Hub address and admin token (printed by ctx hub start on first run).

    Examples:

    ctx connection register localhost:9900 --token ctx_adm_7f3a...\n

    On success, stores an encrypted connection config in .context/.connect.enc for future RPCs.

    ","path":["CLI","Integrations","Connect"],"tags":[]},{"location":"cli/connection/#ctx-connection-subscribe","level":3,"title":"ctx connection subscribe","text":"

    Set which entry types to receive from the ctx Hub. Only matching types are returned by sync and listen.

    Examples:

    ctx connection subscribe decision learning\nctx connection subscribe decision learning convention\n
    ","path":["CLI","Integrations","Connect"],"tags":[]},{"location":"cli/connection/#ctx-connection-sync","level":3,"title":"ctx connection sync","text":"

    Pull matching entries from the ctx Hub and write them to .context/hub/ as markdown files with origin tags and date headers. Tracks last-seen sequence for incremental sync.

    Examples:

    ctx connection sync\n
    ","path":["CLI","Integrations","Connect"],"tags":[]},{"location":"cli/connection/#ctx-connection-publish","level":3,"title":"ctx connection publish","text":"

    Push entries to the ctx Hub. Specify type and content as arguments.

    Examples:

    ctx connection publish decision \"Use UTC timestamps everywhere\"\nctx connection publish learning \"Go embed requires files in same package\"\n
    ","path":["CLI","Integrations","Connect"],"tags":[]},{"location":"cli/connection/#ctx-connection-listen","level":3,"title":"ctx connection listen","text":"

    Stream new entries from the ctx Hub in real-time. Writes to .context/hub/ as entries arrive. Press Ctrl-C to stop.

    Examples:

    ctx connection listen\n
    ","path":["CLI","Integrations","Connect"],"tags":[]},{"location":"cli/connection/#ctx-connection-status","level":3,"title":"ctx connection status","text":"

    Show ctx Hub connection state and entry statistics.

    Examples:

    ctx connection status\n
    ","path":["CLI","Integrations","Connect"],"tags":[]},{"location":"cli/connection/#automatic-sharing","level":2,"title":"Automatic Sharing","text":"

    Use --share on ctx add to write locally AND publish to the ctx Hub:

    ctx add decision \"Use UTC\" --share \\\n  --context \"Need consistency\" \\\n  --rationale \"Avoid timezone bugs\" \\\n  --consequence \"UI does conversion\"\n

    If the hub is unreachable, the local write succeeds and a warning is printed. The --share flag is best-effort — it never blocks local context updates.

    ","path":["CLI","Integrations","Connect"],"tags":[]},{"location":"cli/connection/#auto-sync","level":2,"title":"Auto-Sync","text":"

    Once registered, the check-hub-sync hook automatically syncs new entries from the ctx Hub at the start of each session (daily throttled). No manual ctx connection sync needed.

    ","path":["CLI","Integrations","Connect"],"tags":[]},{"location":"cli/connection/#shared-files","level":2,"title":"Shared Files","text":"

    Entries from the ctx Hub are stored in .context/hub/:

    .context/hub/\n  decisions.md      # Shared decisions with origin tags\n  learnings.md      # Shared learnings\n  conventions.md    # Shared conventions\n  .sync-state.json  # Last-seen sequence tracker\n

    These files are read-only (managed by sync/listen) and never mixed with local context files.

    ","path":["CLI","Integrations","Connect"],"tags":[]},{"location":"cli/connection/#agent-integration","level":2,"title":"Agent Integration","text":"

    Include shared knowledge in agent context packets:

    ctx agent --include-hub\n

    Shared entries are included as Tier 8 in the budget-aware assembly, scored by recency and type relevance.

    ","path":["CLI","Integrations","Connect"],"tags":[]},{"location":"cli/context/","level":1,"title":"Context Management","text":"","path":["CLI","Context","Context Management"],"tags":[]},{"location":"cli/context/#ctx-add","level":3,"title":"ctx add","text":"

    Add a new item to a context file.

    ctx add <type> <content> [flags]\n

    Types:

    Type Target File taskTASKS.mddecisionDECISIONS.mdlearningLEARNINGS.mdconventionCONVENTIONS.md

    Flags:

    Flag Short Description --priority <level>-p Priority for tasks: high, medium, low--section <name>-s Target section within file --context-c Context (required for decisions and learnings) --rationale-r Rationale for decisions (required for decisions) --consequence Consequence for decisions (required for decisions) --lesson-l Key insight (required for learnings) --application-a How to apply going forward (required for learnings) --file-f Read content from file instead of argument

    Examples:

    # Add a task\nctx add task \"Implement user authentication\" \\\n  --session-id abc12345 --branch main --commit 68fbc00a\nctx add task \"Fix login bug\" --priority high \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n\n# Record a decision (requires all ADR (Architectural Decision Record) fields)\nctx add decision \"Use PostgreSQL for primary database\" \\\n  --context \"Need a reliable database for production\" \\\n  --rationale \"PostgreSQL offers ACID compliance and JSON support\" \\\n  --consequence \"Team needs PostgreSQL training\" \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n\n# Note a learning (requires context, lesson, and application)\nctx add learning \"Vitest mocks must be hoisted\" \\\n  --context \"Tests failed with undefined mock errors\" \\\n  --lesson \"Vitest hoists vi.mock() calls to top of file\" \\\n  --application \"Always place vi.mock() before imports in test files\" \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n\n# Add to specific section\nctx add convention \"Use kebab-case for filenames\" --section \"Naming\"\n
    ","path":["CLI","Context","Context Management"],"tags":[]},{"location":"cli/context/#ctx-drift","level":3,"title":"ctx drift","text":"

    Detect stale or invalid context.

    ctx drift [flags]\n

    Flags:

    Flag Description --json Output machine-readable JSON --fix Auto-fix simple issues

    Checks:

    • Path references in ARCHITECTURE.md and CONVENTIONS.md exist
    • Task references are valid
    • Constitution rules aren't violated (heuristic)
    • Staleness indicators (old files, many completed tasks)
    • Missing packages: warns when internal/ directories exist on disk but are not referenced in ARCHITECTURE.md (suggests running /ctx-architecture)
    • Entry count: warns when LEARNINGS.md or DECISIONS.md exceed configurable thresholds (default: 30 learnings, 20 decisions), or when CONVENTIONS.md exceeds a line count threshold (default: 200). Configure via .ctxrc:
      entry_count_learnings: 30      # warn above this (0 = disable)\nentry_count_decisions: 20      # warn above this (0 = disable)\nconvention_line_count: 200     # warn above this (0 = disable)\n

    Example:

    ctx drift\nctx drift --json\nctx drift --fix\n

    Exit codes:

    Code Meaning 0 All checks passed 1 Warnings found 3 Violations found","path":["CLI","Context","Context Management"],"tags":[]},{"location":"cli/context/#ctx-sync","level":3,"title":"ctx sync","text":"

    Reconcile context with the current codebase state.

    ctx sync [flags]\n

    Flags:

    Flag Description --dry-run Show what would change without modifying

    What it does:

    • Scans codebase for structural changes
    • Compares with ARCHITECTURE.md
    • Suggests documenting dependencies if package files exist
    • Identifies stale or outdated context

    Example:

    ctx sync\nctx sync --dry-run\n
    ","path":["CLI","Context","Context Management"],"tags":[]},{"location":"cli/context/#ctx-compact","level":3,"title":"ctx compact","text":"

    Consolidate and clean up context files.

    • Moves completed tasks older than 7 days to the archive
    • Removes empty sections
    ctx compact [flags]\n

    Flags:

    Flag Description --archive Create .context/archive/ for old content

    Example:

    ctx compact\nctx compact --archive\n
    ","path":["CLI","Context","Context Management"],"tags":[]},{"location":"cli/context/#ctx-fmt","level":3,"title":"ctx fmt","text":"

    Format context files to a consistent line width.

    Wraps long lines in TASKS.md, DECISIONS.md, LEARNINGS.md, and CONVENTIONS.md at word boundaries. Markdown list items get 2-space continuation indent. Headings, tables, frontmatter, and HTML comments are preserved as-is.

    Idempotent: running twice produces the same output.

    ctx fmt [flags]\n

    Flags:

    Flag Type Default Description --widthint80 Target line width --checkboolfalse Check only, exit 1 if files would change

    Examples:

    ctx fmt              # format all context files\nctx fmt --check      # CI mode: check without modifying\nctx fmt --width 100  # custom width\n

    Also available as a Makefile target:

    make fmt-context\n
    ","path":["CLI","Context","Context Management"],"tags":[]},{"location":"cli/context/#ctx-task","level":3,"title":"ctx task","text":"

    Manage task completion, archival, and snapshots.

    ctx task <subcommand>\n
    ","path":["CLI","Context","Context Management"],"tags":[]},{"location":"cli/context/#ctx-task-complete","level":4,"title":"ctx task complete","text":"

    Mark a task as completed.

    ctx task complete <task-id-or-text>\n

    Arguments:

    • task-id-or-text: Task number or partial text match

    Examples:

    # By text (partial match)\nctx task complete \"user auth\"\n\n# By task number\nctx task complete 3\n
    ","path":["CLI","Context","Context Management"],"tags":[]},{"location":"cli/context/#ctx-task-archive","level":4,"title":"ctx task archive","text":"

    Move completed tasks from TASKS.md to a timestamped archive file.

    ctx task archive [flags]\n

    Flags:

    Flag Description --dry-run Preview changes without modifying files

    Archive files are stored in .context/archive/ with timestamped names (tasks-YYYY-MM-DD.md). Completed tasks (marked with [x]) are moved; pending tasks ([ ]) remain in TASKS.md.

    Example:

    ctx task archive\nctx task archive --dry-run\n
    ","path":["CLI","Context","Context Management"],"tags":[]},{"location":"cli/context/#ctx-task-snapshot","level":4,"title":"ctx task snapshot","text":"

    Create a point-in-time snapshot of TASKS.md without modifying the original.

    ctx task snapshot [name]\n

    Arguments:

    • name: Optional name for the snapshot (defaults to \"snapshot\")

    Snapshots are stored in .context/archive/ with timestamped names (tasks-<name>-YYYY-MM-DD-HHMM.md).

    Example:

    ctx task snapshot\nctx task snapshot \"before-refactor\"\n
    ","path":["CLI","Context","Context Management"],"tags":[]},{"location":"cli/context/#ctx-permission","level":3,"title":"ctx permission","text":"

    Manage Claude Code permission snapshots.

    ctx permission <subcommand>\n
    ","path":["CLI","Context","Context Management"],"tags":[]},{"location":"cli/context/#ctx-permission-snapshot","level":4,"title":"ctx permission snapshot","text":"

    Save .claude/settings.local.json as the golden image.

    ctx permission snapshot\n

    Creates .claude/settings.golden.json as a byte-for-byte copy of the current settings. Overwrites if the golden file already exists.

    The golden file is meant to be committed to version control and shared with the team.

    Example:

    ctx permission snapshot\n# Saved golden image: .claude/settings.golden.json\n
    ","path":["CLI","Context","Context Management"],"tags":[]},{"location":"cli/context/#ctx-permission-restore","level":4,"title":"ctx permission restore","text":"

    Replace settings.local.json with the golden image.

    ctx permission restore\n

    Prints a diff of dropped (session-accumulated) and restored permissions. No-op if the files already match.

    Example:

    ctx permission restore\n# Dropped 3 session permission(s):\n#   - Bash(cat /tmp/debug.log:*)\n#   - Bash(rm /tmp/test-*:*)\n#   - Bash(curl https://example.com:*)\n# Restored from golden image.\n
    ","path":["CLI","Context","Context Management"],"tags":[]},{"location":"cli/context/#ctx-reindex","level":3,"title":"ctx reindex","text":"

    Regenerate the quick-reference index for both DECISIONS.md and LEARNINGS.md in a single invocation.

    ctx reindex\n

    This is a convenience wrapper around ctx decision reindex and ctx learning reindex. Both files grow at similar rates and users typically want to reindex both after manual edits.

    The index is a compact table of date and title for each entry, allowing AI tools to scan entries without reading the full file.

    Example:

    ctx reindex\n# ✓ Index regenerated with 12 entries\n# ✓ Index regenerated with 8 entries\n
    ","path":["CLI","Context","Context Management"],"tags":[]},{"location":"cli/context/#ctx-decision","level":3,"title":"ctx decision","text":"

    Manage the DECISIONS.md file.

    ctx decision <subcommand>\n
    ","path":["CLI","Context","Context Management"],"tags":[]},{"location":"cli/context/#ctx-decision-reindex","level":4,"title":"ctx decision reindex","text":"

    Regenerate the quick-reference index at the top of DECISIONS.md.

    ctx decision reindex\n

    The index is a compact table showing the date and title for each decision, allowing AI tools to quickly scan entries without reading the full file.

    Use this after manual edits to DECISIONS.md or when migrating existing files to use the index format.

    Example:

    ctx decision reindex\n# ✓ Index regenerated with 12 entries\n
    ","path":["CLI","Context","Context Management"],"tags":[]},{"location":"cli/context/#ctx-learning","level":3,"title":"ctx learning","text":"

    Manage the LEARNINGS.md file.

    ctx learning <subcommand>\n
    ","path":["CLI","Context","Context Management"],"tags":[]},{"location":"cli/context/#ctx-learning-reindex","level":4,"title":"ctx learning reindex","text":"

    Regenerate the quick-reference index at the top of LEARNINGS.md.

    ctx learning reindex\n

    The index is a compact table showing the date and title for each learning, allowing AI tools to quickly scan entries without reading the full file.

    Use this after manual edits to LEARNINGS.md or when migrating existing files to use the index format.

    Example:

    ctx learning reindex\n# ✓ Index regenerated with 8 entries\n
    ","path":["CLI","Context","Context Management"],"tags":[]},{"location":"cli/doctor/","level":1,"title":"Doctor","text":"","path":["CLI","Diagnostics","Doctor"],"tags":[]},{"location":"cli/doctor/#ctx-doctor","level":3,"title":"ctx doctor","text":"

    Structural health check across context, hooks, and configuration. Runs mechanical checks that don't require semantic analysis. Think of it as ctx status + ctx drift + configuration audit in one pass.

    ctx doctor [flags]\n

    Flags:

    Flag Short Type Default Description --json-j bool false Machine-readable JSON output","path":["CLI","Diagnostics","Doctor"],"tags":[]},{"location":"cli/doctor/#what-it-checks","level":4,"title":"What It Checks","text":"Check Category What it verifies Context initialized Structure .context/ directory exists Required files present Structure All required context files exist (TASKS.md, etc.) Drift detected Quality Stale paths, missing files, constitution violations Event logging status Hooks Whether event_log: true is set in .ctxrc Webhook configured Hooks .notify.enc file exists Pending reminders State Count of entries in reminders.json Task completion ratio State Pending vs completed tasks in TASKS.md Context token size Size Estimated token count across all context files Recent event activity Events Last event timestamp (only when event logging is enabled)","path":["CLI","Diagnostics","Doctor"],"tags":[]},{"location":"cli/doctor/#output-format-human","level":4,"title":"Output Format (Human)","text":"
    ctx doctor\n==========\n\nStructure\n  ✓ Context initialized (.context/)\n  ✓ Required files present (4/4)\n\nQuality\n  ⚠ Drift: 2 warnings (stale path in ARCHITECTURE.md, high entry count in LEARNINGS.md)\n\nHooks\n  ✓ hooks.json valid (14 hooks registered)\n  ○ Event logging disabled (enable with event_log: true in .ctxrc)\n\nState\n  ✓ No pending reminders\n  ⚠ Task completion ratio high (18/22 = 82%): consider archiving\n\nSize\n  ✓ Context size: ~4200 tokens (budget: 8000)\n\nSummary: 2 warnings, 0 errors\n

    Status indicators:

    Icon Status Meaning ✓ ok Check passed ⚠ warning Non-critical issue worth fixing ✗ error Problem that needs attention ○ info Informational note","path":["CLI","Diagnostics","Doctor"],"tags":[]},{"location":"cli/doctor/#output-format-json","level":4,"title":"Output Format (JSON)","text":"
    {\n  \"results\": [\n    {\n      \"name\": \"context_initialized\",\n      \"category\": \"Structure\",\n      \"status\": \"ok\",\n      \"message\": \"Context initialized (.context/)\"\n    },\n    {\n      \"name\": \"required_files\",\n      \"category\": \"Structure\",\n      \"status\": \"ok\",\n      \"message\": \"Required files present (4/4)\"\n    },\n    {\n      \"name\": \"drift\",\n      \"category\": \"Quality\",\n      \"status\": \"warning\",\n      \"message\": \"Drift: 2 warnings\"\n    },\n    {\n      \"name\": \"event_logging\",\n      \"category\": \"Hooks\",\n      \"status\": \"info\",\n      \"message\": \"Event logging disabled (enable with event_log: true in .ctxrc)\"\n    },\n    {\n      \"name\": \"webhook\",\n      \"category\": \"Hooks\",\n      \"status\": \"ok\",\n      \"message\": \"Webhook configured\"\n    },\n    {\n      \"name\": \"reminders\",\n      \"category\": \"State\",\n      \"status\": \"ok\",\n      \"message\": \"No pending reminders\"\n    },\n    {\n      \"name\": \"task_completion\",\n      \"category\": \"State\",\n      \"status\": \"warning\",\n      \"message\": \"Tasks: 18/22 completed (82%): consider archiving with ctx task archive\"\n    },\n    {\n      \"name\": \"context_size\",\n      \"category\": \"Size\",\n      \"status\": \"ok\",\n      \"message\": \"Context size: ~4200 tokens (budget: 8000)\"\n    }\n  ],\n  \"warnings\": 2,\n  \"errors\": 0\n}\n

    Examples:

    # Quick structural health check\nctx doctor\n\n# Machine-readable output for scripting\nctx doctor --json\n\n# Count warnings\nctx doctor --json | jq '.warnings'\n\n# Check for errors only\nctx doctor --json | jq '[.results[] | select(.status == \"error\")]'\n
    ","path":["CLI","Diagnostics","Doctor"],"tags":[]},{"location":"cli/doctor/#when-to-use-what","level":4,"title":"When to Use What","text":"Tool When ctx status Quick glance at files, tokens, and drift ctx doctor Thorough structural checkup (hooks, config, events too) /ctx-doctor Agent-driven diagnosis with event log pattern analysis

    ctx status tells you what's there. ctx doctor tells you what's wrong. /ctx-doctor tells you why it's wrong and what to do about it.

    ","path":["CLI","Diagnostics","Doctor"],"tags":[]},{"location":"cli/doctor/#what-it-does-not-do","level":4,"title":"What It Does Not Do","text":"
    • No event pattern analysis: that's the /ctx-doctor skill's job
    • No auto-fixing: reports findings, doesn't modify anything
    • No external service checks: doesn't verify webhook endpoint availability

    See also: Troubleshooting | ctx hook event | /ctx-doctor skill | Detecting and Fixing Drift

    ","path":["CLI","Diagnostics","Doctor"],"tags":[]},{"location":"cli/event/","level":1,"title":"Event","text":"","path":["Event"],"tags":[]},{"location":"cli/event/#ctx-hook-event","level":3,"title":"ctx hook event","text":"

    Query the local hook event log. Requires event_log: true in .ctxrc. Reads events from .context/state/events.jsonl and outputs them in a human-readable table or raw JSONL format.

    All filter flags combine with AND logic.

    ctx hook event [flags]\n

    Flags:

    Flag Description --hook Filter by hook name --session Filter by session ID --event Filter by event type (relay, nudge) --last Show last N events (default: 50) --json Output raw JSONL (for piping to jq) --all Include rotated log file

    Examples:

    ctx hook event                                        # recent events\nctx hook event --hook check-context-size --last 10    # one hook, last 10\nctx hook event --json | jq '.hook'                    # pipe to jq\nctx hook event --session abc123                       # filter by session\n
    ","path":["Event"],"tags":[]},{"location":"cli/guide/","level":1,"title":"Guide","text":"","path":["CLI","Getting Started","Guide"],"tags":[]},{"location":"cli/guide/#ctx-guide","level":2,"title":"ctx guide","text":"

    Quick-reference cheat sheet for common ctx commands and skills.

    ctx guide [flags]\n

    Flags:

    Flag Description --skills Show available skills --commands Show available CLI commands

    Example:

    # Show the full cheat sheet\nctx guide\n\n# Skills only\nctx guide --skills\n\n# Commands only\nctx guide --commands\n

    Works without initialization (no .context/ required). Useful for a printable one-pager when onboarding to a project.

    ","path":["CLI","Getting Started","Guide"],"tags":[]},{"location":"cli/hook/","level":1,"title":"Hook","text":"","path":["CLI","Runtime","Hook"],"tags":[]},{"location":"cli/hook/#ctx-hook","level":3,"title":"ctx hook","text":"

    Manage hook-related settings: messages, notifications, pause/resume, and event log.

    ctx hook <subcommand> [flags]\n
    ","path":["CLI","Runtime","Hook"],"tags":[]},{"location":"cli/hook/#subcommands","level":2,"title":"Subcommands","text":"Subcommand Description ctx hook message list Show all hook messages with override status ctx hook message show <h> <v> Print the effective message template ctx hook message edit <h> <v> Copy default to .context/ for editing ctx hook message reset <h> <v> Delete user override, revert to default ctx hook notify [message] Send a webhook notification ctx hook notify setup Configure and encrypt webhook URL ctx hook notify test Send a test notification ctx hook pause Pause all context hooks for this session ctx hook resume Resume paused context hooks ctx hook event Query the local hook event log","path":["CLI","Runtime","Hook"],"tags":[]},{"location":"cli/hook/#examples","level":2,"title":"Examples","text":"
    # View and manage hook messages\nctx hook message list\nctx hook message show qa-reminder gate\nctx hook message edit qa-reminder gate\n\n# Webhook notifications\nctx hook notify setup\nctx hook notify --event loop \"Loop completed\"\n\n# Pause/resume hooks\nctx hook pause\nctx hook resume\n\n# Browse event log\nctx hook event --last 20\nctx hook event --hook qa-reminder --json\n

    See also: Customizing Hook Messages | Webhook Notifications | Pausing Context Hooks | System Hooks Audit

    ","path":["CLI","Runtime","Hook"],"tags":[]},{"location":"cli/hub/","level":1,"title":"Hub","text":"","path":["CLI","Integrations","Hub"],"tags":[]},{"location":"cli/hub/#ctx-hub","level":2,"title":"ctx hub","text":"

    Operator commands for a ctx Hub — the gRPC server that fans out decisions, learnings, conventions, and tasks across projects. Use ctx hub to start and stop the server, inspect cluster state, add or remove peers at runtime, and hand off leadership before maintenance.

    Who Needs This Page

    You only need ctx hub if you are running a hub server or cluster. For client-side operations (register, subscribe, sync, publish, listen), see ctx connect. For the mental model behind the hub as a whole, read the ctx Hub overview.

    ","path":["CLI","Integrations","Hub"],"tags":[]},{"location":"cli/hub/#ctx-hub-start","level":3,"title":"ctx hub start","text":"

    Start the hub gRPC server.

    Examples:

    ctx hub start                           # Foreground, default port 9900\nctx hub start --port 8080               # Custom port\nctx hub start --data-dir /srv/ctx-hub   # Custom data directory\n

    On first run, generates an admin token and prints it to stdout. Save this token — it's required for ctx connection register in client projects. Subsequent runs reuse the stored token from <data-dir>/admin.token.

    Default data directory: ~/.ctx/hub-data/

    ","path":["CLI","Integrations","Hub"],"tags":[]},{"location":"cli/hub/#daemon-mode","level":4,"title":"Daemon Mode","text":"

    Run the hub as a detached background process:

    ctx hub start --daemon          # Fork to background\nctx hub stop                    # Graceful shutdown\n

    The daemon writes a PID file to <data-dir>/hub.pid. Stop the daemon with ctx hub stop (see below).

    ","path":["CLI","Integrations","Hub"],"tags":[]},{"location":"cli/hub/#cluster-mode","level":4,"title":"Cluster Mode","text":"

    For high availability, run multiple hubs with Raft-based leader election:

    ctx hub start --port 9900 \\\n  --peers host2:9901,host3:9901\n

    Raft is used only for leader election. Data replication uses sequence-based gRPC sync on the append-only JSONL log — there is no multi-node consensus on writes. See the HA cluster recipe for the full setup and the Raft-lite durability caveat.

    ","path":["CLI","Integrations","Hub"],"tags":[]},{"location":"cli/hub/#flags","level":4,"title":"Flags","text":"Flag Description Default --port Hub listen port 9900--data-dir Hub data directory ~/.ctx/hub-data/--daemon Run the hub server in the background false--peers Comma-separated peer addresses for cluster mode (none)","path":["CLI","Integrations","Hub"],"tags":[]},{"location":"cli/hub/#validation","level":4,"title":"Validation","text":"

    The hub validates every published entry before accepting it:

    • Type must be one of decision, learning, convention, task
    • ID and Origin are required and non-empty
    • Content size capped at 1 MB (text-only)
    • Duplicate project registration is rejected (one token per project)
    ","path":["CLI","Integrations","Hub"],"tags":[]},{"location":"cli/hub/#ctx-hub-stop","level":3,"title":"ctx hub stop","text":"

    Stop a running hub daemon.

    Examples:

    ctx hub stop                            # Stop using default data dir\nctx hub stop --data-dir /srv/ctx-hub    # Custom data directory\n

    Sends SIGTERM to the PID recorded in <data-dir>/hub.pid, waits for in-flight RPCs to drain, and removes the PID file. Safe to rerun — if no daemon is running, returns a \"no running hub\" error without side effects.

    ","path":["CLI","Integrations","Hub"],"tags":[]},{"location":"cli/hub/#ctx-hub-status","level":3,"title":"ctx hub status","text":"

    Show cluster status: role, peers, sync state, entry count, and uptime.

    Examples:

    ctx hub status\n
    ","path":["CLI","Integrations","Hub"],"tags":[]},{"location":"cli/hub/#ctx-hub-peer","level":3,"title":"ctx hub peer","text":"

    Add or remove peers from the cluster at runtime. Useful for scaling up or replacing a decommissioned node without restarting the leader.

    Examples:

    ctx hub peer add host2:9901\nctx hub peer remove host2:9901\n
    ","path":["CLI","Integrations","Hub"],"tags":[]},{"location":"cli/hub/#ctx-hub-stepdown","level":3,"title":"ctx hub stepdown","text":"

    Transfer leadership to another node gracefully. Triggers a new election among the remaining followers before the current leader steps down. Use before taking the leader offline for maintenance.

    Examples:

    ctx hub stepdown\n
    ","path":["CLI","Integrations","Hub"],"tags":[]},{"location":"cli/hub/#see-also","level":3,"title":"See Also","text":"
    • ctx connect — client-side commands (register, subscribe, sync, publish, listen)
    • ctx Hub overview — mental model and user stories
    • ctx Hub: Getting Started
    • Hub operations — production deployment, backup, monitoring
    • Hub failure modes
    • Hub security model
    ","path":["CLI","Integrations","Hub"],"tags":[]},{"location":"cli/init-status/","level":1,"title":"Init and Status","text":"","path":["CLI","Getting Started","Init and Status"],"tags":[]},{"location":"cli/init-status/#ctx-init","level":3,"title":"ctx init","text":"

    Initialize a new .context/ directory with template files.

    ctx init [flags]\n

    Flags:

    Flag Short Description --force-f Overwrite existing context files --minimal-m Only create essential files (TASKS.md, DECISIONS.md, CONSTITUTION.md) --merge Auto-merge ctx content into existing CLAUDE.md

    Creates:

    • .context/ directory with all template files
    • .claude/settings.local.json with pre-approved ctx permissions
    • CLAUDE.md with bootstrap instructions (or merges into existing)

    Claude Code hooks and skills are provided by the ctx plugin (see Integrations).

    Example:

    # Standard init\nctx init\n\n# Minimal setup (just core files)\nctx init --minimal\n\n# Force overwrite existing\nctx init --force\n\n# Merge into existing files\nctx init --merge\n
    ","path":["CLI","Getting Started","Init and Status"],"tags":[]},{"location":"cli/init-status/#ctx-status","level":3,"title":"ctx status","text":"

    Show the current context summary.

    ctx status [flags]\n

    Flags:

    Flag Short Description --json Output as JSON --verbose-v Include file contents summary

    Output:

    • Context directory path
    • Total files and token estimate
    • Status of each file (loaded, empty, missing)
    • Recent activity (modification times)
    • Drift warnings if any

    Example:

    ctx status\nctx status --json\nctx status --verbose\n
    ","path":["CLI","Getting Started","Init and Status"],"tags":[]},{"location":"cli/init-status/#ctx-agent","level":3,"title":"ctx agent","text":"

    Print an AI-ready context packet optimized for LLM consumption.

    ctx agent [flags]\n

    Flags:

    Flag Default Description --budget 8000 Token budget: controls content selection and prioritization --format md Output format: md or json--cooldown 10m Suppress repeated output within this duration (requires --session) --session (none) Session ID for cooldown isolation (e.g., $PPID) --include-hub false Include hub entries from .context/hub/

    How budget works:

    The budget controls how much context is included. Entries are selected in priority tiers:

    1. Constitution: always included in full (inviolable rules)
    2. Tasks: all active tasks, up to 40% of budget
    3. Conventions: all conventions, up to 20% of budget
    4. Decisions: scored by recency and relevance to active tasks
    5. Learnings: scored by recency and relevance to active tasks
    6. Steering: applicable steering file bodies, scored by their inclusion mode and description match against the active prompt
    7. Skill: named skill content (from --skill)
    8. Hub: entries from .context/hub/ (with --include-hub, see ctx connect)

    Decisions and learnings are ranked by a combined score (how recent + how relevant to your current tasks). High-scoring entries are included with their full body. Entries that don't fit get title-only summaries in an \"Also Noted\" section. Superseded entries are excluded.

    Output Sections:

    Section Source Selection Read These Files all .context/ Non-empty files in priority order Constitution CONSTITUTION.md All rules (never truncated) Current Tasks TASKS.md All unchecked tasks (budget-capped) Key Conventions CONVENTIONS.md All items (budget-capped) Recent Decisions DECISIONS.md Full body, scored by relevance Key Learnings LEARNINGS.md Full body, scored by relevance Also Noted overflow Title-only summaries

    Example:

    # Default (8000 tokens, markdown)\nctx agent\n\n# Smaller packet for tight context windows\nctx agent --budget 4000\n\n# JSON format for programmatic use\nctx agent --format json\n\n# Pipe to file\nctx agent --budget 4000 > context.md\n\n# With cooldown (hooks/automation: requires --session)\nctx agent --session $PPID\n

    Use case: Copy-paste into AI chat, pipe to system prompt, or use in hooks.

    ","path":["CLI","Getting Started","Init and Status"],"tags":[]},{"location":"cli/init-status/#ctx-load","level":3,"title":"ctx load","text":"

    Load and display assembled context as AI would see it.

    ctx load [flags]\n

    Flags:

    Flag Description --budget <tokens> Token budget for assembly (default: 8000) --raw Output raw file contents without assembly

    Example:

    ctx load\nctx load --budget 16000\nctx load --raw\n
    ","path":["CLI","Getting Started","Init and Status"],"tags":[]},{"location":"cli/journal/","level":1,"title":"Journal","text":"","path":["CLI","Sessions","Journal"],"tags":[]},{"location":"cli/journal/#ctx-journal","level":3,"title":"ctx journal","text":"

    Browse and search AI session history from Claude Code and other tools.

    ctx journal <subcommand>\n
    ","path":["CLI","Sessions","Journal"],"tags":[]},{"location":"cli/journal/#ctx-journal-source","level":4,"title":"ctx journal source","text":"

    List all parsed sessions.

    ctx journal source [flags]\n

    Flags:

    Flag Short Description --limit-n Maximum sessions to display (default: 20) --project-p Filter by project name --tool-t Filter by tool (e.g., claude-code) --all-projects Include sessions from all projects

    Sessions are sorted by date (newest first) and display slug, project, start time, duration, turn count, and token usage.

    Example:

    ctx journal source\nctx journal source --limit 5\nctx journal source --project ctx\nctx journal source --tool claude-code\n
    ","path":["CLI","Sessions","Journal"],"tags":[]},{"location":"cli/journal/#ctx-journal-source-show","level":4,"title":"ctx journal source --show","text":"

    Show details of a specific session.

    ctx journal source --show [session-id] [flags]\n

    Flags:

    Flag Description --latest Show the most recent session --full Show full message content --all-projects Search across all projects

    The session ID can be a full UUID, partial match, or session slug name.

    Example:

    ctx journal source --show abc123\nctx journal source --show gleaming-wobbling-sutherland\nctx journal source --show --latest\nctx journal source --show --latest --full\n
    ","path":["CLI","Sessions","Journal"],"tags":[]},{"location":"cli/journal/#ctx-journal-import","level":4,"title":"ctx journal import","text":"

    Import sessions to editable journal files in .context/journal/.

    ctx journal import [session-id] [flags]\n

    Flags:

    Flag Description --all Import all sessions (only new files by default) --all-projects Import from all projects --regenerate Re-import existing files (preserves YAML frontmatter by default) --keep-frontmatter Preserve enriched YAML frontmatter during regeneration (default: true) --yes, -y Skip confirmation prompt --dry-run Show what would be imported without writing files

    Safe by default: --all only imports new sessions. Existing files are skipped. Use --regenerate to re-import existing files (conversation content is regenerated, YAML frontmatter from enrichment is preserved by default). Use --keep-frontmatter=false to discard enriched frontmatter during regeneration.

    Locked entries (via ctx journal lock) are always skipped, regardless of flags.

    Single-session import (ctx journal import <id>) always writes without prompting, since you are explicitly targeting one session.

    The journal/ directory should be gitignored (like sessions/) since it contains raw conversation data.

    Example:

    ctx journal import abc123                 # Import one session\nctx journal import --all                  # Import only new sessions\nctx journal import --all --dry-run        # Preview what would be imported\nctx journal import --all --regenerate     # Re-import existing (prompts)\nctx journal import --all --regenerate -y  # Re-import without prompting\nctx journal import --all --regenerate --keep-frontmatter=false -y  # Discard frontmatter\n
    ","path":["CLI","Sessions","Journal"],"tags":[]},{"location":"cli/journal/#ctx-journal-lock","level":4,"title":"ctx journal lock","text":"

    Protect journal entries from being overwritten by import --regenerate or modified by enrichment skills (/ctx-journal-enrich, /ctx-journal-enrich-all).

    ctx journal lock <pattern> [flags]\n

    Flags:

    Flag Description --all Lock all journal entries

    The pattern matches filenames by slug, date, or short ID. Locking a multi-part entry locks all parts. The lock is recorded in .context/journal/.state.json and a locked: true line is added to the file's YAML frontmatter for visibility.

    Example:

    ctx journal lock abc12345\nctx journal lock 2026-01-21-session-abc12345.md\nctx journal lock --all\n
    ","path":["CLI","Sessions","Journal"],"tags":[]},{"location":"cli/journal/#ctx-journal-unlock","level":4,"title":"ctx journal unlock","text":"

    Remove lock protection from journal entries.

    ctx journal unlock <pattern> [flags]\n

    Flags:

    Flag Description --all Unlock all journal entries

    Example:

    ctx journal unlock abc12345\nctx journal unlock --all\n
    ","path":["CLI","Sessions","Journal"],"tags":[]},{"location":"cli/journal/#ctx-journal-sync","level":4,"title":"ctx journal sync","text":"

    Sync lock state from journal frontmatter to .state.json.

    ctx journal sync\n

    Scans all journal markdowns and updates .state.json to match each file's frontmatter. Files with locked: true in frontmatter are marked locked in state; files without a locked: line have their lock cleared.

    This is the inverse of ctx journal lock: instead of state driving frontmatter, frontmatter drives state. Useful after batch enrichment where you add locked: true to frontmatter manually.

    Example:

    # After enriching entries and adding locked: true to frontmatter\nctx journal sync\n
    ","path":["CLI","Sessions","Journal"],"tags":[]},{"location":"cli/journal/#ctx-journal_1","level":3,"title":"ctx journal","text":"

    Analyze and synthesize imported session files.

    ctx journal <subcommand>\n
    ","path":["CLI","Sessions","Journal"],"tags":[]},{"location":"cli/journal/#ctx-journal-site","level":4,"title":"ctx journal site","text":"

    Generate a static site from journal entries in .context/journal/.

    ctx journal site [flags]\n

    Flags:

    Flag Short Description --output-o Output directory (default: .context/journal-site) --build Run zensical build after generating --serve Run zensical serve after generating

    Creates a zensical-compatible site structure with an index page listing all sessions by date, and individual pages for each journal entry.

    Requires zensical to be installed for --build or --serve:

    pipx install zensical\n

    Example:

    ctx journal site                    # Generate in .context/journal-site/\nctx journal site --output ~/public  # Custom output directory\nctx journal site --build            # Generate and build HTML\nctx journal site --serve            # Generate and serve locally\n
    ","path":["CLI","Sessions","Journal"],"tags":[]},{"location":"cli/journal/#ctx-journal-obsidian","level":4,"title":"ctx journal obsidian","text":"

    Generate an Obsidian vault from journal entries in .context/journal/.

    ctx journal obsidian [flags]\n

    Flags:

    Flag Short Description --output-o Output directory (default: .context/journal-obsidian)

    Creates an Obsidian-compatible vault with:

    • Wikilinks ([[target|display]]) for all internal navigation
    • MOC pages (Map of Content) for topics, key files, and session types
    • Related sessions footer linking entries that share topics
    • Transformed frontmatter (topics → tags for Obsidian integration)
    • Minimal .obsidian/ config enforcing wikilink mode

    No external dependencies are required: Open the output directory as an Obsidian vault directly.

    Example:

    ctx journal obsidian                        # Generate in .context/journal-obsidian/\nctx journal obsidian --output ~/vaults/ctx  # Custom output directory\n
    ","path":["CLI","Sessions","Journal"],"tags":[]},{"location":"cli/journal/#ctx-journal-schema-check","level":4,"title":"ctx journal schema check","text":"

    Validate JSONL session files against the embedded schema and report drift.

    ctx journal schema check [flags]\n

    Flags:

    Flag Short Description --dir Directory to scan for JSONL files --all-projects Scan all Claude Code project directories --quiet-q Exit code only (0 = clean, 1 = drift)

    Scans JSONL files for unknown fields, missing required fields, unknown record types, and unknown content block types. When drift is found, writes a Markdown report to .context/reports/schema-drift.md. When drift resolves, the report is automatically deleted.

    Designed for interactive use, CI pipelines, and nightly cron jobs.

    Example:

    ctx journal schema check                    # Current project\nctx journal schema check --all-projects     # All projects\nctx journal schema check --quiet            # Exit code only\nctx journal schema check --dir /path/to     # Custom directory\n
    ","path":["CLI","Sessions","Journal"],"tags":[]},{"location":"cli/journal/#ctx-journal-schema-dump","level":4,"title":"ctx journal schema dump","text":"

    Print the embedded JSONL schema definition.

    ctx journal schema dump\n

    Shows all known record types with their required and optional fields, and all recognized content block types with their parse status. Useful for inspecting what the schema validator expects.

    Example:

    ctx journal schema dump\n
    ","path":["CLI","Sessions","Journal"],"tags":[]},{"location":"cli/journal/#ctx-serve","level":3,"title":"ctx serve","text":"

    Serve any zensical directory locally. This is a serve-only command: It does not generate or regenerate site content.

    ctx serve [directory]\n

    If no directory is specified, defaults to the journal site (.context/journal-site).

    Requires zensical to be installed:

    pipx install zensical\n

    ctx serve vs. ctx journal site --serve

    ctx journal site --serve generates the journal site then serves it: an all-in-one command. ctx serve only serves an existing directory, and works with any zensical site (journal, docs, etc.).

    Example:

    ctx serve                        # Serve journal site (no regeneration)\nctx serve .context/journal-site  # Same, explicit path\nctx serve ./site                 # Serve the docs site\n
    ","path":["CLI","Sessions","Journal"],"tags":[]},{"location":"cli/loop/","level":1,"title":"Loop","text":"","path":["CLI","Integrations","Loop"],"tags":[]},{"location":"cli/loop/#ctx-loop","level":2,"title":"ctx loop","text":"

    Generate a shell script for running an autonomous loop.

    An autonomous loop continuously runs an AI assistant with the same prompt until a completion signal is detected, enabling iterative development where the AI builds on its previous work.

    ctx loop [flags]\n

    Flags:

    Flag Short Description Default --tool <tool>-t AI tool: claude, aider, or genericclaude--prompt <file>-p Prompt file to use .context/loop.md--max-iterations <n>-n Maximum iterations (0 = unlimited) 0--completion <signal>-c Completion signal to detect SYSTEM_CONVERGED--output <file>-o Output script filename loop.sh

    Examples:

    # Generate loop.sh for Claude Code\nctx loop\n\n# Generate for Aider with custom prompt\nctx loop --tool aider --prompt TASKS.md\n\n# Limit to 10 iterations\nctx loop --max-iterations 10\n\n# Output to custom file\nctx loop -o my-loop.sh\n

    Running the generated loop:

    ctx loop\nchmod +x loop.sh\n./loop.sh\n

    See also: Autonomous Loops for the full workflow.

    ","path":["CLI","Integrations","Loop"],"tags":[]},{"location":"cli/mcp/","level":1,"title":"MCP Server","text":"","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#ctx-mcp","level":2,"title":"ctx mcp","text":"

    Run ctx as a Model Context Protocol (MCP) server. MCP is a standard protocol that lets AI tools discover and consume context from external sources via JSON-RPC 2.0 over stdin/stdout.

    This makes ctx accessible to any MCP-compatible AI tool without custom hooks or integrations:

    • Claude Desktop
    • Cursor
    • Windsurf
    • VS Code Copilot
    • Any tool supporting MCP
    ","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#ctx-mcp-serve","level":3,"title":"ctx mcp serve","text":"

    Start the MCP server. This command reads JSON-RPC 2.0 requests from stdin and writes responses to stdout. It is intended to be launched by MCP clients (Claude Desktop, Cursor, VS Code Copilot), not run directly from a shell. See Configuration below for how each host launches it.

    Flags: None. The server uses the configured context directory (from --context-dir, CTX_DIR, .ctxrc, or the default .context).

    Examples:

    # Normal invocation (by an MCP client via stdio transport)\nctx mcp serve\n\n# Pin a context directory for a specific workspace\nctx --context-dir /path/to/project/.context mcp serve\n\n# Verify the binary starts without a client attached (Ctrl-C to exit)\nctx mcp serve < /dev/null\n
    ","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#configuration","level":2,"title":"Configuration","text":"","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#claude-desktop","level":3,"title":"Claude Desktop","text":"

    Add to ~/Library/Application Support/Claude/claude_desktop_config.json:

    {\n  \"mcpServers\": {\n    \"ctx\": {\n      \"command\": \"ctx\",\n      \"args\": [\"mcp\", \"serve\"]\n    }\n  }\n}\n
    ","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#cursor","level":3,"title":"Cursor","text":"

    Add to .cursor/mcp.json in your project:

    {\n  \"mcpServers\": {\n    \"ctx\": {\n      \"command\": \"ctx\",\n      \"args\": [\"mcp\", \"serve\"]\n    }\n  }\n}\n
    ","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#vs-code-copilot","level":3,"title":"VS Code (Copilot)","text":"

    Add to .vscode/mcp.json:

    {\n  \"servers\": {\n    \"ctx\": {\n      \"command\": \"ctx\",\n      \"args\": [\"mcp\", \"serve\"]\n    }\n  }\n}\n
    ","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#resources","level":2,"title":"Resources","text":"

    Resources expose context files as read-only content. Each resource has a URI, name, and returns Markdown text.

    URI Name Description ctx://context/constitution constitution Hard rules that must never be violated ctx://context/tasks tasks Current work items and their status ctx://context/conventions conventions Code patterns and standards ctx://context/architecture architecture System architecture documentation ctx://context/decisions decisions Architectural decisions with rationale ctx://context/learnings learnings Gotchas, tips, and lessons learned ctx://context/glossary glossary Project-specific terminology ctx://context/agent agent All files assembled in priority read order

    The agent resource assembles all non-empty context files into a single Markdown document, ordered by the configured read priority.

    ","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#resource-subscriptions","level":3,"title":"Resource Subscriptions","text":"

    Clients can subscribe to resource changes via resources/subscribe. The server polls for file mtime changes (default: 5 seconds) and emits notifications/resources/updated when a subscribed file changes on disk.

    ","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#tools","level":2,"title":"Tools","text":"

    Tools expose ctx commands as callable operations. Each tool accepts JSON arguments and returns text results.

    ","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#ctx_status","level":3,"title":"ctx_status","text":"

    Show context health: file count, token estimate, and per-file summary.

    Arguments: None. Read-only.

    ","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#ctx_add","level":3,"title":"ctx_add","text":"

    Add a task, decision, learning, or convention to the context.

    Argument Type Required Description type string Yes Entry type: task, decision, learning, convention content string Yes Title or main content priority string No Priority level (tasks only): high, medium, low context string Conditional Context field (decisions and learnings) rationale string Conditional Rationale (decisions only) consequence string Conditional Consequence (decisions only) lesson string Conditional Lesson learned (learnings only) application string Conditional How to apply (learnings only)","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#ctx_complete","level":3,"title":"ctx_complete","text":"

    Mark a task as done by number or text match.

    Argument Type Required Description query string Yes Task number (e.g. \"1\") or search text","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#ctx_drift","level":3,"title":"ctx_drift","text":"

    Detect stale or invalid context. Returns violations, warnings, and passed checks.

    Arguments: None. Read-only.

    ","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#ctx_journal_source","level":3,"title":"ctx_journal_source","text":"

    Query recent AI session history (summaries, decisions, topics).

    Argument Type Required Description limit number No Max sessions to return (default: 5) since string No ISO date filter: sessions after this date (YYYY-MM-DD)

    Read-only.

    ","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#ctx_watch_update","level":3,"title":"ctx_watch_update","text":"

    Apply a structured context update to .context/ files. Supports task, decision, learning, convention, and complete entry types. Human confirmation is required before calling.

    Argument Type Required Description type string Yes Entry type: task, decision, learning, convention, complete content string Yes Main content context string Conditional Context background (decisions/learnings) rationale string Conditional Rationale (decisions only) consequence string Conditional Consequence (decisions only) lesson string Conditional Lesson learned (learnings only) application string Conditional How to apply (learnings only)","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#ctx_compact","level":3,"title":"ctx_compact","text":"

    Move completed tasks to the archive section and remove empty sections from context files. Human confirmation required.

    Argument Type Required Description archive boolean No Also write tasks to .context/archive/ (default: false)","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#ctx_next","level":3,"title":"ctx_next","text":"

    Suggest the next pending task based on priority and position.

    Arguments: None. Read-only.

    ","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#ctx_check_task_completion","level":3,"title":"ctx_check_task_completion","text":"

    Advisory check: after a write operation, detect if any pending tasks were silently completed. Returns nudge text if a match is found.

    Argument Type Required Description recent_action string No Brief description of what was just done

    Read-only.

    ","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#ctx_session_event","level":3,"title":"ctx_session_event","text":"

    Signal a session lifecycle event. Type end triggers the session-end persistence ceremony - human confirmation required.

    Argument Type Required Description type string Yes Event type: start, end caller string No Caller identifier (cursor, windsurf, vscode, claude-desktop)","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#ctx_steering_get","level":3,"title":"ctx_steering_get","text":"

    Retrieve applicable steering files for a prompt. Without a prompt, returns always-included files only.

    Argument Type Required Description prompt string No Prompt text to match against steering file descriptions

    Read-only.

    ","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#ctx_search","level":3,"title":"ctx_search","text":"

    Search across .context/ files for a query string. Returns matching lines with file paths and line numbers.

    Argument Type Required Description query string Yes Search string to match against

    Read-only.

    ","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#ctx_session_start","level":3,"title":"ctx_session_start","text":"

    Execute session-start hooks and return aggregated context from hook outputs.

    Arguments: None.

    ","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#ctx_session_end","level":3,"title":"ctx_session_end","text":"

    Execute session-end hooks with an optional summary. Returns aggregated context from hook outputs.

    Argument Type Required Description summary string No Session summary passed to hook scripts","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#ctx_remind","level":3,"title":"ctx_remind","text":"

    List pending session-scoped reminders.

    Arguments: None. Read-only.

    ","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#prompts","level":2,"title":"Prompts","text":"

    Prompts provide pre-built templates for common workflows. Clients can list available prompts via prompts/list and retrieve a specific prompt via prompts/get.

    ","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#ctx-session-start","level":3,"title":"ctx-session-start","text":"

    Load full context at the beginning of a session. Returns all context files assembled in priority read order with session orientation instructions.

    ","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#ctx-decision-add","level":3,"title":"ctx-decision-add","text":"

    Format an architectural decision entry with all required fields.

    Argument Type Required Description content string Yes Decision title context string Yes Background context rationale string Yes Why this decision was made consequence string Yes Expected consequence","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#ctx-learning-add","level":3,"title":"ctx-learning-add","text":"

    Format a learning entry with all required fields.

    Argument Type Required Description content string Yes Learning title context string Yes Background context lesson string Yes The lesson learned application string Yes How to apply this lesson","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#ctx-reflect","level":3,"title":"ctx-reflect","text":"

    Guide end-of-session reflection. Returns a structured review prompt covering progress assessment and context update recommendations.

    ","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#ctx-checkpoint","level":3,"title":"ctx-checkpoint","text":"

    Report session statistics: tool calls made, entries added, and pending updates queued during the current session.

    ","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/memory/","level":1,"title":"Memory","text":"","path":["CLI","Context","Memory"],"tags":[]},{"location":"cli/memory/#ctx-memory","level":2,"title":"ctx memory","text":"

    Bridge Claude Code's auto memory (MEMORY.md) into .context/.

    Claude Code maintains per-project auto memory at ~/.claude/projects/<slug>/memory/MEMORY.md. This command group discovers that file, mirrors it into .context/memory/mirror.md (git-tracked), and detects drift.

    ctx memory <subcommand>\n
    ","path":["CLI","Context","Memory"],"tags":[]},{"location":"cli/memory/#ctx-memory-sync","level":3,"title":"ctx memory sync","text":"

    Copy MEMORY.md to .context/memory/mirror.md. Archives the previous mirror before overwriting.

    ctx memory sync [flags]\n

    Flags:

    Flag Description --dry-run Show what would happen without writing

    Exit codes:

    Code Meaning 0 Synced successfully 1 MEMORY.md not found (auto memory inactive)

    Examples:

    ctx memory sync\n# Archived previous mirror to mirror-2026-03-05-143022.md\n# Synced MEMORY.md -> .context/memory/mirror.md\n#   Source: ~/.claude/projects/-home-user-project/memory/MEMORY.md\n#   Lines: 47 (was 32)\n#   New content: 15 lines since last sync\n\nctx memory sync --dry-run\n
    ","path":["CLI","Context","Memory"],"tags":[]},{"location":"cli/memory/#ctx-memory-status","level":3,"title":"ctx memory status","text":"

    Show drift, timestamps, line counts, and archive count.

    ctx memory status\n

    Exit codes:

    Code Meaning 0 No drift 1 MEMORY.md not found 2 Drift detected (MEMORY.md changed since sync)

    Examples:

    ctx memory status\n# Memory Bridge Status\n#   Source:      ~/.claude/projects/.../memory/MEMORY.md\n#   Mirror:      .context/memory/mirror.md\n#   Last sync:   2026-03-05 14:30 (2 hours ago)\n#\n#   MEMORY.md:  47 lines (modified since last sync)\n#   Mirror:     32 lines\n#   Drift:      detected (source is newer)\n#   Archives:   3 snapshots in .context/memory/archive/\n
    ","path":["CLI","Context","Memory"],"tags":[]},{"location":"cli/memory/#ctx-memory-diff","level":3,"title":"ctx memory diff","text":"

    Show what changed in MEMORY.md since last sync.

    ctx memory diff\n

    Examples:

    ctx memory diff\n# --- .context/memory/mirror.md (mirror)\n# +++ ~/.claude/projects/.../memory/MEMORY.md (source)\n# +- new learning: memory bridge works\n

    No output when files are identical.

    ","path":["CLI","Context","Memory"],"tags":[]},{"location":"cli/memory/#ctx-memory-publish","level":3,"title":"ctx memory publish","text":"

    Push curated .context/ content into MEMORY.md so the agent sees it natively.

    ctx memory publish [flags]\n

    Content is selected in priority order: pending tasks, recent decisions (7 days), key conventions, recent learnings (7 days). Wrapped in <!-- ctx:published --> markers. Claude-owned content outside the markers is preserved.

    Flags:

    Flag Description Default --budget Line budget for published content 80--dry-run Show what would be published

    Examples:

    ctx memory publish --dry-run\n# Publishing .context/ -> MEMORY.md...\n#   Budget: 80 lines\n#   Published block:\n#     5 pending tasks (from TASKS.md)\n#     3 recent decisions (from DECISIONS.md)\n#     5 key conventions (from CONVENTIONS.md)\n#   Total: 42 lines (within 80-line budget)\n# Dry run - no files written.\n\nctx memory publish              # Write to MEMORY.md\nctx memory publish --budget 40  # Tighter budget\n
    ","path":["CLI","Context","Memory"],"tags":[]},{"location":"cli/memory/#ctx-memory-unpublish","level":3,"title":"ctx memory unpublish","text":"

    Remove the ctx-managed marker block from MEMORY.md, preserving Claude-owned content.

    Examples:

    ctx memory unpublish\n

    Hook integration: The check-memory-drift hook runs on every prompt and nudges the agent when MEMORY.md has changed since last sync. The nudge fires once per session. See Memory Bridge.

    ","path":["CLI","Context","Memory"],"tags":[]},{"location":"cli/memory/#ctx-memory-import","level":3,"title":"ctx memory import","text":"

    Classify and promote entries from MEMORY.md into structured .context/ files.

    ctx memory import [flags]\n

    Each entry is classified by keyword heuristics:

    Keywords Target always use, prefer, never use, standard CONVENTIONS.md decided, chose, trade-off, approach DECISIONS.md gotcha, learned, watch out, bug, caveat LEARNINGS.md todo, need to, follow up TASKS.md Everything else Skipped

    Deduplication prevents re-importing the same entry across runs.

    Flags:

    Flag Description --dry-run Show classification plan without writing

    Examples:

    ctx memory import --dry-run\n# Scanning MEMORY.md for new entries...\n#   Found 6 entries\n#\n#   -> \"always use ctx from PATH\"\n#      Classified: CONVENTIONS.md (keywords: always use)\n#\n#   -> \"decided to use heuristic classification over LLM-based\"\n#      Classified: DECISIONS.md (keywords: decided)\n#\n# Dry run - would import: 4 entries\n# Skipped: 2 entries (session notes/unclassified)\n\nctx memory import    # Actually write entries to .context/ files\n
    ","path":["CLI","Context","Memory"],"tags":[]},{"location":"cli/message/","level":1,"title":"Message","text":"","path":["Message"],"tags":[]},{"location":"cli/message/#ctx-hook-message","level":3,"title":"ctx hook message","text":"

    Manage hook message templates.

    Hook messages control the text hooks emit. The hook logic (when to fire, counting, state tracking) is universal; the messages are opinions that can be customized per-project.

    ctx hook message <subcommand>\n
    ","path":["Message"],"tags":[]},{"location":"cli/message/#ctx-hook-message-list","level":3,"title":"ctx hook message list","text":"

    Show all hook messages with category and override status.

    ctx hook message list [--json]\n

    Flags:

    Flag Description --json Output in JSON format

    Example:

    ctx hook message list\nctx hook message list --json | jq '.[] | select(.override)'\n
    ","path":["Message"],"tags":[]},{"location":"cli/message/#ctx-hook-message-show","level":3,"title":"ctx hook message show","text":"

    Print the effective message template for a hook/variant pair. Shows the user override if present, otherwise the embedded default.

    ctx hook message show <hook> <variant>\n

    Example:

    ctx hook message show qa-reminder gate\nctx hook message show check-context-size checkpoint\n
    ","path":["Message"],"tags":[]},{"location":"cli/message/#ctx-hook-message-edit","level":3,"title":"ctx hook message edit","text":"

    Copy the embedded default template for <hook> <variant> to .context/hooks/messages/<hook>/<variant>.txt so you can edit it directly. The override takes effect the next time the hook fires.

    ctx hook message edit <hook> <variant>\n

    If an override already exists, the command fails and directs you to edit it in place or reset it first.

    Example:

    ctx hook message edit qa-reminder gate\n# Edit .context/hooks/messages/qa-reminder/gate.txt in your editor\n
    ","path":["Message"],"tags":[]},{"location":"cli/message/#ctx-hook-message-reset","level":3,"title":"ctx hook message reset","text":"

    Delete a user override and revert to the embedded default. Silent no-op if no override exists.

    ctx hook message reset <hook> <variant>\n

    Example:

    ctx hook message reset qa-reminder gate\n

    See Customizing hook messages for the full workflow.

    ","path":["Message"],"tags":[]},{"location":"cli/notify/","level":1,"title":"Notify","text":"","path":["CLI","Integrations","Notify"],"tags":[]},{"location":"cli/notify/#ctx-hook-notify","level":2,"title":"ctx hook notify","text":"

    Send fire-and-forget webhook notifications from skills, loops, and hooks.

    ctx hook notify --event <name> [--session-id <id>] \"message\"\n

    Flags:

    Flag Short Description --event-e Event name (required) --session-id-s Session ID (optional)

    Behavior:

    • No webhook configured: silent no-op (exit 0)
    • Webhook set but event not in events list: silent no-op (exit 0)
    • Webhook set and event matches: fire-and-forget HTTP POST
    • HTTP errors silently ignored (no retry)

    Examples:

    ctx hook notify --event loop \"Loop completed after 5 iterations\"\nctx hook notify -e nudge -s session-abc \"Context checkpoint at prompt #20\"\n
    ","path":["CLI","Integrations","Notify"],"tags":[]},{"location":"cli/notify/#ctx-hook-notify-setup","level":3,"title":"ctx hook notify setup","text":"

    Configure the webhook URL interactively. The URL is encrypted with AES-256-GCM using the encryption key and stored in .context/.notify.enc.

    Examples:

    ctx hook notify setup\n

    The encrypted file is safe to commit. The key (~/.ctx/.ctx.key) lives outside the project and is never committed.

    ","path":["CLI","Integrations","Notify"],"tags":[]},{"location":"cli/notify/#ctx-hook-notify-test","level":3,"title":"ctx hook notify test","text":"

    Send a test notification and report the HTTP response status.

    Examples:

    ctx hook notify test\n

    Payload format (JSON POST):

    {\n  \"event\": \"loop\",\n  \"message\": \"Loop completed after 5 iterations\",\n  \"session_id\": \"abc123-...\",\n  \"timestamp\": \"2026-02-22T14:30:00Z\",\n  \"project\": \"ctx\"\n}\n
    Field Type Description event string Event name from --event flag message string Notification message session_id string Session ID (omitted if empty) timestamp string UTC RFC3339 timestamp project string Project directory name

    See also: Webhook Notifications recipe.

    ","path":["CLI","Integrations","Notify"],"tags":[]},{"location":"cli/pad/","level":1,"title":"Scratchpad","text":"","path":["CLI","Sessions","Scratchpad"],"tags":[]},{"location":"cli/pad/#ctx-pad","level":2,"title":"ctx pad","text":"

    Encrypted scratchpad for sensitive one-liners that travel with the project.

    When invoked without a subcommand, lists all entries.

    ctx pad\nctx pad <subcommand>\n
    ","path":["CLI","Sessions","Scratchpad"],"tags":[]},{"location":"cli/pad/#ctx-pad-add","level":3,"title":"ctx pad add","text":"

    Append a new entry to the scratchpad.

    ctx pad add <text>\nctx pad add <label> --file <path>\n

    Flags:

    Flag Short Description --file-f Ingest a file as a blob entry (max 64 KB)

    Examples:

    ctx pad add \"DATABASE_URL=postgres://user:pass@host/db\"\nctx pad add \"deploy config\" --file ./deploy.yaml\n
    ","path":["CLI","Sessions","Scratchpad"],"tags":[]},{"location":"cli/pad/#ctx-pad-show","level":3,"title":"ctx pad show","text":"

    Output the raw text of an entry by number. For blob entries, prints decoded file content (or writes to disk with --out).

    ctx pad show <n>\nctx pad show <n> --out <path>\n

    Arguments:

    • n: 1-based entry number

    Flags:

    Flag Description --out Write decoded blob content to a file (blobs only)

    Examples:

    ctx pad show 3\nctx pad show 2 --out ./recovered.yaml\n
    ","path":["CLI","Sessions","Scratchpad"],"tags":[]},{"location":"cli/pad/#ctx-pad-rm","level":3,"title":"ctx pad rm","text":"

    Remove one or more entries by stable ID. Supports individual IDs and ranges.

    ctx pad rm <id> [id...]\n

    Arguments:

    • id: One or more entry IDs (e.g., 3, 1 4, 3-5)

    Examples:

    ctx pad rm 2\nctx pad rm 1 4\nctx pad rm 3-5\n
    ","path":["CLI","Sessions","Scratchpad"],"tags":[]},{"location":"cli/pad/#ctx-pad-normalize","level":3,"title":"ctx pad normalize","text":"

    Reassign entry IDs as a contiguous sequence 1..N, closing any gaps left by deletions.

    Examples:

    ctx pad normalize\n
    ","path":["CLI","Sessions","Scratchpad"],"tags":[]},{"location":"cli/pad/#ctx-pad-edit","level":3,"title":"ctx pad edit","text":"

    Replace, append to, or prepend to an entry.

    ctx pad edit <n> [text]\n

    Arguments:

    • n: 1-based entry number
    • text: Replacement text (mutually exclusive with --append/--prepend)

    Flags:

    Flag Description --append Append text to the end of the entry --prepend Prepend text to the beginning of the entry --file Replace blob file content (preserves label) --label Replace blob label (preserves content)

    Examples:

    ctx pad edit 2 \"new text\"\nctx pad edit 2 --append \" suffix\"\nctx pad edit 2 --prepend \"prefix \"\nctx pad edit 1 --file ./v2.yaml\nctx pad edit 1 --label \"new name\"\n
    ","path":["CLI","Sessions","Scratchpad"],"tags":[]},{"location":"cli/pad/#ctx-pad-mv","level":3,"title":"ctx pad mv","text":"

    Move an entry from one position to another.

    ctx pad mv <from> <to>\n

    Arguments:

    • from: Source position (1-based)
    • to: Destination position (1-based)

    Examples:

    ctx pad mv 3 1      # promote entry 3 to the top\nctx pad mv 1 5      # bury entry 1 to position 5\n
    ","path":["CLI","Sessions","Scratchpad"],"tags":[]},{"location":"cli/pad/#ctx-pad-resolve","level":3,"title":"ctx pad resolve","text":"

    Show both sides of a merge conflict in the encrypted scratchpad.

    Examples:

    ctx pad resolve\n
    ","path":["CLI","Sessions","Scratchpad"],"tags":[]},{"location":"cli/pad/#ctx-pad-import","level":3,"title":"ctx pad import","text":"

    Bulk-import lines from a file into the scratchpad. Each non-empty line becomes a separate entry. All entries are written in a single encrypt/write cycle.

    With --blob, import all first-level files from a directory as blob entries. Each file becomes a blob with the filename as its label. Subdirectories and non-regular files are skipped.

    ctx pad import <file>\nctx pad import -              # read from stdin\nctx pad import --blob <dir>   # import directory files as blobs\n

    Arguments:

    • file: Path to a text file, - for stdin, or a directory (with --blob)

    Flags:

    Flag Description --blob Import first-level files from a directory as blobs

    Examples:

    ctx pad import notes.txt\ngrep TODO *.go | ctx pad import -\nctx pad import --blob ./ideas/\n
    ","path":["CLI","Sessions","Scratchpad"],"tags":[]},{"location":"cli/pad/#ctx-pad-export","level":3,"title":"ctx pad export","text":"

    Export all blob entries from the scratchpad to a directory as files. Each blob's label becomes the filename. Non-blob entries are skipped.

    ctx pad export [dir]\n

    Arguments:

    • dir: Target directory (default: current directory)

    Flags:

    Flag Short Description --force-f Overwrite existing files instead of timestamping --dry-run Print what would be exported without writing

    When a file already exists, a unix timestamp is prepended to avoid collisions (e.g., 1739836200-label). Use --force to overwrite instead.

    Examples:

    ctx pad export ./ideas\nctx pad export --dry-run\nctx pad export --force ./backup\n
    ","path":["CLI","Sessions","Scratchpad"],"tags":[]},{"location":"cli/pad/#ctx-pad-merge","level":3,"title":"ctx pad merge","text":"

    Merge entries from one or more scratchpad files into the current pad. Each input file is auto-detected as encrypted or plaintext. Entries are deduplicated by exact content.

    ctx pad merge FILE...\n

    Arguments:

    • FILE...: One or more scratchpad files to merge (encrypted or plaintext)

    Flags:

    Flag Short Description --key-k Path to key file for decrypting input files --dry-run Print what would be merged without writing

    Examples:

    ctx pad merge worktree/.context/scratchpad.enc\nctx pad merge notes.md backup.enc\nctx pad merge --key /path/to/other.key foreign.enc\nctx pad merge --dry-run pad-a.enc pad-b.md\n
    ","path":["CLI","Sessions","Scratchpad"],"tags":[]},{"location":"cli/pause/","level":1,"title":"Pause","text":"","path":["CLI","Sessions","Pause"],"tags":[]},{"location":"cli/pause/#ctx-hook-pause","level":2,"title":"ctx hook pause","text":"

    Pause all context nudge and reminder hooks for the current session. Security hooks (dangerous command blocking) and housekeeping hooks still fire.

    ctx hook pause [flags]\n

    Flags:

    Flag Description --session-id Session ID (overrides stdin)

    Example:

    # Pause hooks for a quick investigation\nctx hook pause\n\n# Resume when ready\nctx hook resume\n

    See also:

    • ctx hook resume — the matching resume command
    • Pausing Context Hooks recipe
    ","path":["CLI","Sessions","Pause"],"tags":[]},{"location":"cli/prune/","level":1,"title":"Prune","text":"","path":["CLI","Runtime","Prune"],"tags":[]},{"location":"cli/prune/#ctx-prune","level":3,"title":"ctx prune","text":"

    Remove per-session state files from .context/state/ that are older than the specified age. Session state files are identified by UUID suffixes (context-check-<session-id>, heartbeat-<session-id>, and similar). Global files without session IDs (events.jsonl, memory-import.json, and other non-per-session markers) are always preserved.

    ctx prune [flags]\n

    Flags:

    Flag Description --days Prune files older than this many days (default: 7) --dry-run Show what would be pruned without deleting

    Examples:

    ctx prune                 # Prune files older than 7 days\nctx prune --days 3        # Prune files older than 3 days\nctx prune --dry-run       # Preview without deleting\n

    See State maintenance for the recommended cadence and automation recipe.

    ","path":["CLI","Runtime","Prune"],"tags":[]},{"location":"cli/remind/","level":1,"title":"Remind","text":"","path":["CLI","Sessions","Remind"],"tags":[]},{"location":"cli/remind/#ctx-remind","level":2,"title":"ctx remind","text":"

    Session-scoped reminders that surface at session start. Reminders are stored verbatim and relayed verbatim: no summarization, no categories.

    When invoked with a text argument and no subcommand, adds a reminder.

    ctx remind \"text\"\nctx remind <subcommand>\n
    ","path":["CLI","Sessions","Remind"],"tags":[]},{"location":"cli/remind/#ctx-remind-add","level":3,"title":"ctx remind add","text":"

    Add a reminder. This is the default action: ctx remind \"text\" and ctx remind add \"text\" are equivalent.

    ctx remind \"refactor the swagger definitions\"\nctx remind add \"check CI after the deploy\" --after 2026-02-25\n

    Arguments:

    • text: The reminder message (verbatim)

    Flags:

    Flag Short Description --after-a Don't surface until this date (YYYY-MM-DD)

    Examples:

    ctx remind \"refactor the swagger definitions\"\nctx remind \"check CI after the deploy\" --after 2026-02-25\n
    ","path":["CLI","Sessions","Remind"],"tags":[]},{"location":"cli/remind/#ctx-remind-list","level":3,"title":"ctx remind list","text":"

    List all pending reminders. Date-gated reminders that aren't yet due are annotated with (after DATE, not yet due).

    Examples:

    ctx remind list\nctx remind ls            # alias\n

    Aliases: ls

    ","path":["CLI","Sessions","Remind"],"tags":[]},{"location":"cli/remind/#ctx-remind-dismiss","level":3,"title":"ctx remind dismiss","text":"

    Remove one or more reminders by ID, or remove all with --all. Supports individual IDs and ranges.

    ctx remind dismiss <id> [id...]\nctx remind dismiss --all\n

    Arguments:

    • id: One or more reminder IDs (e.g., 3, 3 5-7)

    Flags:

    Flag Description --all Dismiss all reminders

    Aliases: rm

    Examples:

    ctx remind dismiss 3\nctx remind dismiss 3 5-7\nctx remind dismiss --all\n
    ","path":["CLI","Sessions","Remind"],"tags":[]},{"location":"cli/remind/#ctx-remind-normalize","level":3,"title":"ctx remind normalize","text":"

    Reassign reminder IDs as a contiguous sequence 1..N, closing any gaps left by dismissals.

    Examples:

    ctx remind normalize\n

    See also: Session Reminders recipe.

    ","path":["CLI","Sessions","Remind"],"tags":[]},{"location":"cli/resume/","level":1,"title":"Resume","text":"","path":["CLI","Sessions","Resume"],"tags":[]},{"location":"cli/resume/#ctx-hook-resume","level":2,"title":"ctx hook resume","text":"

    Resume context hooks after a pause. Silent no-op if not paused.

    ctx hook resume [flags]\n

    Flags:

    Flag Description --session-id Session ID (overrides stdin)

    Example:

    ctx hook resume\n

    See also:

    • ctx hook pause — the matching pause command
    • Pausing Context Hooks recipe
    ","path":["CLI","Sessions","Resume"],"tags":[]},{"location":"cli/serve/","level":1,"title":"Serve","text":"","path":["CLI","Integrations","Serve"],"tags":[]},{"location":"cli/serve/#ctx-serve","level":2,"title":"ctx serve","text":"

    Serve a static site locally via zensical.

    With no argument, serves the journal site at .context/journal-site. With a directory argument, serves that directory if it contains a zensical.toml.

    ctx serve                             # Serve .context/journal-site\nctx serve ./my-site                   # Serve a specific directory\nctx serve ./docs                      # Serve any zensical site\n

    This Command Does NOT Start a Hub

    ctx serve is purely for static-site serving. To run a ctx Hub for cross-project knowledge sharing, use ctx hub start. That command lives in its own group because the hub is a gRPC server, not a static site.

    Requires zensical to be installed:

    pipx install zensical\n
    ","path":["CLI","Integrations","Serve"],"tags":[]},{"location":"cli/serve/#arguments","level":3,"title":"Arguments","text":"Argument Description [directory] Directory containing a zensical.toml to serve

    When omitted, serves .context/journal-site by default — the directory produced by ctx journal site.

    Examples:

    ctx serve                         # Default: serve .context/journal-site\nctx serve ./my-site               # Serve a specific directory\nctx serve ./docs                  # Serve any zensical site\n
    ","path":["CLI","Integrations","Serve"],"tags":[]},{"location":"cli/serve/#see-also","level":3,"title":"See Also","text":"
    • ctx journal — generate the journal site that ctx serve displays.
    • ctx hub start — for running a ctx Hub server, not a static site.
    • Browsing and enriching past sessions — the recipe that combines ctx journal and ctx serve.
    ","path":["CLI","Integrations","Serve"],"tags":[]},{"location":"cli/setup/","level":1,"title":"Setup","text":"","path":["CLI","Integrations","Setup"],"tags":[]},{"location":"cli/setup/#ctx-setup","level":2,"title":"ctx setup","text":"

    Generate AI tool integration configuration.

    ctx setup <tool> [flags]\n

    Flags:

    Flag Short Description --write-w Write the generated config to disk (e.g. .github/copilot-instructions.md)

    Supported tools:

    Tool Description claude-code Redirects to plugin install instructions cursor Cursor IDE kiro Kiro IDE cline Cline (VS Code extension) aider Aider CLI copilot GitHub Copilot windsurf Windsurf IDE

    Claude Code Uses the Plugin System

    Claude Code integration is now provided via the ctx plugin. Running ctx setup claude-code prints plugin install instructions.

    Examples:

    # Print hook instructions to stdout\nctx setup cursor\nctx setup aider\n\n# Generate and write .github/copilot-instructions.md\nctx setup copilot --write\n\n# Generate MCP config and sync steering files\nctx setup kiro --write\nctx setup cursor --write\nctx setup cline --write\n
    ","path":["CLI","Integrations","Setup"],"tags":[]},{"location":"cli/site/","level":1,"title":"Site","text":"","path":["CLI","Integrations","Site"],"tags":[]},{"location":"cli/site/#ctx-site","level":2,"title":"ctx site","text":"

    Site management commands for the ctx.ist static site.

    ctx site <subcommand>\n
    ","path":["CLI","Integrations","Site"],"tags":[]},{"location":"cli/site/#ctx-site-feed","level":3,"title":"ctx site feed","text":"

    Generate an Atom 1.0 feed from finalized blog posts in docs/blog/.

    ctx site feed [flags]\n

    Scans docs/blog/ for files matching YYYY-MM-DD-*.md, parses YAML frontmatter, and generates a valid Atom feed. Only posts with reviewed_and_finalized: true are included. Summaries are extracted from the first paragraph after the heading.

    Flags:

    Flag Short Type Default Description --out-o string site/feed.xml Output path --base-url string https://ctx.ist Base URL for entry links

    Output:

    Generated site/feed.xml (21 entries)\n\nSkipped:\n  2026-02-25-the-homework-problem.md: not finalized\n\nWarnings:\n  2026-02-09-defense-in-depth.md: no summary paragraph found\n

    Three buckets: included (count), skipped (with reason), warnings (included but degraded). exit 0 always: warnings inform but do not block.

    Frontmatter requirements:

    Field Required Feed mapping title Yes <title>date Yes <updated>reviewed_and_finalized Yes Draft gate (must be true) author No <author><name>topics No <category term=\"\">

    Examples:

    ctx site feed                                # Generate site/feed.xml\nctx site feed --out /tmp/feed.xml            # Custom output path\nctx site feed --base-url https://example.com # Custom base URL\nmake site-feed                               # Makefile shortcut\nmake site                                    # Builds site + feed\n
    ","path":["CLI","Integrations","Site"],"tags":[]},{"location":"cli/skill/","level":1,"title":"Skill","text":"","path":["CLI","Integrations","Skill"],"tags":[]},{"location":"cli/skill/#ctx-skill","level":2,"title":"ctx skill","text":"

    Manage reusable instruction bundles that can be installed into .context/skills/.

    A skill is a directory containing a SKILL.md file with YAML frontmatter (name, description) and a Markdown instruction body. Skills are loaded by the agent context packet when --skill <name> is passed to ctx agent.

    ctx skill <subcommand>\n
    ","path":["CLI","Integrations","Skill"],"tags":[]},{"location":"cli/skill/#ctx-skill-install","level":3,"title":"ctx skill install","text":"

    Install a skill from a source directory.

    ctx skill install <source>\n

    Arguments:

    • source: Path to a directory containing SKILL.md

    Examples:

    ctx skill install ./my-skills/code-review\n# Installed code-review → .context/skills/code-review\n
    ","path":["CLI","Integrations","Skill"],"tags":[]},{"location":"cli/skill/#ctx-skill-list","level":3,"title":"ctx skill list","text":"

    List all installed skills.

    Examples:

    ctx skill list\n
    ","path":["CLI","Integrations","Skill"],"tags":[]},{"location":"cli/skill/#ctx-skill-remove","level":3,"title":"ctx skill remove","text":"

    Remove an installed skill.

    Arguments:

    • name: Skill name to remove

    Examples:

    ctx skill remove code-review\n

    See also: Building Project Skills recipe.

    ","path":["CLI","Integrations","Skill"],"tags":[]},{"location":"cli/steering/","level":1,"title":"Steering","text":"","path":["CLI","Integrations","Steering"],"tags":[]},{"location":"cli/steering/#ctx-steering","level":2,"title":"ctx steering","text":"

    Manage steering files: persistent behavioral rules for AI coding assistants.

    A steering file is a small markdown document with YAML frontmatter that tells the AI how to behave in a specific context. ctx steering keeps those files in .context/steering/, decides which ones apply for a given prompt, and syncs them out to each AI tool's native format (Claude Code, Cursor, Kiro, Cline).

    ctx steering <subcommand>\n

    Steering vs Decisions vs Conventions

    The three look similar on disk but serve different purposes:

    • Decisions record what was chosen and why. Consumed mostly by humans (and by the agent via ctx agent).
    • Conventions describe how the codebase is written. Consumed as reference material.
    • Steering tells the AI how to behave when asked about X. Consumed by the AI tool's prompt injection layer, conditionally on prompt match.

    If you find yourself writing \"the AI should always do X\" — that belongs in steering, not decisions.

    ","path":["CLI","Integrations","Steering"],"tags":[]},{"location":"cli/steering/#anatomy-of-a-steering-file","level":3,"title":"Anatomy of a Steering File","text":"
    ---\nname: security\ndescription: Security rules for all code changes\ninclusion: always    # always | auto | manual\ntools: []            # empty = all tools\npriority: 10         # lower = injected first\n---\n\n# Security rules\n\n- Validate all user input at system boundaries.\n- Never log secrets, tokens, or credentials.\n- Prefer constant-time comparison for tokens.\n

    Inclusion modes:

    Mode When it's included always Every prompt, unconditionally auto When the prompt matches the description keywords manual Only when the user names it explicitly

    Priority: lower numbers inject first, so high-priority rules appear at the top of the prompt. Default is 50.

    Tools: an empty list means all configured tools receive the file; list specific tool names to scope it.

    ","path":["CLI","Integrations","Steering"],"tags":[]},{"location":"cli/steering/#ctx-steering-init","level":3,"title":"ctx steering init","text":"

    Create a starter set of steering files in .context/steering/ to use as a scaffolding baseline.

    Examples:

    ctx steering init\n
    ","path":["CLI","Integrations","Steering"],"tags":[]},{"location":"cli/steering/#ctx-steering-add","level":3,"title":"ctx steering add","text":"

    Create a new steering file with default frontmatter.

    ctx steering add <name>\n

    Arguments:

    • name: Steering file name (without .md extension)

    Examples:

    ctx steering add security\n# Created .context/steering/security.md\n

    The generated file uses inclusion: manual and priority: 50 by default. Edit the frontmatter to change behavior.

    ","path":["CLI","Integrations","Steering"],"tags":[]},{"location":"cli/steering/#ctx-steering-list","level":3,"title":"ctx steering list","text":"

    List all steering files with their inclusion mode, priority, and tool scoping.

    Examples:

    ctx steering list\n
    ","path":["CLI","Integrations","Steering"],"tags":[]},{"location":"cli/steering/#ctx-steering-preview","level":3,"title":"ctx steering preview","text":"

    Preview which steering files would be included for a given prompt. Useful for validating auto-inclusion descriptions against realistic prompts.

    ctx steering preview [prompt]\n

    Examples:

    ctx steering preview \"create a REST API endpoint\"\n# Steering files matching prompt \"create a REST API endpoint\":\n#   api-standards        inclusion=auto     priority=20  tools=all\n#   security             inclusion=always   priority=10  tools=all\n
    ","path":["CLI","Integrations","Steering"],"tags":[]},{"location":"cli/steering/#ctx-steering-sync","level":3,"title":"ctx steering sync","text":"

    Sync steering files to tool-native formats for tools that have a built-in rules primitive. Not every tool needs this — Claude Code and Codex use a different delivery mechanism (see below).

    Examples:

    ctx steering sync\n

    Which tools are sync targets?

    Tool Sync target Mechanism Cursor .cursor/rules/ Cursor reads the directory natively Cline .clinerules/ Cline reads the directory natively Kiro .kiro/steering/ Kiro reads the directory natively Claude Code (no-op) Delivered via hook + MCP — see next section Codex (no-op) Same as Claude Code

    For the three native-rules tools, ctx steering sync writes each matching steering file to the appropriate directory with tool-specific frontmatter transforms. Unchanged files are skipped (idempotent).

    ","path":["CLI","Integrations","Steering"],"tags":[]},{"location":"cli/steering/#how-claude-code-and-codex-consume-steering","level":3,"title":"How Claude Code and Codex Consume Steering","text":"

    Claude Code has no native \"steering files\" primitive, so ctx steering sync skips it entirely. Instead, steering reaches Claude through two non-sync channels, both activated by ctx setup claude-code (which installs the plugin):

    1. Automatic injection via the PreToolUse hook. The Claude Code plugin wires a PreToolUse hook that runs ctx agent --budget 8000 before each tool call. ctx agent loads .context/steering/ and calls steering.Filter with an empty prompt, so only files with inclusion: always match. Those files are included as Tier 6 of the context packet. The packet is printed on stdout, which Claude Code injects as additional context. This fires on every tool call; no user action.

    2. On-demand MCP tool call (ctx_steering_get). The ctx plugin ships a .mcp.json file that automatically registers the ctx MCP server (ctx mcp serve) with Claude Code on plugin install. Once registered, Claude can invoke the ctx_steering_get tool mid-task to fetch matching steering files for a specific prompt. This is the only path that resolves inclusion: auto and inclusion: manual matches for Claude Code — Claude passes the prompt to the MCP tool, which runs the keyword match against each file's description.

    Verify the MCP server is registered:

    claude mcp list\n

    Expected line: ctx: ctx mcp serve - ✓ Connected. If it's missing, reinstall the plugin from Claude Code (/plugin → find ctx → uninstall → install again) — older plugin versions shipped without the .mcp.json file.

    Prefer inclusion: always for Claude Code

    Because the PreToolUse hook passes an empty prompt to ctx agent, only always files fire automatically. auto files require Claude to call the ctx_steering_get MCP tool on its own; manual files require an explicit user invocation. For rules that should reliably fire on every Claude Code session, use inclusion: always. Reserve auto/manual for situational libraries where the opt-in cost is acceptable and you understand Claude may not pull them in without prompting.

    The foundation files scaffolded by ctx init already default to inclusion: always for this reason.

    Practical implications:

    • Running ctx steering sync before starting a Claude session does nothing for Claude's benefit. Skip it.
    • ctx steering preview still works for validating your descriptions — it doesn't depend on sync.
    • If Claude Code is your only tool, the ctx steering commands you care about are add, list, preview, init — never sync.
    • If you use both Claude Code and (say) Cursor, ctx steering sync covers Cursor (where auto and manual work natively) while the hook+MCP pipeline covers Claude Code. For rules you need to fire automatically on both, use inclusion: always.
    ","path":["CLI","Integrations","Steering"],"tags":[]},{"location":"cli/steering/#ctx-agent-integration","level":3,"title":"ctx agent Integration","text":"

    When ctx agent builds a context packet, steering files are loaded as Tier 6 of the budget-aware assembly (see ctx agent). Files with inclusion: always are always included; auto files are scored against the current prompt and included in priority order until the tier budget is exhausted.

    ","path":["CLI","Integrations","Steering"],"tags":[]},{"location":"cli/steering/#see-also","level":3,"title":"See Also","text":"
    • ctx setup — configure which tools receive steering syncs
    • ctx trigger — lifecycle scripts (a different hooking concept, see below)
    • Building steering files recipe — walkthrough from first file to synced output
    ","path":["CLI","Integrations","Steering"],"tags":[]},{"location":"cli/sysinfo/","level":1,"title":"Sysinfo","text":"","path":["CLI","Diagnostics","Sysinfo"],"tags":[]},{"location":"cli/sysinfo/#ctx-sysinfo","level":3,"title":"ctx sysinfo","text":"

    Display a snapshot of system resources (memory, swap, disk, load) with threshold-based alert severities. Mirrors what the check-resource hook plumbing monitors in the background — but this command prints the full report at any severity level, not only at DANGER.

    ctx sysinfo [flags]\n

    Flags:

    Flag Description --json Output in JSON format

    Alert thresholds:

    Resource WARNING DANGER Memory ≥ 75% ≥ 90% Swap ≥ 50% ≥ 75% Disk ≥ 85% ≥ 95% Load ≥ 1.0x CPUs ≥ 1.5x CPUs

    Examples:

    ctx sysinfo                  # Human-readable table\nctx sysinfo --json           # Structured output\n
    ","path":["CLI","Diagnostics","Sysinfo"],"tags":[]},{"location":"cli/system/","level":1,"title":"System","text":"","path":["CLI","Runtime","System"],"tags":[]},{"location":"cli/system/#ctx-system","level":3,"title":"ctx system","text":"

    Hidden parent command that hosts Claude Code hook plumbing and a small set of session-lifecycle plumbing subcommands used by skills and editor integrations. The parent is registered without a visible group in ctx --help; run ctx system --help to see its subcommands.

    ctx system <subcommand>\n

    Commands Previously under ctx system

    Several user-facing maintenance commands used to live under ctx system and were promoted to top-level:

    • ctx system backupctx backup
    • ctx system eventsctx hook event
    • ctx system messagectx hook message
    • ctx system prunectx prune
    • ctx system resourcesctx sysinfo
    • ctx system statsctx usage

    ctx system bootstrap remains under ctx system as a hidden, agent-only command. Update any scripts or personal docs that reference the old paths.

    ","path":["CLI","Runtime","System"],"tags":[]},{"location":"cli/system/#plumbing-subcommands","level":2,"title":"Plumbing Subcommands","text":"

    These are not hook handlers — they're called by skills and editor integrations during the session lifecycle. Safe to run manually.

    ","path":["CLI","Runtime","System"],"tags":[]},{"location":"cli/system/#ctx-system-mark-journal","level":4,"title":"ctx system mark-journal","text":"

    Update processing state for a journal entry. Records the current date in .context/journal/.state.json. Used by journal skills to record pipeline progress.

    ctx system mark-journal <filename> <stage>\n

    Stages: exported, enriched, normalized, fences_verified

    Flag Description --check Check if stage is set (exit 1 if not)

    Example:

    ctx system mark-journal 2026-01-21-session-abc12345.md enriched\nctx system mark-journal 2026-01-21-session-abc12345.md normalized\nctx system mark-journal --check 2026-01-21-session-abc12345.md fences_verified\n
    ","path":["CLI","Runtime","System"],"tags":[]},{"location":"cli/system/#ctx-system-mark-wrapped-up","level":4,"title":"ctx system mark-wrapped-up","text":"

    Suppress context checkpoint nudges after a wrap-up ceremony. Writes a marker file that check-context-size checks before emitting checkpoint boxes. The marker expires after 2 hours.

    Called automatically by /ctx-wrap-up after persisting context (not intended for direct use).

    ctx system mark-wrapped-up\n

    No flags, no arguments. Idempotent: running it again updates the marker timestamp.

    ","path":["CLI","Runtime","System"],"tags":[]},{"location":"cli/system/#ctx-system-pause-ctx-system-resume","level":4,"title":"ctx system pause / ctx system resume","text":"

    Session-scoped hook suppression. ctx system pause writes a marker file that causes hook plumbing to no-op for the current session; ctx system resume removes it. These are the hook-plumbing counterparts to the ctx hook pause / ctx hook resume commands (which call them internally).

    Read the session ID from stdin JSON (same as hooks) or pass --session-id.

    ","path":["CLI","Runtime","System"],"tags":[]},{"location":"cli/system/#ctx-system-session-event","level":4,"title":"ctx system session-event","text":"

    Records a session lifecycle event (start or end) to the event log. Called by editor integrations when a workspace is opened or closed.

    ctx system session-event --type start --caller vscode\nctx system session-event --type end --caller vscode\n
    ","path":["CLI","Runtime","System"],"tags":[]},{"location":"cli/system/#hook-subcommands","level":2,"title":"Hook Subcommands","text":"

    Hidden Claude Code hook handlers implementing the hook contract: read JSON from stdin, perform logic, emit output on stdout, exit 0. Block commands output JSON with a decision field.

    UserPromptSubmit hooks: context-load-gate, check-context-size, check-persistence, check-ceremony, check-journal, check-version, check-resource, check-knowledge, check-map-staleness, check-memory-drift, check-reminder, check-freshness, check-hub-sync, check-backup-age, check-skill-discovery, heartbeat.

    PreToolUse hooks: block-non-path-ctx, block-dangerous-command, qa-reminder, specs-nudge.

    PostToolUse hooks: post-commit, check-task-completion.

    See AI Tools for registration details and the Claude Code plugin integration.

    ","path":["CLI","Runtime","System"],"tags":[]},{"location":"cli/trace/","level":1,"title":"Commit Context Tracing","text":"","path":["CLI","Diagnostics","Commit Context Tracing"],"tags":[]},{"location":"cli/trace/#ctx-trace","level":3,"title":"ctx trace","text":"

    Show the context behind git commits. Links commits back to the decisions, tasks, learnings, and sessions that motivated them.

    git log shows what changed, git blame shows who — ctx trace shows why.

    ctx trace [commit] [flags]\n

    Flags:

    Flag Description --last N Show context for last N commits --json Output as JSON for scripting

    Examples:

    # Show context for a specific commit\nctx trace abc123\n\n# Show context for last 10 commits\nctx trace --last 10\n\n# JSON output\nctx trace abc123 --json\n

    Output:

    Commit: abc123 \"Fix auth token expiry\"\nDate:   2026-03-14 10:00:00 -0700\nContext:\n  [Decision] #12: Use short-lived tokens with server-side refresh\n    Date: 2026-03-10\n\n  [Task] #8: Implement token rotation for compliance\n    Status: completed\n

    When listing recent commits with --last:

    abc123  Fix auth token expiry         decision:12, task:8\ndef456  Add rate limiting             decision:15, learning:7\n789abc  Update dependencies           (none)\n
    ","path":["CLI","Diagnostics","Commit Context Tracing"],"tags":[]},{"location":"cli/trace/#ctx-trace-file","level":3,"title":"ctx trace file","text":"

    Show the context trail for a file. Combines git log with context resolution.

    ctx trace file <path[:line-range]> [flags]\n

    Flags:

    Flag Description --last N Maximum commits to show (default: 20)

    Examples:

    # Show context trail for a file\nctx trace file src/auth.go\n\n# Show context for specific line range\nctx trace file src/auth.go:42-60\n
    ","path":["CLI","Diagnostics","Commit Context Tracing"],"tags":[]},{"location":"cli/trace/#ctx-trace-tag","level":3,"title":"ctx trace tag","text":"

    Manually tag a commit with context. For commits made without the hook, or to add extra context after the fact.

    Tags are stored in .context/trace/overrides.jsonl since git trailers cannot be added to existing commits without rewriting history.

    ctx trace tag <commit> --note \"<text>\"\n

    Examples:

    ctx trace tag HEAD --note \"Hotfix for production outage\"\nctx trace tag abc123 --note \"Part of Q1 compliance initiative\"\n
    ","path":["CLI","Diagnostics","Commit Context Tracing"],"tags":[]},{"location":"cli/trace/#ctx-trace-hook","level":3,"title":"ctx trace hook","text":"

    Enable or disable the prepare-commit-msg hook for automatic context tracing. When enabled, commits automatically receive a ctx-context trailer with references to relevant decisions, tasks, learnings, and sessions.

    ctx trace hook <enable|disable>\n

    Prerequisites: ctx must be on your $PATH. If you installed via go install, ensure $GOPATH/bin (or $HOME/go/bin) is in your shell's $PATH.

    What the hook does:

    1. Before each commit, collects context from three sources:
       • Pending context accumulated during work (ctx add, ctx task complete)
       • Staged file changes to .context/ files
       • Working state (in-progress tasks, active AI session)
    2. Injects a ctx-context trailer into the commit message
    3. After commit, records the mapping in .context/trace/history.jsonl

    Examples:

    # Install the hook\nctx trace hook enable\n\n# Remove the hook\nctx trace hook disable\n

    Resulting commit message:

    Fix auth token expiry handling\n\nRefactored token refresh logic to handle edge case\nwhere refresh token expires during request.\n\nctx-context: decision:12, task:8, session:abc123\n
    ","path":["CLI","Diagnostics","Commit Context Tracing"],"tags":[]},{"location":"cli/trace/#reference-types","level":3,"title":"Reference Types","text":"

    The ctx-context trailer supports these reference types:

    Prefix Points to Example decision:<n> Entry #n in DECISIONS.md decision:12learning:<n> Entry #n in LEARNINGS.md learning:5task:<n> Task #n in TASKS.md task:8convention:<n> Entry #n in CONVENTIONS.md convention:3session:<id> AI session by ID session:abc123\"<text>\" Free-form context note \"Performance fix for P1 incident\"","path":["CLI","Diagnostics","Commit Context Tracing"],"tags":[]},{"location":"cli/trace/#storage","level":3,"title":"Storage","text":"

    Context trace data is stored in the .context/ directory:

    File Purpose Lifecycle state/pending-context.jsonl Accumulates refs during work Truncated after each commit trace/history.jsonl Permanent commit-to-context map Append-only, never truncated trace/overrides.jsonl Manual tags for existing commits Append-only","path":["CLI","Diagnostics","Commit Context Tracing"],"tags":[]},{"location":"cli/trigger/","level":1,"title":"Trigger","text":"","path":["CLI","Integrations","Trigger"],"tags":[]},{"location":"cli/trigger/#ctx-trigger","level":2,"title":"ctx trigger","text":"

    Manage lifecycle triggers: executable scripts that fire at specific events during an AI session. Triggers can block tool calls, inject context, automate reactions — any side effect you want at session boundaries, tool boundaries, or file-save events.

    ctx trigger <subcommand>\n

    Triggers Execute Arbitrary Scripts

    A trigger is a shell script with the executable bit set. It runs with the same privileges as your AI tool and receives JSON input on stdin. Treat triggers like pre-commit hooks: only enable scripts you've read and understand. A malicious or buggy trigger can block tool calls, corrupt context files, or exfiltrate data.

    ","path":["CLI","Integrations","Trigger"],"tags":[]},{"location":"cli/trigger/#where-triggers-live","level":3,"title":"Where Triggers Live","text":"

    Triggers live in .context/hooks/<trigger-type>/ as executable scripts. The on-disk directory name is still hooks/ for historical reasons even though the command is ctx trigger. Each script:

    • Reads a JSON payload from stdin.
    • Returns a JSON payload on stdout.
    • Returns a non-zero exit code to block or error.
    .context/\n└── hooks/\n    ├── session-start/\n    │   └── inject-context.sh\n    ├── pre-tool-use/\n    │   └── block-legacy.sh\n    └── post-tool-use/\n        └── record-edit.sh\n
    ","path":["CLI","Integrations","Trigger"],"tags":[]},{"location":"cli/trigger/#trigger-types","level":3,"title":"Trigger Types","text":"Type Fires when session-start An AI session begins session-end An AI session ends pre-tool-use Before an AI tool call is executed post-tool-use After an AI tool call returns file-save When a file is saved context-add When a context entry is added","path":["CLI","Integrations","Trigger"],"tags":[]},{"location":"cli/trigger/#input-and-output-contract","level":3,"title":"Input and Output Contract","text":"

    Each trigger receives a JSON object on stdin with the event details. Minimal contract (fields vary by trigger type):

    {\n  \"type\": \"pre-tool-use\",\n  \"tool\": \"write_file\",\n  \"path\": \"src/auth.go\",\n  \"session_id\": \"abc123-...\"\n}\n

    The trigger may write a JSON object to stdout to influence behavior. Example for a blocking pre-tool-use trigger:

    {\n  \"action\": \"block\",\n  \"message\": \"Editing src/auth.go requires approval from #security\"\n}\n

    For non-blocking event loggers, simply read stdin and exit 0 without writing to stdout.

    ","path":["CLI","Integrations","Trigger"],"tags":[]},{"location":"cli/trigger/#ctx-trigger-add","level":3,"title":"ctx trigger add","text":"

    Create a new trigger script with a template. The generated file has a bash shebang, a stdin reader using jq, and a basic JSON output structure.

    ctx trigger add <trigger-type> <name>\n

    Arguments:

    • trigger-type: One of session-start, session-end, pre-tool-use, post-tool-use, file-save, context-add
    • name: Script name (without .sh extension)

    Examples:

    ctx trigger add session-start inject-context\n# Created .context/hooks/session-start/inject-context.sh\n\nctx trigger add pre-tool-use block-legacy\n# Created .context/hooks/pre-tool-use/block-legacy.sh\n

    The generated script is not executable by default. Enable it with ctx trigger enable after reviewing the contents.

    ","path":["CLI","Integrations","Trigger"],"tags":[]},{"location":"cli/trigger/#ctx-trigger-list","level":3,"title":"ctx trigger list","text":"

    List all discovered triggers, grouped by trigger type, with their enabled/disabled status.

    Examples:

    ctx trigger list\n
    ","path":["CLI","Integrations","Trigger"],"tags":[]},{"location":"cli/trigger/#ctx-trigger-test","level":3,"title":"ctx trigger test","text":"

    Run all enabled triggers of a given type against a mock payload. Use --tool and --path to customize the mock input for tool-related events.

    ctx trigger test <trigger-type> [flags]\n

    Flags:

    Flag Description --tool Tool name to put in mock input --path File path to put in mock input

    Examples:

    ctx trigger test session-start\nctx trigger test pre-tool-use --tool write_file --path src/main.go\n
    ","path":["CLI","Integrations","Trigger"],"tags":[]},{"location":"cli/trigger/#ctx-trigger-enable","level":3,"title":"ctx trigger enable","text":"

    Enable a trigger by setting its executable permission bit. Searches every trigger-type directory for a script matching <name>.

    ctx trigger enable <name>\n

    Examples:

    ctx trigger enable inject-context\n# Enabled .context/hooks/session-start/inject-context.sh\n
    ","path":["CLI","Integrations","Trigger"],"tags":[]},{"location":"cli/trigger/#ctx-trigger-disable","level":3,"title":"ctx trigger disable","text":"

    Disable a trigger by clearing its executable permission bit. Searches every trigger-type directory for a script matching <name>.

    ctx trigger disable <name>\n

    Examples:

    ctx trigger disable inject-context\n# Disabled .context/hooks/session-start/inject-context.sh\n
    ","path":["CLI","Integrations","Trigger"],"tags":[]},{"location":"cli/trigger/#three-hooking-concepts-in-ctx-dont-confuse-them","level":3,"title":"Three Hooking Concepts in ctx — Don't Confuse Them","text":"

    This is a common source of confusion. ctx has three distinct hook-like layers, and they serve different purposes:

    Layer Owned by Where it runs Configured via ctx trigger You .context/hooks/<type>/*.shctx trigger add/enablectx system hooks ctx itself built-in, called by ctx's own lifecycle internal (see ctx system --help) Claude Code hooks Claude Code .claude/settings.local.json edit JSON, or /ctx-sanitize-permissions

    Use ctx trigger when you want project-specific automation that your AI tool will run at lifecycle events. Use Claude Code hooks for tool-specific integrations that don't need to be portable across tools. ctx system hooks are not something you author — they're the internal nudge machinery that ships with ctx.

    ","path":["CLI","Integrations","Trigger"],"tags":[]},{"location":"cli/trigger/#see-also","level":3,"title":"See Also","text":"
    • ctx steering — persistent AI behavioral rules (a different concept; rules vs scripts)
    • Authoring triggers recipe — a full walkthrough with security guidance
    ","path":["CLI","Integrations","Trigger"],"tags":[]},{"location":"cli/usage/","level":1,"title":"Usage","text":"","path":["CLI","Diagnostics","Usage"],"tags":[]},{"location":"cli/usage/#ctx-usage","level":3,"title":"ctx usage","text":"

    Display per-session token usage statistics from the local stats JSONL files written by the heartbeat hook. By default, shows the last 20 entries across all sessions. Use --follow to stream new entries as they arrive (like tail -f).

    ctx usage [flags]\n

    Flags:

    Flag Description -f, --follow Stream new entries as they arrive -s, --session Filter by session ID (prefix match) -n, --last Show last N entries (default: 20) -j, --json Output raw JSONL

    Examples:

    ctx usage                     # Last 20 entries across all sessions\nctx usage --follow            # Live stream (like tail -f)\nctx usage --session abc123    # Filter to one session\nctx usage --last 100 --json   # Last 100 as raw JSONL\n
    ","path":["CLI","Diagnostics","Usage"],"tags":[]},{"location":"cli/watch/","level":1,"title":"Watch","text":"","path":["CLI","Context","Watch"],"tags":[]},{"location":"cli/watch/#ctx-watch","level":2,"title":"ctx watch","text":"

    Watch for AI output and auto-apply context updates.

    Parses <context-update> XML commands from AI output and applies them to context files.

    ctx watch [flags]\n

    Flags:

    Flag Description --log <file> Log file to watch (default: stdin) --dry-run Preview updates without applying

    Examples:

    # Watch stdin\nai-tool | ctx watch\n\n# Watch a log file\nctx watch --log /path/to/ai-output.log\n\n# Preview without applying\nctx watch --dry-run\n
    ","path":["CLI","Context","Watch"],"tags":[]},{"location":"cli/why/","level":1,"title":"Why","text":"","path":["CLI","Getting Started","Why"],"tags":[]},{"location":"cli/why/#ctx-why","level":2,"title":"ctx why","text":"

    Read ctx's philosophy documents directly in the terminal.

    ctx why [DOCUMENT]\n

    Documents:

    Name Description manifesto The ctx Manifesto: creation, not code about About ctx: what it is and why it exists invariants Design invariants: properties that must hold

    Examples:

    # Interactive numbered menu\nctx why\n\n# Show a specific document\nctx why manifesto\nctx why about\nctx why invariants\n\n# Pipe to a pager\nctx why manifesto | less\n
    ","path":["CLI","Getting Started","Why"],"tags":[]},{"location":"home/","level":1,"title":"Home","text":"
    • ctx is not a prompt.
    • ctx is version-controlled cognitive state.

    ctx is the persistence layer for human-AI reasoning.

    Deterministic. Git-native. Human-readable. Local-first.

    Start here.

    Learn what ctx does, set it up, and run your first session.

    Pre-1.0: Moving Fast

    ctx is under active development. This website tracks the development branch, not the latest release:

    Some features described here may not exist in the binary you have installed.

    Expect rough edges.

    If something is missing or broken, open an issue.

    ","path":["Home","Introduction","Home"],"tags":[]},{"location":"home/#introduction","level":2,"title":"Introduction","text":"","path":["Home","Introduction","Home"],"tags":[]},{"location":"home/#about","level":3,"title":"About","text":"

    What ctx is, how it works, and why persistent context changes how you work with AI.

    ","path":["Home","Introduction","Home"],"tags":[]},{"location":"home/#is-it-right-for-me","level":3,"title":"Is It Right for Me?","text":"

    Good fit, not-so-good fit, and a 5-minute trial to find out for yourself.

    ","path":["Home","Introduction","Home"],"tags":[]},{"location":"home/#faq","level":3,"title":"FAQ","text":"

    Quick answers to the questions newcomers ask most about ctx, files, tooling, and trade-offs.

    ","path":["Home","Introduction","Home"],"tags":[]},{"location":"home/#get-started","level":2,"title":"Get Started","text":"","path":["Home","Introduction","Home"],"tags":[]},{"location":"home/#getting-started","level":3,"title":"Getting Started","text":"

    Install the binary, set up the plugin, and verify it works.

    ","path":["Home","Introduction","Home"],"tags":[]},{"location":"home/#your-first-session","level":3,"title":"Your First Session","text":"

    Step-by-step walkthrough from ctx init to verified recall.

    ","path":["Home","Introduction","Home"],"tags":[]},{"location":"home/#common-workflows","level":3,"title":"Common Workflows","text":"

    Day-to-day commands for tracking context, checking health, and browsing history.

    ","path":["Home","Introduction","Home"],"tags":[]},{"location":"home/#concepts","level":2,"title":"Concepts","text":"","path":["Home","Introduction","Home"],"tags":[]},{"location":"home/#context-files","level":3,"title":"Context Files","text":"

    What each .context/ file does, what its purpose is, and how to best leverage it.

    ","path":["Home","Introduction","Home"],"tags":[]},{"location":"home/#configuration","level":3,"title":"Configuration","text":"

    Flexible configuration: .ctxrc, environment variables, and CLI flags.

    ","path":["Home","Introduction","Home"],"tags":[]},{"location":"home/#hub","level":3,"title":"Hub","text":"

    A fan-out channel for decisions, learnings, conventions, and tasks that need to cross project boundaries — without replicating everything else.

    ","path":["Home","Introduction","Home"],"tags":[]},{"location":"home/#working-with-ai","level":2,"title":"Working with AI","text":"","path":["Home","Introduction","Home"],"tags":[]},{"location":"home/#prompting-guide","level":3,"title":"Prompting Guide","text":"

    Effective prompts for AI sessions with ctx.

    ","path":["Home","Introduction","Home"],"tags":[]},{"location":"home/#keeping-ai-honest","level":3,"title":"Keeping AI Honest","text":"

    AI agents confabulate: they invent history, claim familiarity with decisions never made, and sometimes declare tasks complete when they aren't. Tools and habits to push back.

    ","path":["Home","Introduction","Home"],"tags":[]},{"location":"home/#my-ai-keeps-making-the-same-mistakes","level":3,"title":"My AI Keeps Making the Same Mistakes","text":"

    Stop rediscovering the same bugs and dead-ends across sessions.

    ","path":["Home","Introduction","Home"],"tags":[]},{"location":"home/#joining-a-project","level":3,"title":"Joining a Project","text":"

    You inherited a .context/ directory. Get oriented fast: priority order, what to read first, how to ramp up.

    ","path":["Home","Introduction","Home"],"tags":[]},{"location":"home/#customization","level":2,"title":"Customization","text":"","path":["Home","Introduction","Home"],"tags":[]},{"location":"home/#steering-files","level":3,"title":"Steering Files","text":"

    Tell the assistant how to behave when a specific kind of prompt arrives.

    ","path":["Home","Introduction","Home"],"tags":[]},{"location":"home/#lifecycle-triggers","level":3,"title":"Lifecycle Triggers","text":"

    Make things happen at session boundaries: block dangerous tool calls, inject standup notes, log file saves.

    ","path":["Home","Introduction","Home"],"tags":[]},{"location":"home/#community","level":2,"title":"Community","text":"","path":["Home","Introduction","Home"],"tags":[]},{"location":"home/#ctx","level":3,"title":"#ctx","text":"

    We are the builders who care about durable context. Join the community. Hang out in IRC. Star ctx on GitHub.

    ","path":["Home","Introduction","Home"],"tags":[]},{"location":"home/#contributing","level":3,"title":"Contributing","text":"

    Development setup, project layout, and pull request process.

    ","path":["Home","Introduction","Home"],"tags":[]},{"location":"home/about/","level":1,"title":"About","text":"

    \"Creation, not code; Context, not prompts; Verification, not vibes.\"

    Read the ctx Manifesto →

    \"Without durable context, intelligence resets; with ctx, creation compounds.\"

    Without persistent memory, every session starts at zero; ctx makes sessions cumulative.

    Join the ctx Community →

    ","path":["Home","Introduction","About"],"tags":[]},{"location":"home/about/#what-is-ctx","level":2,"title":"What Is ctx?","text":"

    ctx (Context) is a file-based system that enables AI coding assistants to persist project knowledge across sessions. It lives in a .context/ directory in your repo.

    • A session is interactive.
    • ctx enables cognitive continuity.
    • Cognitive continuity enables durable, symbiotic-like human-AI workflows.

    Context Files

    Context files let AI tools remember decisions, conventions, and learnings:

    Context files are explicit and versionable contracts between you and your agents.

    ","path":["Home","Introduction","About"],"tags":[]},{"location":"home/about/#why-do-i-keep-re-explaining-my-codebase","level":2,"title":"Why Do I Keep Re-Explaining My Codebase?!?!","text":"

    You open a new AI session. The first thing you do is re-explain your project.

    Again.

    The architecture, the database choice, the naming conventions, the thing you tried last week that didn't work. You have said all of this before (maybe yesterday, maybe an hour ago), but the AI doesn't know that.

    • You explain the same architecture every session;
    • The AI suggests an approach you already rejected, again;
    • A decision you made three sessions ago gets relitigated from scratch;
    • You spend more time setting context than building features.

    This isn't AI failure: It is staleness.

    Without persistent memory, every session starts at zero.

    ","path":["Home","Introduction","About"],"tags":[]},{"location":"home/about/#day-and-night","level":3,"title":"Day and Night","text":"

    ctx gives your AI a memory that persists across sessions:

    Without ctxWith ctx
    Session 12: Monday morning\n\nAI:  \"I'll scaffold a MySQL schema for this.\"\nYou: \"We use PostgreSQL.\"\n\nAI:  \"For auth, session-based would be simplest.\"\nYou: \"We decided on JWT.\"\n\nAI:  \"Endpoints in camelCase?\"\nYou: \"snake_case! Like the rest of the API!!!!!!\"\n\nYou: *scrolls up to copy an existing endpoint*\n\n20 minutes spent restoring decisions that already exist.\n
    Session 12: Monday morning\n\nYou:  \"Let's continue the auth implementation.\"\nAI:   \"Per DECISIONS.md (2026-02-12): JWT, not sessions: \n       The project uses PostgreSQL with JWT\n       authentication. The API follows snake_case conventions.\n       Last session you completed the login endpoint: the\n       registration endpoint is next on TASKS.md. Want me to\n       start there?\"\n\n0 minutes re-explaining. Straight to building.\n
    ","path":["Home","Introduction","About"],"tags":[]},{"location":"home/about/#how-ctx-solves-this","level":3,"title":"How ctx Solves This","text":"

    ctx creates a .context/ directory in your project that stores structured knowledge files:

    File What It Remembers TASKS.md What you're working on and what's next DECISIONS.md Architectural choices and why you made them LEARNINGS.md Gotchas, bugs, things that didn't work CONVENTIONS.md Naming patterns, code style, project rules CONSTITUTION.md Hard rules the AI must never violate

    These files can version with your code in git:

    • They load automatically at the session start (via hooks in Claude Code, or manually with ctx agent for other tools).
    • The AI reads them, cites them, and builds on them, instead of asking you to start over.
      • And when it acts, it can point to the exact file and line that justifies the choice.

    Every decision you record, every lesson you capture, makes the next session smarter.

    ctx accumulates.

    Connect with ctx

    • Join the Community →: ask questions, share workflows, and help shape what comes next
    • Read the Blog →: real-world patterns, ponderings, and lessons learned from building ctx using ctx

    Ready to Get Started?

    • Getting Started →: full installation and setup
    • Your First Session →: step-by-step walkthrough from ctx init to verified recall
    ","path":["Home","Introduction","About"],"tags":[]},{"location":"home/common-workflows/","level":1,"title":"Common Workflows","text":"

    The commands below cover what you'll use most often:

    • recording context,
    • checking health,
    • browsing history,
    • and running loops.

    Each section is a self-contained snippet you can copy into your terminal.

    For deeper, step-by-step guides, see Recipes.

    ","path":["Home","Get Started","Common Workflows"],"tags":[]},{"location":"home/common-workflows/#track-context","level":2,"title":"Track Context","text":"

    Prefer Skills over Raw Commands

    When working with an AI agent, use /ctx-task-add, /ctx-decision-add, or /ctx-learning-add instead of raw ctx add commands. The agent automatically picks up session ID, branch, and commit hash from its context — no manual flags needed.

    # Add a task\nctx add task \"Implement user authentication\" \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n\n# Record a decision (full ADR fields required)\nctx add decision \"Use PostgreSQL for primary database\" \\\n  --context \"Need a reliable database for production\" \\\n  --rationale \"PostgreSQL offers ACID compliance and JSON support\" \\\n  --consequence \"Team needs PostgreSQL training\" \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n\n# Note a learning\nctx add learning \"Mock functions must be hoisted in Jest\" \\\n  --context \"Tests failed with undefined mock errors\" \\\n  --lesson \"Jest hoists mock calls to top of file\" \\\n  --application \"Place jest.mock() before imports\" \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n\n# Mark task complete\nctx task complete \"user auth\"\n
    ","path":["Home","Get Started","Common Workflows"],"tags":[]},{"location":"home/common-workflows/#leave-a-reminder-for-next-session","level":2,"title":"Leave a Reminder for Next Session","text":"

    Drop a note that surfaces automatically at the start of your next session:

    # Leave a reminder\nctx remind \"refactor the swagger definitions\"\n\n# Date-gated: don't surface until a specific date\nctx remind \"check CI after the deploy\" --after 2026-02-25\n\n# List pending reminders\nctx remind list\n\n# Dismiss reminders by ID (supports ranges)\nctx remind dismiss 1\nctx remind dismiss 3 5-7\n

    Reminders are relayed verbatim at session start by the check-reminders hook and repeat every session until you dismiss them.

    See Session Reminders for the full recipe.

    ","path":["Home","Get Started","Common Workflows"],"tags":[]},{"location":"home/common-workflows/#check-context-health","level":2,"title":"Check Context Health","text":"
    # Detect stale paths, missing files, potential secrets\nctx drift\n\n# See full context summary\nctx status\n
    ","path":["Home","Get Started","Common Workflows"],"tags":[]},{"location":"home/common-workflows/#browse-session-history","level":2,"title":"Browse Session History","text":"

    List and search past AI sessions from the terminal:

    ctx journal source --limit 5\n
    ","path":["Home","Get Started","Common Workflows"],"tags":[]},{"location":"home/common-workflows/#journal-site","level":3,"title":"Journal Site","text":"

    Import session transcripts to a browsable static site with search, navigation, and topic indices.

    The ctx journal command requires zensical (Python >= 3.10).

    zensical is a Python-based static site generator from the Material for MkDocs team.

    (why zensical?).

    If you don't have it on your system, install zensical once with pipx:

    # One-time setup\npipx install zensical\n

    Avoid pip install zensical

    pip install often fails: For example, on macOS, system Python installs a non-functional stub (zensical requires Python >= 3.10), and Homebrew Python blocks system-wide installs (PEP 668).

    pipx creates an isolated environment with the correct Python version automatically.

    ","path":["Home","Get Started","Common Workflows"],"tags":[]},{"location":"home/common-workflows/#import-and-serve","level":3,"title":"Import and Serve","text":"

    Then, import and serve:

    # Import all sessions to .context/journal/ (only new files)\nctx journal import --all\n\n# Generate and serve the journal site\nctx journal site --serve\n

    Open http://localhost:8000 to browse.

    To update after new sessions, run the same two commands again.

    ","path":["Home","Get Started","Common Workflows"],"tags":[]},{"location":"home/common-workflows/#safe-by-default","level":3,"title":"Safe by Default","text":"

    ctx journal import --all is safe by default:

    • It only imports new sessions and skips existing files.
    • Locked entries (via ctx journal lock) are always skipped by both import and enrichment skills.
    • If you add locked: true to frontmatter during enrichment, run ctx journal sync to propagate the lock state to .state.json.
    ","path":["Home","Get Started","Common Workflows"],"tags":[]},{"location":"home/common-workflows/#re-importing-existing-files","level":3,"title":"Re-Importing Existing Files","text":"

    Here is how you regenerate existing files.

    Backup your .context folder before regeneration, as this is a potentially destructive action.

    To re-import journal files, you need to explicitly opt-in using the --regenerate flag:

    Flag combination Frontmatter Body --regenerate Preserved Overwritten from source --regenerate --keep-frontmatter=false Overwritten Overwritten

    Regeneration Overwrites Body Edits

    --regenerate preserves your YAML frontmatter (tags, summary, enrichment metadata) but it replaces the Markdown body with a fresh import.

    Any manual edits you made to the transcript will be lost.

    Lock entries you want to protect first: ctx journal lock <session-id>.

    See Session Journal for the full pipeline including normalization and enrichment.

    ","path":["Home","Get Started","Common Workflows"],"tags":[]},{"location":"home/common-workflows/#scratchpad","level":2,"title":"Scratchpad","text":"

    Store short, sensitive one-liners in an encrypted scratchpad that travels with the project:

    # Write a note\nctx pad set db-password \"postgres://user:pass@localhost/mydb\"\n\n# Read it back\nctx pad get db-password\n\n# List all keys\nctx pad list\n

    The scratchpad is encrypted with a key stored at ~/.ctx/.ctx.key (outside the project, never committed).

    See Scratchpad for details.

    ","path":["Home","Get Started","Common Workflows"],"tags":[]},{"location":"home/common-workflows/#run-an-autonomous-loop","level":2,"title":"Run an Autonomous Loop","text":"

    Generate a script that iterates an AI agent until a completion signal is detected:

    ctx loop\nchmod +x loop.sh\n./loop.sh\n

    See Autonomous Loops for configuration and advanced usage.

    ","path":["Home","Get Started","Common Workflows"],"tags":[]},{"location":"home/common-workflows/#trace-commit-context","level":2,"title":"Trace Commit Context","text":"

    Link your git commits back to the decisions, tasks, and learnings that motivated them. Enable the hook once:

    # Install the git hook (one-time setup)\nctx trace hook enable\n

    From now on, every git commit automatically gets a ctx-context trailer linking it to relevant context. No extra steps needed — just use ctx add, ctx task complete, and commit as usual.

    # Later: why was this commit made?\nctx trace abc123\n\n# Recent commits with their context\nctx trace --last 10\n\n# Context trail for a specific file\nctx trace file src/auth.go\n\n# Manually tag a commit after the fact\nctx trace tag HEAD --note \"Hotfix for production outage\"\n

    To stop: ctx trace hook disable.

    See CLI Reference: trace for details.

    ","path":["Home","Get Started","Common Workflows"],"tags":[]},{"location":"home/common-workflows/#agent-session-start","level":2,"title":"Agent Session Start","text":"

    The first thing an AI agent should do at session start is discover where context lives:

    ctx system bootstrap\n

    This prints the resolved context directory, the files in it, and the operating rules. The CLAUDE.md template instructs the agent to run this automatically. See CLI Reference: bootstrap.

    ","path":["Home","Get Started","Common Workflows"],"tags":[]},{"location":"home/common-workflows/#the-two-skills-you-should-always-use","level":2,"title":"The Two Skills You Should Always Use","text":"

    Using /ctx-remember at session start and /ctx-wrap-up at session end are the highest-value skills in the entire catalog:

    # session begins:\n/ctx-remember\n... do work ...\n# before closing the session:\n/ctx-wrap-up\n

    Let's provide some context, because this is important:

    Although the agent will eventually discover your context through CLAUDE.md → AGENT_PLAYBOOK.md, /ctx-remember hydrates the full context up front (tasks, decisions, recent sessions) so the agent starts informed rather than piecing things together over several turns.

    /ctx-wrap-up is the other half: A structured review that captures learnings, decisions, and tasks before you close the window.

    Hooks like check-persistence remind you (the user) mid-session that context hasn't been saved in a while, but they don't trigger persistence automatically: You still have to act. Also, a CTRL+C can end things at any moment with no reliable \"before session end\" event.

    In short, /ctx-wrap-up is the deliberate checkpoint that makes sure nothing slips through. And /ctx-remember is its mirror skill, to be used at session start.

    See Session Ceremonies for the full workflow.

    ","path":["Home","Get Started","Common Workflows"],"tags":[]},{"location":"home/common-workflows/#cli-commands-vs-ai-skills","level":2,"title":"CLI Commands vs. AI Skills","text":"

    Most ctx operations come in two flavors: a CLI command you run in your terminal and an AI skill (slash command) you invoke inside your coding assistant.

    Commands and skills are not interchangeable: Each has a distinct role.

    ctx CLI command ctx AI skill Runs where Your terminal Inside the AI assistant Speed Fast (milliseconds) Slower (LLM round-trip) Cost Free Consumes tokens and context Analysis Deterministic heuristics Semantic / judgment-based Best for Quick checks, scripting, CI Deep analysis, generation, workflow orchestration","path":["Home","Get Started","Common Workflows"],"tags":[]},{"location":"home/common-workflows/#paired-commands","level":3,"title":"Paired Commands","text":"

    These have both a CLI and a skill counterpart. Use the CLI for quick, deterministic checks; use the skill when you need the agent's judgment.

    CLI Skill When to prefer the skill ctx drift/ctx-drift Semantic analysis: catches meaning drift the CLI misses ctx status/ctx-status Interpreted summary with recommendations ctx add task/ctx-task-add Agent decomposes vague goals into concrete tasks ctx add decision/ctx-decision-add Agent drafts rationale and consequences from discussion ctx add learning/ctx-learning-add Agent extracts the lesson from a debugging session ctx add convention/ctx-convention-add Agent observes a repeated pattern and codifies it ctx task archive/ctx-archive Agent reviews which tasks are truly done ctx pad/ctx-pad Agent reads/writes scratchpad entries in conversation flow ctx journal/ctx-history Agent searches session history with semantic understanding ctx agent/ctx-agent Agent loads and acts on the context packet ctx loop/ctx-loop Agent tailors the loop script to your project ctx doctor/ctx-doctor Agent adds semantic analysis to structural checks ctx hook pause/ctx-pause Agent pauses hooks with session-aware reasoning ctx hook resume/ctx-resume Agent resumes hooks after a pause ctx remind/ctx-remind Agent manages reminders in conversation flow","path":["Home","Get Started","Common Workflows"],"tags":[]},{"location":"home/common-workflows/#ai-only-skills","level":3,"title":"AI-Only Skills","text":"

    These have no CLI equivalent. They require the agent's reasoning.

    Skill Purpose /ctx-remember Load context and present structured readback at session start /ctx-wrap-up End-of-session ceremony: persist learnings, decisions, tasks /ctx-next Suggest 1-3 concrete next actions from context /ctx-commit Commit with integrated context capture /ctx-reflect Pause and assess session progress /ctx-consolidate Merge overlapping learnings or decisions /ctx-prompt-audit Analyze prompting patterns for improvement /ctx-plan-import Import Claude Code plan files into project specs /ctx-implement Execute a plan step-by-step with verification /ctx-worktree Manage parallel agent worktrees /ctx-journal-enrich Add metadata, tags, and summaries to journal entries /ctx-journal-enrich-all Full journal pipeline: export if needed, then batch-enrich /ctx-blog Generate a blog post (zensical-flavored Markdown) /ctx-blog-changelog Generate themed blog post from commits between releases /ctx-architecture Build and maintain architecture maps (ARCHITECTURE.md, DETAILED_DESIGN.md)","path":["Home","Get Started","Common Workflows"],"tags":[]},{"location":"home/common-workflows/#cli-only-commands","level":3,"title":"CLI-Only Commands","text":"

    These are infrastructure: used in scripts, CI, or one-time setup.

    Command Purpose ctx init Initialize .context/ directory ctx load Output assembled context for piping ctx task complete Mark a task done by substring match ctx sync Reconcile context with codebase state ctx compact Consolidate and clean up context files ctx trace Show context behind git commits ctx trace hook Enable/disable commit context tracing hook ctx setup Generate AI tool integration config ctx watch Watch AI output and auto-apply context updates ctx serve Serve any zensical directory (default: journal) ctx permission snapshot Save settings as a golden image ctx permission restore Restore settings from golden image ctx journal site Generate browsable journal from exports ctx hook notify setup Configure webhook notifications ctx decision List and filter decisions ctx learning List and filter learnings ctx task List tasks, manage archival and snapshots ctx why Read the philosophy behind ctx ctx guide Quick-reference cheat sheet ctx site Site management commands ctx config Manage runtime configuration profiles ctx system System diagnostics and hook commands ctx backup Back up context and Claude data to tar.gz / SMB ctx completion Generate shell autocompletion scripts

    Rule of Thumb

    Quick check? Use the CLI.

    Need judgment? Use the skill.

    When in doubt, start with the CLI: It's free and instant.

    Escalate to the skill when heuristics aren't enough.

    Next Up: Context Files →: what each .context/ file does and how to use it

    See Also:

    • Recipes: targeted how-to guides for specific tasks
    • Knowledge Capture: patterns for recording decisions, learnings, and conventions
    • Context Health: keeping your .context/ accurate and drift-free
    • Session Archaeology: digging into past sessions
    • Task Management: tracking and completing work items
    ","path":["Home","Get Started","Common Workflows"],"tags":[]},{"location":"home/community/","level":1,"title":"#ctx","text":"

    Open source is better together.

    We are the builders who care about durable context, verifiable decisions, and human-AI workflows that compound over time.

    ","path":["Home","Community","#ctx"],"tags":[]},{"location":"home/community/#help-ctx-change-how-ai-remembers","level":2,"title":"Help ctx Change How AI Remembers","text":"

    If you like the idea, a star helps ctx reach engineers who run into context drift every day:

    Star ctx on GitHub ⭐

    ","path":["Home","Community","#ctx"],"tags":[]},{"location":"home/community/#ctx-you","level":2,"title":"ctx ♥️ You","text":"

    Join the community to ask questions, share feedback, and connect with other users:

    • Discord join the ctx Discord: Real-time discussion, field notes, and early ideas.
    • Read the ctx Source on GitHub: Issues, discussions, and contributions.
    ","path":["Home","Community","#ctx"],"tags":[]},{"location":"home/community/#want-to-contribute","level":2,"title":"Want to Contribute?","text":"

    Early adopters shape the conventions.

    ctx is free and open source software.

    Contributions are always welcome and appreciated.

    ","path":["Home","Community","#ctx"],"tags":[]},{"location":"home/community/#code-of-conduct","level":2,"title":"Code of Conduct","text":"

    Clear context requires respectful collaboration.

    ctx follows the Contributor Covenant.

    ","path":["Home","Community","#ctx"],"tags":[]},{"location":"home/configuration/","level":1,"title":"Configuration","text":"","path":["Home","Concepts","Configuration"],"tags":[]},{"location":"home/configuration/#configuration","level":2,"title":"Configuration","text":"

    ctx uses four layers of configuration. Each layer overrides the one below it:

    1. CLI flags: Per-invocation overrides (highest priority)
    2. Environment variables: Shell or CI/CD overrides
    3. The .ctxrc file: Project-level defaults (YAML)
    4. Built-in defaults: Hardcoded fallbacks (lowest priority)

    All settings are optional: If nothing is configured, ctx works out of the box with sensible defaults.

    ","path":["Home","Concepts","Configuration"],"tags":[]},{"location":"home/configuration/#the-ctxrc-file","level":2,"title":"The .ctxrc File","text":"

    The .ctxrc file is an optional YAML file placed in the project root (next to your .context/ directory). It lets you set project-level defaults that apply to every ctx command.

    ","path":["Home","Concepts","Configuration"],"tags":[]},{"location":"home/configuration/#location","level":3,"title":"Location","text":"
    my-project/\n├── .ctxrc              ← configuration file\n├── .context/\n│   ├── TASKS.md\n│   ├── DECISIONS.md\n│   └── ...\n└── src/\n

    ctx looks for .ctxrc in the current working directory when any command runs. There is no global or user-level config file: Configuration is always per-project.

    Contributors: Dev Configuration Profile

    The ctx repo ships two .ctxrc source profiles (.ctxrc.base and .ctxrc.dev). The working copy is gitignored and swapped between them via ctx config switch dev / ctx config switch base. See Contributing: Configuration Profiles.

    Using a Different .context Directory

    The default .context/ directory can be changed per-project via the context_dir key in .ctxrc, the CTX_DIR environment variable, or the --context-dir CLI flag.

    See Environment Variables and CLI Global Flags below for details.

    ","path":["Home","Concepts","Configuration"],"tags":[]},{"location":"home/configuration/#full-reference","level":3,"title":"Full Reference","text":"

    A commented .ctxrc showing all options and their defaults:

    # .ctxrc: ctx runtime configuration\n# https://ctx.ist/configuration/\n#\n# All settings are optional. Missing values use defaults.\n# Priority: CLI flags > environment variables > .ctxrc > defaults\n#\n# context_dir: .context\n# token_budget: 8000\n# auto_archive: true\n# archive_after_days: 7\n# scratchpad_encrypt: true\n# allow_outside_cwd: false\n# event_log: false\n# entry_count_learnings: 30\n# entry_count_decisions: 20\n# convention_line_count: 200\n# injection_token_warn: 15000\n# context_window: 200000      # auto-detected for Claude Code; override for other tools\n# billing_token_warn: 0       # one-shot warning at this token count (0 = disabled)\n#\n# stale_age_days: 30      # days before drift flags a context file as stale (0 = disabled)\n# key_rotation_days: 90\n# task_nudge_interval: 5   # Edit/Write calls between task completion nudges\n#\n# notify:               # requires: ctx hook notify setup\n#   events:             # required: no events sent unless listed\n#     - loop\n#     - nudge\n#     - relay\n#\n# tool: \"\"              # Active AI tool: claude, cursor, cline, kiro, codex\n#\n# steering:             # Steering layer configuration\n#   dir: .context/steering\n#   default_inclusion: manual\n#   default_tools: []\n#\n# hooks:                # Hook system configuration\n#   dir: .context/hooks\n#   timeout: 10\n#   enabled: true\n#\n# provenance_required:  # Relax provenance flags for ctx add\n#   session_id: true    # Require --session-id (default: true)\n#   branch: true        # Require --branch (default: true)\n#   commit: true        # Require --commit (default: true)\n#\n# priority_order:\n#   - CONSTITUTION.md\n#   - TASKS.md\n#   - CONVENTIONS.md\n#   - ARCHITECTURE.md\n#   - DECISIONS.md\n#   - LEARNINGS.md\n#   - GLOSSARY.md\n#   - AGENT_PLAYBOOK.md\n
    ","path":["Home","Concepts","Configuration"],"tags":[]},{"location":"home/configuration/#option-reference","level":3,"title":"Option Reference","text":"Option Type Default Description context_dirstring.context Context directory name (relative to project root) token_budgetint8000 Default token budget for ctx agent and ctx loadauto_archivebooltrue Auto-archive completed tasks during ctx compactarchive_after_daysint7 Days before completed tasks are archived scratchpad_encryptbooltrue Encrypt scratchpad with AES-256-GCM allow_outside_cwdboolfalse Allow context directory outside the current working directory event_logboolfalse Enable local hook event logging to .context/state/events.jsonlentry_count_learningsint30 Drift warning when LEARNINGS.md exceeds this entry count (0 = disable) entry_count_decisionsint20 Drift warning when DECISIONS.md exceeds this entry count (0 = disable) convention_line_countint200 Drift warning when CONVENTIONS.md exceeds this line count (0 = disable) injection_token_warnint15000 Warn when auto-injected context exceeds this token count (0 = disable) context_windowint200000 Context window size in tokens. Auto-detected for Claude Code (200k/1M); override for other AI tools billing_token_warnint0 (off) One-shot warning when session tokens exceed this threshold (0 = disabled). For plans where tokens beyond an included allowance cost extra stale_age_daysint30 Days before ctx drift flags a context file as stale (0 = disable) key_rotation_daysint90 Days before encryption key rotation nudge task_nudge_intervalint5 Edit/Write calls between task completion nudges notify.events[]string (all) Event filter for webhook notifications (empty = all) priority_order[]string (see below) Custom file loading priority for context assembly toolstring (empty) Active AI tool identifier (claude, cursor, cline, kiro, codex). 
Used by steering sync and hook dispatch steering.dirstring.context/steering Steering files directory steering.default_inclusionstringmanual Default inclusion mode for new steering files (always, auto, manual) steering.default_tools[]string (all) Default tool filter for new steering files (empty = all tools) hooks.dirstring.context/hooks Hook scripts directory hooks.timeoutint10 Per-hook execution timeout in seconds hooks.enabledbooltrue Whether hook execution is enabled provenance_required.session_idbooltrue Require --session-id on ctx add for tasks, decisions, learnings provenance_required.branchbooltrue Require --branch on ctx add for tasks, decisions, learnings provenance_required.commitbooltrue Require --commit on ctx add for tasks, decisions, learnings

    Default priority order (used when priority_order is not set):

    1. CONSTITUTION.md
    2. TASKS.md
    3. CONVENTIONS.md
    4. ARCHITECTURE.md
    5. DECISIONS.md
    6. LEARNINGS.md
    7. GLOSSARY.md
    8. AGENT_PLAYBOOK.md

    See Context Files for the rationale behind this ordering.

    ","path":["Home","Concepts","Configuration"],"tags":[]},{"location":"home/configuration/#environment-variables","level":2,"title":"Environment Variables","text":"

    Environment variables override .ctxrc values but are overridden by CLI flags.

    Variable Description Equivalent .ctxrc key CTX_DIR Override the context directory path context_dirCTX_TOKEN_BUDGET Override the default token budget token_budget","path":["Home","Concepts","Configuration"],"tags":[]},{"location":"home/configuration/#examples","level":3,"title":"Examples","text":"
    # Use a shared context directory\nCTX_DIR=/shared/team-context ctx status\n\n# Increase token budget for a single run\nCTX_TOKEN_BUDGET=16000 ctx agent\n
    ","path":["Home","Concepts","Configuration"],"tags":[]},{"location":"home/configuration/#cli-global-flags","level":2,"title":"CLI Global Flags","text":"

    CLI flags have the highest priority and override both environment variables and .ctxrc settings. These flags are available on every ctx command.

    Flag Description --context-dir <path> Override context directory (default: .context/) --allow-outside-cwd Allow context directory outside current working directory --tool <name> Override active AI tool identifier (e.g. kiro, cursor) --version Show version and exit --help Show command help and exit","path":["Home","Concepts","Configuration"],"tags":[]},{"location":"home/configuration/#examples_1","level":3,"title":"Examples","text":"
    # Point to a different context directory:\nctx status --context-dir /path/to/shared/.context\n\n# Allow external context directory (skips boundary check):\nctx status --context-dir /mnt/nas/project-context --allow-outside-cwd\n
    ","path":["Home","Concepts","Configuration"],"tags":[]},{"location":"home/configuration/#priority-order","level":2,"title":"Priority Order","text":"

    When the same setting is configured in multiple layers, the highest-priority layer wins:

    CLI flags  >  Environment variables  >  .ctxrc  >  Built-in defaults\n(highest)                                          (lowest)\n

    Example resolution for context_dir:

    Layer Value Wins? --context-dir/tmp/ctx Yes CTX_DIR/shared/context No .ctxrc.my-context No Default .context No

    The CLI flag /tmp/ctx is used because it has the highest priority.

    If the CLI flag were absent, CTX_DIR=/shared/context would win. If neither the flag nor the env var were set, the .ctxrc value .my-context would be used. With nothing configured, the default .context applies.

    ","path":["Home","Concepts","Configuration"],"tags":[]},{"location":"home/configuration/#examples_2","level":2,"title":"Examples","text":"","path":["Home","Concepts","Configuration"],"tags":[]},{"location":"home/configuration/#external-context-directory","level":3,"title":"External .context Directory","text":"

    Store context outside the project tree (useful for monorepos or shared context):

    # .ctxrc\ncontext_dir: /home/team/shared-context\nallow_outside_cwd: true\n
    ","path":["Home","Concepts","Configuration"],"tags":[]},{"location":"home/configuration/#custom-token-budget","level":3,"title":"Custom Token Budget","text":"

    Increase the token budget for projects with large context:

    # .ctxrc\ntoken_budget: 16000\n

    This affects the default budget for ctx agent and ctx load. You can still override per-invocation with ctx agent --budget 4000.

    ","path":["Home","Concepts","Configuration"],"tags":[]},{"location":"home/configuration/#disabled-scratchpad-encryption","level":3,"title":"Disabled Scratchpad Encryption","text":"

    Turn off encryption for the scratchpad (useful in ephemeral environments where key management is unnecessary):

    # .ctxrc\nscratchpad_encrypt: false\n

    Unencrypted Scratchpads Store Secrets in Plaintext

    Only disable encryption if you understand the security implications.

    The scratchpad may contain sensitive data such as API keys, database URLs, or deployment credentials.

    ","path":["Home","Concepts","Configuration"],"tags":[]},{"location":"home/configuration/#custom-priority-order","level":3,"title":"Custom Priority Order","text":"

    Reorder context files to prioritize architecture over conventions:

    # .ctxrc\npriority_order:\n  - CONSTITUTION.md\n  - TASKS.md\n  - ARCHITECTURE.md\n  - DECISIONS.md\n  - CONVENTIONS.md\n  - LEARNINGS.md\n  - GLOSSARY.md\n  - AGENT_PLAYBOOK.md\n

    Files not listed in priority_order receive the lowest priority (100). The order affects ctx agent, ctx load, and drift's file-priority calculations.

    ","path":["Home","Concepts","Configuration"],"tags":[]},{"location":"home/configuration/#billing-token-threshold","level":3,"title":"Billing Token Threshold","text":"

    Get a one-shot warning when your session crosses a token threshold where extra charges begin (e.g., Claude Pro includes 200k tokens; beyond that costs extra):

    # .ctxrc\nbilling_token_warn: 180000   # warn before hitting the 200k paid boundary\n

    The warning fires once per session the first time token usage exceeds the threshold. Set to 0 (or omit) to disable.

    ","path":["Home","Concepts","Configuration"],"tags":[]},{"location":"home/configuration/#adjusted-drift-thresholds","level":3,"title":"Adjusted Drift Thresholds","text":"

    Raise or lower the entry-count thresholds that trigger drift warnings:

    # .ctxrc\nentry_count_learnings: 50   # warn above 50 learnings (default: 30)\nentry_count_decisions: 10   # warn above 10 decisions (default: 20)\nconvention_line_count: 300  # warn above 300 lines (default: 200)\n

    Set any threshold to 0 to disable that specific check.

    ","path":["Home","Concepts","Configuration"],"tags":[]},{"location":"home/configuration/#webhook-notifications","level":3,"title":"Webhook Notifications","text":"

    Get notified when loops complete, hooks fire, or agents reach milestones:

    # Configure the webhook URL (encrypted, safe to commit)\nctx hook notify setup\n\n# Test delivery\nctx hook notify test\n

    Filter which events reach your webhook:

    # .ctxrc\nnotify:\n  events:\n    - loop      # loop completion/max-iteration\n    - nudge     # VERBATIM relay hooks fired\n    # - relay   # all hook output (verbose, for debugging)\n    # - heartbeat  # every-prompt session-alive signal\n

    Notifications are opt-in: No events are sent unless explicitly listed.

    See Webhook Notifications for a step-by-step recipe.

    ","path":["Home","Concepts","Configuration"],"tags":[]},{"location":"home/configuration/#hook-message-overrides","level":2,"title":"Hook Message Overrides","text":"

    Hook messages control what text hooks emit when they fire. Each message can be overridden per-project by placing a text file at the matching path under .context/:

    .context/hooks/messages/{hook}/{variant}.txt\n

    The override takes priority over the embedded default compiled into the ctx binary. An empty file silences the message while preserving the hook's logic (counting, state tracking, cooldowns).

    Use ctx hook message to discover and manage overrides:

    ctx hook message list                      # see all messages\nctx hook message show qa-reminder gate     # view the current template\nctx hook message edit qa-reminder gate     # copy default for editing\nctx hook message reset qa-reminder gate    # revert to default\n

    See Customizing Hook Messages for detailed examples including Python, JavaScript, and silence configurations.

    ","path":["Home","Concepts","Configuration"],"tags":[]},{"location":"home/configuration/#agent-bootstrapping","level":2,"title":"Agent Bootstrapping","text":"

    AI agents need to know the resolved context directory at session start. The ctx system bootstrap command prints the context path, file list, and operating rules in both text and JSON formats:

    ctx system bootstrap          # text output for agents\nctx system bootstrap -q       # just the context directory path\nctx system bootstrap --json   # structured output for automation\n

    The CLAUDE.md template instructs the agent to run this as its first action. Every nudge (context checkpoint, persistence reminder, etc.) also includes a Context: <dir> footer that re-anchors the agent to the correct directory throughout the session.

    This replaces the previous approach of hardcoding .context/ paths in agent instructions.

    See CLI Reference: bootstrap for full details.

    See also: CLI Reference | Context Files | Scratchpad

    ","path":["Home","Concepts","Configuration"],"tags":[]},{"location":"home/context-files/","level":1,"title":"Context Files","text":"","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#context","level":2,"title":".context/","text":"

    Each context file in .context/ serves a specific purpose.

    Files are designed to be human-readable, AI-parseable, and token-efficient.

    ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#file-overview","level":2,"title":"File Overview","text":"

    The core context files live directly under .context/. They are the substrate ctx reads in priority order when assembling the agent context packet:

    File Purpose Priority CONSTITUTION.md Hard rules that must NEVER be violated 1 (highest) TASKS.md Current and planned work 2 CONVENTIONS.md Project patterns and standards 3 ARCHITECTURE.md System overview and components 4 DECISIONS.md Architectural decisions with rationale 5 LEARNINGS.md Lessons learned, gotchas, tips 6 GLOSSARY.md Domain terms and abbreviations 7 AGENT_PLAYBOOK.md Instructions for AI tools 8 (lowest)

    Two subdirectories under .context/ are implementation details that are user-editable but not part of the priority read order:

    • .context/templates/ — format templates for ctx add decision and ctx add learning. See templates below.
    • .context/steering/ — behavioral rules with YAML frontmatter that get synced into each AI tool's native config. See steering below, and the full Steering files page for the design and workflow.
    ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#outside-context","level":3,"title":"Outside .context/","text":"

    Two other moving parts are often confused with context files but are not under .context/:

    • Skills live in .claude/skills/ (project-local) or are provided by the installed ctx plugin. A typical project doesn't see the plugin's skills at all — they ride with the plugin and are owned by its update cycle. See ctx skill and Skills reference.
    • Hooks are Claude Code PreToolUse/PostToolUse/UserPromptSubmit entries configured in .claude/settings.json or shipped by a plugin. The ctx plugin registers its own hooks automatically; a typical project does not author hooks by hand, and any local edits to plugin-owned hook files will be overridden on the next plugin update. If you need to customize behavior, edit your own project settings, not the plugin's files. See Hook sequence diagrams.
    ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#read-order-rationale","level":2,"title":"Read Order Rationale","text":"

    The priority order follows a logical progression for AI tools:

    1. CONSTITUTION.md: Inviolable rules first. The AI tool must know what it cannot do before attempting anything.
    2. TASKS.md: Current work items. What the AI tool should focus on.
    3. CONVENTIONS.md: How to write code. Patterns and standards to follow when implementing tasks.
    4. ARCHITECTURE.md: System structure. Understanding of components and boundaries before making changes.
    5. DECISIONS.md: Historical context. Why things are the way they are, to avoid re-debating settled decisions.
    6. LEARNINGS.md: Gotchas and tips. Lessons from past work that inform the current implementation.
    7. GLOSSARY.md: Reference material. Domain terms and abbreviations for lookup as needed.
    8. AGENT_PLAYBOOK.md: Meta instructions last. How to use this context system itself. Loaded last because the agent should understand the content (rules, tasks, patterns) before the operating manual.
    ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#constitutionmd","level":2,"title":"CONSTITUTION.md","text":"

    Purpose: Define hard invariants: Rules that must NEVER be violated, regardless of the task.

    AI tools read this first and should refuse tasks that violate these rules.

    ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#structure","level":3,"title":"Structure","text":"
    # Constitution\n\nThese rules are INVIOLABLE. If a task requires violating these, the task \nis wrong.\n\n## Security Invariants\n\n* [ ] Never commit secrets, tokens, API keys, or credentials\n* [ ] Never store customer/user data in context files\n* [ ] Never disable security linters without documented exception\n\n## Quality Invariants\n\n* [ ] All code must pass tests before commit\n* [ ] No `any` types in TypeScript without documented reason\n* [ ] No TODO comments in main branch (*move to `TASKS.md`*)\n\n## Process Invariants\n\n* [ ] All architectural changes require a decision record\n* [ ] Breaking changes require version bump\n* [ ] Generated files are never committed\n
    ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#guidelines","level":3,"title":"Guidelines","text":"
    • Keep rules minimal and absolute
    • Each rule should be enforceable (can verify compliance)
    • Use checkbox format for clarity
    • Never compromise on these rules
    ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#tasksmd","level":2,"title":"TASKS.md","text":"

    Purpose: Track current work, planned work, and blockers.

    ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#structure_1","level":3,"title":"Structure","text":"

    Tasks are organized by Phase: logical groupings that preserve order and enable replay.

    Tasks stay in their Phase permanently; status is tracked via checkboxes and inline tags.

    # Tasks\n\n## Phase 1: Initial Setup\n\n* [x] Set up project structure\n* [x] Configure linting and formatting\n* [ ] Add CI/CD pipeline `#in-progress`\n\n## Phase 2: Core Features\n\n* [ ] Implement user authentication `#priority:high`\n* [ ] Add API rate limiting `#priority:medium`\n  * Blocked by: Need to finalize auth first\n\n## Backlog\n\n* [ ] Performance optimization `#priority:low`\n* [ ] Add metrics dashboard `#priority:deferred`\n

    Key principles:

    • Tasks never move between sections: mark as [x] or [-] in place
    • Use #in-progress inline tag to indicate current work
    • Phase headers provide structure and replay order
    • Backlog section for unscheduled work
    ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#tags","level":3,"title":"Tags","text":"

    Use inline backtick-wrapped tags for metadata:

    Tag Values Purpose #priorityhigh, medium, low Task urgency #areacore, cli, docs, tests Codebase area #estimate1h, 4h, 1d Time estimate (optional) #in-progress (none) Currently being worked on

    Lifecycle tags (for session correlation):

    Tag Format When to add #addedYYYY-MM-DD-HHMMSS Auto-added by ctx add task#startedYYYY-MM-DD-HHMMSS When beginning work on the task

    These timestamps help correlate tasks with session files and track which session started vs completed work.

    ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#status-markers","level":3,"title":"Status Markers","text":"Marker Meaning [ ] Pending [x] Completed [-] Skipped (include reason)","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#guidelines_1","level":3,"title":"Guidelines","text":"
    • Never delete tasks; mark as [x] completed or [-] skipped
    • Never move tasks between sections; use inline tags for status
    • Use ctx task archive periodically to move completed tasks to archive
    • Mark current work with #in-progress inline tag
    ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#decisionsmd","level":2,"title":"DECISIONS.md","text":"

    Purpose: Record architectural decisions with rationale so they don't get re-debated.

    ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#structure_2","level":3,"title":"Structure","text":"
    # Decisions\n\n## [YYYY-MM-DD] Decision Title\n\n**Status**: Accepted | Superseded | Deprecated\n\n**Context**: What situation prompted this decision?\n\n**Decision**: What was decided?\n\n**Rationale**: Why was this the right choice?\n\n**Consequence**: What are the implications?\n\n**Alternatives Considered**:\n* Alternative A: Why rejected\n* Alternative B: Why rejected\n
    ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#example","level":3,"title":"Example","text":"
    ## [2025-01-15] Use TypeScript Strict Mode\n\n**Status**: Accepted\n\n**Context**: Starting a new project, need to choose the type-checking level.\n\n**Decision**: Enable TypeScript strict mode with all strict flags.\n\n**Rationale**: Catches more bugs at compile time. Team has experience\nwith strict mode. Upfront cost pays off in reduced runtime errors.\n\n**Consequence**: More verbose type annotations required. Some\nthird-party libraries need type assertions.\n\n**Alternatives Considered**:\n- Basic TypeScript: Rejected because it misses null checks\n- JavaScript with JSDoc: Rejected because tooling support is weaker\n
    ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#status-values","level":3,"title":"Status Values","text":"Status Meaning Accepted Current, active decision Superseded Replaced by newer decision (link to it) Deprecated No longer relevant","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#learningsmd","level":2,"title":"LEARNINGS.md","text":"

    Purpose: Capture lessons learned, gotchas, and tips that shouldn't be forgotten.

    ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#structure_3","level":3,"title":"Structure","text":"
    # Learnings\n\n## Category Name\n\n### Learning Title\n\n**Discovered**: YYYY-MM-DD\n\n**Context**: When/how was this learned?\n\n**Lesson**: What's the takeaway?\n\n**Application**: How should this inform future work?\n
    ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#example_1","level":3,"title":"Example","text":"
    ## Testing\n\n### Vitest Mocks Must Be Hoisted\n\n**Discovered**: 2025-01-15\n\n**Context**: Tests were failing intermittently when mocking fs module.\n\n**Lesson**: Vitest requires `vi.mock()` calls to be hoisted to the\ntop of the file. Dynamic mocks need `vi.doMock()` instead.\n\n**Application**: Always use `vi.mock()` at file top. Use `vi.doMock()`\nonly when mock needs runtime values.\n
    ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#categories","level":3,"title":"Categories","text":"

    Organize learnings by topic:

    • Testing
    • Build & Deploy
    • Performance
    • Security
    • Third-Party Libraries
    • Git and Workflow
    ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#conventionsmd","level":2,"title":"CONVENTIONS.md","text":"

    Purpose: Document project patterns, naming conventions, and standards.

    ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#structure_4","level":3,"title":"Structure","text":"
    # Conventions\n\n## Naming\n\n* **Files**: kebab-case for all source files\n* **Components**: PascalCase for React components\n* **Functions**: camelCase, verb-first (getUser, parseConfig)\n* **Constants**: SCREAMING_SNAKE_CASE\n\n## Patterns\n\n### Pattern Name\n\n**When to use**: Situation description\n\n**Implementation**:\n// in triple backticks\n// Example code\n\n**Why**: Rationale for this pattern\n
    ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#guidelines_2","level":3,"title":"Guidelines","text":"
    • Include concrete examples
    • Explain the \"why\" not just the \"what\"
    • Keep patterns minimal: Only document what's non-obvious
    ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#architecturemd","level":2,"title":"ARCHITECTURE.md","text":"

    Purpose: Provide system overview and component relationships.

    ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#structure_5","level":3,"title":"Structure","text":"
    # Architecture\n\n## Overview\n\nBrief description of what the system does and how it's organized.\n\n## Components\n\n### Component Name\n\n**Responsibility**: What this component does\n\n**Dependencies**: What it depends on\n\n**Dependents**: What depends on it\n\n**Key Files**:\n* path/to/file.ts: Description\n\n## Data Flow\n\nDescription or diagram of how data moves through the system.\n\n## Boundaries\n\nWhat's in scope vs out of scope for this codebase.\n
    ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#guidelines_3","level":3,"title":"Guidelines","text":"
    • Keep diagrams simple (Mermaid works well)
    • Focus on boundaries and interfaces
    • Update when major structural changes occur
    ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#glossarymd","level":2,"title":"GLOSSARY.md","text":"

    Purpose: Define domain terms, abbreviations, and project vocabulary.

    ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#structure_6","level":3,"title":"Structure","text":"
    # Glossary\n\n## Domain Terms\n\n### Term Name\n\n**Definition**: What it means in this project's context\n\n**Not to be confused with**: Similar terms that mean different things\n\n**Example**: How it's used\n\n## Abbreviations\n\n| Abbrev | Expansion                     | Context                |\n|--------|-------------------------------|------------------------|\n| ADR    | Architectural Decision Record | Decision documentation |\n| SUT    | System Under Test             | Testing                |\n
    ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#guidelines_4","level":3,"title":"Guidelines","text":"
    • Define project-specific meanings
    • Clarify potentially ambiguous terms
    • Include abbreviations used in code or docs
    ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#agent_playbookmd","level":2,"title":"AGENT_PLAYBOOK.md","text":"

    Purpose: Explicit instructions for how AI tools should read, apply, and update context.

    ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#key-sections","level":3,"title":"Key Sections","text":"

    Read Order: Priority order for loading context files

    When to Update: Events that trigger context updates

    How to Avoid Hallucinating Memory: Critical rules:

    1. Never assume: If not in files, you don't know it
    2. Never invent history: Don't claim \"we discussed\" without evidence
    3. Verify before referencing: Search files before citing
    4. When uncertain, say so
    5. Trust files over intuition

    Context Update Commands: Format for automated updates via ctx watch:

    <context-update type=\"task\">Implement rate limiting</context-update>\n<context-update type=\"complete\">user auth</context-update>\n<context-update type=\"learning\"\n  context=\"Debugging hooks\"\n  lesson=\"Hooks receive JSON via stdin\"\n  application=\"Parse JSON stdin with the host language\"\n>Hook Input Format</context-update>\n<context-update type=\"decision\"\n  context=\"Need a caching layer\"\n  rationale=\"Redis is fast and team has experience\"\n  consequence=\"Must provision Redis infrastructure\"\n>Use Redis for caching</context-update>\n

    See Integrations for full documentation.

    ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#templates","level":2,"title":"templates/","text":"

    Location: .context/templates/. Status: implementation detail, user-editable.

    Purpose: Format templates for ctx add decision and ctx add learning. These control the structure of new entries appended to DECISIONS.md and LEARNINGS.md.

    ctx init deploys two starter templates:

    • decision.md — sections: Context, Rationale, Consequence
    • learning.md — sections: Context, Lesson, Application
    ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#customizing","level":3,"title":"Customizing","text":"

    Edit the templates directly. Changes take effect immediately on the next ctx add command. For example, to add a \"References\" section to all new decisions, edit .context/templates/decision.md.

    Templates are committed to git, so customizations are shared with the team.

    ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#steering","level":2,"title":"steering/","text":"

    Location: .context/steering/. Status: implementation detail, user-editable.

    Purpose: Behavioral rules with YAML frontmatter that tell an AI assistant how to behave when a specific kind of prompt arrives. Unlike the core context files (which describe what the project is), steering files describe what to do and ride alongside the prompt through the AI tool's native rule pipeline (Claude Code, Cursor, Kiro, Cline). ctx matches steering files to prompts and syncs them out to each tool's config.

    ctx init scaffolds four foundation files:

    • product.md — who this project serves and why
    • tech.md — the technology stack and its constraints
    • structure.md — how the code is organized
    • workflow.md — how work moves through the system

    Each file carries YAML frontmatter describing when it applies (always, matching prompts, or manually referenced) and what tool scope it covers. The foundation files use inclusion: always by default so every session picks them up.

    ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#customizing_1","level":3,"title":"Customizing","text":"

    Edit the files directly. Add your own steering files with ctx steering add, preview the match set with ctx steering preview, and run ctx steering sync to push them into each AI tool's config after changes. Steering files are committed to git, so they're shared with the team.

    For the design rationale, the full inclusion/priority model, and the end-to-end sync workflow, see the dedicated Steering files page.

    ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#parsing-rules","level":2,"title":"Parsing Rules","text":"

    All context files follow these conventions:

    1. Headers define structure: # for title, ## for sections, ### for items
    2. Bold keys for fields: **Key**: followed by value
    3. Code blocks are literal: Never parse code block content as structure
    4. Lists are ordered: Items appear in priority/chronological order
    5. Tags are inline: Backtick-wrapped tags like #priority:high
    ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#further-reading","level":2,"title":"Further Reading","text":"
    • Refactoring with Intent: how persistent context prevents drift during refactoring sessions
    ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#token-efficiency","level":2,"title":"Token Efficiency","text":"

    Keep context files concise:

    • Use abbreviations in tags, not prose;
    • Omit obvious words (\"The,\" \"This\");
    • Prefer bullet points over paragraphs;
    • Keep examples minimal but illustrative;
    • Archive old completed items periodically.

    Next Up: Prompting Guide →: effective prompts for AI sessions with ctx

    ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/contributing/","level":1,"title":"Contributing","text":"","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#development-setup","level":2,"title":"Development Setup","text":"","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#prerequisites","level":3,"title":"Prerequisites","text":"
    • Go (version defined in go.mod)
    • Claude Code
    • Git
    • GNU Make
    • Zensical
    ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#1-fork-or-clone-the-repository","level":3,"title":"1. Fork (or Clone) the Repository","text":"
    # Fork on GitHub, then:\ngit clone https://github.com/<you>/ctx.git\ncd ctx\n\n# Or, if you have push access:\ngit clone https://github.com/ActiveMemory/ctx.git\ncd ctx\n
    ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#2-build-and-install-the-binary","level":3,"title":"2. Build and Install the Binary","text":"
    make build\nsudo make install\n

    This compiles the ctx binary and places it in /usr/local/bin/.

    ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#3-install-the-plugin-from-your-local-clone","level":3,"title":"3. Install the Plugin from Your Local Clone","text":"

    The repository ships a Claude Code plugin under internal/assets/claude/. Point Claude Code at your local copy so that skills and hooks reflect your working tree: no reinstall needed after edits:

    1. Launch claude;
    2. Type /plugin and press Enter;
    3. Select Marketplaces → Add Marketplace
    4. Enter the absolute path to the root of your clone, e.g. ~/WORKSPACE/ctx (this is where .claude-plugin/marketplace.json lives: it points Claude Code to the actual plugin in internal/assets/claude);
    5. Back in /plugin, select Install and choose ctx.

    Claude Code Caches Plugin Files

    Even though the marketplace points at a directory on disk, Claude Code caches skills and hooks. After editing files under internal/assets/claude/, clear the cache and restart:

    make plugin-reload   # then restart Claude Code\n

    See Skill or Hook Changes for details.

    ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#4-verify","level":3,"title":"4. Verify","text":"
    ctx --version       # binary is in PATH\nclaude /plugin list # plugin is installed\n

    You should see the ctx plugin listed, sourced from your local path.

    ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#project-layout","level":2,"title":"Project Layout","text":"
    ctx/\n├── cmd/ctx/            # CLI entry point\n├── internal/\n│   ├── assets/claude/  # ← Claude Code plugin (skills, hooks)\n│   ├── bootstrap/      # Project initialization templates\n│   ├── claude/         # Claude Code integration helpers\n│   ├── cli/            # Command implementations\n│   ├── config/         # Configuration loading\n│   ├── context/        # Core context logic\n│   ├── crypto/         # Scratchpad encryption\n│   ├── drift/          # Drift detection\n│   ├── index/          # Context file indexing\n│   ├── journal/        # Journal site generation; session history, parsers, and state\n│   ├── memory/         # Memory bridge (discover, mirror, import, publish)\n│   ├── notify/         # Webhook notifications\n│   ├── rc/             # .ctxrc parsing\n│   ├── sysinfo/        # System resource monitoring\n│   ├── task/           # Task management\n│   └── validation/     # Input validation\n├── .claude/\n│   └── skills/         # Dev-only skills (not distributed)\n├── assets/             # Static assets (banners, logos)\n├── docs/               # Documentation site source\n├── editors/            # Editor extensions (VS Code)\n├── examples/           # Example configurations\n├── hack/               # Build scripts\n├── specs/              # Feature specifications\n└── .context/           # ctx's own context (dogfooding)\n
    ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#skills-two-directories-one-rule","level":3,"title":"Skills: Two Directories, One Rule","text":"Directory What lives here Distributed to users? internal/assets/claude/skills/ The 39 ctx-* skills that ship with the plugin Yes .claude/skills/ Dev-only skills (release, QA, backup, etc.) No

    internal/assets/claude/skills/ is the single source of truth for user-facing skills. If you are adding or modifying a ctx-* skill, edit it there.

    .claude/skills/ holds skills that only make sense inside this repository (release automation, QA checks, backup scripts). These are never distributed to users.

    ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#dev-only-skills-reference","level":4,"title":"Dev-Only Skills Reference","text":"Skill When to use /_ctx-absorb Merge deltas from a parallel worktree or separate checkout /_ctx-audit Detect code-level drift after YOLO sprints or before releases /_ctx-backup Backup context and Claude data to SMB share /_ctx-qa Run QA checks before committing /_ctx-release Run the full release process /_ctx-release-notes Generate release notes for dist/RELEASE_NOTES.md /_ctx-alignment-audit Audit doc claims against agent instructions /_ctx-update-docs Check docs/code consistency after changes /_ctx-command-audit Audit CLI surface after renames, moves, or deletions

    Five skills previously in this list have been promoted to bundled plugin skills and are now available to all ctx users: /ctx-brainstorm, /ctx-link-check, /ctx-permission-sanitize, /ctx-skill-create, /ctx-spec.

    ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#how-to-add-things","level":2,"title":"How to Add Things","text":"","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#adding-a-new-cli-command","level":3,"title":"Adding a New CLI Command","text":"
    1. Create a package under internal/cli/<name>/ with doc.go, cmd.go, and run.go;
    2. Implement Cmd() *cobra.Command as the entry point;
    3. Add Use* and DescKey* constants in internal/config/embed/cmd/<name>.go;
    4. Add command descriptions in internal/assets/commands/commands.yaml;
    5. Add examples in internal/assets/commands/examples.yaml;
    6. Add flag descriptions in internal/assets/commands/flags.yaml;
    7. Register the command in internal/bootstrap/group.go (add import + entry in the appropriate group function);
    8. Create an output package at internal/write/<name>/ for all user-facing output (see Package Taxonomy);
    9. Create error constructors at internal/err/<name>/ for domain-specific errors;
    10. Add tests in the same package (<name>_test.go);
    11. Add a doc page at docs/cli/<name>.md and update docs/cli/index.md;
    12. Add the page to zensical.toml nav.

    Pattern to follow: internal/cli/pad/pad.go (parent with subcommands) or internal/cli/drift/ (single command).

    ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#package-taxonomy","level":3,"title":"Package Taxonomy","text":"

    ctx separates concerns into a strict package taxonomy. Knowing where things go prevents code review friction and keeps the AST lint tests happy.

    ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#output-internalwrite","level":4,"title":"Output: internal/write/","text":"

    Every CLI command's user-facing output lives in its own sub-package under internal/write/<domain>/. Output functions accept *cobra.Command and call cmd.Println(...) — never fmt.Print* directly. All text strings are loaded from YAML via desc.Text(text.DescKey*), never inline.

    internal/write/add/add.go       # output for ctx add\ninternal/write/stat/stat.go     # output for ctx usage\ninternal/write/resource/        # output for ctx sysinfo\n

    Exception: write/rc/ writes to os.Stderr because rc loads before cobra is initialized.

    ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#errors-internalerr","level":4,"title":"Errors: internal/err/","text":"

    Domain-specific error constructors live under internal/err/<domain>/. Each package mirrors the write structure. Functions return error (never custom error types) and load messages from YAML via desc.Text(text.DescKey*).

    internal/err/add/add.go         # errors for ctx add\ninternal/err/config/config.go   # errors for configuration\ninternal/err/cli/cli.go         # errors for CLI argument validation\n
    ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#config-constants-internalconfig","level":4,"title":"Config Constants: internal/config/","text":"

    Pure-constant leaf packages with zero internal dependencies (stdlib only). Over 60 sub-packages, organized by domain. See internal/config/README.md for the full decision tree.

    What you're adding Where it goes File names, extensions, paths config/file/, config/dir/ Regex patterns config/regex/ CLI flag names (--flag-name) config/flag/flag.go Flag description YAML keys config/embed/flag/<cmd>.go Command Use/DescKey strings config/embed/cmd/<cmd>.go User-facing text YAML keys config/embed/text/<domain>.go Time durations, thresholds config/<domain>/","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#the-assets-pipeline","level":4,"title":"The Assets Pipeline","text":"

    User-facing text flows through a three-level chain:

    1. Go constant (config/embed/text/) defines a string key: DescKeyWriteAddedTo = \"write.added-to\"
    2. Call site resolves it: desc.Text(text.DescKeyWriteAddedTo)
    3. YAML (internal/assets/commands/text/write.yaml) holds the actual text: write.added-to: { short: \"Added to %s\" }

    The same pattern applies to command descriptions (commands.yaml), flag descriptions (flags.yaml), and examples (examples.yaml). The TestDescKeyYAMLLinkage test verifies every constant resolves to a non-empty YAML value.

    ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#adding-a-new-session-parser","level":3,"title":"Adding a New Session Parser","text":"

    The journal system uses a SessionParser interface. To add support for a new AI tool (e.g. Aider, Cursor):

    1. Create internal/journal/parser/<tool>.go;
    2. Implement parsing logic that returns []*Session;
    3. Register the parser in FindSessions() / FindSessionsForCWD();
    4. Use config.Tool* constants for the tool identifier;
    5. Add test fixtures and parser tests.

    Pattern to follow: the Claude Code JSONL parser in internal/journal/parser/.

    Multilingual Session Headers

    The Markdown parser recognizes session header prefixes configured via session_prefixes in .ctxrc (default: Session:). To support a new language, users add a prefix to their .ctxrc - no code change needed. New parser implementations can use rc.SessionPrefixes() if they also need prefix-based header detection.

    ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#adding-a-bundled-skill","level":3,"title":"Adding a Bundled Skill","text":"
    1. Create internal/assets/claude/skills/<skill-name>/SKILL.md;
    2. Follow the skill format: trigger, negative triggers, steps, quality gate;
    3. Run make plugin-reload and restart Claude Code to test;
    4. Add a Skill entry to .claude-plugin/plugin.json if user-invocable;
    5. Document in docs/reference/skills.md.

    Pattern to follow: any skill in internal/assets/claude/skills/ctx-status/.

    ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#test-expectations","level":3,"title":"Test Expectations","text":"
    • Unit tests: colocated with source (foo.go → foo_test.go);
    • Test helpers: use t.Helper() so failures point to callers;
    • HOME isolation: use t.TempDir() + t.Setenv(\"HOME\", ...) for tests that touch ~/.claude/ or ~/.ctx/;
    • rc.Reset(): call after os.Chdir in tests that change working directory (rc caches on first access);
    • No network: all tests run offline, use fixtures.

    Run make test before submitting. Target: no failures, no skips.

    ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#day-to-day-workflow","level":2,"title":"Day-to-Day Workflow","text":"","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#go-code-changes","level":3,"title":"Go Code Changes","text":"

    After modifying Go source files, rebuild and reinstall:

    make build && sudo make install\n

    The ctx binary is statically compiled. There is no hot reload. You must rebuild for Go changes to take effect.

    ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#skill-or-hook-changes","level":3,"title":"Skill or Hook Changes","text":"

    Edit files under internal/assets/claude/skills/ or internal/assets/claude/hooks/.

    Claude Code caches plugin files, so edits aren't picked up automatically.

    Clear the cache and restart:

    make plugin-reload   # nukes ~/.claude/plugins/cache/activememory-ctx/\n# then restart Claude Code\n

    The plugin will be re-installed from your local marketplace on startup. No version bump is needed during development.

    Version Bumps Are for Releases, Not Iteration

    Only bump VERSION, plugin.json, and marketplace.json when cutting a release. During development, make plugin-reload is all you need.

    ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#configuration-profiles","level":3,"title":"Configuration Profiles","text":"

    The repo ships two .ctxrc source profiles. The working copy (.ctxrc) is gitignored and swapped between them:

    File Purpose .ctxrc.base Golden baseline: all defaults, no logging .ctxrc.dev Dev profile: notify events enabled, verbose logging .ctxrc Working copy (gitignored: copied from one of the above)

    Use ctx commands to switch:

    ctx config switch dev      # switch to dev profile\nctx config switch base     # switch to base profile\nctx config status          # show which profile is active\n

    After cloning, run ctx config switch dev to get started with full logging.

    See Configuration for the full .ctxrc option reference.

    ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#backups","level":3,"title":"Backups","text":"

    Back up project context and global Claude Code data with:

    ctx backup                    # both project + global (default)\nctx backup --scope project    # .context/, .claude/, ideas/ only\nctx backup --scope global     # ~/.claude/ only\n

    Archives are saved to /tmp/. When CTX_BACKUP_SMB_URL is configured, they are also copied to an SMB share. See CLI Reference: backup for details.

    ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#running-tests","level":3,"title":"Running Tests","text":"
    make test   # fast: all tests\nmake audit  # full: fmt + vet + lint + drift + docs + test\nmake smoke  # build + run basic commands end-to-end\n
    ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#running-the-docs-site-locally","level":3,"title":"Running the Docs Site Locally","text":"
    make site-setup  # one-time: install zensical via pipx\nmake site-serve  # serve at localhost\n
    ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#submitting-changes","level":2,"title":"Submitting Changes","text":"","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#before-you-start","level":3,"title":"Before You Start","text":"
    1. Check existing issues to avoid duplicating effort;
    2. For large changes, open an issue first to discuss the approach;
    3. Read the specs in specs/ for design context.
    ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#pull-request-process","level":3,"title":"Pull Request Process","text":"

    Respect the maintainers' time and energy: Keep your pull requests isolated and strive to minimize code changes.

    If your Pull Request solves more than one distinct issue, it's better to create separate pull requests instead of sending them in one large bundle.

    1. Create a feature branch: git checkout -b feature/my-feature;
    2. Make your changes;
    3. Run make audit to catch issues early;
    4. Commit with a clear message;
    5. Push and open a pull request.

    Audit Your Code Before Submitting

    Run make audit before submitting:

    make audit covers formatting, vetting, linting, drift checks, doc consistency, and tests in one pass.

    ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#commit-messages","level":3,"title":"Commit Messages","text":"

    Following conventional commits is recommended but not required:

    Types: feat, fix, docs, test, refactor, chore

    Examples:

    • feat(cli): add ctx export command
    • fix(drift): handle missing files gracefully
    • docs: update installation instructions
    ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#code-style","level":3,"title":"Code Style","text":"
    • Follow Go conventions (gofmt, go vet);
    • Keep functions focused and small;
    • Add tests for new functionality;
    • Handle errors explicitly — use descriptive names (readErr, writeErr) not repeated err;
    • No magic strings — all repeated literals go in internal/config/;
    • Output goes through internal/write/ packages, not fmt.Print*;
    • Errors go through internal/err/ constructors, not inline fmt.Errorf;
    • See Package Taxonomy and .context/CONVENTIONS.md for the full reference.
    ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#code-of-conduct","level":2,"title":"Code of Conduct","text":"

    A clear context requires respectful collaboration.

    ctx follows the Contributor Covenant.

    ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#boring-legal-stuff","level":2,"title":"Boring Legal Stuff","text":"","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#developer-certificate-of-origin-dco","level":3,"title":"Developer Certificate of Origin (DCO)","text":"

    By contributing, you agree to the Developer Certificate of Origin.

    All commits must be signed off:

    git commit -s -m \"feat: add new feature\"\n
    ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#license","level":3,"title":"License","text":"

    Contributions are licensed under the Apache 2.0 License.

    ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/faq/","level":1,"title":"FAQ","text":"","path":["Home","Introduction","FAQ"],"tags":[]},{"location":"home/faq/#why-markdown","level":2,"title":"Why Markdown?","text":"

    Markdown is human-readable, version-controllable, and tool-agnostic. Every AI model can parse it natively. Every developer can read it in a terminal, a browser, or a code review. There's no schema to learn, no binary format to decode, no vendor lock-in. You can inspect your context with cat, diff it with git diff, and review it in a PR.

    ","path":["Home","Introduction","FAQ"],"tags":[]},{"location":"home/faq/#does-ctx-work-offline","level":2,"title":"Does ctx Work Offline?","text":"

    Yes. ctx is completely local. It reads and writes files on disk, generates context packets from local state, and requires no network access. The only feature that touches the network is the optional webhook notifications hook, which you have to explicitly configure.

    ","path":["Home","Introduction","FAQ"],"tags":[]},{"location":"home/faq/#what-gets-committed-to-git","level":2,"title":"What Gets Committed to Git?","text":"

    The .context/ directory: yes, commit it. That's the whole point. Team members and AI agents read the same context files.

    What not to commit:

    • .ctx.key: your encryption key. Stored at ~/.ctx/.ctx.key, never in the repo. ctx init handles this automatically.
    • journal/ and logs/: generated data, potentially large. ctx init adds these to .gitignore.
    • scratchpad.enc: your choice. It's encrypted, so it's safe to commit if you want shared scratchpad state. See Scratchpad for details.
    ","path":["Home","Introduction","FAQ"],"tags":[]},{"location":"home/faq/#how-big-should-my-token-budget-be","level":2,"title":"How Big Should My Token Budget Be?","text":"

    The default is 8000 tokens, which works well for most projects. Configure it via .ctxrc or the CTX_TOKEN_BUDGET environment variable:

    # In .ctxrc\ntoken_budget = 12000\n\n# Or as an environment variable\nexport CTX_TOKEN_BUDGET=12000\n\n# Or per-invocation\nctx agent --budget 4000\n

    Higher budgets include more context but cost more tokens per request. Lower budgets force sharper prioritization: ctx drops lower-priority content first, so CONSTITUTION and TASKS always make the cut.

    See Configuration for all available settings.

    ","path":["Home","Introduction","FAQ"],"tags":[]},{"location":"home/faq/#why-not-a-database","level":2,"title":"Why Not a Database?","text":"

    Files are inspectable, diffable, and reviewable in pull requests. You can grep them, cat them, pipe them through jq or awk. They work with every version control system and every text editor.

    A database would add a dependency, require migrations, and make context opaque. The design bet is that context should be as visible and portable as the code it describes.

    ","path":["Home","Introduction","FAQ"],"tags":[]},{"location":"home/faq/#does-it-work-with-tools-other-than-claude-code","level":2,"title":"Does It Work with Tools Other than Claude Code?","text":"

    Yes. ctx agent outputs a context packet that any AI tool can consume: paste it into ChatGPT, Cursor, Copilot, Aider, or anything else that accepts text input.

    Claude Code gets first-class integration via the ctx plugin (hooks, skills, automatic context loading). VS Code Copilot Chat has a dedicated ctx extension. Other tools integrate via generated instruction files or manual pasting.

    See Integrations for tool-specific setup, including the multi-tool recipe.

    ","path":["Home","Introduction","FAQ"],"tags":[]},{"location":"home/faq/#can-i-use-ctx-on-an-existing-project","level":2,"title":"Can I Use ctx on an Existing Project?","text":"

    Yes. Run ctx init in any repo and it creates .context/ with template files. Start recording decisions, tasks, and conventions as you work. Context grows naturally; you don't need to backfill everything on day one.

    See Getting Started for the full setup flow, or Joining a ctx Project if someone else already initialized it.

    ","path":["Home","Introduction","FAQ"],"tags":[]},{"location":"home/faq/#what-happens-when-context-files-get-too-big","level":2,"title":"What Happens When Context Files Get Too Big?","text":"

    Token budgeting handles this automatically. ctx agent prioritizes content by file priority (CONSTITUTION first, GLOSSARY last) and trims lower-priority entries when the budget is tight.

    For manual maintenance, ctx compact archives completed tasks and old entries, keeping active context lean. You can also run ctx task archive to move completed tasks out of TASKS.md.

    The goal is to keep context files focused on current state. Historical entries belong in git history or the archive.

    ","path":["Home","Introduction","FAQ"],"tags":[]},{"location":"home/faq/#is-context-meant-to-be-shared","level":2,"title":"Is .context/ Meant to Be Shared?","text":"

    Yes. Commit it to your repo. Every team member and every AI agent reads the same files. That's the mechanism for shared memory: decisions made in one session are visible in the next, regardless of who (or what) starts it.

    The only per-user state is the encryption key (~/.ctx/.ctx.key) and the optional scratchpad. Everything else is team-shared by design.

    Related:

    • Getting Started - installation and first setup
    • Configuration - .ctxrc, environment variables, and defaults
    • Context Files - what each file does and how to use it
    ","path":["Home","Introduction","FAQ"],"tags":[]},{"location":"home/first-session/","level":1,"title":"Your First Session","text":"

    Here's what a complete first session looks like, from initialization to the moment your AI cites your project context back to you.

    ","path":["Home","Get Started","Your First Session"],"tags":[]},{"location":"home/first-session/#step-1-initialize-your-project","level":2,"title":"Step 1: Initialize Your Project","text":"

    Run ctx init in your project root:

    cd your-project\nctx init\n

    Sample output:

    Context initialized in .context/\n\n  ✓ CONSTITUTION.md\n  ✓ TASKS.md\n  ✓ DECISIONS.md\n  ✓ LEARNINGS.md\n  ✓ CONVENTIONS.md\n  ✓ ARCHITECTURE.md\n  ✓ GLOSSARY.md\n  ✓ AGENT_PLAYBOOK.md\n\nSetting up encryption key...\n  ✓ ~/.ctx/.ctx.key\n\nClaude Code plugin (hooks + skills):\n  Install: claude /plugin marketplace add ActiveMemory/ctx\n  Then:    claude /plugin install ctx@activememory-ctx\n\nNext steps:\n  1. Edit .context/TASKS.md to add your current tasks\n  2. Run 'ctx status' to see context summary\n  3. Run 'ctx agent' to get AI-ready context packet\n

    This created your .context/ directory with template files.

    For Claude Code, install the ctx plugin to get automatic hooks and skills.

    ","path":["Home","Get Started","Your First Session"],"tags":[]},{"location":"home/first-session/#step-2-populate-your-context","level":2,"title":"Step 2: Populate Your Context","text":"

    Add a task and a decision: These are the entries your AI will remember:

    ctx add task \"Implement user authentication\" \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n\n# Output: ✓ Added to TASKS.md\n\nctx add decision \"Use PostgreSQL for primary database\" \\\n  --context \"Need a reliable database for production\" \\\n  --rationale \"PostgreSQL offers ACID compliance and JSON support\" \\\n  --consequence \"Team needs PostgreSQL training\" \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n\n# Output: ✓ Added to DECISIONS.md\n

    These entries are what the AI will recall in future sessions. You don't need to populate everything now: Context grows naturally as you work.

    ","path":["Home","Get Started","Your First Session"],"tags":[]},{"location":"home/first-session/#step-3-check-your-context","level":2,"title":"Step 3: Check Your Context","text":"
    ctx status\n

    Sample output:

    Context Status\n====================\n\nContext Directory: .context/\nTotal Files: 8\nToken Estimate: 1,247 tokens\n\nFiles:\n  ✓ CONSTITUTION.md (loaded)\n  ✓ TASKS.md (1 items)\n  ✓ DECISIONS.md (1 items)\n  ○ LEARNINGS.md (empty)\n  ✓ CONVENTIONS.md (loaded)\n  ✓ ARCHITECTURE.md (loaded)\n  ✓ GLOSSARY.md (loaded)\n  ✓ AGENT_PLAYBOOK.md (loaded)\n\nRecent Activity:\n  - TASKS.md modified 2 minutes ago\n  - DECISIONS.md modified 1 minute ago\n

    Notice the token estimate: This is how much context your AI will load.

    The ○ next to LEARNINGS.md means it's still empty; it will fill in as you capture lessons during development.

    ","path":["Home","Get Started","Your First Session"],"tags":[]},{"location":"home/first-session/#step-4-start-an-ai-session","level":2,"title":"Step 4: Start an AI Session","text":"

    With Claude Code (and the ctx plugin), start every session with:

    /ctx-remember\n

    This loads your context and presents a structured readback so you can confirm the agent knows what is going on. Context also loads automatically via hooks, but the explicit ceremony gives you a readback to verify.

    Steering Files Fire Automatically

    If you edited the four foundation files scaffolded by ctx init (.context/steering/product.md, tech.md, structure.md, workflow.md), their inclusion: always rules are prepended to every tool call via the plugin's PreToolUse hook — no /ctx-remember needed, no MCP call. Edit a file, save, and the next tool call in Claude Code picks it up. See Steering files for details on the inclusion modes.

    Using VS Code?

    With VS Code Copilot Chat (and the ctx extension), type @ctx /agent in chat to load your context packet, or @ctx /status to check your project context. Run ctx setup copilot --write once to generate .github/copilot-instructions.md for automatic context loading.

    If you are not using Claude Code, generate a context packet for your AI tool:

    ctx agent --budget 8000\n

    Sample output:

    # Context Packet\nGenerated: 2026-02-14T15:30:45Z | Budget: 8000 tokens | Used: ~2450\n\n## Read These Files (in order)\n1. .context/CONSTITUTION.md\n2. .context/TASKS.md\n3. .context/CONVENTIONS.md\n...\n\n## Current Tasks\n- [ ] Implement user authentication\n- [ ] Add rate limiting to API endpoints\n\n## Key Conventions\n- Use gofmt for formatting\n- Path construction uses filepath.Join\n\n## Recent Decisions\n## [2026-02-14-120000] Use PostgreSQL for the primary database\n\n**Context**: Evaluated PostgreSQL, MySQL, and SQLite...\n**Rationale**: PostgreSQL offers better JSON support...\n\n## Key Learnings\n## [2026-02-14-100000] Connection pool sizing matters\n\n**Context**: Hit connection limits under load...\n**Lesson**: Default pool size of 10 is too low for concurrent requests...\n\n## Also Noted\n- Use JWT for session management\n- Always validate input at API boundary\n

    Paste this output into your AI tool's system prompt or conversation start.

    ","path":["Home","Get Started","Your First Session"],"tags":[]},{"location":"home/first-session/#step-5-verify-it-works","level":2,"title":"Step 5: Verify It Works","text":"

    Ask your AI: \"What are our current tasks?\"

    A working setup produces a response like:

    Based on the project context, you have one active task:\n\n- **Implement user authentication** (pending)\n\nThere's also a recent architectural decision to **use PostgreSQL for\nthe primary database**, chosen for its ACID compliance and JSON support.\n\nWant me to start on the authentication task?\n

    That's the success moment:

    The AI is citing your exact context entries from Step 2, not hallucinating or asking you to re-explain.

    ","path":["Home","Get Started","Your First Session"],"tags":[]},{"location":"home/first-session/#what-gets-created","level":2,"title":"What Gets Created","text":"
    .context/\n├── CONSTITUTION.md     # Hard rules: NEVER violate these\n├── TASKS.md            # Current and planned work\n├── CONVENTIONS.md      # Project patterns and standards\n├── ARCHITECTURE.md     # System overview\n├── DECISIONS.md        # Architectural decisions with rationale\n├── LEARNINGS.md        # Lessons learned, gotchas, tips\n├── GLOSSARY.md         # Domain terms and abbreviations\n└── AGENT_PLAYBOOK.md   # How AI tools should use this\n

    Claude Code integration (hooks + skills) is provided by the ctx plugin: See Integrations/Claude Code.

    VS Code Copilot Chat integration is provided by the ctx extension: See Integrations/VS Code.

    See Context Files for detailed documentation of each file.

    ","path":["Home","Get Started","Your First Session"],"tags":[]},{"location":"home/first-session/#what-to-gitignore","level":2,"title":"What to .gitignore","text":"

    Rule of Thumb

    • If it's knowledge (decisions, tasks, learnings, conventions), commit it.
    • If it's generated output, raw session data, or a secret, .gitignore it.

    Commit your .context/ knowledge files: that's the whole point.

    You should .gitignore the generated and sensitive paths:

    # Journal data (large, potentially sensitive)\n.context/journal/\n.context/journal-site/\n.context/journal-obsidian/\n\n# Hook logs (machine-specific)\n.context/logs/\n\n# Legacy encryption key path (copy to ~/.ctx/.ctx.key if needed)\n.context/.ctx.key\n\n# Claude Code local settings (machine-specific)\n.claude/settings.local.json\n

    ctx init Patches Your .gitignore for You

    ctx init automatically adds these entries to your .gitignore.

    Review the additions with cat .gitignore after init.

    See also:

    • Security Considerations
    • Scratchpad Encryption
    • Session Journal

    Next Up: Common Workflows →: day-to-day commands for tracking context, checking health, and browsing history.

    ","path":["Home","Get Started","Your First Session"],"tags":[]},{"location":"home/getting-started/","level":1,"title":"Getting Started","text":"","path":["Home","Get Started","Getting Started"],"tags":[]},{"location":"home/getting-started/#prerequisites","level":2,"title":"Prerequisites","text":"

    ctx does not require git, but using version control with your .context/ directory is strongly recommended:

    AI sessions occasionally modify or overwrite context files inadvertently. With git, the AI can check history and restore lost content: Without it, the data is gone.

    Also, several ctx features (journal changelog, blog generation) use git history directly.

    ","path":["Home","Get Started","Getting Started"],"tags":[]},{"location":"home/getting-started/#installation","level":2,"title":"Installation","text":"

    Every setup starts with the ctx binary: the CLI tool itself.

    If you use Claude Code, you also install the ctx plugin, which adds hooks (context autoloading, persistence nudges) and 25+ /ctx-* skills. For other AI tools, ctx integrates via generated instruction files or manual context pasting: see Integrations for tool-specific setup.

    Pick one of the options below to install the binary. Claude Code users should also follow the plugin steps included in each option.

    ","path":["Home","Get Started","Getting Started"],"tags":[]},{"location":"home/getting-started/#option-1-build-from-source-recommended","level":3,"title":"Option 1: Build from Source (Recommended)","text":"

    Requires Go (version defined in go.mod) and Claude Code.

    git clone https://github.com/ActiveMemory/ctx.git\ncd ctx\nmake build\nsudo make install\n

    Install the Claude Code plugin from your local clone:

    1. Launch claude;
    2. Type /plugin and press Enter;
    3. Select Marketplaces → Add Marketplace
    4. Enter the path to the root of your clone, e.g. ~/WORKSPACE/ctx (this is where .claude-plugin/marketplace.json lives: It points Claude Code to the actual plugin in internal/assets/claude)
    5. Back in /plugin, select Install and choose ctx

    This points Claude Code at the plugin source on disk. Changes you make to hooks or skills take effect immediately: No reinstall is needed.

    Local Installs Need Manual Enablement

    Unlike marketplace installs, local plugin installs are not auto-enabled globally. The plugin will only work in projects that explicitly enable it. Run ctx init in each project (it auto-enables the plugin), or add the entry to ~/.claude/settings.json manually:

    { \"enabledPlugins\": { \"ctx@activememory-ctx\": true } }\n

    Verify:

    ctx --version       # binary is in PATH\nclaude /plugin list # plugin is installed\n

    Use the Source, Luke

    Building from source gives you the latest features and bug fixes.

    Since ctx is predominantly a developer tool, this is the recommended approach:

    You get the freshest code, can inspect what you are installing, and the plugin stays in sync with the binary.

    ","path":["Home","Get Started","Getting Started"],"tags":[]},{"location":"home/getting-started/#option-2-binary-download-marketplace","level":3,"title":"Option 2: Binary Download + Marketplace","text":"

    Pre-built binaries are available from the releases page.

    Linux (x86_64)Linux (ARM64)macOS (Apple Silicon)macOS (Intel)Windows
    curl -LO https://github.com/ActiveMemory/ctx/releases/download/v0.8.1/ctx-0.8.1-linux-amd64\nchmod +x ctx-0.8.1-linux-amd64\nsudo mv ctx-0.8.1-linux-amd64 /usr/local/bin/ctx\n
    curl -LO https://github.com/ActiveMemory/ctx/releases/download/v0.8.1/ctx-0.8.1-linux-arm64\nchmod +x ctx-0.8.1-linux-arm64\nsudo mv ctx-0.8.1-linux-arm64 /usr/local/bin/ctx\n
    curl -LO https://github.com/ActiveMemory/ctx/releases/download/v0.8.1/ctx-0.8.1-darwin-arm64\nchmod +x ctx-0.8.1-darwin-arm64\nsudo mv ctx-0.8.1-darwin-arm64 /usr/local/bin/ctx\n
    curl -LO https://github.com/ActiveMemory/ctx/releases/download/v0.8.1/ctx-0.8.1-darwin-amd64\nchmod +x ctx-0.8.1-darwin-amd64\nsudo mv ctx-0.8.1-darwin-amd64 /usr/local/bin/ctx\n

    Download ctx-0.8.1-windows-amd64.exe from the releases page and add it to your PATH.

    Claude Code users: install the plugin from the marketplace:

    1. Launch claude;
    2. Type /plugin and press Enter;
    3. Select Marketplaces → Add Marketplace;
    4. Enter ActiveMemory/ctx;
    5. Back in /plugin, select Install and choose ctx.

    Other tool users: see Integrations for tool-specific setup (Cursor, Copilot, Aider, Windsurf, etc.).

    Verify the Plugin Is Enabled

    After installing, confirm the plugin is enabled globally. Check ~/.claude/settings.json for an enabledPlugins entry. If missing, run ctx init in your project (it auto-enables the plugin), or add it manually:

    { \"enabledPlugins\": { \"ctx@activememory-ctx\": true } }\n

    Verify:

    ctx --version       # binary is in PATH\nclaude /plugin list # plugin is installed (Claude Code only)\n
    ","path":["Home","Get Started","Getting Started"],"tags":[]},{"location":"home/getting-started/#verifying-checksums","level":4,"title":"Verifying Checksums","text":"

    Each binary has a corresponding .sha256 checksum file. To verify your download:

    # Download the checksum file\ncurl -LO https://github.com/ActiveMemory/ctx/releases/download/v0.8.1/ctx-0.8.1-linux-amd64.sha256\n\n# Verify the binary\nsha256sum -c ctx-0.8.1-linux-amd64.sha256\n

    On macOS, use shasum -a 256 -c instead of sha256sum -c.

    Plugin Details

    After installation (either option) you get:

    • Context autoloading: ctx agent runs on every tool use (with cooldown)
    • Persistence nudges: reminders to capture learnings and decisions
    • Post-commit hooks: nudge context capture after git commit
    • Context size monitoring: alerts as sessions grow large
    • Project skills: /ctx-status, /ctx-task-add, /ctx-history, and more

    See Integrations for the full hook and skill reference.

    ","path":["Home","Get Started","Getting Started"],"tags":[]},{"location":"home/getting-started/#quick-start","level":2,"title":"Quick Start","text":"","path":["Home","Get Started","Getting Started"],"tags":[]},{"location":"home/getting-started/#1-initialize-context","level":3,"title":"1. Initialize Context","text":"
    cd your-project\nctx init\n

    This creates a .context/ directory with template files and an encryption key at ~/.ctx/ for the encrypted scratchpad. For Claude Code, install the ctx plugin for automatic hooks and skills.

    ctx init also scaffolds four foundation steering files in .context/steering/ — these are behavioral-rule templates that tell your AI how to act on your project:

    File What it captures product.md Product context, goals, and target users tech.md Technology stack, constraints, key dependencies structure.md Project structure and directory conventions workflow.md Development workflow and process rules

    Each file starts with a self-documenting HTML comment explaining the three inclusion modes (always / auto / manual), priority, and tool scoping. The defaults are set to inclusion: always and priority: 10 — they fire on every AI tool call until you edit them.

    You should open each of these files and replace the placeholder content with your project's actual rules. Running ctx init again won't clobber your edits — existing files are left alone. To opt out entirely, use ctx init --no-steering-init.

    See Writing Steering Files for the full walkthrough, or ctx steering for the command reference.

    ","path":["Home","Get Started","Getting Started"],"tags":[]},{"location":"home/getting-started/#2-check-status","level":3,"title":"2. Check Status","text":"
    ctx status\n

    Shows context summary: files present, token estimate, and recent activity.

    ","path":["Home","Get Started","Getting Started"],"tags":[]},{"location":"home/getting-started/#3-start-using-with-ai","level":3,"title":"3. Start Using with AI","text":"

    With Claude Code (and the ctx plugin installed), context loads automatically via hooks.

    With VS Code Copilot Chat, install the ctx extension and use @ctx /status, @ctx /agent, and other slash commands directly in chat. Run ctx setup copilot --write to generate .github/copilot-instructions.md for automatic context loading.

    For other tools, paste the output of:

    ctx agent --budget 8000\n
    ","path":["Home","Get Started","Getting Started"],"tags":[]},{"location":"home/getting-started/#3b-set-up-for-your-ai-tool","level":3,"title":"3B. Set Up for Your AI Tool","text":"

    If you use an MCP-compatible tool, generate the integration config with ctx setup:

    KiroCursorCline
    ctx setup kiro --write\n# Creates .kiro/settings/mcp.json and syncs steering files\n
    ctx setup cursor --write\n# Creates .cursor/mcp.json and syncs steering files\n
    ctx setup cline --write\n# Creates .vscode/mcp.json and syncs steering files\n

    This registers the ctx MCP server and syncs any steering files into the tool's native format. Re-run after adding or changing steering files.

    ","path":["Home","Get Started","Getting Started"],"tags":[]},{"location":"home/getting-started/#4-verify-it-works","level":3,"title":"4. Verify It Works","text":"

    Ask your AI: \"Do you remember?\"

    It should cite specific context: current tasks, recent decisions, or previous session topics.

    ","path":["Home","Get Started","Getting Started"],"tags":[]},{"location":"home/getting-started/#5-set-up-companion-tools-highly-recommended","level":3,"title":"5. Set Up Companion Tools (Highly Recommended)","text":"

    ctx works on its own, but two companion MCP servers unlock significantly better agent behavior. The investment is small and the benefits compound over sessions:

    • Gemini Search — grounded web search with citations. Skills like /ctx-code-review and /ctx-explain use it for up-to-date documentation lookups instead of relying on training data.

    • GitNexus — code knowledge graph with symbol resolution, blast radius analysis, and domain clustering. Skills like /ctx-refactor and /ctx-code-review use it for impact analysis and dependency awareness.

    # Index your project for GitNexus (run once, then after major changes)\nnpx gitnexus analyze\n

    Both are optional MCP servers: if they are not connected, skills degrade gracefully to built-in capabilities. See Companion Tools for setup details and verification.

    Next Up:

    • Your First Session →: a step-by-step walkthrough from ctx init to verified recall
    • Common Workflows →: day-to-day commands for tracking context, checking health, and browsing history
    ","path":["Home","Get Started","Getting Started"],"tags":[]},{"location":"home/hub/","level":1,"title":"Hub","text":"","path":["Home","Concepts","Hub"],"tags":[]},{"location":"home/hub/#sharing-is-caring","level":2,"title":"Sharing Is Caring","text":"

    ctx projects are normally independent: each project has its own .context/ directory, its own decisions, its own learnings, its own journal. That's the right default — most work is project-local, and mixing context across projects tends to dilute more than it helps.

    But sometimes a decision or a learning should cross project boundaries. A convention you codified in one project deserves to be visible in another. A gotcha you discovered debugging service A is the same gotcha waiting for you in service B. The ctx Hub is the feature that makes those specific entries travel, without replicating everything else.

    ","path":["Home","Concepts","Hub"],"tags":[]},{"location":"home/hub/#what-the-hub-actually-is","level":2,"title":"What the Hub Actually Is","text":"

    In one paragraph: the ctx Hub is a fan-out channel for four specific kinds of structured entries — decision, learning, convention, and task. You publish an entry with ctx add --share in one project, and it appears in .context/hub/ for every other project subscribed to that type. When you run ctx agent --include-hub, those shared entries become part of your next agent context packet.

    That is the entire feature. The Hub does not:

    • Share your session journal (.context/journal/). That stays local to each project.
    • Share your scratchpad (.context/pad). Encrypted notes never leave the machine that created them.
    • Share your TASKS.md, DECISIONS.md, LEARNINGS.md, or CONVENTIONS.md wholesale. Only entries you explicitly --share cross the boundary.
    • Provide user identity or attribution. The Hub identifies projects, not people.

    If you want \"my agent in project B sees everything my agent did in project A,\" that's not the Hub. Local session density stays local.

    ","path":["Home","Concepts","Hub"],"tags":[]},{"location":"home/hub/#who-its-for","level":2,"title":"Who It's For","text":"

    Two shapes, same mechanics, different trust models.

    ","path":["Home","Concepts","Hub"],"tags":[]},{"location":"home/hub/#personal-cross-project-brain","level":3,"title":"Personal Cross-Project Brain","text":"

    One developer, many projects. You want a learning from project A to show up when you open project B a week later. You want a convention you codified in your dotfiles project to be visible everywhere else on your workstation. Run a Hub on localhost, register each project, done.

    ","path":["Home","Concepts","Hub"],"tags":[]},{"location":"home/hub/#small-trusted-team","level":3,"title":"Small Trusted Team","text":"

    A few teammates on a LAN or a hub.ctx-like self-hosted server. You want team conventions to propagate without a wiki. You want lessons from one on-call engineer's 3 AM incident to reach everyone else's agent on the next session. Same mechanics as the personal case, plus TLS in front and a short security runbook.

    The Hub is not a multi-tenant public service. It assumes everyone holding a client token is friendly. Don't stand up hub.example.com for untrusted participants.

    ","path":["Home","Concepts","Hub"],"tags":[]},{"location":"home/hub/#going-further","level":2,"title":"Going Further","text":"
    • First-time setup: Hub: Getting Started — a five-minute walkthrough on localhost.
    • Mental model and user stories: Hub Overview — what flows, what doesn't, and when not to use it.
    • Team / LAN deployment: Multi-machine setup.
    • Redundancy: HA cluster.
    • Operating a Hub: Hub Operations and Hub Failure Modes.
    • Security posture: Hub Security Model.
    • Command reference: ctx serve, ctx connect, ctx hub.
    ","path":["Home","Concepts","Hub"],"tags":[]},{"location":"home/is-ctx-right/","level":1,"title":"Is It Right for Me?","text":"","path":["Home","Introduction","Is It Right for Me?"],"tags":[]},{"location":"home/is-ctx-right/#good-fit","level":2,"title":"Good Fit","text":"

    ctx shines when context matters more than code.

    If any of these sound like your project, it's worth trying:

    • Multi-session AI work: You use AI across many sessions on the same codebase, and re-explaining is slowing you down.
    • Architectural decisions that matter: Your project has non-obvious choices (database, auth strategy, API design) that the AI keeps second-guessing.
    • \"Why\" matters as much as \"what\": you need the AI to understand rationale, not just current code
    • Team handoffs: Multiple people (or multiple AI tools) work on the same project and need shared context.
    • AI-assisted development across tools: You switch between Claude Code, Cursor, Copilot, or other tools and want context to follow the project, not the tool.
    • Long-lived projects: Anything you'll work on for weeks or months, where accumulated knowledge has compounding value.
    ","path":["Home","Introduction","Is It Right for Me?"],"tags":[]},{"location":"home/is-ctx-right/#may-not-be-the-right-fit","level":2,"title":"May Not Be the Right Fit","text":"

    ctx adds overhead that isn't worth it for every project. Be honest about when to skip it:

    • One-off scripts: If the project is a single file you'll finish today, there's nothing to remember.
    • RAG-only workflows: If retrieval from an external knowledge base already gives the agent everything it needs for each session, adding ctx may be unnecessary. RAG retrieves information; ctx defines the project's working memory: They are complementary.
    • No AI involvement: ctx is designed for human-AI workflows; without an AI consumer, the files are just documentation.
    • Enterprise-managed context platforms: If your organization provides centralized context services, ctx may duplicate that layer.

    For a deeper technical comparison with RAG, prompt management tools, and agent frameworks, see ctx and Similar Tools.

    ","path":["Home","Introduction","Is It Right for Me?"],"tags":[]},{"location":"home/is-ctx-right/#project-size-guide","level":2,"title":"Project Size Guide","text":"","path":["Home","Introduction","Is It Right for Me?"],"tags":[]},{"location":"home/is-ctx-right/#solo-developer-single-repo","level":3,"title":"Solo Developer, Single Repo","text":"

    This is ctx's sweet spot.

    You get the most value here: one person, one project, decisions, and learnings accumulating over time. Setup takes 5 minutes, the .context/ directory stays small, and every session gets faster.

    ","path":["Home","Introduction","Is It Right for Me?"],"tags":[]},{"location":"home/is-ctx-right/#small-team-one-or-two-repos","level":3,"title":"Small Team, One or Two Repos","text":"

    Works well.

    Context files commit to git, so the whole team shares the same decisions and conventions. Each person's AI starts with the team's decisions already loaded. Merge conflicts on .context/ files are rare and easy to resolve (they are just Markdown).

    ","path":["Home","Introduction","Is It Right for Me?"],"tags":[]},{"location":"home/is-ctx-right/#multiple-repos-or-larger-teams","level":3,"title":"Multiple Repos or Larger Teams","text":"

    ctx operates per repository.

    Each repo has its own .context/ directory with its own decisions, tasks, and learnings. This matches the way code, ownership, and history already work in git.

    There is no built-in cross-repo context layer.

    For organizations that need centralized, organization-wide knowledge, ctx complements a platform solution by providing durable, project-local working memory for AI sessions.

    ","path":["Home","Introduction","Is It Right for Me?"],"tags":[]},{"location":"home/is-ctx-right/#5-minute-trial","level":2,"title":"5-Minute Trial","text":"

    Zero commitment. Try it, and delete .context/ if it's not for you.

    Using Claude Code?

    Install the ctx plugin from the Marketplace for Claude-native hooks, skills, and automatic context loading:

    1. Type /plugin and press Enter
    2. Select Marketplaces → Add Marketplace
    3. Enter ActiveMemory/ctx
    4. Back in /plugin, select Install and choose ctx

    You'll still need the ctx binary for the CLI: See Getting Started for install options.

    # 1. Initialize\ncd your-project\nctx init\n\n# 2. Add one real decision from your project\nctx add decision \"Your actual architectural choice\" \\\n  --context \"What prompted this decision\" \\\n  --rationale \"Why you chose this approach\" \\\n  --consequence \"What changes as a result\" \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n\n# 3. Check what the AI will see\nctx status\n\n# 4. Start an AI session and ask: \"Do you remember?\"\n

    If the AI cites your decision back to you, it's working.

    Want to remove it later? One command:

    rm -rf .context/\n

    No dependencies to uninstall. No configuration to revert. Just files.

    Ready to try it out?

    • Join the Community→: Open Source is better together.
    • Getting Started →: Full installation and setup.
    • ctx and Similar Tools →: Detailed comparison with other approaches.
    ","path":["Home","Introduction","Is It Right for Me?"],"tags":[]},{"location":"home/joining-a-project/","level":1,"title":"Joining a Project","text":"

    You've joined a team or inherited a project, and there's a .context/ directory in the repo. Good news: someone already set up persistent context. This page gets you oriented fast.

    ","path":["Home","Working with AI","Joining a Project"],"tags":[]},{"location":"home/joining-a-project/#what-to-read-first","level":2,"title":"What to Read First","text":"

    The files in .context/ have a deliberate priority order. Read them top-down:

    1. CONSTITUTION.md: Hard rules. Read this before you touch anything. These are inviolable constraints the team has agreed on.
    2. TASKS.md: Current and planned work. Shows what's in progress, what's pending, and what's blocked.
    3. CONVENTIONS.md: How the team writes code. Naming patterns, file organization, preferred idioms.
    4. ARCHITECTURE.md: System overview. Components, boundaries, data flow.
    5. DECISIONS.md: Why things are the way they are. Saves you from re-proposing something the team already evaluated and rejected.
    6. LEARNINGS.md: Gotchas, tips, and hard-won lessons. The stuff that doesn't fit anywhere else but will save you hours.

    See Context Files for detailed documentation of each file's structure and purpose.

    ","path":["Home","Working with AI","Joining a Project"],"tags":[]},{"location":"home/joining-a-project/#checking-context-health","level":2,"title":"Checking Context Health","text":"

    Before you start working, check whether the context is current:

    ctx status\n

    This shows file counts, token estimates, and recent activity. If files haven't been touched in weeks, the context may be stale.

    ctx drift\n

    This compares context files against recent code changes and flags potential drift: decisions that no longer match the codebase, conventions that have shifted, or tasks that look outdated.

    If things are stale, mention it to the team. Don't silently fix it yourself on day one.

    ","path":["Home","Working with AI","Joining a Project"],"tags":[]},{"location":"home/joining-a-project/#starting-your-first-session","level":2,"title":"Starting Your First Session","text":"

    Generate a context packet to prime your AI:

    ctx agent --budget 8000\n

    This outputs a token-budgeted summary of the project context, ordered by priority. With Claude Code and the ctx plugin, context loads automatically via hooks. You can also use the /ctx-remember skill to get a structured readback of what the AI knows.

    The readback is your verification step: if the AI can cite specific tasks and decisions, the context is working.

    ","path":["Home","Working with AI","Joining a Project"],"tags":[]},{"location":"home/joining-a-project/#adding-context","level":2,"title":"Adding Context","text":"

    As you work, you'll discover things worth recording. Use the CLI:

    # Record a decision you made or learned about\nctx add decision \"Use connection pooling for DB access\" \\\n  --rationale \"Reduces connection overhead under load\" \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n\n# Capture a gotcha you hit\nctx add learning \"Redis timeout defaults to 5s\" \\\n  --context \"Hit timeouts during bulk operations\" \\\n  --application \"Set explicit timeout for batch jobs\" \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n\n# Add a convention you noticed the team follows\nctx add convention \"All API handlers return structured errors\"\n

    You can also just tell the AI: \"Record this as a learning\" or \"Add this decision to context.\" With the ctx plugin, context-update commands handle the file writes.

    See the Knowledge Capture recipe for the full workflow.

    ","path":["Home","Working with AI","Joining a Project"],"tags":[]},{"location":"home/joining-a-project/#session-etiquette","level":2,"title":"Session Etiquette","text":"

    A few norms for working in a ctx-managed project:

    • Respect existing conventions. If CONVENTIONS.md says \"use filepath.Join,\" use filepath.Join. If you disagree, propose a change, don't silently diverge.
    • Don't restructure context files without asking. The file layout and section structure are shared state. Reorganizing them affects every team member and every AI session.
    • Mark tasks done when complete. Check the box ([x]) in place. Don't move tasks between sections or delete them.
    • Add context as you go. Decisions, learnings, and conventions you discover are valuable to the next person (or the next session).
    ","path":["Home","Working with AI","Joining a Project"],"tags":[]},{"location":"home/joining-a-project/#common-pitfalls","level":2,"title":"Common Pitfalls","text":"

    Ignoring CONSTITUTION.md. The constitution exists for a reason. If a task conflicts with a constitution rule, the task is wrong. Raise it with the team instead of working around the constraint.

    Deleting tasks. Never delete a task from TASKS.md. Mark it [x] (done) or [-] (skipped with a reason). The history matters for session replay and audit.

    Bypassing hooks. If the project uses ctx hooks (pre-commit nudges, context autoloading), don't disable them. They exist to keep context fresh. If a hook is noisy or broken, fix it or file a task.

    Over-contributing on day one. Read first, then contribute. Adding a dozen learnings before you understand the project's norms creates noise, not signal.

    Related:

    • Getting Started: installation and setup from scratch
    • Context Files: detailed file reference
    • Knowledge Capture: recording decisions, learnings, and conventions
    • Session Lifecycle: how a typical AI session flows with ctx
    ","path":["Home","Working with AI","Joining a Project"],"tags":[]},{"location":"home/keeping-ai-honest/","level":1,"title":"Keeping AI Honest","text":"","path":["Home","Working with AI","Keeping AI Honest"],"tags":[]},{"location":"home/keeping-ai-honest/#the-problem","level":2,"title":"The Problem","text":"

    AI agents confabulate. They invent history that never happened, claim familiarity with decisions that were never made, and sometimes declare a task complete when it is not. This is not malice - it is the default behavior of a system optimizing for plausible-sounding responses.

    When your AI says \"we decided to use Redis for caching last week,\" can you verify that? When it says \"the auth module is complete,\" can you confirm it? Without grounded, persistent context, the answer is no. You are trusting vibes.

    ctx replaces vibes with verifiable artifacts.

    ","path":["Home","Working with AI","Keeping AI Honest"],"tags":[]},{"location":"home/keeping-ai-honest/#grounded-memory","level":2,"title":"Grounded Memory","text":"

    Every entry in ctx context files has a timestamp and structured fields. When the AI cites a decision, you can check it.

    ## [2026-01-28-143022] Use Event Sourcing for Audit Trail\n\n**Status**: Accepted\n\n**Context**: Compliance requires full mutation history.\n\n**Decision**: Event sourcing for the audit subsystem only.\n\n**Rationale**: Append-only log meets compliance requirements\nwithout imposing event sourcing on the entire domain model.\n

    The timestamp 2026-01-28-143022 is not decoration. It is a verifiable anchor. If the AI references this decision, you can open DECISIONS.md, find the entry, and confirm it says what the AI claims. If the entry does not exist, the AI is hallucinating - and you know immediately.

    This is grounded memory: claims that trace back to artifacts you control and can audit.

    ","path":["Home","Working with AI","Keeping AI Honest"],"tags":[]},{"location":"home/keeping-ai-honest/#constitutionmd-hard-guardrails","level":2,"title":"CONSTITUTION.md: Hard Guardrails","text":"

    CONSTITUTION.md defines rules the AI must treat as inviolable. These are not suggestions or best practices - they are constraints that override task requirements.

    # Constitution\n\nThese rules are INVIOLABLE. If a task requires violating these,\nthe task is wrong.\n\n* [ ] Never commit secrets, tokens, API keys, or credentials\n* [ ] All public API changes require a decision record\n* [ ] Never delete context files without explicit user approval\n

    The AI reads these at session start, before anything else. A well-integrated agent will refuse a task that conflicts with a constitutional rule, citing the specific rule it would violate.

    ","path":["Home","Working with AI","Keeping AI Honest"],"tags":[]},{"location":"home/keeping-ai-honest/#the-agent-playbooks-anti-hallucination-rules","level":2,"title":"The Agent Playbook's Anti-Hallucination Rules","text":"

    The AGENT_PLAYBOOK.md file includes a section called \"How to Avoid Hallucinating Memory\" with five explicit rules:

    1. Never assume. If it is not in the context files, you do not know it.
    2. Never invent history. Do not claim \"we discussed\" something without a file reference.
    3. Verify before referencing. Search files before citing them.
    4. When uncertain, say so. \"I don't see a decision on this\" is always better than a fabricated one.
    5. Trust files over intuition. If the files say PostgreSQL but your training data suggests MySQL, the files win.

    These rules create a behavioral contract. The AI is not left to guess how confident it should be - it has explicit instructions to ground every claim in the context directory.

    ","path":["Home","Working with AI","Keeping AI Honest"],"tags":[]},{"location":"home/keeping-ai-honest/#drift-detection","level":2,"title":"Drift Detection","text":"

    Context files can go stale. You rename a package, delete a module, or finish a sprint, and suddenly ARCHITECTURE.md references paths that no longer exist. Stale context is almost as dangerous as no context: the AI treats outdated information as current truth.

    ctx drift detects this divergence:

    ctx drift\n

    It scans context files for references to files, paths, and symbols that no longer exist in the codebase. Stale references get flagged so you can update or remove them before they mislead the next session.

    Regular drift checks - weekly, or after major refactors - keep your context files honest the same way tests keep your code honest.

    ","path":["Home","Working with AI","Keeping AI Honest"],"tags":[]},{"location":"home/keeping-ai-honest/#the-verification-loop","level":2,"title":"The Verification Loop","text":"

    The /ctx-commit skill includes a built-in verification step: before staging, it maps claims to evidence and runs self-audit questions to surface gaps. This catches inconsistencies at the point where they matter most — right before code is committed.

    This closes the loop. You write context. The AI reads context. The verification step confirms that context still matches reality. When it does not, you fix it - and the next session starts from truth, not from drift.

    ","path":["Home","Working with AI","Keeping AI Honest"],"tags":[]},{"location":"home/keeping-ai-honest/#trust-through-structure","level":2,"title":"Trust through Structure","text":"

    The common thread across all of these mechanisms is structure over prose. Timestamps make claims verifiable. Constitutional rules make boundaries explicit. Drift detection makes staleness visible. The playbook makes behavioral expectations concrete.

    You do not need to trust the AI. You need to trust the system -- and verify when it matters.

    ","path":["Home","Working with AI","Keeping AI Honest"],"tags":[]},{"location":"home/keeping-ai-honest/#further-reading","level":2,"title":"Further Reading","text":"
    • Detecting and Fixing Drift: the full workflow for keeping context files accurate
    • Invariants: the properties that must hold for any valid ctx implementation
    • Agent Security: threat model and mitigations for AI agents operating with persistent context
    ","path":["Home","Working with AI","Keeping AI Honest"],"tags":[]},{"location":"home/prompting-guide/","level":1,"title":"Prompting Guide","text":"

    New to ctx?

    This guide references context files like TASKS.md, DECISIONS.md, and LEARNINGS.md:

    These are plain Markdown files that ctx maintains in your project's .context/ directory.

    If terms like \"context packet\" or \"session ceremony\" are unfamiliar,

    • start with the ctx Manifesto for the why,
    • About for the big picture,
    • then Getting Started to set up your first project.
    ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#literature-matters","level":2,"title":"Literature Matters","text":"

    This guide is about crafting effective prompts for working with AI assistants in ctx-enabled projects, but the guidelines given here apply to other AI systems, too.

    The right prompt triggers the right behavior.

    This guide documents prompts that reliably produce good results.

    ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#tldr","level":2,"title":"TL;DR","text":"Goal Prompt Load context \"Do you remember?\" Resume work \"What's the current state?\" What's next /ctx-next Debug \"Why doesn't X work?\" Validate \"Is this consistent with our decisions?\" Impact analysis \"What would break if we...\" Reflect /ctx-reflect Wrap up /ctx-wrap-up Persist \"Add this as a learning\" Explore \"How does X work in this codebase?\" Sanity check \"Is this the right approach?\" Completeness \"What am I missing?\" One more thing \"What's the single smartest addition?\" Set tone \"Push back if my assumptions are wrong.\" Constrain scope \"Only change files in X. Nothing else.\" Course correct \"Stop. That's not what I meant.\" Check health \"Run ctx drift\" Commit /ctx-commit","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#session-start","level":2,"title":"Session Start","text":"","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#do-you-remember","level":3,"title":"\"do you remember?\"","text":"

    Triggers the AI to silently read TASKS.md, DECISIONS.md, LEARNINGS.md, and check recent history via ctx journal before responding with a structured readback:

    1. Last session: most recent session topic and date
    2. Active work: pending or in-progress tasks
    3. Recent context: 1-2 recent decisions or learnings
    4. Next step: offer to continue or ask what to focus on

    Use this at the start of every important session.

    Do you remember what we were working on?\n

    This question implies prior context exists. The AI checks files rather than admitting ignorance. The expected response cites specific context (session names, task counts, decisions), not vague summaries.

    If the AI instead narrates its discovery process (\"Let me check if there are files...\"), it has not loaded CLAUDE.md or AGENT_PLAYBOOK.md properly.

    For a detailed case study on making agents actually follow this protocol (including the failure modes, the timing problem, and the hook design that solved it) see The Dog Ate My Homework.

    ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#whats-the-current-state","level":3,"title":"\"What's the Current State?\"","text":"

    Prompts reading of TASKS.md, recent sessions, and status overview.

    Use this when resuming work after a break.

    Variants:

    • \"Where did we leave off?\"
    • \"What's in progress?\"
    • \"Show me the open tasks.\"
    ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#during-work","level":2,"title":"During Work","text":"","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#why-doesnt-x-work","level":3,"title":"\"Why Doesn't X Work?\"","text":"

    This triggers root cause analysis rather than surface-level fixes.

    Use this when something fails unexpectedly.

    Framing as \"why\" encourages investigation before action. The AI will trace through code, check configurations, and identify the actual cause.

    Real Example

    \"Why can't I run /ctx-reflect?\" led to discovering missing permissions in settings.local.json bootstrapping.

    This was a fix that benefited all users of ctx.

    ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#is-this-consistent-with-our-decisions","level":3,"title":"\"Is This Consistent with Our Decisions?\"","text":"

    This prompts checking DECISIONS.md before implementing.

    Use this before making architectural choices.

    Variants:

    • \"Check if we've decided on this before\"
    • \"Does this align with our conventions?\"
    ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#what-would-break-if-we","level":3,"title":"\"What Would Break If We...\"","text":"

    This triggers defensive thinking and impact analysis.

    Use this before making significant changes.

    What would break if we change the Settings struct?\n
    ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#before-you-start-read-x","level":3,"title":"\"Before You Start, Read X\"","text":"

    This ensures specific context is loaded before work begins.

    Use this when you know the relevant context exists in a specific file.

    Before you start, check ctx journal source for the auth discussion session\n
    ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#scope-control","level":3,"title":"Scope Control","text":"

    Constrain the AI to prevent sprawl. These are some of the most useful prompts in day-to-day work.

    Only change files in internal/cli/add/. Nothing else.\n
    No new files. Modify the existing implementation.\n
    Keep the public API unchanged. Internal refactor only.\n

    Use these when the AI tends to \"helpfully\" modify adjacent code, add documentation you didn't ask for, or create new abstractions.

    ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#course-correction","level":3,"title":"Course Correction","text":"

    Steer the AI when it goes off-track: Don't wait for it to finish a wrong approach.

    Stop! That's not what I meant. Let me clarify.\n
    Let's step back. Explain what you're about to do before changing anything.\n
    Undo that last change and try a different approach.\n

    These work because they interrupt momentum.

    Without explicit course correction, the AI tends to commit harder to a wrong path rather than reconsidering.

    ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#failure-modes","level":3,"title":"Failure Modes","text":"

    When the AI misbehaves, match the symptom to the recovery prompt:

    Symptom Recovery prompt Hand-waves (\"should work now\") \"Show evidence: file/line refs, command output, or test name.\" Creates unnecessary files \"No new files. Modify the existing implementation.\" Expands scope unprompted \"Stop after the smallest working change. Ask before expanding scope.\" Narrates instead of acting \"Skip the explanation. Make the change and show the diff.\" Repeats a failed approach \"That didn't work last time. Try a different approach.\" Claims completion without proof \"Run the test. Show me the output.\"

    These are recovery handles, not rules to paste into CLAUDE.md.

    Use them in the moment when you see the behavior.

    ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#reflection-and-persistence","level":2,"title":"Reflection and Persistence","text":"","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#what-did-we-learn","level":3,"title":"\"What Did We Learn?\"","text":"

    This prompts reflection on the session and often triggers adding learnings to LEARNINGS.md.

    Use this after completing a task or debugging session.

    This is an explicit reflection prompt. The AI will summarize insights and often offer to persist them.

    ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#add-this-as-a-learningdecision","level":3,"title":"\"Add This as a Learning/decision\"","text":"

    This is an explicit persistence request.

    Use this when you have discovered something worth remembering.

    Add this as a learning: \"JSON marshal escapes angle brackets by default\"\n\n# or simply.\nAdd this as a learning.\n# and let the AI autonomously infer and summarize.\n
    ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#save-context-before-we-end","level":3,"title":"\"Save Context Before We End\"","text":"

    This triggers context persistence before the session closes.

    Use it at the end of the session or before switching topics.

    Variants:

    • \"Let's persist what we did\"
    • \"Update the context files\"
    • /ctx-wrap-up: the recommended end-of-session ceremony (see Session Ceremonies)
    • /ctx-reflect: mid-session reflection checkpoint
    ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#exploration-and-research","level":2,"title":"Exploration and Research","text":"","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#explore-the-codebase-for-x","level":3,"title":"\"Explore the Codebase for X\"","text":"

    This triggers thorough codebase search rather than guessing.

    Use this when you need to understand how something works.

    This works because \"Explore\" signals that investigation is needed, not immediate action.

    ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#how-does-x-work-in-this-codebase","level":3,"title":"\"How Does X Work in This Codebase?\"","text":"

    This prompts reading actual code rather than explaining general concepts.

    Use this to understand the existing implementation.

    How does session saving work in this codebase?\n
    ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#find-all-places-where-x","level":3,"title":"\"Find All Places Where X\"","text":"

    This triggers a comprehensive search across the codebase.

    Use this before refactoring or understanding the impact.

    ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#meta-and-process","level":2,"title":"Meta and Process","text":"","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#what-should-we-document-from-this","level":3,"title":"\"What Should We Document from This?\"","text":"

    This prompts identifying learnings, decisions, and conventions worth persisting.

    Use this after complex discussions or implementations.

    ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#is-this-the-right-approach","level":3,"title":"\"Is This the Right Approach?\"","text":"

    This invites the AI to challenge the current direction.

    Use this when you want a sanity check.

    This works because it allows AI to disagree.

    AIs often default to agreeing; this prompt signals you want an honest assessment.

    Stronger variant: \"Push back if my assumptions are wrong.\" This sets the tone for the entire session: The AI will flag questionable choices proactively instead of waiting to be asked.

    ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#what-am-i-missing","level":3,"title":"\"What Am I Missing?\"","text":"

    This prompts thinking about edge cases, overlooked requirements, or unconsidered approaches.

    Use this before finalizing a design or implementation.

    Forward-looking variant: \"What's the single smartest addition you could make to this at this point?\" Use this after you think you're done: It surfaces improvements you wouldn't have thought to ask for. The constraint to one thing prevents feature sprawl.

    ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#cli-commands-as-prompts","level":2,"title":"CLI Commands as Prompts","text":"

    Asking the AI to run ctx commands is itself a prompt. These load context or trigger specific behaviors:

    Command What it does \"Run ctx status\" Shows context summary, file presence, staleness \"Run ctx agent\" Loads token-budgeted context packet \"Run ctx drift\" Detects dead paths, stale files, missing context","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#ctx-skills","level":3,"title":"ctx Skills","text":"

    The SKILL.md Standard

    Skills are formalized prompts stored as SKILL.md files.

    The /slash-command syntax below is Claude Code specific.

    Other agents can use the same skill files, but invocation may differ.

    Use ctx skills by name:

    Skill When to use /ctx-status Quick context summary /ctx-agent Load full context packet /ctx-remember Recall project context and structured readback /ctx-wrap-up End-of-session context persistence /ctx-history Browse session history for past discussions /ctx-reflect Structured reflection checkpoint /ctx-next Suggest what to work on next /ctx-commit Commit with context persistence /ctx-drift Detect and fix context drift /ctx-implement Execute a plan step-by-step with verification /ctx-loop Generate autonomous loop script /ctx-pad Manage encrypted scratchpad /ctx-archive Archive completed tasks /check-links Audit docs for dead links

    Ceremony vs. Workflow Skills

    Most skills work conversationally: \"what should we work on?\" triggers /ctx-next, \"save that as a learning\" triggers /ctx-learning-add. Natural language is the recommended approach.

    Two skills are the exception: /ctx-remember and /ctx-wrap-up are ceremony skills for session boundaries. Invoke them as explicit slash commands; conversational triggers risk partial execution. See Session Ceremonies.

    Skills combine a prompt, tool permissions, and domain knowledge into a single invocation.

    Skills beyond Claude Code

    The /slash-command syntax above is Claude Code native, but the underlying SKILL.md files are a standard markdown format that any agent can consume. If you use a different coding agent, consult its documentation for how to load skill files as prompt templates.

    See Integrations for setup details.

    ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#anti-patterns","level":2,"title":"Anti-Patterns","text":"

    Based on our ctx development experience (i.e., \"sipping our own champagne\") so far, here are some prompts that tend to produce poor results:

    Prompt Problem Better Alternative \"Fix this\" Too vague, may patch symptoms \"Why is this failing?\" \"Make it work\" Encourages quick hacks \"What's the right way to solve this?\" \"Just do it\" Skips planning \"Plan this, then implement\" \"You should remember\" Confrontational \"Do you remember?\" \"Obviously...\" Discourages questions State the requirement directly \"Idiomatic X\" Triggers language priors \"Follow project conventions\" \"Implement everything\" No phasing, sprawl risk Break into tasks, implement one at a time \"You should know this\" Assumes context is loaded \"Before you start, read X\"","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#reliability-checklist","level":2,"title":"Reliability Checklist","text":"

    Before sending a non-trivial prompt, check these four elements. This is the guide's DNA in one screenful.

    1. Goal in one sentence: What does \"done\" look like?
    2. Files to read: What existing code or context should the AI review before acting?
    3. Verification command: How will you prove it worked? (test name, CLI command, expected output)
    4. Scope boundary: What should the AI not touch?

    A prompt that covers all four is almost always good enough.

    A prompt missing #3 is how you get \"should work now\" without evidence.

    ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#safety-invariants","level":2,"title":"Safety Invariants","text":"

    These Are Invariants: Not Suggestions

    A prompting guide earns its trust by being honest about risk.

    These four rules mentioned below don't change with model versions, agent frameworks, or project size.

    Build them into your workflow once and stop thinking about them.

    Tool-using agents can read files, run commands, and modify your codebase. That power makes them useful. It also creates a trust boundary you should be aware of.

    These invariants apply regardless of which agent or model you use.

    ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#treat-the-repository-text-as-untrusted-input","level":3,"title":"Treat the Repository Text as \"Untrusted Input\"","text":"

    Issue descriptions, PR comments, commit messages, documentation, and even code comments can contain text that looks like instructions. An agent that reads a GitHub issue and then runs a command found inside it is executing untrusted input.

    The rule: Before running any command the agent found in repo text (issues, docs, comments), restate the command explicitly and confirm it does what you expect. Don't let the agent copy-paste from untrusted sources into a shell.

    ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#ask-before-destructive-operations","level":3,"title":"Ask Before Destructive Operations","text":"

    git push --force, rm -rf, DROP TABLE, docker system prune: these are irreversible or hard to reverse. A good agent should pause before running them, but don't rely on that.

    The rule: For any operation that deletes data, overwrites history, or affects shared infrastructure, require explicit confirmation. If the agent runs something destructive without asking, that's a course-correction moment: \"Stop. Never run destructive commands without asking first.\"

    ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#scope-the-blast-radius","level":3,"title":"Scope the Blast Radius","text":"

    An agent told to \"fix the tests\" might modify test fixtures, change assertions, or delete tests that inconveniently fail. An agent told to \"deploy\" might push to production. Broad mandates create broad risk.

    The rule: Constrain scope before starting work. The Reliability Checklist's scope boundary (#4) is your primary safety lever. When in doubt, err on the side of a tighter boundary.

    ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#secrets-never-belong-in-context","level":3,"title":"Secrets Never Belong in Context","text":"

    LEARNINGS.md, DECISIONS.md, and session transcripts are plain-text files that may be committed to version control.

    Don't persist API keys, passwords, tokens, or credentials in context files.

    The rule: If the agent encounters a secret during work, it should use it transiently (environment variable, an alias to the secret instead of the actual secret, etc.) and never write it to a context file.

    Any Secret Seen IS Exposed

    If you see a secret in a context file, remove it immediately and rotate the credential.

    ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#explore-plan-implement","level":2,"title":"Explore → Plan → Implement","text":"

    For non-trivial work, name the phase you want:

    Explore src/auth and summarize the current flow.\nThen propose a plan. After I approve, implement with tests.\n

    This prevents the AI from jumping straight to code.

    The three phases map to different modes of thinking:

    • Explore: read, search, understand: no changes
    • Plan: propose approach, trade-offs, scope: no changes
    • Implement: write code, run tests, verify: changes

    Small fixes skip straight to implement. Complex or uncertain work benefits from all three.

    ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#prompts-by-task-type","level":2,"title":"Prompts by Task Type","text":"

    Different tasks need different prompt structures. The pattern: symptom + location + verification.

    ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#bugfix","level":3,"title":"Bugfix","text":"
    Users report search returns empty results for queries with hyphens.\nReproduce in src/search/. Write a failing test for \"foo-bar\",\nfix the root cause, run: go test ./internal/search/...\n
    ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#refactor","level":3,"title":"Refactor","text":"
    Inspect src/auth/ and list duplication hotspots.\nPropose a refactor plan scoped to one module.\nAfter approval, remove duplication without changing behavior.\nAdd a test if coverage is missing. Run: make audit\n
    ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#research","level":3,"title":"Research","text":"
    Explore the request flow around src/api/.\nSummarize likely bottlenecks with evidence.\nPropose 2-3 hypotheses. Do not implement yet.\n
    ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#docs","level":3,"title":"Docs","text":"
    Update docs/cli-reference.md to reflect the new --format flag.\nConfirm the flag exists in the code and the example works.\n

    Notice each prompt includes what to verify and how. Without that, you get a \"should work now\" instead of evidence.

    ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#writing-tasks-as-prompts","level":2,"title":"Writing Tasks as Prompts","text":"

    Tasks in TASKS.md are indirect prompts to the AI. How you write them shapes how the AI approaches the work.

    ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#state-the-motivation-not-just-the-goal","level":3,"title":"State the Motivation, Not Just the Goal","text":"

    Tell the AI why you are building something, not just what.

    Bad: \"Build a calendar view.\"

    Good: \"Build a calendar view. The motivation is that all notes and tasks we build later should be viewable here.\"

    The second version lets the AI anticipate downstream requirements:

    It will design the calendar's data model to be compatible with future features, without you having to spell out every integration point. Motivation turns a one-off task into a directional task.

    ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#state-the-deliverable-not-just-steps","level":3,"title":"State the Deliverable, Not Just Steps","text":"

    Bad task (implementation-focused):

    - [ ] T1.1.0: Parser system\n  - [ ] Define data structures\n  - [ ] Implement line parser\n  - [ ] Implement session grouper\n

    The AI may complete all subtasks but miss the actual goal. What does \"Parser system\" deliver to the user?

    Good task (deliverable-focused):

    - [ ] T1.1.0: Parser CLI command\n  **Deliverable**: `ctx journal source` command that shows parsed sessions\n  - [ ] Define data structures\n  - [ ] Implement line parser\n  - [ ] Implement session grouper\n

    Now the AI knows the subtasks serve a specific user-facing deliverable.

    ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#use-acceptance-criteria","level":3,"title":"Use Acceptance Criteria","text":"

    For complex tasks, add explicit \"done when\" criteria:

    - [ ] T2.0: Authentication system\n  **Done when**:\n  - [ ] User can register with email\n  - [ ] User can log in and get a token\n  - [ ] Protected routes reject unauthenticated requests\n

    This prevents premature \"task complete\" when only the implementation details are done, but the feature doesn't actually work.

    ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#subtasks-parent-task","level":3,"title":"Subtasks ≠ Parent Task","text":"

    Completing all subtasks does not mean the parent task is complete.

    The parent task describes what the user gets.

    Subtasks describe how to build it.

    Always re-read the parent task description before marking it complete. Verify the stated deliverable exists and works.

    ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#why-do-these-approaches-work","level":2,"title":"Why Do These Approaches Work?","text":"

    The patterns in this guide aren't invented here: They are practitioner translations of well-established, peer-reviewed research, most of which predate the current AI (hype) wave.

    The underlying ideas come from decades of work in machine learning, cognitive science, and numerical optimization. For a concrete case study showing how these principles play out when an agent decides whether to follow instructions (attention competition, optimization toward least-resistance paths, and observable compliance as a design goal) see The Dog Ate My Homework.

    Phased work (\"Explore → Plan → Implement\") applies chain-of-thought reasoning: Decomposing a problem into sequential steps before acting. Forcing intermediate reasoning steps measurably improves output quality in language models, just as it does in human problem-solving. Wei et al., Chain-of-Thought Prompting Elicits Reasoning in Large Language Models (2022).

    Root-cause prompts (\"Why doesn't X work?\") use step-back abstraction: Retreating to a higher-level question before diving into specifics. This mirrors how experienced engineers debug: they ask \"what should happen?\" before asking \"what went wrong?\" Zheng et al., Take a Step Back: Evoking Reasoning via Abstraction in Large Language Models (2023).

    Exploring alternatives (\"Propose 2-3 approaches\") leverages self-consistency: Generating multiple independent reasoning paths and selecting the most coherent result. The idea traces back to ensemble methods in ML: A committee of diverse solutions outperforms any single one. Wang et al., Self-Consistency Improves Chain of Thought Reasoning in Language Models (2022).

    Impact analysis (\"What would break if we...\") is a form of tree-structured exploration: Branching into multiple consequence paths before committing. This is the same principle behind game-tree search (minimax, MCTS) that has powered decision-making systems since the 1950s. Yao et al., Tree of Thoughts: Deliberate Problem Solving with Large Language Models (2023).

    Motivation prompting (\"Build X because Y\") works through goal conditioning: Providing the objective function alongside the task. In optimization terms, you are giving the gradient direction, not just the loss. The model can make locally coherent decisions that serve the global objective because it knows what \"better\" means.

    Scope constraints (\"Only change files in X\") apply constrained optimization: Bounding the search space to prevent divergence. This is the same principle behind regularization in ML: Without boundaries, powerful optimizers find solutions that technically satisfy the objective but are practically useless.

    CLI commands as prompts (\"Run ctx status\") interleave reasoning with acting: The model thinks, acts on external tools, observes results, then thinks again. Grounding reasoning in real tool output reduces hallucination because the model can't ignore evidence it just retrieved. Yao et al., ReAct: Synergizing Reasoning and Acting in Language Models (2022).

    Task decomposition (\"Prompts by Task Type\") applies least-to-most prompting: Breaking a complex problem into subproblems and solving them sequentially, each building on the last. This is the research version of \"plan, then implement one slice.\" Zhou et al., Least-to-Most Prompting Enables Complex Reasoning in Large Language Models (2022).

    Explicit planning (\"Explore → Plan → Implement\") is directly supported by plan-and-solve prompting, which addresses missing-step failures in zero-shot reasoning by extracting a plan before executing. The phased structure prevents the model from jumping to code before understanding the problem. Wang et al., Plan-and-Solve Prompting: Improving Zero-Shot Chain-of-Thought Reasoning by Large Language Models (2023).

    Session reflection (\"What did we learn?\", /ctx-reflect) is a form of verbal reinforcement learning: Improving future performance by persisting linguistic feedback as memory rather than updating weights. This is exactly what LEARNINGS.md and DECISIONS.md provide: a durable feedback signal across sessions. Shinn et al., Reflexion: Language Agents with Verbal Reinforcement Learning (2023).

    These aren't prompting \"hacks\" that you will find in the \"1000 AI Prompts for the Curious\" listicles: They are applications of foundational principles:

    • Decomposition,
    • Abstraction,
    • Ensemble Reasoning,
    • Search,
    • and Constrained Optimization.

    They work because language models are, at their core, optimization systems navigating probabilistic landscapes.

    ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#further-reading","level":2,"title":"Further Reading","text":"
    • The Attention Budget: Why your AI forgets what you just told it, and how token budgets shape context strategy
    • The Dog Ate My Homework: A case study in making agents follow instructions: attention timing, delegation decay, and observable compliance as a design goal
    ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#contributing","level":2,"title":"Contributing","text":"

    Found a prompt that works well? Open an issue or PR with:

    1. The prompt text;
    2. What behavior it triggers;
    3. When to use it;
    4. Why it works (optional but helpful).

    Dive Deeper:

    • Recipes: targeted how-to guides for specific tasks
    • CLI Reference: all commands and flags
    • Integrations: setup for Claude Code, Cursor, Aider
    ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/repeated-mistakes/","level":1,"title":"My AI Keeps Making the Same Mistakes","text":"","path":["Home","Working with AI","My AI Keeps Making the Same Mistakes"],"tags":[]},{"location":"home/repeated-mistakes/#the-problem","level":2,"title":"The Problem","text":"

    You found a bug last Tuesday. You debugged it, understood the root cause, and moved on. Today, a new session hits the exact same bug. The AI rediscovers it from scratch, burning twenty minutes on something you already solved.

    Worse: you spent an hour last week evaluating two database migration strategies, picked one, documented why in a comment somewhere, and now the AI is cheerfully suggesting the approach you rejected. Again.

    This is not a model problem. It is a memory problem. Without persistent context, every session starts with amnesia.

    ","path":["Home","Working with AI","My AI Keeps Making the Same Mistakes"],"tags":[]},{"location":"home/repeated-mistakes/#how-ctx-stops-the-loop","level":2,"title":"How ctx Stops the Loop","text":"

    ctx gives your AI three files that directly prevent repeated mistakes, each targeting a different failure mode.

    ","path":["Home","Working with AI","My AI Keeps Making the Same Mistakes"],"tags":[]},{"location":"home/repeated-mistakes/#decisionsmd-stop-relitigating-settled-choices","level":3,"title":"DECISIONS.md: Stop Relitigating Settled Choices","text":"

    When you make an architectural decision, record it with rationale and rejected alternatives. The AI reads this at session start and treats it as settled.

    ## [2026-02-12] Use JWT for Authentication\n\n**Status**: Accepted\n\n**Context**: Need stateless auth for the API layer.\n\n**Decision**: JWT with short-lived access tokens and refresh rotation.\n\n**Rationale**: Stateless, scales horizontally, team has prior experience.\n\n**Alternatives Considered**:\n- Session-based auth: Rejected. Requires sticky sessions or shared store.\n- API keys only: Rejected. No user identity, no expiry rotation.\n

    Next session, when the AI considers auth, it reads this entry and builds on the decision instead of re-debating it. If someone asks \"why not sessions?\", the rationale is already there.

    ","path":["Home","Working with AI","My AI Keeps Making the Same Mistakes"],"tags":[]},{"location":"home/repeated-mistakes/#learningsmd-capture-gotchas-once","level":3,"title":"LEARNINGS.md: Capture Gotchas Once","text":"

    Learnings are the bugs, quirks, and non-obvious behaviors that cost you time the first time around. Write them down so they cost you zero time the second time.

    ## Build\n\n### CGO Required for SQLite on Alpine\n\n**Discovered**: 2026-01-20\n\n**Context**: Docker build failed silently with \"no such table\" at runtime.\n\n**Lesson**: The go-sqlite3 driver requires CGO_ENABLED=1 and gcc\ninstalled in the build stage. Alpine needs apk add build-base.\n\n**Application**: Always use the golang:alpine image with build-base\nfor SQLite builds. Never set CGO_ENABLED=0.\n

    Without this entry, the next session that touches the Dockerfile will hit the same wall. With it, the AI knows before it starts.

    ","path":["Home","Working with AI","My AI Keeps Making the Same Mistakes"],"tags":[]},{"location":"home/repeated-mistakes/#constitutionmd-draw-hard-lines","level":3,"title":"CONSTITUTION.md: Draw Hard Lines","text":"

    Some mistakes are not about forgetting - they are about boundaries the AI should never cross. CONSTITUTION.md sets inviolable rules.

    * [ ] Never commit secrets, tokens, API keys, or credentials\n* [ ] Never disable security linters without a documented exception\n* [ ] All database migrations must be reversible\n

    The AI reads these as absolute constraints. It does not weigh them against convenience. It refuses tasks that would violate them.

    ","path":["Home","Working with AI","My AI Keeps Making the Same Mistakes"],"tags":[]},{"location":"home/repeated-mistakes/#the-accumulation-effect","level":2,"title":"The Accumulation Effect","text":"

    Each of these files grows over time. Session one captures two decisions. Session five adds a tricky learning about timezone handling. Session twelve records a convention about error message formatting.

    By session twenty, your AI has a knowledge base that no single person carries in their head. New team members - human or AI - inherit it instantly.

    The key insight: you are not just coding. You are building a knowledge layer that makes every future session faster.

    ctx files version with your code in git. They survive branch switches, team changes, and model upgrades. The context outlives any single session.

    ","path":["Home","Working with AI","My AI Keeps Making the Same Mistakes"],"tags":[]},{"location":"home/repeated-mistakes/#getting-started","level":2,"title":"Getting Started","text":"

    Capture your first decision or learning right now:

    ctx add decision \"Use PostgreSQL\" \\\n  --context \"Need a relational database for the project\" \\\n  --rationale \"Team expertise, JSONB support, mature ecosystem\" \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n\nctx add learning \"Vitest mock hoisting\" \\\n  --context \"Tests failing intermittently\" \\\n  --lesson \"vi.mock() must be at file top level\" \\\n  --application \"Use vi.doMock() for dynamic mocks\" \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n
    ","path":["Home","Working with AI","My AI Keeps Making the Same Mistakes"],"tags":[]},{"location":"home/repeated-mistakes/#further-reading","level":2,"title":"Further Reading","text":"
    • Knowledge Capture: the full workflow for persisting decisions, learnings, and conventions
    • Context Files Reference: structure and format for every file in .context/
    • About ctx: the bigger picture - why persistent context changes how you work with AI
    ","path":["Home","Working with AI","My AI Keeps Making the Same Mistakes"],"tags":[]},{"location":"home/steering/","level":1,"title":"Steering Files","text":"","path":["Home","Customization","Steering Files"],"tags":[]},{"location":"home/steering/#steering-files","level":2,"title":"Steering Files","text":"

    ctx projects talk to AI assistants through several layers — context files, decisions, conventions, the agent context packet — but none of those can tell the assistant how to behave when a specific kind of prompt arrives. That's what steering files are for.

    A steering file is a small markdown document with YAML frontmatter that says: \"when the user asks about X, prepend these rules to the prompt.\" ctx manages those files in .context/steering/, decides which ones match each prompt, and syncs them out to each AI tool's native config (Claude Code, Cursor, Kiro, Cline) so the rules actually land in the prompt pipeline.

    ","path":["Home","Customization","Steering Files"],"tags":[]},{"location":"home/steering/#not-the-same-as-decisions-or-conventions","level":2,"title":"Not the Same as Decisions or Conventions","text":"

    The three look similar on disk but serve different purposes:

    Kind Purpose Decisions (DECISIONS.md) What was chosen and why Conventions (CONVENTIONS.md) How the codebase is written Steering (.context/steering/*.md) How the AI should behave on matching prompts

    If you find yourself writing \"the AI should always do X when asked about Y,\" that belongs in steering, not decisions.

    ","path":["Home","Customization","Steering Files"],"tags":[]},{"location":"home/steering/#your-first-steering-files","level":2,"title":"Your First Steering Files","text":"

    ctx init scaffolds four foundation steering files in .context/steering/ so you start with something to edit rather than an empty directory:

    File What to fill in product.md What the project is, who it's for, what's out of scope tech.md Languages, frameworks, runtime, hard constraints structure.md Directory layout, where new files go, naming rules workflow.md Branch strategy, commit conventions, pre-commit checks

    Each file starts with an inline HTML comment explaining the three inclusion modes, priority semantics, and tool scoping. The comment is invisible in rendered markdown but visible when you open the file to edit it — it's self-documenting scaffolding, not forever guidance. Delete the comment once you've customized the file.

    Default settings for foundation files:

    • inclusion: always — fires on every AI tool call
    • priority: 10 — injected near the top of the prompt
    • tools: [] — applies to every configured AI tool

    You should open each of these files and replace the placeholder content with your project's actual rules. Re-running ctx init is safe: existing files are left alone, so your edits survive. Use ctx init --no-steering-init to opt out of the scaffold entirely.

    ","path":["Home","Customization","Steering Files"],"tags":[]},{"location":"home/steering/#inclusion-modes","level":2,"title":"Inclusion Modes","text":"

    Each steering file declares an inclusion mode in its frontmatter:

    Mode When the file is included always Every prompt, unconditionally auto When the prompt keywords match the file's description manual Only when the user explicitly names the file

    Which mode to pick depends on the AI tool you use, because the two tool families consume steering very differently.

    Claude Code and Codex — prefer inclusion: always for rules that must fire reliably. These tools have two delivery channels:

    1. The plugin's PreToolUse hook runs ctx agent with an empty prompt, so only always files match and get injected automatically on every tool call.
    2. The ctx_steering_get MCP tool, registered automatically when the ctx plugin is installed. Claude can call this tool mid-task to fetch auto or manual files matching a specific prompt. Verify with claude mcp list — look for ctx: ✓ Connected.

    Use always for invariants and anything that must fire every session. Use auto for situational rules where \"Claude fetches this when the prompt is relevant\" is the right behavior — those still land, just on Claude's judgment. Use manual for reference libraries you'll name explicitly.

    Cursor, Cline, Kiro — auto is the natural default. These tools read .cursor/rules/, .clinerules/, or .kiro/steering/ natively and resolve the description match on their own, so auto files fire when the prompt matches. manual files load on explicit invocation. always still works but consumes context budget on every turn.

    Mixed setups — if a rule must fire on Claude Code, pick always, even if it's overkill for your Cursor setup. The context budget cost is small; the alternative (silently not firing) is worse.

    ","path":["Home","Customization","Steering Files"],"tags":[]},{"location":"home/steering/#two-families-of-ai-tools-two-delivery-paths","level":2,"title":"Two Families of AI Tools, Two Delivery Paths","text":"

    Not every AI tool consumes steering the same way. ctx handles two tool families differently, and it's worth knowing which family your editor is in before you wonder why a rule isn't firing.

    Native-rules tools — Cursor, Cline, Kiro — have a built-in rules primitive. They read a specific directory (.cursor/rules/, .clinerules/, .kiro/steering/) and apply the rules they find there. ctx handles these via ctx steering sync, which exports your files into the tool-native format. Run sync whenever you edit a steering file.

    Hook + MCP tools — Claude Code, Codex — have no native rules primitive, so ctx steering sync is a no-op for them. Instead, ctx delivers steering through two non-sync channels:

    1. Automatic injection via a PreToolUse hook. The ctx setup claude-code plugin wires a hook that runs ctx agent --budget 8000 before each tool call. ctx agent loads your steering files, filters them by the active prompt, and includes matching bodies in the context packet it prints. Claude Code feeds that output back into its context. Every tool call, automatically.
    2. On-demand via the ctx_steering_get MCP tool. The ctx MCP server exposes a tool Claude can call mid-task to fetch matching steering files for a specific prompt. Claude decides when to call it — it's not automatic.

    Both channels activate when you run ctx setup claude-code --write. After that, steering just works for Claude Code.

    Practical takeaway:

    • Using Cursor/Cline/Kiro only? → Run ctx steering sync after edits.
    • Using Claude Code or Codex only? → Never run sync; the hook+MCP pipeline handles it.
    • Using both? → Run sync for the native-rules tools; the hook+MCP pipeline covers Claude Code automatically.
    ","path":["Home","Customization","Steering Files"],"tags":[]},{"location":"home/steering/#two-shapes-of-automation-rules-and-scripts","level":2,"title":"Two Shapes of Automation: Rules and Scripts","text":"

    Steering is one of two hook-like layers ctx provides for customizing AI behavior. They're complementary:

    • Steering — persistent rules that get prepended to prompts. Declarative, text-only, scored by match.
    • Triggers — executable shell scripts that fire at lifecycle events. Imperative, runs arbitrary code, gated by exit codes.

    Pick steering when you want \"always remind the AI of X.\" Pick triggers when you want \"do Y when event Z happens.\" They can coexist — many projects use both.

    ","path":["Home","Customization","Steering Files"],"tags":[]},{"location":"home/steering/#where-to-go-next","level":2,"title":"Where to Go Next","text":"
    • Writing Steering Files — a six-step walkthrough: scaffold, write the rule, preview matches, list, get-rules-in-front-of-the-AI (two paths depending on tool family), verify.
    • ctx steering reference — full command, flag, and frontmatter reference; includes the per-tool delivery-mechanism table and a dedicated section on how Claude Code and Codex consume steering.
    • ctx setup — configure which AI tools receive steering. For Cursor/Cline/Kiro this is about sync targets; for Claude Code/Codex it installs the plugin that wires the PreToolUse hook and MCP server.
    • Lifecycle Triggers — the imperative companion to steering files.
    ","path":["Home","Customization","Steering Files"],"tags":[]},{"location":"home/triggers/","level":1,"title":"Lifecycle Triggers","text":"","path":["Home","Customization","Lifecycle Triggers"],"tags":[]},{"location":"home/triggers/#lifecycle-triggers","level":2,"title":"Lifecycle Triggers","text":"

    Some things can't be expressed as a rule you want the AI to follow. Sometimes you want something to happen: block a dangerous tool call, inject today's standup notes into the next session, log every file save to a journal. That's what triggers are for.

    A trigger is an executable shell script that ctx runs at a specific lifecycle event: the start of a session, before a tool call, when a file is saved, and so on. Triggers read a JSON payload from stdin, do whatever they need, and write a JSON response on stdout. They can allow, block, or inject context into the pipeline depending on the event type.

    ","path":["Home","Customization","Lifecycle Triggers"],"tags":[]},{"location":"home/triggers/#trigger-types","level":2,"title":"Trigger Types","text":"Type Fires when Use case session-start A new AI session begins Inject rotating context, standup notes session-end An AI session ends Persist summaries, send notifications pre-tool-use Before a tool call executes Block, gate, or audit post-tool-use After a tool call completes Log, react, post-process file-save A file is saved Lint on save, update indices context-add A new entry is added to .context/ Cross-link, notify, enrich","path":["Home","Customization","Lifecycle Triggers"],"tags":[]},{"location":"home/triggers/#triggers-are-arbitrary-code-treat-them-like-pre-commit-hooks","level":2,"title":"Triggers Are Arbitrary Code — Treat Them like Pre-Commit Hooks","text":"

    Only Enable Scripts You've Read and Understand

    A trigger is a shell script with the executable bit set. It runs with the same privileges as your AI tool and receives JSON input on stdin. A malicious or buggy trigger can block tool calls, corrupt context files, or exfiltrate data.

    ctx trigger add intentionally creates new scripts disabled (no executable bit). You must ctx trigger enable <name> after reviewing the contents. That's not a suggestion — it's the security model.

    ","path":["Home","Customization","Lifecycle Triggers"],"tags":[]},{"location":"home/triggers/#three-hook-like-layers-in-ctx","level":2,"title":"Three Hook-like Layers in ctx","text":"

    Triggers are one of three distinct hook-like concepts in ctx. The names are similar but the owners and use cases are not:

    Layer Owned by Where they live When to use ctx trigger You .context/hooks/<type>/*.sh Project-specific automation, any AI tool ctx system hooks ctx itself built-in, wired into tool configs Built-in nudges (you don't author these) Claude Code hooks Claude Code .claude/settings.local.json Claude-Code-only tool-specific integration

    This page is about the first category. The other two run automatically and are invisible to you.

    ","path":["Home","Customization","Lifecycle Triggers"],"tags":[]},{"location":"home/triggers/#triggers-vs-steering-same-problem-different-shape","level":2,"title":"Triggers vs Steering — Same Problem, Different Shape","text":"

    Triggers are the imperative counterpart to steering files. Steering expresses persistent rules the AI reads before each prompt; triggers express side effects that run on lifecycle events. They're complementary, not competing:

    • Want the AI to remember something? → Steering.
    • Want a script to run when something happens? → Trigger.

    Most projects use both.

    ","path":["Home","Customization","Lifecycle Triggers"],"tags":[]},{"location":"home/triggers/#where-to-go-next","level":2,"title":"Where to Go Next","text":"
    • Authoring Lifecycle Triggers — walkthrough with security guidance: scaffold, test, enable, iterate.
    • ctx trigger reference — command reference, trigger type table, input/output contract.
    • Steering files — the declarative counterpart to triggers.
    ","path":["Home","Customization","Lifecycle Triggers"],"tags":[]},{"location":"operations/","level":1,"title":"Operations","text":"

    Guides for installing, upgrading, integrating, and running ctx. Split into three groups by audience.

    ","path":["Operations"],"tags":[]},{"location":"operations/#day-to-day","level":2,"title":"Day-to-Day","text":"

    Everyday operation guides for anyone running ctx in a project or adopting it in a team.

    ","path":["Operations"],"tags":[]},{"location":"operations/#integration","level":3,"title":"Integration","text":"

    Adopt ctx in an existing project: initialize context files, migrate from other tools, and onboard team members.

    ","path":["Operations"],"tags":[]},{"location":"operations/#upgrade","level":3,"title":"Upgrade","text":"

    Upgrade between versions with step-by-step migration notes and breaking-change guidance.

    ","path":["Operations"],"tags":[]},{"location":"operations/#ai-tools","level":3,"title":"AI Tools","text":"

    Configure ctx with Claude Code, Cursor, Aider, Copilot, Windsurf, and other AI coding tools.

    ","path":["Operations"],"tags":[]},{"location":"operations/#autonomous-loops","level":3,"title":"Autonomous Loops","text":"

    Run an unattended AI agent that works through tasks overnight, with ctx providing persistent memory between iterations.

    ","path":["Operations"],"tags":[]},{"location":"operations/#hub","level":2,"title":"Hub","text":"

    Operator guides for running a ctx Hub — the gRPC server that fans out structured entries across projects. If you're a client connecting to a Hub someone else runs, see ctx connect and the Hub recipes instead.

    ","path":["Operations"],"tags":[]},{"location":"operations/#hub-operations","level":3,"title":"Hub Operations","text":"

    Data directory layout, daemon management, systemd unit, backup and restore, log rotation, monitoring, and upgrades.

    ","path":["Operations"],"tags":[]},{"location":"operations/#hub-failure-modes","level":3,"title":"Hub Failure Modes","text":"

    What can go wrong in network, storage, cluster, auth, and clock layers — and what you should do about each one. Includes the short-list table oncall engineers will want bookmarked.

    ","path":["Operations"],"tags":[]},{"location":"operations/#maintainers","level":2,"title":"Maintainers","text":"

    Runbooks for people shipping ctx itself.

    ","path":["Operations"],"tags":[]},{"location":"operations/#cutting-a-release","level":3,"title":"Cutting a Release","text":"

    Step-by-step runbook for maintainers: bump version, generate release notes, run the release script, and verify the result.

    ","path":["Operations"],"tags":[]},{"location":"operations/#runbooks","level":2,"title":"Runbooks","text":"

    Step-by-step procedures you run with your agent. Each runbook includes a prompt to paste into a Claude Code session and guidance on triaging the results.

    Runbook Purpose When to run Release checklist Full pre-release sequence Before every release Plugin release Plugin-specific release steps Plugin changes ship Breaking migration Guide users across breaking changes Releases with renames Hub deployment Set up a ctx Hub end-to-end First-time hub setup New contributor Onboarding: clone to first session New contributors Codebase audit AST audits, magic strings, dead code, doc alignment Before release, quarterly Docs semantic audit Narrative gaps, weak pages, structural problems Before release, after adding pages Sanitize permissions Clean .claude/settings.local.json of over-broad grants After heavy permission granting Architecture exploration Systematic architecture docs across repos New codebase onboarding, reviews

    Recommended cadence:

    • Before every release: release checklist (which includes codebase audit + docs semantic audit)
    • Monthly: sanitize permissions
    • Quarterly: full sweep of all audit runbooks
    ","path":["Operations"],"tags":[]},{"location":"operations/autonomous-loop/","level":1,"title":"Autonomous Loops","text":"","path":["Operations","Day-to-Day","Autonomous Loops"],"tags":[]},{"location":"operations/autonomous-loop/#autonomous-ai-development","level":2,"title":"Autonomous AI Development","text":"

    Iterate until done.

    An autonomous loop is an iterative AI development workflow where an agent works on tasks until completion, without constant human intervention.

    ctx provides the memory that makes this possible:

    • ctx provides the memory: persistent context that survives across iterations
    • The loop provides the automation: continuous execution until done

    Together, they enable fully autonomous AI development where the agent remembers everything across iterations.

    Origin

    This pattern is inspired by Geoffrey Huntley's Ralph Wiggum technique.

    We use generic terminology here so the concepts remain clear regardless of trends.

    ","path":["Operations","Day-to-Day","Autonomous Loops"],"tags":[]},{"location":"operations/autonomous-loop/#how-it-works","level":2,"title":"How It Works","text":"
    graph TD\n    A[Start Loop] --> B[Load .context/loop.md]\n    B --> C[AI reads .context/]\n    C --> D[AI picks task from TASKS.md]\n    D --> E[AI completes task]\n    E --> F[AI updates context files]\n    F --> G[AI commits changes]\n    G --> H{Check signals}\n    H -->|SYSTEM_CONVERGED| I[Done - all tasks complete]\n    H -->|SYSTEM_BLOCKED| J[Done - needs human input]\n    H -->|Continue| B
    1. Loop reads .context/loop.md and invokes AI
    2. AI loads context from .context/
    3. AI picks one task and completes it
    4. AI updates context files (mark task done, add learnings)
    5. AI commits changes
    6. Loop checks for completion signals
    7. Repeat until converged or blocked
    ","path":["Operations","Day-to-Day","Autonomous Loops"],"tags":[]},{"location":"operations/autonomous-loop/#quick-start-shell-while-loop-recommended","level":2,"title":"Quick Start: Shell While Loop (Recommended)","text":"

    The best way to run an autonomous loop is a plain shell script that invokes your AI tool in a fresh process on each iteration. This is \"pure ralph\":

    The only state that carries between iterations is what lives in .context/ and the git history. No context window bleed, no accumulated tokens, no hidden state.

    Create a loop.sh:

    #!/bin/bash\n# loop.sh: an autonomous iteration loop\n\nPROMPT_FILE=\"${1:-.context/loop.md}\"\nMAX_ITERATIONS=\"${2:-10}\"\nOUTPUT_FILE=\"/tmp/loop_output.txt\"\n\nfor i in $(seq 1 $MAX_ITERATIONS); do\n  echo \"=== Iteration $i ===\"\n\n  # Invoke AI with prompt\n  cat \"$PROMPT_FILE\" | claude --print > \"$OUTPUT_FILE\" 2>&1\n\n  # Display output\n  cat \"$OUTPUT_FILE\"\n\n  # Check for completion signals\n  if grep -q \"SYSTEM_CONVERGED\" \"$OUTPUT_FILE\"; then\n    echo \"Loop complete: All tasks done\"\n    break\n  fi\n\n  if grep -q \"SYSTEM_BLOCKED\" \"$OUTPUT_FILE\"; then\n    echo \"Loop blocked: Needs human input\"\n    break\n  fi\n\n  sleep 2\ndone\n

    Make it executable and run:

    chmod +x loop.sh\n./loop.sh\n

    You can also generate this script with ctx loop (see CLI Reference).

    ","path":["Operations","Day-to-Day","Autonomous Loops"],"tags":[]},{"location":"operations/autonomous-loop/#why-do-we-use-a-shell-loop","level":3,"title":"Why Do We Use a Shell Loop?","text":"

    Each iteration starts a fresh AI process with zero context window history. The agent knows only what it reads from .context/ files: Exactly the information you chose to persist.

    This is the core loop principle: memory is explicit, not accidental.

    ","path":["Operations","Day-to-Day","Autonomous Loops"],"tags":[]},{"location":"operations/autonomous-loop/#alternative-claude-codes-built-in-loop","level":2,"title":"Alternative: Claude Code's Built-in Loop","text":"

    Claude Code has built-in loop support:

    # Start autonomous loop\n/loop\n\n# Cancel running loop\n/cancel-loop\n

    This is convenient for quick iterations, but be aware of important caveats:

    This Loop Is Not Pure

    Claude Code's /loop runs all iterations within the same session. This means:

    • State leaks between iterations: The context window accumulates output from every previous iteration. The agent \"remembers\" things it saw earlier (even if they were never persisted to .context/).
    • Token budget degrades: Each iteration adds to the context window, leaving less room for actual work in later iterations.
    • Not ergonomic for long runs: Users report that the built-in loop is less predictable for 10+ iteration runs compared to a shell loop.

    For short explorations (2-5 iterations) or interactive use, /loop works fine. For overnight unattended runs or anything where iteration independence matters, use the shell while loop instead.

    ","path":["Operations","Day-to-Day","Autonomous Loops"],"tags":[]},{"location":"operations/autonomous-loop/#the-contextloopmd-file","level":2,"title":"The .context/loop.md File","text":"

    The prompt file instructs the AI on how to work autonomously. Here's a template:

    # Autonomous Development Prompt\n\nYou are working on this project autonomously. Follow these steps:\n\n## 1. Load Context\n\nRead these files in order:\n\n1. `.context/CONSTITUTION.md`: NEVER violate these rules\n2. `.context/TASKS.md`: Find work to do\n3. `.context/CONVENTIONS.md`: Follow these patterns\n4. `.context/DECISIONS.md`: Understand past choices\n\n## 2. Pick One Task\n\nFrom `.context/TASKS.md`, select ONE task that is:\n\n- Not blocked\n- Highest priority available\n- Within your capabilities\n\n## 3. Complete the Task\n\n- Write code following conventions\n- Run tests if applicable\n- Keep changes focused and minimal\n\n## 4. Update Context\n\nAfter completing work:\n\n- Mark task complete in `TASKS.md`\n- Add any learnings to `LEARNINGS.md`\n- Add any decisions to `DECISIONS.md`\n\n## 5. Commit Changes\n\nCreate a focused commit with clear message.\n\n## 6. Signal Status\n\nEnd your response with exactly ONE of:\n\n- `SYSTEM_CONVERGED`: All tasks in TASKS.md are complete\n- `SYSTEM_BLOCKED`: Cannot proceed, need human input (explain why)\n- (no signal): More work remains, continue to next iteration\n\n## Rules\n\n- ONE task per iteration\n- NEVER skip tests\n- NEVER violate CONSTITUTION.md\n- Commit after each task\n
    ","path":["Operations","Day-to-Day","Autonomous Loops"],"tags":[]},{"location":"operations/autonomous-loop/#completion-signals","level":2,"title":"Completion Signals","text":"

    The loop watches for these signals in AI output:

    Signal Meaning When to Use SYSTEM_CONVERGED All tasks complete No pending tasks in TASKS.md SYSTEM_BLOCKED Cannot proceed Needs clarification, access, or decision BOOTSTRAP_COMPLETE Initial setup done Project scaffolding finished","path":["Operations","Day-to-Day","Autonomous Loops"],"tags":[]},{"location":"operations/autonomous-loop/#example-usage","level":3,"title":"Example Usage","text":"

    converged state

    I've completed all tasks in TASKS.md:\n- [x] Set up project structure\n- [x] Implement core API\n- [x] Add authentication\n- [x] Write tests\n\nNo pending tasks remain.\n\nSYSTEM_CONVERGED\n

    blocked state

    I cannot proceed with the \"Deploy to production\" task because:\n- Missing AWS credentials\n- Need confirmation on region selection\n\nPlease provide credentials and confirm deployment region.\n\nSYSTEM_BLOCKED\n
    ","path":["Operations","Day-to-Day","Autonomous Loops"],"tags":[]},{"location":"operations/autonomous-loop/#why-ctx-and-loops-work-well-together","level":2,"title":"Why ctx and Loops Work Well Together","text":"Without ctx With ctx Each iteration starts fresh Each iteration has full history Decisions get re-made Decisions persist in DECISIONS.md Learnings are lost Learnings accumulate in LEARNINGS.md Tasks can be forgotten Tasks tracked in TASKS.md","path":["Operations","Day-to-Day","Autonomous Loops"],"tags":[]},{"location":"operations/autonomous-loop/#automatic-context-updates","level":3,"title":"Automatic Context Updates","text":"

    During the loop, the AI should update context files:

    Mark task complete:

    ctx task complete \"implement user auth\"\n

    Or emit an update command (parsed by ctx watch):

    <context-update type=\"complete\">user auth</context-update>\n

    Add learning:

    ctx add learning \"Rate limiting requires Redis connection\" \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n

    Or via update command:

    <context-update type=\"learning\"\n  context=\"Implementing rate limiter\"\n  lesson=\"Rate limiting requires Redis connection\"\n  application=\"Ensure Redis is provisioned before enabling rate limits\"\n>Rate Limiting Redis Dependency</context-update>\n

    Record decision:

    ctx add decision \"Use JWT tokens for API authentication\" \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n

    ","path":["Operations","Day-to-Day","Autonomous Loops"],"tags":[]},{"location":"operations/autonomous-loop/#advanced-watch-mode","level":2,"title":"Advanced: Watch Mode","text":"

    Run ctx watch alongside the loop to automatically process context updates:

    # Terminal 1: Run the loop\n./loop.sh 2>&1 | tee /tmp/loop.log\n\n# Terminal 2: Watch for context updates\nctx watch --log /tmp/loop.log\n

    The watch command processes context updates from the loop output in real time.

    ","path":["Operations","Day-to-Day","Autonomous Loops"],"tags":[]},{"location":"operations/autonomous-loop/#project-setup","level":2,"title":"Project Setup","text":"

    Initialize a project for autonomous loop operation:

    ctx init\n

    The loop prompt template is deployed to .context/loop.md during initialization. It instructs the agent to:

    • Work autonomously without asking clarifying questions;
    • Follow one-task-per-iteration discipline;
    • Use SYSTEM_CONVERGED / SYSTEM_BLOCKED signals;
    ","path":["Operations","Day-to-Day","Autonomous Loops"],"tags":[]},{"location":"operations/autonomous-loop/#example-project-structure","level":2,"title":"Example Project Structure","text":"
    my-project/\n├── .context/\n│   ├── CONSTITUTION.md\n│   ├── TASKS.md          # Work items for the loop\n│   ├── DECISIONS.md\n│   ├── LEARNINGS.md\n│   ├── CONVENTIONS.md\n│   └── sessions/         # Loop iteration history\n├── loop.sh               # Loop script (if not using Claude Code)\n└── src/                  # Your code\n
    ","path":["Operations","Day-to-Day","Autonomous Loops"],"tags":[]},{"location":"operations/autonomous-loop/#sample-tasksmd-for-autonomous-loops","level":3,"title":"Sample TASKS.md for Autonomous Loops","text":"
    # Tasks\n\n## Phase 1: Setup\n\n- [x] Initialize project structure\n- [x] Set up testing framework\n\n## Phase 2: Core Features\n\n- [ ] Implement user registration `#priority:high`\n- [ ] Add email verification `#priority:high`\n- [ ] Create password reset flow `#priority:medium`\n\n## Phase 3: Polish\n\n- [ ] Add rate limiting `#priority:medium`\n- [ ] Improve error messages `#priority:low`\n

    The loop will work through these systematically, marking each complete.

    ","path":["Operations","Day-to-Day","Autonomous Loops"],"tags":[]},{"location":"operations/autonomous-loop/#troubleshooting","level":2,"title":"Troubleshooting","text":"","path":["Operations","Day-to-Day","Autonomous Loops"],"tags":[]},{"location":"operations/autonomous-loop/#loop-runs-forever","level":3,"title":"Loop Runs Forever","text":"

    Cause: AI not emitting completion signals

    Fix: Ensure .context/loop.md explicitly instructs signaling:

    End EVERY response with one of:\n- SYSTEM_CONVERGED (if all tasks done)\n- SYSTEM_BLOCKED (if stuck)\n

    ","path":["Operations","Day-to-Day","Autonomous Loops"],"tags":[]},{"location":"operations/autonomous-loop/#context-not-persisting","level":3,"title":"Context Not Persisting","text":"

    Cause: AI not updating context files

    Fix: Add explicit instructions to .context/loop.md:

    After completing a task, you MUST:\n1. Run: ctx task complete \"<task>\"\n2. Add learnings: ctx add learning \"...\" --session-id abc12345 --branch main --commit 68fbc00a\n

    ","path":["Operations","Day-to-Day","Autonomous Loops"],"tags":[]},{"location":"operations/autonomous-loop/#tasks-getting-repeated","level":3,"title":"Tasks Getting Repeated","text":"

    Cause: Task not marked complete before next iteration

    Fix: Ensure commit happens after context update:

    Order of operations:\n1. Complete coding work\n2. Update context files (*`ctx task complete`, `ctx add`*)\n3. Commit **ALL** changes including `.context/`\n4. Then signal status\n
    ","path":["Operations","Day-to-Day","Autonomous Loops"],"tags":[]},{"location":"operations/autonomous-loop/#ai-violating-constitution","level":3,"title":"AI Violating Constitution","text":"

    Cause: Constitution not read first

    Fix: Make constitution check explicit in .context/loop.md:

    BEFORE any work:\n1. Read .context/CONSTITUTION.md\n2. If task would violate ANY rule, emit SYSTEM_BLOCKED\n3. Explain which rule prevents the work\n
    ","path":["Operations","Day-to-Day","Autonomous Loops"],"tags":[]},{"location":"operations/autonomous-loop/#further-reading","level":2,"title":"Further Reading","text":"
    • Building ctx Using ctx: The dogfooding story: how autonomous loops built the tool that powers them
    ","path":["Operations","Day-to-Day","Autonomous Loops"],"tags":[]},{"location":"operations/autonomous-loop/#resources","level":2,"title":"Resources","text":"
    • Geoffrey Huntley's Ralph Wiggum Technique: The original inspiration
    • Context CLI: Command reference
    • Integrations: Tool-specific setup
    ","path":["Operations","Day-to-Day","Autonomous Loops"],"tags":[]},{"location":"operations/hub-failure-modes/","level":1,"title":"Hub Failure Modes","text":"","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub-failure-modes/#ctx-hub-failure-modes","level":1,"title":"ctx Hub: Failure Modes","text":"

    What can go wrong, what the system does about it, and what you should do. Complementary to ctx Hub Operations.

    Design Posture

    The hub is best-effort knowledge sharing, not a durable ledger. Local .context/ files are the source of truth for each project; the hub is a fan-out channel. This framing informs every failure-mode decision below.

    ","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub-failure-modes/#network","level":2,"title":"Network","text":"","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub-failure-modes/#client-loses-connection-mid-stream","level":3,"title":"Client Loses Connection Mid-Stream","text":"

    What happens: ctx connection listen detects the EOF, waits with exponential backoff, and reconnects. On reconnect it passes its last-seen sequence; the hub replays everything newer.

    What you should do: nothing. If reconnects are looping, check firewall state on the hub and ctx hub status output.

    ","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub-failure-modes/#partition-majority-side-reachable","level":3,"title":"Partition — Majority Side Reachable","text":"

    What happens: clients routed to the majority side continue to publish and listen. The minority nodes step down to followers that cannot accept writes (Raft quorum lost).

    What you should do: let it heal. When the partition closes, followers catch up via sequence-based sync automatically.

    ","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub-failure-modes/#partition-split-brain-no-quorum","level":3,"title":"Partition — Split Brain (No Quorum)","text":"

    What happens: no node holds a majority, so no leader is elected. All nodes become read-only. ctx connection publish and ctx add --share fail with a \"no leader\" error; local writes still succeed.

    What you should do: fix the network. If the partition is permanent (e.g., a data center is gone), bootstrap a new cluster from the survivors with ctx hub peer remove for the dead nodes.

    ","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub-failure-modes/#hub-unreachable-during-ctx-add-share","level":3,"title":"Hub Unreachable during ctx add --share","text":"

    What happens: the local write succeeds; the share step prints a warning and exits non-zero on the share leg only. --share is best-effort; it never blocks local context updates.

    What you should do: run ctx connection publish later to backfill, or rely on another --share for the same entry ID. The hub deduplicates by entry ID.

    ","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub-failure-modes/#storage","level":2,"title":"Storage","text":"","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub-failure-modes/#disk-full-on-the-leader","level":3,"title":"Disk Full on the Leader","text":"

    What happens: entries.jsonl append fails. The hub rejects writes with an error and stays up for read traffic. Clients retry; followers keep their in-sync status using whatever the leader already wrote.

    What you should do: free disk or grow the volume, then nothing else — the hub resumes accepting writes on the next append attempt.

    ","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub-failure-modes/#corrupt-entriesjsonl","level":3,"title":"Corrupt entries.jsonl","text":"

    What happens: if the last line is a partial JSON write from a crash, the hub truncates it on startup and logs a warning. If any earlier line is malformed, the hub refuses to start.

    What you should do: inspect with jq -c . <data-dir>/entries.jsonl > /dev/null to find the bad line. Move the bad region to a .quarantine file, then start. Nothing is ever silently dropped.

    ","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub-failure-modes/#metajson-entriesjsonl-sequence-mismatch","level":3,"title":"meta.json / entries.jsonl Sequence Mismatch","text":"

    What happens: the hub refuses to start. This usually means someone copied one file without the other.

    What you should do: restore both files from the same backup, or accept the higher sequence by regenerating meta.json from entries.jsonl (manual for now — file a bug).

    ","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub-failure-modes/#cluster","level":2,"title":"Cluster","text":"","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub-failure-modes/#leader-crash-clean-shutdown","level":3,"title":"Leader Crash, Clean Shutdown","text":"

    What happens: ctx hub stop triggers stepdown first, so a new leader is elected before the old one exits. In-flight writes drain. Clients reconnect to the new leader transparently.

    ","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub-failure-modes/#leader-crash-hard-fail-kill-9-power-loss","level":3,"title":"Leader Crash, Hard Fail (Kill -9, Power Loss)","text":"

    What happens: Raft detects the missing heartbeat and elects a new leader within a few seconds. Writes the old leader accepted but had not yet replicated can be lost — see the Raft-lite warning in the cluster recipe.

    What you should do: if you need stronger durability, run ctx connection listen on a dedicated \"collector\" project that persists entries locally as a write-ahead backup.

    ","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub-failure-modes/#split-brain-after-rejoin","level":3,"title":"Split-Brain After Rejoin","text":"

    What happens: Raft reconciles: the minority side's uncommitted writes are discarded, and the majority's log is authoritative.

    What you should do: nothing automatic. If you know the minority had important writes, grep for them in <data-dir>/entries.jsonl.rejected (written by the reconciliation pass) and replay them with ctx connection publish.

    ","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub-failure-modes/#auth-and-tokens","level":2,"title":"Auth and Tokens","text":"","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub-failure-modes/#lost-admin-token","level":3,"title":"Lost Admin Token","text":"

    What happens: you cannot register new projects.

    What you should do: retrieve it from <data-dir>/admin.token. If that file is also gone, stop the hub and regenerate — note that all existing client tokens keep working; only new registrations need the admin token.

    ","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub-failure-modes/#compromised-admin-token","level":3,"title":"Compromised Admin Token","text":"

    What happens: anyone with the token can register new projects and publish. They cannot read existing entries without a client token for a project that subscribes.

    What you should do: rotate the admin token (regenerate <data-dir>/admin.token and restart), revoke suspicious client registrations via clients.json, and audit entries.jsonl for unexpected origins.

    ","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub-failure-modes/#compromised-client-token","level":3,"title":"Compromised Client Token","text":"

    What happens: the attacker can publish as that project and read anything that project is subscribed to. Because Origin is self-asserted on publish, the attacker can also publish entries tagged with any other project's name, so attribution in entries.jsonl cannot be trusted after a token compromise.

    What you should do: remove the client's entry from clients.json, restart the hub, and re-register the legitimate project with a fresh token. Audit entries.jsonl for entries published after the compromise timestamp and quarantine any that look suspicious — remember that Origin on those entries proves nothing.

    ","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub-failure-modes/#compromised-hub-host","level":3,"title":"Compromised Hub Host","text":"

    What happens: <data-dir>/clients.json stores client tokens verbatim (not hashed). Anyone with read access to that file has every client token in hand and can impersonate any registered project until each one is rotated.

    What you should do: treat it as a total hub compromise. Stop the hub, wipe <data-dir> (keep a forensic copy first), regenerate the admin token, and have every client re-register. See Security model for the mitigations that reduce the blast radius while the hashing follow-up is pending.

    ","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub-failure-modes/#clock-skew","level":2,"title":"Clock Skew","text":"

    Hub entries carry a timestamp assigned by the publishing client. The hub does not rewrite timestamps. Clients with significant clock skew will publish entries that look out of order in the shared feed.

    What you should do: run NTP on all client machines. If you see entries dated in the future or far past, the publisher's clock is the culprit.

    ","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub-failure-modes/#the-short-list","level":2,"title":"The Short List","text":"Symptom First thing to check Client can't reach hub Firewall, then ctx hub status \"No leader\" errors Cluster quorum — run ctx hub status on each peer Hub won't start after crash Last line of entries.jsonl Entries missing after restore Check clients.json sequence vs local .sync-state.json Duplicate entries in shared feed Client replayed after restore — safe, dedup by ID Followers lagging Disk or network on the follower, not the leader","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub-failure-modes/#see-also","level":2,"title":"See Also","text":"
    • ctx Hub Operations
    • ctx Hub security model
    • HA cluster recipe
    ","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub/","level":1,"title":"Hub Operations","text":"","path":["Operations","Hub","Hub Operations"],"tags":[]},{"location":"operations/hub/#ctx-hub-operations","level":1,"title":"ctx Hub: Operations","text":"

    Running the ctx Hub in production. This page is for operators — people running a hub for themselves or a team, not people writing to a hub someone else is running.

    If you have not read it yet, start with the ctx Hub overview. It explains what the hub is, the two user stories it supports (personal cross-project brain vs small trusted team), and what it does not do. A client-side tour is in Getting Started.

    Operator Cheat Sheet

    • The hub fans out four entry types only: decision, learning, convention, task. Journals, scratchpad, and other local state are out of scope.
    • Identity is per-project, not per-user. Attribution is limited to Origin, which is self-asserted by the publishing client.
    • The data model is an append-only JSONL log plus two small JSON sidecar files. Nothing is rewritten in place.
    ","path":["Operations","Hub","Hub Operations"],"tags":[]},{"location":"operations/hub/#data-directory-layout","level":2,"title":"Data Directory Layout","text":"

    The hub stores everything under a single data directory (default ~/.ctx/hub-data/, override with --data-dir).

    <data-dir>/\n  admin.token        # Initial admin token (chmod 600)\n  clients.json       # Registered client tokens and project names\n  meta.json          # Sequence counter, version, cluster metadata\n  entries.jsonl      # Append-only log (single source of truth)\n  hub.pid            # Daemon PID file (daemon mode only)\n  raft/              # Raft state (cluster mode only)\n    log.db\n    stable.db\n    snapshots/\n

    Invariants:

    • entries.jsonl is append-only. Every line is a valid JSON object. A partial final line left by a crash is truncated automatically at startup; any other corrupt line is fatal: fix or truncate before restart.
    • meta.json is authoritative for the next sequence number. On restart, the hub reads the last valid line of entries.jsonl and refuses to start if the sequences disagree.
    • clients.json holds client tokens stored verbatim (hashing is a pending follow-up — see the security model); losing it invalidates all client registrations.
    ","path":["Operations","Hub","Hub Operations"],"tags":[]},{"location":"operations/hub/#starting-and-stopping","level":2,"title":"Starting and Stopping","text":"ForegroundDaemon
    ctx hub start                    # Ctrl-C to stop\nctx hub start --port 8080        # Custom port\nctx hub start --data-dir /srv/ctx-hub\n
    ctx hub start --daemon           # Fork to background\nctx hub stop                      # Graceful shutdown\n

    ctx hub stop sends SIGTERM to the PID in hub.pid, waits for in-flight RPCs to drain, then exits. If the daemon is wedged, remove hub.pid and send SIGKILL manually — entries.jsonl is crash-safe, so you will not lose accepted writes.

    ","path":["Operations","Hub","Hub Operations"],"tags":[]},{"location":"operations/hub/#systemd-unit","level":2,"title":"Systemd Unit","text":"

    For production single-node deployments, run the hub as a systemd service instead of --daemon:

    # /etc/systemd/system/ctx-hub.service\n[Unit]\nDescription=ctx `ctx` Hub\nAfter=network-online.target\nWants=network-online.target\n\n[Service]\nType=simple\nUser=ctx\nGroup=ctx\nExecStart=/usr/local/bin/ctx hub start --port 9900 \\\n    --data-dir /var/lib/ctx-hub\nRestart=on-failure\nRestartSec=5\nNoNewPrivileges=true\nProtectSystem=strict\nProtectHome=true\nReadWritePaths=/var/lib/ctx-hub\nPrivateTmp=true\n\n[Install]\nWantedBy=multi-user.target\n
    sudo systemctl enable --now ctx-hub\nsudo journalctl -u ctx-hub -f\n
    ","path":["Operations","Hub","Hub Operations"],"tags":[]},{"location":"operations/hub/#backup-and-restore","level":2,"title":"Backup and Restore","text":"

    Because entries.jsonl is append-only, backups are trivial:

    # Hot backup — safe while the hub is running.\ncp <data-dir>/entries.jsonl backups/entries-$(date +%F).jsonl\ncp <data-dir>/meta.json      backups/meta-$(date +%F).json\ncp <data-dir>/clients.json   backups/clients-$(date +%F).json\n

    For a consistent snapshot across all three files, stop the hub, copy, then start again — or use a filesystem-level snapshot (LVM, ZFS, Btrfs).

    Restore:

    ctx hub stop                           # Stop the hub\ncp backups/entries-2026-04-10.jsonl <data-dir>/entries.jsonl\ncp backups/meta-2026-04-10.json      <data-dir>/meta.json\ncp backups/clients-2026-04-10.json   <data-dir>/clients.json\nctx hub start --daemon\n

    Clients that pushed sequences above the restored watermark will re-publish on the next listen reconnect, because the hub now reports a lower sequence than what clients have on disk. This is safe — the store deduplicates by entry ID.

    ","path":["Operations","Hub","Hub Operations"],"tags":[]},{"location":"operations/hub/#log-rotation","level":2,"title":"Log Rotation","text":"

    entries.jsonl grows unbounded. For long-lived hubs, rotate it offline:

    ctx hub stop\nmv <data-dir>/entries.jsonl <data-dir>/entries-$(date +%F).jsonl.old\n# Replay the last N days into a fresh entries.jsonl if you want a\n# trimmed active log, or leave the old file in place as history.\nctx hub start --daemon\n

    Do not truncate entries.jsonl while the hub is running. The hub holds an open file handle; an in-place truncation confuses the sequence counter and loses writes.

    ","path":["Operations","Hub","Hub Operations"],"tags":[]},{"location":"operations/hub/#monitoring","level":2,"title":"Monitoring","text":"

    Liveness probe:

    ctx hub status --exit-code\n

    Exit code 0 means the node is healthy (leader or in-sync follower); non-zero means degraded. Wire this into your monitoring of choice.

    For cluster deployments, watch for:

    • Role flaps — the leader changing more than once per hour suggests network instability or disk contention.
    • Replication lag — ctx hub status shows per-peer sequence offsets. Sustained lag > 100 sequences on a follower is worth investigating.
    • entries.jsonl growth rate — sudden spikes often indicate a misbehaving ctx connection listen reconnect loop.
    ","path":["Operations","Hub","Hub Operations"],"tags":[]},{"location":"operations/hub/#upgrading","level":2,"title":"Upgrading","text":"

    The JSONL format is versioned in meta.json. ctx refuses to start against a newer store version than it understands; older store versions are upgraded in place at first start after an upgrade.

    Always back up <data-dir>/ before upgrading.

    ","path":["Operations","Hub","Hub Operations"],"tags":[]},{"location":"operations/hub/#see-also","level":2,"title":"See Also","text":"
    • ctx Hub failure modes
    • ctx Hub security model
    • ctx serve reference
    • ctx hub reference
    ","path":["Operations","Hub","Hub Operations"],"tags":[]},{"location":"operations/integrations/","level":1,"title":"AI Tools","text":"","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#ai-tools","level":2,"title":"AI Tools","text":"

    Context works with any AI tool that can read files. This guide covers setup for popular AI coding assistants.

    ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#claude-code-full-integration","level":2,"title":"Claude Code (Full Integration)","text":"

    Claude Code has the deepest integration via the ctx plugin.

    ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#setup","level":3,"title":"Setup","text":"

    First, install ctx and initialize your project:

    ctx init\n

    Then, install the ctx plugin in Claude Code:

    # From the ctx repository\nclaude /plugin install ./internal/assets/claude\n\n# Or from the marketplace\nclaude /plugin marketplace add ActiveMemory/ctx\nclaude /plugin install ctx@activememory-ctx\n

    Ensure the Plugin Is Enabled

    Installing a plugin registers it, but local installs may not auto-enable it globally. Verify ~/.claude/settings.json contains:

    { \"enabledPlugins\": { \"ctx@activememory-ctx\": true } }\n

    Without this, the plugin's hooks and skills won't appear in other projects. Running ctx init auto-enables the plugin; use --no-plugin-enable to skip this step.

    This gives you:

    Component Purpose .context/ All context files CLAUDE.md Bootstrap instructions Plugin hooks Lifecycle automation Plugin skills Agent Skills","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#how-it-works","level":3,"title":"How It Works","text":"
    graph TD\n    A[Session Start] --> B[Claude reads CLAUDE.md]\n    B --> C[PreToolUse hook runs]\n    C --> D[ctx agent loads context]\n    D --> E[Work happens]\n    E --> F[Session End]
    1. Session start: Claude reads CLAUDE.md, which tells it to check .context/
    2. First tool use: PreToolUse hook runs ctx agent and emits the context packet (subsequent invocations within the cooldown window are silent)
    3. Next session: Claude reads context files and continues with context
    ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#plugin-hooks","level":3,"title":"Plugin Hooks","text":"

    The ctx plugin provides lifecycle hooks implemented as Go subcommands (ctx system *):

    Hook Event Purpose ctx system context-load-gate PreToolUse (.*) Auto-inject context on first tool use ctx system block-non-path-ctx PreToolUse (Bash) Block ./ctx or go run: force $PATH install ctx system qa-reminder PreToolUse (Bash) Remind agent to lint/test before committing ctx system specs-nudge PreToolUse (EnterPlanMode) Nudge agent to use project specs when planning ctx system check-context-size UserPromptSubmit Nudge context assessment as sessions grow ctx system check-ceremonies UserPromptSubmit Nudge /ctx-remember and /ctx-wrap-up adoption ctx system check-persistence UserPromptSubmit Remind to persist learnings/decisions ctx system check-journal UserPromptSubmit Remind to export/enrich journal entries ctx system check-reminders UserPromptSubmit Relay pending reminders at session start ctx system check-version UserPromptSubmit Warn when binary/plugin versions diverge ctx system check-resources UserPromptSubmit Warn when memory/swap/disk/load hit DANGER level ctx system check-knowledge UserPromptSubmit Nudge when knowledge files grow large ctx system check-map-staleness UserPromptSubmit Nudge when ARCHITECTURE.md is stale ctx system heartbeat UserPromptSubmit Session-alive signal with prompt count metadata ctx system post-commit PostToolUse (Bash) Nudge context capture and QA after git commits

    A catch-all PreToolUse hook also runs ctx agent on every tool use (with cooldown) to autoload context.

    ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#hook-configuration","level":3,"title":"Hook Configuration","text":"

    The plugin's hooks.json wires everything automatically: no manual configuration in settings.local.json needed:

    {\n  \"hooks\": {\n    \"PreToolUse\": [\n      {\n        \"matcher\": \".*\",\n        \"hooks\": [\n          { \"type\": \"command\", \"command\": \"ctx system context-load-gate\" }\n        ]\n      },\n      {\n        \"matcher\": \"Bash\",\n        \"hooks\": [\n          { \"type\": \"command\", \"command\": \"ctx system block-non-path-ctx\" }\n        ]\n      },\n      {\n        \"matcher\": \"Bash\",\n        \"hooks\": [\n          { \"type\": \"command\", \"command\": \"ctx system qa-reminder\" }\n        ]\n      },\n      {\n        \"matcher\": \"EnterPlanMode\",\n        \"hooks\": [\n          { \"type\": \"command\", \"command\": \"ctx system specs-nudge\" }\n        ]\n      },\n      {\n        \"matcher\": \".*\",\n        \"hooks\": [\n          { \"type\": \"command\", \"command\": \"ctx agent --budget 4000 2>/dev/null || true\" }\n        ]\n      }\n    ],\n    \"PostToolUse\": [\n      {\n        \"matcher\": \"Bash\",\n        \"hooks\": [\n          { \"type\": \"command\", \"command\": \"ctx system post-commit\" }\n        ]\n      }\n    ],\n    \"UserPromptSubmit\": [\n      {\n        \"hooks\": [\n          { \"type\": \"command\", \"command\": \"ctx system check-context-size\" },\n          { \"type\": \"command\", \"command\": \"ctx system check-ceremonies\" },\n          { \"type\": \"command\", \"command\": \"ctx system check-persistence\" },\n          { \"type\": \"command\", \"command\": \"ctx system check-journal\" },\n          { \"type\": \"command\", \"command\": \"ctx system check-reminders\" },\n          { \"type\": \"command\", \"command\": \"ctx system check-version\" },\n          { \"type\": \"command\", \"command\": \"ctx system check-resources\" },\n          { \"type\": \"command\", \"command\": \"ctx system check-knowledge\" },\n          { \"type\": \"command\", \"command\": \"ctx system check-map-staleness\" },\n          { \"type\": \"command\", \"command\": \"ctx system heartbeat\" }\n        ]\n     
 }\n    ]\n  }\n}\n
    ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#customizing-token-budget-and-cooldown","level":3,"title":"Customizing Token Budget and Cooldown","text":"

    Edit the PreToolUse command to change the token budget or cooldown:

    \"command\": \"ctx agent --budget 8000 --session $PPID >/dev/null || true\"\n\"command\": \"ctx agent --budget 4000 --cooldown 5m --session $PPID >/dev/null || true\"\n

    The --session $PPID flag isolates the cooldown per session: $PPID resolves to the Claude Code process PID, so concurrent sessions don't interfere. The default cooldown is 10 minutes; use --cooldown 0 to disable it.

    ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#verifying-setup","level":3,"title":"Verifying Setup","text":"
    1. Start a new Claude Code session;
    2. Ask: \"Do you remember?\"
    3. Claude should cite specific context:
      • Current tasks from .context/TASKS.md;
      • Recent decisions or learnings;
      • Recent session history from ctx journal.
    ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#local-plugin-development","level":3,"title":"Local Plugin Development","text":"

    When developing ctx locally (adding skills, hooks, or changing plugin behavior), Claude Code caches the plugin by version. You must bump the version in both files and update the marketplace for changes to take effect:

    1. Bump version in both:
    2. internal/assets/claude/.claude-plugin/plugin.json (plugin manifest), .claude-plugin/marketplace.json (marketplace listing);

    3. Update the marketplace in Claude Code:

    4. Open the Plugins UI (/plugins or Esc menu),
    5. Go to Marketplaces tab,
    6. Select the activememory-ctx Marketplace,
    7. Choose Update marketplace;

    8. Start a new Claude Code session: skill changes aren't reflected in existing sessions.

    Both Version Files Must Match

    If you only bump plugin.json but not marketplace.json (or vice versa), Claude Code may not detect the update. Always bump both together.

    ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#troubleshooting","level":3,"title":"Troubleshooting","text":"Issue Solution Context not loading Check ctx is in PATH: which ctx Hook errors Verify plugin is installed: claude /plugin list New skill not visible Bump version in both plugin.json files, update marketplace","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#manual-context-load","level":3,"title":"Manual Context Load","text":"

    If hooks aren't working, manually load context:

    # Get context packet\nctx agent --budget 4000\n\n# Or paste into conversation\ncat .context/TASKS.md\n
    ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#agent-skills","level":3,"title":"Agent Skills","text":"

    The ctx plugin ships Agent Skills following the agentskills.io specification.

    These are invoked in Claude Code with /skill-name.

    ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#session-lifecycle-skills","level":4,"title":"Session Lifecycle Skills","text":"Skill Description /ctx-remember Recall project context at session start (ceremony) /ctx-wrap-up End-of-session context persistence (ceremony) /ctx-status Show context summary (tasks, decisions, learnings) /ctx-agent Get AI-optimized context packet /ctx-next Suggest 1-3 concrete next actions from context /ctx-commit Commit with integrated context capture /ctx-reflect Review session and suggest what to persist /ctx-remind Manage session-scoped reminders /ctx-pause Pause context hooks for this session /ctx-resume Resume context hooks after a pause","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#context-persistence-skills","level":4,"title":"Context Persistence Skills","text":"Skill Description /ctx-task-add Add a task to TASKS.md /ctx-learning-add Add a learning to LEARNINGS.md /ctx-decision-add Add a decision with context/rationale/consequence /ctx-convention-add Add a coding convention to CONVENTIONS.md /ctx-archive Archive completed tasks","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#scratchpad-skills","level":4,"title":"Scratchpad Skills","text":"Skill Description /ctx-pad Manage encrypted scratchpad entries","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#session-history-skills","level":4,"title":"Session History Skills","text":"Skill Description /ctx-history Browse AI session history /ctx-journal-enrich Enrich a journal entry with frontmatter/tags /ctx-journal-enrich-all Full journal pipeline: export if needed, then batch-enrich","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#blogging-skills","level":4,"title":"Blogging Skills","text":"

    Blogging Is a Better Way of Creating Release Notes

    The blogging workflow can also double as generating release notes:

    AI reads your git commit history and creates a \"narrative\", which is essentially what a release note is.

    Skill Description /ctx-blog Generate blog post from recent activity /ctx-blog-changelog Generate blog post from commit range with theme","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#auditing-health-skills","level":4,"title":"Auditing & Health Skills","text":"Skill Description /ctx-doctor Troubleshoot ctx behavior with structural health checks /ctx-drift Detect and fix context drift (structural + semantic) /ctx-consolidate Merge redundant learnings or decisions into denser entries /ctx-alignment-audit Audit doc claims against playbook instructions /ctx-prompt-audit Analyze session logs for vague prompts /check-links Audit docs for dead internal and external links","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#planning-execution-skills","level":4,"title":"Planning & Execution Skills","text":"Skill Description /ctx-loop Generate a Ralph Loop iteration script /ctx-implement Execute a plan step-by-step with checks /ctx-plan-import Import Claude Code plan files into project specs /ctx-worktree Manage git worktrees for parallel agents /ctx-architecture Build and maintain architecture maps","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#usage-examples","level":4,"title":"Usage Examples","text":"
    /ctx-status\n/ctx-learning-add \"Token refresh requires explicit cache invalidation\"\n/ctx-journal-enrich twinkly-stirring-kettle\n

    Skills support partial matching where applicable (e.g., session slugs).

    ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#cursor-ide","level":2,"title":"Cursor IDE","text":"

    Cursor can use context files through its system prompt or by reading files directly.

    ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#setup_1","level":3,"title":"Setup","text":"
    # Generate Cursor configuration\nctx setup cursor\n\n# Initialize context\nctx init --minimal\n
    ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#configuration","level":3,"title":"Configuration","text":"

    Add to Cursor settings (.cursor/settings.json):

    // split to multiple lines for readability\n{\n  \"ai.systemPrompt\": \"Read .context/TASKS.md and \n  .context/CONVENTIONS.md before responding. \n  Follow rules in .context/CONSTITUTION.md.\",\n}\n
    ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#usage","level":3,"title":"Usage","text":"
    1. Open your project in Cursor
    2. Context files are available in the file tree
    3. Reference them in prompts: \"Check .context/DECISIONS.md for our approach to...\"
    ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#manual-context-injection","level":3,"title":"Manual Context Injection","text":"

    For more control, paste context directly:

    # Get AI-ready packet\nctx agent --budget 4000 | pbcopy  # macOS\nctx agent --budget 4000 | xclip  # Linux\n

    Paste into Cursor's chat.

    ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#aider","level":2,"title":"Aider","text":"

    Aider works well with context files through its --read flag.

    ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#setup_2","level":3,"title":"Setup","text":"
    # Generate Aider configuration\nctx setup aider\n\n# Initialize context\nctx init\n
    ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#configuration_1","level":3,"title":"Configuration","text":"

    Create .aider.conf.yml:

    read:\n  - .context/CONSTITUTION.md\n  - .context/TASKS.md\n  - .context/CONVENTIONS.md\n  - .context/DECISIONS.md\n
    ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#usage_1","level":3,"title":"Usage","text":"
    # Start Aider (reads context files automatically)\naider\n\n# Or specify files explicitly\naider --read .context/TASKS.md --read .context/CONVENTIONS.md\n
    ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#with-watch-mode","level":3,"title":"With Watch Mode","text":"

    Run ctx watch alongside Aider to capture context updates:

    # Terminal 1: Run Aider\naider 2>&1 | tee /tmp/aider.log\n\n# Terminal 2: Watch for context updates\nctx watch --log /tmp/aider.log\n
    ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#github-copilot","level":2,"title":"GitHub Copilot","text":"

    GitHub Copilot integrates with ctx at three levels: an automated instructions file, a VS Code Chat extension, and manual patterns.

    ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#setup_3","level":3,"title":"Setup","text":"
    # Initialize context\nctx init\n\n# Generate .github/copilot-instructions.md\nctx setup copilot --write\n

    The --write flag creates .github/copilot-instructions.md, which Copilot reads automatically at the start of every session. This file contains your project's constitution rules, current tasks, conventions, and architecture: giving Copilot persistent context without manual copy-paste.

    Re-run ctx setup copilot --write after updating your .context/ files to regenerate the instructions.

    ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#vs-code-chat-extension-ctx","level":3,"title":"VS Code Chat Extension (@ctx)","text":"

    The ctx VS Code extension adds a @ctx chat participant to GitHub Copilot Chat, giving you direct access to all context commands from within the editor.

    ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#installation","level":4,"title":"Installation","text":"
    1. Build from source (requires Node.js 18+):
    cd editors/vscode\nnpm install\nnpm run build\nnpx @vscode/vsce package\n
    1. Install the .vsix file:
    code --install-extension ctx-context-0.8.1.vsix\n
    1. Reload VS Code. Type @ctx in Copilot Chat to verify.
    ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#slash-commands","level":4,"title":"Slash Commands","text":"Command Description @ctx /init Initialize .context/ directory with template files @ctx /status Show context summary with token estimate @ctx /agent Print AI-ready context packet @ctx /drift Detect stale or invalid context @ctx /journal Browse and search AI session history @ctx /hook Generate AI tool integration configs @ctx /add Add a task, decision, or learning @ctx /load Output assembled context Markdown @ctx /compact Archive completed tasks and clean up @ctx /sync Reconcile context with codebase","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#usage-examples_1","level":4,"title":"Usage Examples","text":"
    @ctx /init\n@ctx /status\n@ctx /add task Implement user authentication\n@ctx /drift\n@ctx /hook copilot\n@ctx /journal\n

    Typing @ctx without a command shows help with all available commands. The extension also supports natural language: asking @ctx about \"status\" or \"drift\" routes to the correct command automatically.

    ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#configuration_2","level":4,"title":"Configuration","text":"Setting Default Description ctx.executablePath ctx Path to the ctx binary. Set this if ctx is not in your PATH.","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#follow-up-suggestions","level":4,"title":"Follow-Up Suggestions","text":"

    After each command, the extension suggests relevant next steps. For example, after /init it suggests /status and /hook; after /drift it suggests /sync.

    ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#session-persistence","level":3,"title":"Session Persistence","text":"

    ctx init creates a .context/sessions/ directory for storing session data from non-Claude tools. The Markdown session parser scans this directory during ctx journal, enabling session history for Copilot and other tools.

    ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#manual-patterns","level":3,"title":"Manual Patterns","text":"

    These patterns work without the extension, using Copilot's built-in file awareness:

    Pattern 1: Keep context files open

    Open .context/CONVENTIONS.md in a split pane. Copilot will reference it.

    Pattern 2: Reference in comments

    // See .context/CONVENTIONS.md for naming patterns\n// Following decision in .context/DECISIONS.md: Use PostgreSQL\n\nfunction getUserById(id: string) {\n  // Copilot now has context\n}\n

    Pattern 3: Paste context into Copilot Chat

    ctx agent --budget 2000\n

    Paste output into Copilot Chat for context-aware responses.

    ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#windsurf-ide","level":2,"title":"Windsurf IDE","text":"

    Windsurf supports custom instructions and file-based context.

    ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#setup_4","level":3,"title":"Setup","text":"
    # Generate Windsurf configuration\nctx setup windsurf\n\n# Initialize context\nctx init\n
    ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#configuration_3","level":3,"title":"Configuration","text":"

    Add to Windsurf settings:

    // Split to multiple lines for readability\n{\n  \"ai.customInstructions\": \"Always read .context/CONSTITUTION.md first. \n  Check .context/TASKS.md for current work. \n  Follow patterns in .context/CONVENTIONS.md.\"\n}\n
    ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#usage_2","level":3,"title":"Usage","text":"

    Context files appear in the file tree. Reference them when chatting:

    • \"What's in our task list?\" → AI reads .context/TASKS.md
    • \"What convention do we use for naming?\" → AI reads .context/CONVENTIONS.md
    ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#generic-integration","level":2,"title":"Generic Integration","text":"

    For any AI tool that can read files, use these patterns:

    ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#manual-context-loading","level":3,"title":"Manual Context Loading","text":"
    # Get full context\nctx load\n\n# Get AI-optimized packet\nctx agent --budget 8000\n\n# Get specific file\ncat .context/TASKS.md\n
    ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#system-prompt-template","level":3,"title":"System Prompt Template","text":"
    You are working on a project with persistent context in .context/\n\nBefore responding:\n1. Read .context/CONSTITUTION.md - NEVER violate these rules\n2. Check .context/TASKS.md for current work\n3. Follow .context/CONVENTIONS.md patterns\n4. Reference .context/DECISIONS.md for architectural choices\n\nWhen you learn something new, note it for .context/LEARNINGS.md\nWhen you make a decision, document it for .context/DECISIONS.md\n
    ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#automated-updates","level":3,"title":"Automated Updates","text":"

    If your AI tool outputs to a log, use ctx watch:

    # Watch log file for context-update commands\nyour-ai-tool 2>&1 | tee /tmp/ai.log &\nctx watch --log /tmp/ai.log\n

    The AI can emit updates like:

    <context-update type=\"complete\">implement caching</context-update>\n<context-update type=\"learning\"\n  context=\"Implementing caching layer\"\n  lesson=\"Important thing learned today\"\n  application=\"Apply this insight going forward\"\n>Caching Insight</context-update>\n
    ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#context-update-commands","level":2,"title":"Context Update Commands","text":"

    The ctx watch command parses update commands from AI output. Use this format:

    <context-update type=\"TYPE\" [attributes]>Content</context-update>\n
    ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#supported-types","level":3,"title":"Supported Types","text":"Type Target File Required Attributes task TASKS.md None decision DECISIONS.md context, rationale, consequence learning LEARNINGS.md context, lesson, application convention CONVENTIONS.md None complete TASKS.md None","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#simple-format-tasks-conventions-complete","level":3,"title":"Simple Format (Tasks, Conventions, Complete)","text":"
    <context-update type=\"task\">Implement rate limiting</context-update>\n<context-update type=\"convention\">Use kebab-case for files</context-update>\n<context-update type=\"complete\">rate limiting</context-update>\n
    ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#structured-format-learnings-decisions","level":3,"title":"Structured Format (Learnings, Decisions)","text":"

    Learnings and decisions support structured attributes for better documentation:

    Learning with full structure:

    <context-update type=\"learning\"\n  context=\"Debugging Claude Code hooks\"\n  lesson=\"Hooks receive JSON via stdin, not environment variables\"\n  application=\"Parse JSON stdin with the host language (Go, Python, etc.): no jq needed\"\n>Hook Input Format</context-update>\n

    Decision with full structure:

    <context-update type=\"decision\"\n  context=\"Need a caching layer for API responses\"\n  rationale=\"Redis is fast, well-supported, and team has experience\"\n  consequence=\"Must provision Redis infrastructure; team training on Redis patterns\"\n>Use Redis for caching</context-update>\n

    Learnings require: context, lesson, application attributes. Decisions require: context, rationale, consequence attributes. Updates missing required attributes are rejected with an error.

    ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#further-reading","level":2,"title":"Further Reading","text":"
    • Skills That Fight the Platform: Common pitfalls in skill design that work against the host tool
    • The Anatomy of a Skill That Works: What makes a skill reliable: the E/A/R framework and quality gates
    ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/migration/","level":1,"title":"Integration","text":"","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/migration/#adopting-ctx-in-existing-projects","level":2,"title":"Adopting ctx in Existing Projects","text":"

    Claude Code User?

    You probably want the plugin instead of this page.

    Install ctx from the marketplace: (/plugin → search \"ctx\" → Install) and you're done: hooks, skills, and updates are handled for you.

    See Getting Started for the full walkthrough.

    This guide covers adopting ctx in existing projects regardless of which tools your team uses.

    ","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/migration/#quick-paths","level":2,"title":"Quick Paths","text":"You have... Command What happens Nothing (greenfield) ctx init Creates .context/, CLAUDE.md, permissions Existing CLAUDE.mdctx init --merge Backs up your file, inserts ctx block after the H1 Existing CLAUDE.md + ctx markers ctx init --force Replaces the ctx block, leaves your content intact .cursorrules / .aider.conf.ymlctx initctx ignores those files: they coexist cleanly Team repo, first adopter ctx init --merge && git add .context/ CLAUDE.md Initialize and commit for the team","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/migration/#existing-claudemd","level":2,"title":"Existing CLAUDE.md","text":"

    This is the most common scenario:

    You have a CLAUDE.md with project-specific instructions and don't want to lose them.

    You Own CLAUDE.md

    After initialization, CLAUDE.md is yours: edit it freely.

    Add project instructions, remove sections you don't need, reorganize as you see fit.

    The only part ctx manages is the block between the <!-- ctx:context --> and <!-- ctx:end --> markers; everything outside those markers is yours to change at any time.

    If you remove the markers, nothing breaks: ctx simply treats the file as having no ctx content and will offer to merge again on the next ctx init.

    ","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/migration/#what-ctx-init-does","level":3,"title":"What ctx init Does","text":"

    When ctx init detects an existing CLAUDE.md, it checks for ctx markers (<!-- ctx:context --> ... <!-- ctx:end -->):

    State Default behavior With --merge With --force No CLAUDE.md Creates from template Creates from template Creates from template Exists, no ctx markers Prompts to merge Auto-merges (no prompt) Auto-merges (no prompt) Exists, has ctx markers Skips (already set up) Skips Replaces the ctx block only","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/migration/#the-merge-flag","level":3,"title":"The --merge Flag","text":"

    --merge auto-merges without prompting. The merge process:

    1. Backs up your existing CLAUDE.md to CLAUDE.md.<timestamp>.bak;
    2. Finds the H1 heading (e.g., # My Project) in your file;
    3. Inserts the ctx block immediately after it;
    4. Preserves everything else untouched.

    Your content before and after the ctx block remains exactly as it was.

    ","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/migration/#before-after-example","level":3,"title":"Before / After Example","text":"

    Before: your existing CLAUDE.md:

    # My Project\n\n## Build Commands\n\n- `npm run build`: production build\n- `npm test`: run tests\n\n## Code Style\n\n- Use TypeScript strict mode\n- Prefer named exports\n

    After ctx init --merge:

    # My Project\n\n<!-- ctx:context -->\n<!-- DO NOT REMOVE: This marker indicates ctx-managed content -->\n\n## IMPORTANT: You Have Persistent Memory\n\nThis project uses Context (`ctx`) for context persistence across sessions.\n...\n\n<!-- ctx:end -->\n\n## Build Commands\n\n- `npm run build`: production build\n- `npm test`: run tests\n\n## Code Style\n\n- Use TypeScript strict mode\n- Prefer named exports\n

    Your build commands and code style sections are untouched. The ctx block sits between markers and can be updated independently.

    ","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/migration/#the-force-flag","level":3,"title":"The --force Flag","text":"

    If your CLAUDE.md already has ctx markers (from a previous ctx init), the default behavior is to skip it. Use --force to replace the ctx block with the latest template: This is useful after upgrading ctx:

    ctx init --force\n

    This only replaces content between <!-- ctx:context --> and <!-- ctx:end -->. Your own content outside the markers is preserved. A timestamped backup is created before any changes.

    ","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/migration/#undoing-a-merge","level":3,"title":"Undoing a Merge","text":"

    Every merge creates a backup:

    $ ls CLAUDE.md*.bak\nCLAUDE.md.1738000000.bak\n

    To restore:

    cp CLAUDE.md.1738000000.bak CLAUDE.md\n

    Or if you are using git, simply:

    git checkout CLAUDE.md\n
    ","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/migration/#existing-cursorrules-aider-copilot","level":2,"title":"Existing .cursorrules / Aider / Copilot","text":"

    ctx doesn't touch tool-specific config files. It creates its own files (.context/, CLAUDE.md) and coexists with whatever you already have.

    ","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/migration/#what-does-ctx-create","level":3,"title":"What Does ctx Create?","text":"ctx creates ctx does NOT touch .context/ directory .cursorrulesCLAUDE.md (or merges into) .aider.conf.yml.claude/settings.local.json (seeded by ctx init; the plugin manages hooks and skills) .github/copilot-instructions.md.windsurfrules Any other tool-specific config

    Claude Code hooks and skills are provided by the ctx plugin, installed from the Claude Code marketplace (/plugin → search \"ctx\" → Install).

    ","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/migration/#running-ctx-alongside-other-tools","level":3,"title":"Running ctx Alongside Other Tools","text":"

    The .context/ directory is the source of truth. Tool-specific configs point to it:

    • Cursor: Reference .context/ files in your system prompt (see Cursor setup)
    • Aider: Add .context/ files to the read: list in .aider.conf.yml (see Aider setup)
    • Copilot: Keep .context/ files open or reference them in comments (see Copilot setup)

    You can generate a tool-specific configuration with:

    ctx setup cursor    # Generate Cursor config snippet\nctx setup aider     # Generate .aider.conf.yml\nctx setup copilot   # Generate Copilot tips\nctx setup windsurf  # Generate Windsurf config\n
    ","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/migration/#migrating-content-into-context","level":3,"title":"Migrating Content into .context/","text":"

    If you have project knowledge scattered across .cursorrules or custom prompt files, consider migrating it:

    1. Rules / invariants → .context/CONSTITUTION.md
    2. Code patterns → .context/CONVENTIONS.md
    3. Architecture notes → .context/ARCHITECTURE.md
    4. Known issues / tips → .context/LEARNINGS.md

    You don't need to delete the originals: ctx and tool-specific files can coexist. But centralizing in .context/ means every tool gets the same context.

    ","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/migration/#team-adoption","level":2,"title":"Team Adoption","text":"","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/migration/#context-is-designed-to-be-committed","level":3,"title":".context/ Is Designed to Be Committed","text":"

    The context files (tasks, decisions, learnings, conventions, architecture) are meant to live in version control. However, some subdirectories are personal or sensitive and should not be committed.

    ctx init automatically adds these .gitignore entries:

    # Journals contain full session transcripts: personal, potentially large\n.context/journal/\n.context/journal-site/\n.context/journal-obsidian/\n\n# Legacy encryption key path (copy to ~/.ctx/.ctx.key if needed)\n.context/.ctx.key\n\n# Runtime state and logs (ephemeral, machine-specific):\n.context/state/\n.context/logs/\n\n# Claude Code local settings (machine-specific)\n.claude/settings.local.json\n

    With those in place, committing is straightforward:

    # One person initializes\nctx init --merge\n\n# Commit context files (journals and keys are already gitignored)\ngit add .context/ CLAUDE.md\ngit commit -m \"Add ctx context management\"\ngit push\n

    Teammates pull and immediately have context. No per-developer setup needed.

    ","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/migration/#what-about-claude","level":3,"title":"What about .claude/?","text":"

    The .claude/ directory contains permissions that ctx init seeds. Hooks and skills are provided by the ctx plugin (not per-project files).

    File Commit? Why .claude/settings.local.json No Machine-specific, accumulates session permissions .claude/settings.golden.json Yes Curated permission snapshot (via ctx permission snapshot)","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/migration/#merge-conflicts-in-context-files","level":3,"title":"Merge Conflicts in Context Files","text":"

    Context files are plain Markdown. Resolve conflicts the same way you would for any other documentation file:

    # After a conflicting pull\ngit diff .context/TASKS.md    # See both sides\n# Edit to keep both sets of tasks, then:\ngit add .context/TASKS.md\ngit commit\n

    Common conflict scenarios:

    • TASKS.md: Two people added tasks: Keep both.
    • DECISIONS.md: Same decision recorded differently: Unify the entry.
    • LEARNINGS.md: Parallel discoveries: Keep both, remove duplicates.
    ","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/migration/#gradual-adoption","level":3,"title":"Gradual Adoption","text":"

    You don't need the whole team to switch at once:

    1. One person runs ctx init --merge and commits;
    2. CLAUDE.md instructions work immediately for Claude Code users;
    3. Other tool users can adopt at their own pace using ctx setup <tool>;
    4. Context files benefit everyone who reads them, even without tool integration.
    ","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/migration/#verifying-it-worked","level":2,"title":"Verifying It Worked","text":"","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/migration/#check-status","level":3,"title":"Check Status","text":"
    ctx status\n

    You should see your context files listed with token counts and no warnings.

    ","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/migration/#test-memory","level":3,"title":"Test Memory","text":"

    Start a new AI session and ask: \"Do you remember?\"

    The AI should cite specific context:

    • Current tasks from .context/TASKS.md;
    • Recent decisions or learnings;
    • Session history (if you've had prior sessions);

    If it responds with generic \"I don't have memory\", check that ctx is in your PATH (which ctx) and that hooks are configured (see Troubleshooting).

    ","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/migration/#verify-the-merge","level":3,"title":"Verify the Merge","text":"

    If you used --merge, check that your original content is intact:

    # Your original content should still be there\ncat CLAUDE.md\n\n# The ctx block should be between markers\ngrep -c \"ctx:context\" CLAUDE.md  # Should print 1\ngrep -c \"ctx:end\" CLAUDE.md      # Should print 1\n
    ","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/migration/#further-reading","level":2,"title":"Further Reading","text":"
    • Getting Started: Full setup walkthrough
    • Context Files: What each .context/ file does
    • Integrations: Per-tool setup (Claude Code, Cursor, Aider, Copilot)
    • CLI Reference: All ctx commands and flags
    ","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/release/","level":1,"title":"Cutting a Release","text":"

    Full Release Checklist

    This page covers the mechanics of cutting a release (bump, tag, push). For the complete pre-release ceremony — audits, tests, verification, and post-release steps — see the Release Checklist runbook.

    ","path":["Operations","Maintainers","Cutting a Release"],"tags":[]},{"location":"operations/release/#prerequisites","level":2,"title":"Prerequisites","text":"

    Before you can cut a release you need:

    • Push access to origin (GitHub)
    • GPG signing configured (make gpg-test)
    • Go installed (version in go.mod)
    • Zensical installed (make site-setup)
    • A clean working tree (git status shows nothing to commit)
    ","path":["Operations","Maintainers","Cutting a Release"],"tags":[]},{"location":"operations/release/#step-by-step","level":2,"title":"Step-by-Step","text":"","path":["Operations","Maintainers","Cutting a Release"],"tags":[]},{"location":"operations/release/#1-update-the-version-file","level":3,"title":"1. Update the VERSION File","text":"
    echo \"0.9.0\" > VERSION\ngit add VERSION\ngit commit -m \"chore: bump version to 0.9.0\"\n

    The VERSION file uses bare semver (0.9.0), no v prefix. The release script adds the v prefix for git tags.

    ","path":["Operations","Maintainers","Cutting a Release"],"tags":[]},{"location":"operations/release/#2-generate-release-notes","level":3,"title":"2. Generate Release Notes","text":"

    In Claude Code:

    /_ctx-release-notes\n

    This analyzes commits since the last tag and writes dist/RELEASE_NOTES.md. The release script refuses to proceed without this file.

    ","path":["Operations","Maintainers","Cutting a Release"],"tags":[]},{"location":"operations/release/#3-verify-docs-and-commit-any-remaining-changes","level":3,"title":"3. Verify Docs and Commit Any Remaining Changes","text":"
    /ctx-link-check    # audit docs for dead links\nmake audit          # full check: fmt, vet, lint, style, test\ngit status          # must be clean\n
    ","path":["Operations","Maintainers","Cutting a Release"],"tags":[]},{"location":"operations/release/#4-run-the-release","level":3,"title":"4. Run the Release","text":"
    make release\n

    Or, if you are in a Claude Code session:

    /_ctx-release\n

    The release script does everything in order:

    Step What happens 1 Reads VERSION, verifies release notes exist 2 Verifies working tree is clean 3 Updates version in 4 config files (plugin.json, marketplace.json, VS Code package.json + lock) 4 Updates download URLs in 3 doc files (index.md, getting-started.md, integrations.md) 5 Adds new row to versions.md 6 Rebuilds the documentation site (make site) 7 Commits all version and docs updates 8 Runs make test and make smoke 9 Builds binaries for all 6 platforms via hack/build-all.sh 10 Creates a signed git tag (v0.9.0) 11 Pushes the tag to origin 12 Updates and pushes the latest tag","path":["Operations","Maintainers","Cutting a Release"],"tags":[]},{"location":"operations/release/#5-github-ci-takes-over","level":3,"title":"5. GitHub CI Takes Over","text":"

    Pushing a v* tag triggers .github/workflows/release.yml:

    1. Checks out the tagged commit
    2. Runs the full test suite
    3. Builds binaries for all platforms
    4. Creates a GitHub Release with auto-generated notes
    5. Uploads binaries and SHA256 checksums
    ","path":["Operations","Maintainers","Cutting a Release"],"tags":[]},{"location":"operations/release/#6-verify","level":3,"title":"6. Verify","text":"
    • GitHub Releases shows the new version
    • All 6 binaries are attached (linux/darwin x amd64/arm64, windows x amd64)
    • SHA256 files are attached
    • Release notes look correct
    ","path":["Operations","Maintainers","Cutting a Release"],"tags":[]},{"location":"operations/release/#what-gets-updated-automatically","level":2,"title":"What Gets Updated Automatically","text":"

    The release script updates 8 files so you do not have to:

    File What changes internal/assets/claude/.claude-plugin/plugin.json Plugin version .claude-plugin/marketplace.json Marketplace version (2 fields) editors/vscode/package.json VS Code extension version editors/vscode/package-lock.json VS Code lock version (2 fields) docs/index.md Download URLs docs/home/getting-started.md Download URLs docs/operations/integrations.md VSIX filename version docs/reference/versions.md New version row + latest pointer

    The Go binary version is injected at build time via -ldflags from the VERSION file. No source file needs editing.

    ","path":["Operations","Maintainers","Cutting a Release"],"tags":[]},{"location":"operations/release/#build-targets-reference","level":2,"title":"Build Targets Reference","text":"Target What it does make release Full release (script + tag + push) make build Build binary for current platform make build-all Build all 6 platform binaries make test Unit tests make smoke Integration smoke tests make audit Full check (fmt + vet + lint + drift + docs + test) make site Rebuild documentation site","path":["Operations","Maintainers","Cutting a Release"],"tags":[]},{"location":"operations/release/#troubleshooting","level":2,"title":"Troubleshooting","text":"","path":["Operations","Maintainers","Cutting a Release"],"tags":[]},{"location":"operations/release/#release-notes-not-found","level":3,"title":"\"Release Notes Not Found\"","text":"
    ERROR: dist/RELEASE_NOTES.md not found.\n

    Run /_ctx-release-notes in Claude Code first, or write dist/RELEASE_NOTES.md manually.

    ","path":["Operations","Maintainers","Cutting a Release"],"tags":[]},{"location":"operations/release/#working-tree-is-not-clean","level":3,"title":"\"Working Tree Is Not Clean\"","text":"
    ERROR: Working tree is not clean.\n

    Commit or stash all changes before running make release.

    ","path":["Operations","Maintainers","Cutting a Release"],"tags":[]},{"location":"operations/release/#tag-already-exists","level":3,"title":"\"Tag Already Exists\"","text":"
    ERROR: Tag v0.9.0 already exists.\n

    You cannot release the same version twice. Either bump VERSION to a new version, or delete the old tag if the release was incomplete:

    git tag -d v0.9.0\ngit push origin :refs/tags/v0.9.0\n
    ","path":["Operations","Maintainers","Cutting a Release"],"tags":[]},{"location":"operations/release/#ci-build-fails-after-tag-push","level":3,"title":"CI Build Fails After Tag Push","text":"

    The tag is already published. Fix the issue, bump to a patch version (e.g. 0.9.1), and release again. Do not force-push tags that others may have already fetched.

    ","path":["Operations","Maintainers","Cutting a Release"],"tags":[]},{"location":"operations/upgrading/","level":1,"title":"Upgrade","text":"","path":["Operations","Day-to-Day","Upgrade"],"tags":[]},{"location":"operations/upgrading/#upgrade","level":2,"title":"Upgrade","text":"

    New versions of ctx may ship updated permissions, CLAUDE.md directives, or plugin hooks and skills.

    Claude Code User?

    The marketplace can update skills, hooks, and prompts independently: /plugin → select ctx → Update now (or enable auto-update).

    The ctx binary is separate: rebuild from source or download a new release when one is available, then run ctx init --force --merge. Knowledge files are preserved automatically.

    ","path":["Operations","Day-to-Day","Upgrade"],"tags":[]},{"location":"operations/upgrading/#tldr","level":2,"title":"TL:DR","text":"
    # Plugin users (Claude Code)\n# /plugin → select ctx → Update now\n# Then update the binary and reinitialize:\nctx init --force --merge\n\n# From-source / manual users\n# install new ctx binary, then:\nctx init --force --merge\n# /plugin → select ctx → Update now   (if using Claude Code)\n
    ","path":["Operations","Day-to-Day","Upgrade"],"tags":[]},{"location":"operations/upgrading/#what-changes-between-versions","level":2,"title":"What Changes between Versions","text":"

    ctx init generates two categories of files:

    Category Examples Changes between versions? Infrastructure .claude/settings.local.json (permissions), ctx-managed sections in CLAUDE.md, ctx plugin (hooks + skills) Yes Knowledge .context/TASKS.md, DECISIONS.md, LEARNINGS.md, CONVENTIONS.md, ARCHITECTURE.md, GLOSSARY.md, CONSTITUTION.md, AGENT_PLAYBOOK.md No: this is your data

    Infrastructure is regenerated by ctx init and plugin updates. Knowledge files are yours and should never be overwritten.

    ","path":["Operations","Day-to-Day","Upgrade"],"tags":[]},{"location":"operations/upgrading/#upgrade-steps","level":2,"title":"Upgrade Steps","text":"","path":["Operations","Day-to-Day","Upgrade"],"tags":[]},{"location":"operations/upgrading/#1-install-the-new-version","level":3,"title":"1. Install the New Version","text":"

    Build from source or download the binary:

    cd /path/to/ctx-source\ngit pull\nmake build\nsudo make install\nctx --version   # verify\n
    ","path":["Operations","Day-to-Day","Upgrade"],"tags":[]},{"location":"operations/upgrading/#2-reinitialize","level":3,"title":"2. Reinitialize","text":"
    ctx init --force --merge\n
    • --force regenerates infrastructure files (permissions, ctx-managed sections in CLAUDE.md).
    • --merge preserves your content outside ctx markers.

    Knowledge files (.context/TASKS.md, DECISIONS.md, etc.) are preserved automatically: ctx init only overwrites infrastructure, never your data.

    Encryption key: The encryption key lives at ~/.ctx/.ctx.key (outside the project). Reinit does not affect it. If you have a legacy key at .context/.ctx.key or ~/.local/ctx/keys/, copy it manually (see Syncing Scratchpad Notes).

    ","path":["Operations","Day-to-Day","Upgrade"],"tags":[]},{"location":"operations/upgrading/#3-update-the-ctx-plugin","level":3,"title":"3. Update the ctx Plugin","text":"

    If you use Claude Code, update the plugin to get new hooks and skills:

    1. Open /plugin in Claude Code.
    2. Select ctx.
    3. Click Update now.

    Or enable auto-update so the plugin stays current without manual steps.

    ","path":["Operations","Day-to-Day","Upgrade"],"tags":[]},{"location":"operations/upgrading/#4-review-custom-settings","level":3,"title":"4. Review Custom Settings","text":"

    If you added custom permissions to .claude/settings.local.json beyond what ctx init provides, diff and merge:

    diff .claude.bak/settings.local.json .claude/settings.local.json\n

    Manually add back any custom entries that the new init dropped.

    ","path":["Operations","Day-to-Day","Upgrade"],"tags":[]},{"location":"operations/upgrading/#5-verify","level":3,"title":"5. Verify","text":"
    ctx status          # context files intact\nctx drift           # no broken references\n
    ","path":["Operations","Day-to-Day","Upgrade"],"tags":[]},{"location":"operations/upgrading/#6-clean-up","level":3,"title":"6. Clean Up","text":"

    If you made manual backups, remove them once satisfied:

    rm -rf .context.bak .claude.bak CLAUDE.md.bak\n
    ","path":["Operations","Day-to-Day","Upgrade"],"tags":[]},{"location":"operations/upgrading/#what-if-i-skip-the-upgrade","level":2,"title":"What If I Skip the Upgrade?","text":"

    The old binary still works with your existing .context/ files. But you may miss:

    • New plugin hooks that enforce better practices or catch mistakes;
    • Updated skill prompts that produce better results;
    • New .gitignore entries for directories added in newer versions;
    • Bug fixes in the CLI itself.

    The plugin and the binary can be updated independently. You can update the plugin (for new hooks/skills) even if you stay on an older binary, and vice versa.

    Context files are plain Markdown: They never break between versions.

    The surrounding infrastructure is what evolves.

    ","path":["Operations","Day-to-Day","Upgrade"],"tags":[]},{"location":"operations/runbooks/architecture-exploration/","level":1,"title":"Architecture Exploration","text":"","path":["Operations","Runbooks","Architecture Exploration"],"tags":[]},{"location":"operations/runbooks/architecture-exploration/#architecture-exploration","level":1,"title":"Architecture Exploration","text":"

    Systematically build architecture documentation across one or more repositories using ctx skills. Each invocation does one unit of work; a simple loop drives the agent through all phases.

    When to use: When onboarding to a new codebase, performing architecture reviews, or building up .context/ documentation across a workspace of repos.

    Prerequisites: ctx installed, repos cloned under a shared workspace directory (e.g., ~/WORKSPACE/).

    Companion skills:

    • /ctx-architecture — structural baseline and principal analysis
    • /ctx-architecture-enrich — code intelligence enrichment via GitNexus
    • /ctx-architecture-failure-analysis — adversarial failure analysis
    ","path":["Operations","Runbooks","Architecture Exploration"],"tags":[]},{"location":"operations/runbooks/architecture-exploration/#overview","level":2,"title":"Overview","text":"

    The agent progresses through phases per repo, depth-first:

    Phase Skill What it does bootstrapctx init + /ctx-architecture Initialize context and build structural baseline principal/ctx-architecture principal Deep analysis: vision, bottlenecks, alternatives enriched/ctx-architecture-enrich Quantify with code intelligence (blast radius, flows) frontier-N/ctx-architecture (re-run) Explore unexplored areas found in convergence report lens-*/ctx-architecture with lens Focused exploration through conceptual lenses

    Exploration stops when convergence >= 0.85, frontier runs plateau, or all lenses are exhausted.

    ","path":["Operations","Runbooks","Architecture Exploration"],"tags":[]},{"location":"operations/runbooks/architecture-exploration/#setup","level":2,"title":"Setup","text":"

    Create a tracking directory in your workspace root:

    cd ~/WORKSPACE\nmkdir -p .arch-explorer\n

    Create .arch-explorer/manifest.json listing your repos:

    {\n  \"repos\": [\"ctx\", \"portal\", \"infra\"],\n  \"current_repo_index\": 0,\n  \"progress\": {}\n}\n

    Create .arch-explorer/run-log.md (empty, the agent appends to it).

    ","path":["Operations","Runbooks","Architecture Exploration"],"tags":[]},{"location":"operations/runbooks/architecture-exploration/#prompt","level":2,"title":"Prompt","text":"

    Save this as .arch-explorer/PROMPT.md and invoke with your agent. The prompt is self-contained: the agent reads the manifest, picks the next unit of work, executes it, updates tracking, and stops.

    You are an autonomous architecture exploration agent. Your job is to\nsystematically build and evolve architecture documentation across all\nrepositories in this workspace using ctx skills.\n\n## Execution Protocol\n\n### Step 1: Read State\n\nRead `.arch-explorer/manifest.json`. This tells you:\n- Which repos exist and their order\n- What has been done per repo (`progress` object)\n- Which repo to work on next (`current_repo_index`)\n\n### Step 2: Pick the Next Unit of Work\n\n**Strategy: depth-first, sequential.**\n\nFind the current repo (by `current_repo_index`). Determine its next\nphase from the progression below. If all phases are exhausted for this\nrepo (convergence score >= 0.85 or 3+ frontier runs with no new\nfindings), advance `current_repo_index` and pick the next repo.\n\n### Phase Progression (per repo)\n\nEach repo progresses through these phases in order:\n\n| Phase | Skill | Prerequisite |\n|-------|-------|-------------|\n| `bootstrap` | `ctx init` + `/ctx-architecture` | None |\n| `principal` | `/ctx-architecture principal` | bootstrap done |\n| `enriched` | `/ctx-architecture-enrich` | principal done, GitNexus indexed |\n| `frontier-N` | `/ctx-architecture` (re-run) | enriched done |\n\n**`bootstrap` is a single composite unit:** `ctx init` followed by\nstructural analysis. This is the ONLY phase that combines two actions.\nNo other phase may chain actions.\n\n**Frontier runs** are numbered: `frontier-1`, `frontier-2`, etc.\nEach frontier run reads CONVERGENCE-REPORT.md and picks unexplored\nareas. 
The skill handles this automatically.\n\nAfter the third frontier run OR when convergence >= 0.85, apply\n**conceptual lenses** (one per run):\n\n| Lens | Focus Areas |\n|------|-------------|\n| `security` | Auth flows, input validation, secrets, attack surfaces, trust boundaries |\n| `performance` | Hot paths, caching, concurrency, resource lifecycle, allocation patterns |\n| `stability` | Error handling, retries, graceful degradation, circuit breakers, timeouts |\n| `observability` | Logging, metrics, tracing, alerting, debugging affordances |\n| `data-integrity` | Storage, serialization, migrations, consistency, backup, recovery |\n\nFor lens runs, prepend the lens context as an explicit instruction to\nthe skill invocation:\n\n> \"Focus exploration on security: auth flows, input validation, secrets,\n> attack surfaces, trust boundaries.\"\n\nDo NOT wait for the skill to ask what to explore. Provide the lens\nfocus as input upfront.\n\n### Step 3: Do the Work\n\n1. `cd` into the repo directory (`~/WORKSPACE/<repo-name>`)\n2. If phase is `bootstrap`:\n    - Run `ctx init`, confirm `.context/` exists\n    - Then run `/ctx-architecture` (structural baseline)\n3. If phase is `principal` or `frontier-*`:\n    - Run `/ctx-architecture` (add `principal` argument for principal phase)\n    - The skill will read existing artifacts and build on them\n4. If phase is `enriched`:\n    - Verify GitNexus is connected: call `mcp__gitnexus__list_repos`\n    - Success = non-empty list returned with no error\n    - If GitNexus unavailable, log as `enriched-skipped` and advance\n      to `frontier-1`\n    - Run `/ctx-architecture-enrich`\n5. 
If phase is a lens run (`lens-security`, etc.):\n    - Run `/ctx-architecture` with lens focus prepended as instruction\n      (see lens table above for exact wording)\n\n### Step 4: Extract Results\n\nAfter the skill completes, gather:\n\n- **Convergence score**: from `map-tracking.json`, computed as:\n  average of all module `confidence` values (0.0-1.0). If\n  `map-tracking.json` is missing or has no confidence values,\n  record `null` and log a warning.\n- **Frontier count**: from CONVERGENCE-REPORT.md, count the number\n  of listed unexplored areas. If CONVERGENCE-REPORT.md is missing,\n  record `frontier_count: null` and log a warning. Treat missing\n  as \"exploration should continue\" (do not stall).\n- **Key findings**: 2-3 bullet points of what was discovered or\n  changed in this run (new modules mapped, danger zones found, etc.)\n- **New artifacts**: list any new files created in `.context/`\n\n### Step 5: Update Tracking\n\nUpdate `.arch-explorer/manifest.json`:\n\n```json\n{\n  \"progress\": {\n    \"ctx\": {\n      \"phases_completed\": [\"bootstrap\", \"principal\"],\n      \"current_phase\": \"enriched\",\n      \"lenses_explored\": [],\n      \"last_run\": \"2026-04-07T14:00:00Z\",\n      \"convergence_score\": 0.72,\n      \"frontier_count\": 3,\n      \"total_runs\": 2,\n      \"findings_summary\": \"14 modules mapped, 3 danger zones, 2 extension points\"\n    }\n  }\n}\n```\n\nAppend to `.arch-explorer/run-log.md`:\n\n```markdown\n## 2026-04-07T14:00:00Z — ctx — principal\n\n**Phase:** principal\n**Convergence:** 0.45 -> 0.72\n**Frontiers remaining:** 3\n**Key findings:**\n- Identified CLI dispatch as primary bottleneck (fan-out to 12 subsystems)\n- Security: context files readable by any process (no access control)\n- Strategic recommendation: extract context engine into library package\n\n**Artifacts updated:** ARCHITECTURE-PRINCIPAL.md, DANGER-ZONES.md, map-tracking.json\n```\n\n### Step 6: Report and Stop\n\nPrint this exact format as the 
FINAL output of the invocation:\n\n```\n[arch-explorer] DONE\n  repo: ctx\n  phase: principal\n  convergence: 0.72\n  frontiers: 3\n  runs_on_repo: 3\n  next: ctx / enriched\n```\n\nThe `[arch-explorer] DONE` line is the terminal marker. After printing\nit, produce no further output. Execution is complete.\n\n## Rules\n\n1. **One unit per invocation.** The only composite unit is `bootstrap`\n   (init + structural). All other phases are exactly one skill run.\n2. **Additive only.** Never delete or overwrite existing artifacts.\n   The skills already handle incremental updates.\n3. **No duplicated work.** Read manifest before acting. If a phase is\n   already recorded as completed, skip it.\n4. **Log everything.** Every run gets a run-log entry, even failures\n   and skips.\n5. **Fail gracefully.** If a skill fails (missing GitNexus, broken repo,\n   etc.), log the failure with reason and advance to the next phase or\n   repo. Don't retry in the same invocation.\n6. **Respect ctx conventions.** Each repo gets its own `.context/`\n   directory. Never write architecture artifacts outside `.context/`.\n\n## Stopping Logic\n\nA repo is considered \"explored\" when ANY of these is true:\n- Convergence score >= 0.85 (from map-tracking.json)\n- 3+ frontier runs produced no new findings (frontier_count unchanged\n  across consecutive runs)\n- All 5 lenses have been applied\n- Convergence score is `null` after 3 attempts (artifacts aren't being\n  generated properly — log warning and move on)\n\nWhen a repo is explored, advance `current_repo_index` in the manifest.\n\n## When All Repos Are Done\n\nWhen every repo has reached its stopping condition, print:\n\n```\n[arch-explorer] ALL DONE\n  - ctx: 0.92 convergence, 8 runs, 5 lenses\n  - portal: 0.87 convergence, 6 runs, 3 lenses\n  ...\n```\n
    ","path":["Operations","Runbooks","Architecture Exploration"],"tags":[]},{"location":"operations/runbooks/architecture-exploration/#invocation","level":2,"title":"Invocation","text":"

    Single run (safest for quota):

    cd ~/WORKSPACE\nclaude --print \"Follow .arch-explorer/PROMPT.md\" --allowedTools '*'\n

    Batch of N runs:

    cd ~/WORKSPACE\nfor i in $(seq 1 5); do\n  claude --print \"Follow .arch-explorer/PROMPT.md\" --allowedTools '*'\n  echo \"--- Run $i complete ---\"\ndone\n

    Resume after interruption:

    Just run again. The manifest tracks state; the agent picks up where it left off.

    ","path":["Operations","Runbooks","Architecture Exploration"],"tags":[]},{"location":"operations/runbooks/architecture-exploration/#tips","level":2,"title":"Tips","text":"
    • Start small: list 1-2 repos in the manifest first. Add more once you're confident in the output quality.
    • GitNexus is optional: the enrichment phase is skipped gracefully if GitNexus isn't connected. You still get structural and principal analysis.
    • Review between batches: check the run-log and generated artifacts between batch runs. The agent is additive-only, but early course correction saves wasted runs.
    • Lens runs are the payoff: the first three phases build the map; lens runs find the interesting things (security gaps, performance cliffs, stability risks).
    ","path":["Operations","Runbooks","Architecture Exploration"],"tags":[]},{"location":"operations/runbooks/architecture-exploration/#history","level":2,"title":"History","text":"
    • 2026-04-07: Original prompt created as hack/agents/architecture-explorer.md.
    • 2026-04-16: Moved to docs as a runbook for discoverability.
    ","path":["Operations","Runbooks","Architecture Exploration"],"tags":[]},{"location":"operations/runbooks/breaking-migration/","level":1,"title":"Breaking Migration","text":"","path":["Operations","Runbooks","Breaking Migration"],"tags":[]},{"location":"operations/runbooks/breaking-migration/#breaking-migration-guide","level":1,"title":"Breaking Migration Guide","text":"

    Template for upgrading across breaking CLI renames or behavior changes. Use this as a starting point when writing migration notes for a specific release, or hand it to your agent as context for generating release-specific guidance.

    When to use: When a release includes breaking changes (command renames, removed flags, changed defaults) that require user action.

    Companion: Upgrade guide covers the general upgrade flow. This runbook covers the breaking-change specifics.

    ","path":["Operations","Runbooks","Breaking Migration"],"tags":[]},{"location":"operations/runbooks/breaking-migration/#step-1-identify-what-changed","level":2,"title":"Step 1: Identify What Changed","text":"

    Ask your agent to diff the CLI surface between the old and new version:

    Compare the CLI command surface between the previous release tag\nand HEAD. For each change, categorize as: renamed, removed,\nnew, or changed-behavior. Include old and new command signatures.\n

    Or use the /_ctx-command-audit skill after the rename.

    ","path":["Operations","Runbooks","Breaking Migration"],"tags":[]},{"location":"operations/runbooks/breaking-migration/#step-2-regenerate-infrastructure","level":2,"title":"Step 2: Regenerate Infrastructure","text":"
    # Install the new binary\nmake build && sudo make install\n\n# Regenerate CLAUDE.md and permissions\nctx init --force --merge\n

    --merge preserves your knowledge files (TASKS.md, DECISIONS.md, etc.) while regenerating infrastructure (permissions, CLAUDE.md managed sections).

    ","path":["Operations","Runbooks","Breaking Migration"],"tags":[]},{"location":"operations/runbooks/breaking-migration/#step-3-update-the-plugin","level":2,"title":"Step 3: Update the Plugin","text":"
    /plugin -> select ctx -> Update now\n

    Or, if using a local clone:

    make plugin-reload\n# restart Claude Code\n
    ","path":["Operations","Runbooks","Breaking Migration"],"tags":[]},{"location":"operations/runbooks/breaking-migration/#step-4-update-personal-scripts","level":2,"title":"Step 4: Update Personal Scripts","text":"

    Search your scripts and aliases for old command names:

    # Example: find references to old command names\ngrep -r \"ctx old-command\" ~/scripts/ ~/.zshrc ~/.bashrc\n

    Replace with the new names per the changelog.

    ","path":["Operations","Runbooks","Breaking Migration"],"tags":[]},{"location":"operations/runbooks/breaking-migration/#step-5-update-hook-configs","level":2,"title":"Step 5: Update Hook Configs","text":"

    If you have custom hooks in .claude/settings.local.json that reference ctx commands, update them:

    jq '.hooks' .claude/settings.local.json | grep \"ctx \"\n
    ","path":["Operations","Runbooks","Breaking Migration"],"tags":[]},{"location":"operations/runbooks/breaking-migration/#step-6-verify","level":2,"title":"Step 6: Verify","text":"
    ctx status          # context files intact\nctx drift           # no broken references\nmake test           # if you're a contributor\n
    ","path":["Operations","Runbooks","Breaking Migration"],"tags":[]},{"location":"operations/runbooks/breaking-migration/#writing-release-specific-migration-notes","level":2,"title":"Writing Release-Specific Migration Notes","text":"

    When preparing a release with breaking changes, create a section in the release notes using this template:

    ## Breaking Changes\n\n### `old-command` renamed to `new-command`\n\n**What changed**: `ctx old-command` is now `ctx new-command`.\nThe old name is removed (no deprecation alias).\n\n**Action required**:\n1. Run `ctx init --force --merge` to update CLAUDE.md\n2. Update any scripts referencing `ctx old-command`\n3. Update hook configs if applicable\n\n**Why**: [brief rationale for the rename]\n

    Repeat for each breaking change. Users should be able to follow the notes mechanically without needing to understand the codebase.

    ","path":["Operations","Runbooks","Breaking Migration"],"tags":[]},{"location":"operations/runbooks/codebase-audit/","level":1,"title":"Codebase Audit","text":"","path":["Operations","Runbooks","Codebase Audit"],"tags":[]},{"location":"operations/runbooks/codebase-audit/#codebase-audit","level":1,"title":"Codebase Audit","text":"

    A structured audit of the codebase: dead code, magic strings, documentation drift, security surface, and roadmap opportunities.

    When to run: Before a release, after a long YOLO sprint, quarterly, or when planning the next phase of work.

    Time: ~15-30 minutes with a team of agents.

    ","path":["Operations","Runbooks","Codebase Audit"],"tags":[]},{"location":"operations/runbooks/codebase-audit/#how-to-use-this-runbook","level":2,"title":"How to Use This Runbook","text":"

    Start a Claude Code session with a clean git state (git stash or commit first). Paste or adapt the prompt below. The agent does the analysis; you triage the findings.

    ","path":["Operations","Runbooks","Codebase Audit"],"tags":[]},{"location":"operations/runbooks/codebase-audit/#prompt","level":2,"title":"Prompt","text":"
    I want you to create an agent team to audit this codebase. Save each report as\na separate markdown file under `./ideas/` (or another directory if you prefer).\n\nUse read-only agents (subagent_type: Explore) for all analyses. No code changes.\n\nFor each report, use this structure:\n- Executive Summary (2-3 sentences + severity table)\n- Findings (grouped, with file:line references)\n- Ranked Recommendations (high/medium/low priority)\n- Methodology (what was examined, how)\n\nKeep reports actionable: every finding should suggest a concrete fix or next step.\n\n## Analyses to Run\n\n### 1. Extractable Patterns (session mining)\nSearch session JSONL files, journal entries, and task archives for repetitive\nmulti-step workflows. Count frequency of bash command sequences, slash command\nusage, and recurring user prompts. Identify patterns that could become skills\nor scripts. Cross-reference with existing skills to find coverage gaps.\nOutput: ranked list of automation opportunities with frequency data.\n\n### 2. Documentation Drift (godoc + inline)\nCompare every doc.go against its package's actual exports and behavior. Check\ninline godoc comments on exported functions against their implementations.\nScan for stale TODO/FIXME/HACK comments. Check package-level comments match\npackage names. Output: drift items ranked by severity with exact file:line refs.\n\n### 3. Maintainability\nLook for: functions >80 lines that have logical split points; switch blocks\nwith >5 cases that could be table-driven or extracted; inline comments that\nsay \"step 1\", \"step 2\" or similar (sign the block wants to be a function);\nfiles with >400 lines; packages with flat structure that could benefit from\nsub-packages; functions that seem misplaced in their file. Do NOT flag\nthings that are fine as-is just because they could theoretically be different.\nOutput: concrete refactoring suggestions, not style nitpicks.\n\n### 4. 
Security Review\nThis is a CLI app: focus on CLI-relevant attack surface, not web OWASP:\nfile path traversal (does user input flow into file paths unsanitized?),\ncommand injection (does user input flow into exec calls?), symlink following\n(does the tool follow symlinks when writing to .context/?), permission\nhandling (are file permissions set correctly?), sensitive data in outputs\n(do any commands leak secrets or session content?). Output: findings with\nseverity ratings and exploit scenarios.\n\n### 5. Blog Theme Discovery\nRead existing blog posts for style and narrative voice. Analyze git log,\nrecent session discussions, and DECISIONS.md for story arcs worth writing\nabout. Suggest 3-5 blog post themes with: title, angle, target audience,\nkey commits/sessions to reference, and a 2-sentence pitch. Prioritize\nthemes that build a coherent narrative across posts.\n\n### 6. Roadmap & Value Opportunities\nBased on current features, recent momentum, and gaps found in other analyses:\nwhat are the highest-value improvements? Consider: user-facing features,\ndeveloper experience, integration opportunities, and low-hanging fruit.\nOutput: prioritized list with effort/impact estimates (not time estimates).\n\n### 7. User-Facing Documentation\nEvaluate README, help text, and any user docs. Suggest improvements\nstructured as use-case pages: the problem, how ctx solves it, typical\nworkflow, gotchas. Identify gaps where a user would get stuck without\nreading source code. Output: list of documentation gaps and suggested\npage outlines.\n\n### 8. Agent Team Strategies\nBased on the codebase structure, suggest 2-3 agent team configurations for\nupcoming work sessions. For each: team composition (roles, agent types),\ntask distribution strategy, coordination approach, and which types of work\nit suits. Ground suggestions in actual project patterns, not generic advice.\n
    ","path":["Operations","Runbooks","Codebase Audit"],"tags":[]},{"location":"operations/runbooks/codebase-audit/#tips","level":2,"title":"Tips","text":"
    • Clean state matters: the prompt says \"no code changes\" but accidents happen. Start from a clean git state so you can git checkout . if needed.

    • Adjust scope: drop analyses you don't need. Analyses 1-4 are the most actionable. Analyses 5-8 are planning/creative and can be skipped if you just want a technical audit.

    • Reports feed TASKS.md: after the audit, read each report and create tasks in the appropriate Phase section. The reports are input, not output.

    • ideas/ is gitignored: reports saved there won't be committed. Move specific findings to TASKS.md, DECISIONS.md, or LEARNINGS.md to persist them.

    ","path":["Operations","Runbooks","Codebase Audit"],"tags":[]},{"location":"operations/runbooks/codebase-audit/#history","level":2,"title":"History","text":"
    • 2026-02-08: Original prompt created after a codebase audit sprint.
    • 2026-02-17: Improved with read-only agents, report structure template, CLI-scoped security review, and maintainability thresholds.
    • 2026-04-16: Moved from hack/runbooks/ to docs/operations/runbooks/.
    ","path":["Operations","Runbooks","Codebase Audit"],"tags":[]},{"location":"operations/runbooks/docs-semantic-audit/","level":1,"title":"Docs Semantic Audit","text":"","path":["Operations","Runbooks","Docs Semantic Audit"],"tags":[]},{"location":"operations/runbooks/docs-semantic-audit/#documentation-semantic-audit","level":1,"title":"Documentation Semantic Audit","text":"

    Find structural problems that linters and link checkers cannot: weak pages that should be merged, heavy pages that should be split, missing cross-links, and narrative arcs that don't land.

    When to run: Before a release, after adding several new pages, when the site feels sprawling, or when you suspect narrative gaps.

    Time: ~20-40 minutes with an agent session.

    ","path":["Operations","Runbooks","Docs Semantic Audit"],"tags":[]},{"location":"operations/runbooks/docs-semantic-audit/#why-this-is-a-runbook","level":2,"title":"Why This Is a Runbook","text":"

    These judgments are inherently subjective and context-dependent. A page is \"weak\" relative to its neighbors; a narrative arc only matters if the docs intend to tell a story. Deterministic tools (broken-link checkers, word counters) can't do this. An LLM reading the full doc set can.

    ","path":["Operations","Runbooks","Docs Semantic Audit"],"tags":[]},{"location":"operations/runbooks/docs-semantic-audit/#prompt","level":2,"title":"Prompt","text":"

    Paste or adapt the following into a Claude Code session. The agent needs read access to docs/ and the site nav structure.

    Read every file under docs/ (including docs/blog/ and docs/recipes/).\nFor each file, note: title, word count, outbound links, inbound links\n(how many other pages link to it), and a one-line summary of its purpose.\n\nThen produce a report with these sections:\n\n## 1. Weak Dangling Pages\n\nPages that are thin, isolated, or redundant. Signs:\n- Under ~300 words with no unique content (just restates what another page says)\n- Zero or one inbound links (orphaned in the nav)\n- Content that would be stronger merged into an adjacent page\n- \"Try it in 5 minutes\" sections that assume installation already happened\n- Pages whose title doesn't work as a nav entry (too long, too vague)\n\nFor each: identify the page, explain why it's weak, and recommend\nmerge target or deletion.\n\n## 2. Overly Heavy Pages\n\nPages doing too much. Signs:\n- Over ~1500 words with multiple distinct topics\n- More than 4 H2 sections that could stand alone\n- Reader has to scroll past irrelevant content to find what they need\n- Mixed audience (beginner setup + advanced config on same page)\n\nFor each: identify the page, list the distinct topics, and suggest\nsplit points.\n\n## 3. Missing Cross-Links\n\nPlaces where a reader would naturally want to jump to related content\nbut no link exists. Look for:\n- Concepts mentioned but not linked (e.g., \"scratchpad\" without linking\n  to the scratchpad page)\n- Blog posts that describe features without linking to the reference docs\n- Recipes that reference workflows without linking to the relevant\n  getting-started section\n- Pages that end without a \"Next Up\" or \"See Also\" pointer\n\nFor each: source page, anchor text, suggested link target.\n\n## 4. Narrative Gaps\n\nThe docs should tell a coherent story: problem -> install -> first session\n-> daily workflow -> advanced patterns -> contributing. 
Look for:\n- Gaps in the progression (e.g., no bridge from \"first session\" to\n  \"daily habits\")\n- Blog posts that introduce concepts the reference docs don't cover\n- Recipes that assume knowledge no other page teaches\n- Features documented in CLI reference but missing from workflows/recipes\n\nFor each: describe the gap and suggest what page or section would fill it.\n\n## 5. Blog Cross-Linking Opportunities\n\nBlog posts are often written in isolation. Look for:\n- Posts that cover the same theme but don't reference each other\n- Posts that describe the evolution of a feature (natural \"part 1 / part 2\")\n- Posts that would benefit from a \"Related posts\" footer\n- Thematic clusters that could be linked from a recipe or reference page\n\nFor each: list the posts, the shared theme, and the suggested links.\n\n## Output Format\n\nFor every finding, include:\n- File path (docs/whatever.md)\n- Severity: high (actively confusing), medium (missed opportunity),\n  low (nice to have)\n- Concrete recommendation (merge into X, split at H2 Y, add link to Z)\n\nEnd with a prioritized action list: what to fix first.\n
    ","path":["Operations","Runbooks","Docs Semantic Audit"],"tags":[]},{"location":"operations/runbooks/docs-semantic-audit/#after-the-audit","level":2,"title":"After the Audit","text":"
    1. Triage findings — not everything needs fixing. Focus on high severity.
    2. Merge weak pages first — fewer pages is almost always better.
    3. Add cross-links — cheapest improvement, highest reader impact.
    4. File split decisions in DECISIONS.md — page splits are architectural.
    5. Regenerate the site and spot-check nav after structural changes.
    ","path":["Operations","Runbooks","Docs Semantic Audit"],"tags":[]},{"location":"operations/runbooks/docs-semantic-audit/#history","level":2,"title":"History","text":"
    • 2026-02-17: Created after merging docs/re-explaining.md into docs/about.md, which surfaced the pattern of weak standalone pages that dilute rather than add.
    • 2026-04-16: Moved from hack/runbooks/ to docs/operations/runbooks/.
    ","path":["Operations","Runbooks","Docs Semantic Audit"],"tags":[]},{"location":"operations/runbooks/hub-deployment/","level":1,"title":"Hub Deployment","text":"","path":["Operations","Runbooks","Hub Deployment"],"tags":[]},{"location":"operations/runbooks/hub-deployment/#hub-deployment","level":1,"title":"Hub Deployment","text":"

    Linear runbook for setting up a ctx Hub for yourself or a team. Consolidates pieces currently scattered across hub recipes and operations docs.

    When to use: First-time hub setup, or when onboarding a new team onto an existing hub.

    Prerequisites: ctx binary installed, network connectivity between hub and clients.

    Companion docs:

    • Hub overview — what the hub is and isn't
    • Hub operations — data directory, systemd, backup, monitoring
    • Hub failure modes — what can go wrong
    ","path":["Operations","Runbooks","Hub Deployment"],"tags":[]},{"location":"operations/runbooks/hub-deployment/#step-1-start-the-hub","level":2,"title":"Step 1: Start the Hub","text":"Quick Start (foreground)Production (systemd)
    ctx hub start\n

    See Hub Operations: Systemd Unit for the full unit file.

    sudo systemctl enable --now ctx-hub\n

    The hub creates admin.token on first start. Save this token — it's the only way to register clients.

    ","path":["Operations","Runbooks","Hub Deployment"],"tags":[]},{"location":"operations/runbooks/hub-deployment/#step-2-generate-the-admin-token","level":2,"title":"Step 2: Generate the Admin Token","text":"

    On first start, the hub writes admin.token to the data directory (default ~/.ctx/hub-data/):

    cat ~/.ctx/hub-data/admin.token\n

    This token has full admin privileges. Keep it secret.

    ","path":["Operations","Runbooks","Hub Deployment"],"tags":[]},{"location":"operations/runbooks/hub-deployment/#step-3-register-clients","level":2,"title":"Step 3: Register Clients","text":"

    For each client (person or machine) that will connect:

    # On the hub machine\nctx hub register --name \"volkan-laptop\" --admin-token <admin-token>\n

    This returns a client token. Distribute it securely to the client.

    ","path":["Operations","Runbooks","Hub Deployment"],"tags":[]},{"location":"operations/runbooks/hub-deployment/#step-4-connect-clients","level":2,"title":"Step 4: Connect Clients","text":"

    On each client machine:

    ctx connect <hub-address> --token <client-token>\n

    Verify the connection:

    ctx connection status\n
    ","path":["Operations","Runbooks","Hub Deployment"],"tags":[]},{"location":"operations/runbooks/hub-deployment/#step-5-verify-sync","level":2,"title":"Step 5: Verify Sync","text":"

    Push a test entry from one client and verify it arrives:

    # Client A\nctx add learning \"Hub sync test\" --context \"Verifying hub setup\"\n\n# Client B (after a moment)\nctx status   # should show the new learning\n
    ","path":["Operations","Runbooks","Hub Deployment"],"tags":[]},{"location":"operations/runbooks/hub-deployment/#step-6-configure-backup","level":2,"title":"Step 6: Configure Backup","text":"

    Set up regular backups of the hub data directory. See Hub Operations: Backup and Restore.

    Minimum:

    # Add to cron\n0 */6 * * * cp ~/.ctx/hub-data/entries.jsonl ~/backups/entries-$(date +\\%F).jsonl\n
    ","path":["Operations","Runbooks","Hub Deployment"],"tags":[]},{"location":"operations/runbooks/hub-deployment/#step-7-configure-tls-when-available","level":2,"title":"Step 7: Configure TLS (When Available)","text":"

    Coming Soon

    TLS support is planned (H-01/H-02). Until then, run the hub on a trusted network or behind a reverse proxy with TLS termination.

    ","path":["Operations","Runbooks","Hub Deployment"],"tags":[]},{"location":"operations/runbooks/hub-deployment/#team-onboarding-checklist","level":2,"title":"Team Onboarding Checklist","text":"

    When adding a new team member to an existing hub:

    • Generate a client token (ctx hub register --name \"<name>\")
    • Share the token and hub address securely
    • Have them run ctx connect <hub-address> --token <token>
    • Verify with ctx connection status
    • Point them to the Hub Getting Started recipe
    ","path":["Operations","Runbooks","Hub Deployment"],"tags":[]},{"location":"operations/runbooks/hub-deployment/#troubleshooting","level":2,"title":"Troubleshooting","text":"","path":["Operations","Runbooks","Hub Deployment"],"tags":[]},{"location":"operations/runbooks/hub-deployment/#connection-refused","level":3,"title":"\"Connection Refused\"","text":"

    The hub isn't running or the port is wrong. Check:

    ctx hub status          # on the hub machine\nss -tlnp | grep 9900   # default port\n
    ","path":["Operations","Runbooks","Hub Deployment"],"tags":[]},{"location":"operations/runbooks/hub-deployment/#authentication-failed","level":3,"title":"\"Authentication Failed\"","text":"

    The client token is wrong or was never registered. Re-register:

    ctx hub register --name \"<name>\" --admin-token <admin-token>\n
    ","path":["Operations","Runbooks","Hub Deployment"],"tags":[]},{"location":"operations/runbooks/hub-deployment/#entries-not-syncing","level":3,"title":"Entries Not Syncing","text":"

    Check that the client is listening:

    ctx connection status\n

    If connected but not syncing, check the hub logs for sequence mismatch errors. See Hub Failure Modes for details.

    ","path":["Operations","Runbooks","Hub Deployment"],"tags":[]},{"location":"operations/runbooks/new-contributor/","level":1,"title":"New Contributor","text":"","path":["Operations","Runbooks","New Contributor"],"tags":[]},{"location":"operations/runbooks/new-contributor/#new-contributor-onboarding","level":1,"title":"New Contributor Onboarding","text":"

    Step-by-step onboarding sequence for new contributors. Consolidates setup instructions currently scattered across the README, contributing guide, and setup docs.

    When to use: First-time contributor setup, or when verifying your development environment after a major upgrade.

    ","path":["Operations","Runbooks","New Contributor"],"tags":[]},{"location":"operations/runbooks/new-contributor/#step-1-clone-the-repository","level":2,"title":"Step 1: Clone the Repository","text":"
    git clone https://github.com/ActiveMemory/ctx.git\ncd ctx\n

    Or fork first on GitHub, then clone your fork.

    ","path":["Operations","Runbooks","New Contributor"],"tags":[]},{"location":"operations/runbooks/new-contributor/#step-2-initialize-context","level":2,"title":"Step 2: Initialize Context","text":"
    ctx init\n

    This creates the .context/ directory with knowledge files and the .claude/ directory with agent configuration. If ctx is not yet installed, proceed to Step 3 first, then come back.

    ","path":["Operations","Runbooks","New Contributor"],"tags":[]},{"location":"operations/runbooks/new-contributor/#step-3-build-and-install","level":2,"title":"Step 3: Build and Install","text":"
    make build\nsudo make install\n

    Verify:

    ctx --version\n
    ","path":["Operations","Runbooks","New Contributor"],"tags":[]},{"location":"operations/runbooks/new-contributor/#step-4-install-the-plugin-claude-code-users","level":2,"title":"Step 4: Install the Plugin (Claude Code Users)","text":"

    If you use Claude Code, install the plugin from your local clone so skills and hooks reflect your working tree:

    1. Launch claude
    2. Type /plugin and press Enter
    3. Select Marketplaces -> Add Marketplace
    4. Enter the absolute path to your clone (e.g., ~/WORKSPACE/ctx)
    5. Back in /plugin, select Install and choose ctx

    Verify:

    claude /plugin list   # should show ctx\n

    See Contributing: Install the Plugin for details on cache clearing.

    ","path":["Operations","Runbooks","New Contributor"],"tags":[]},{"location":"operations/runbooks/new-contributor/#step-5-switch-to-dev-profile","level":2,"title":"Step 5: Switch to Dev Profile","text":"
    ctx config switch dev\n

    This enables verbose logging and notify events — useful during development.

    ","path":["Operations","Runbooks","New Contributor"],"tags":[]},{"location":"operations/runbooks/new-contributor/#step-6-verify-hooks","level":2,"title":"Step 6: Verify Hooks","text":"

    Start a Claude Code session and check that hooks fire:

    claude\n

    You should see ctx session hooks (ceremonies reminder, context loading) on session start. If not, check that the plugin is installed correctly (Step 4).

    ","path":["Operations","Runbooks","New Contributor"],"tags":[]},{"location":"operations/runbooks/new-contributor/#step-7-run-your-first-session","level":2,"title":"Step 7: Run Your First Session","text":"

    In Claude Code:

    /ctx-status\n

    This should show context file health, active tasks, and recent decisions. If it works, your setup is complete.

    ","path":["Operations","Runbooks","New Contributor"],"tags":[]},{"location":"operations/runbooks/new-contributor/#step-8-verify-context-persistence","level":2,"title":"Step 8: Verify Context Persistence","text":"

    End the session and start a new one:

    /ctx-remember\n

    The agent should recall what happened in the previous session. This confirms that context persistence is working end-to-end.

    ","path":["Operations","Runbooks","New Contributor"],"tags":[]},{"location":"operations/runbooks/new-contributor/#step-9-run-tests","level":2,"title":"Step 9: Run Tests","text":"
    make test     # unit tests\nmake audit    # full check: fmt + vet + lint + drift + docs + test\n

    All tests should pass with a clean clone.

    ","path":["Operations","Runbooks","New Contributor"],"tags":[]},{"location":"operations/runbooks/new-contributor/#quick-reference","level":2,"title":"Quick Reference","text":"Task Command Build make build Install sudo make install Test make test Full audit make audit Rebuild docs site make site Serve docs locally make site-serve Clear plugin cache make plugin-reload Switch config profile ctx config switch dev","path":["Operations","Runbooks","New Contributor"],"tags":[]},{"location":"operations/runbooks/new-contributor/#next-steps","level":2,"title":"Next Steps","text":"
    • Read the contributing guide for project layout, code style, and PR process
    • Check TASKS.md for open work items
    • Ask /ctx-next for suggested work
    ","path":["Operations","Runbooks","New Contributor"],"tags":[]},{"location":"operations/runbooks/plugin-release/","level":1,"title":"Plugin Release","text":"","path":["Operations","Runbooks","Plugin Release"],"tags":[]},{"location":"operations/runbooks/plugin-release/#plugin-release","level":1,"title":"Plugin Release","text":"

    Plugin-specific release procedure. The general release checklist covers the full ctx release; this runbook covers the plugin-specific steps that are not part of that flow.

    When to use: When releasing plugin changes (new skills, hook updates, permission changes) independently of a ctx binary release, or as a sub-procedure within the full release.

    ","path":["Operations","Runbooks","Plugin Release"],"tags":[]},{"location":"operations/runbooks/plugin-release/#what-ships-in-the-plugin","level":2,"title":"What Ships in the Plugin","text":"

    The plugin lives at internal/assets/claude/ and includes:

    Component Path What it does Skills internal/assets/claude/skills/ User-facing /ctx-* slash commands Hooks internal/assets/claude/hooks/ Pre/post tool-use hooks Plugin manifest internal/assets/claude/.claude-plugin/plugin.json Declares skills, hooks, version Marketplace .claude-plugin/marketplace.json Points Claude Code to the plugin","path":["Operations","Runbooks","Plugin Release"],"tags":[]},{"location":"operations/runbooks/plugin-release/#step-1-update-hooksjson-if-hooks-changed","level":2,"title":"Step 1: Update hooks.json (If Hooks Changed)","text":"

    If you added, removed, or modified hooks:

    # Verify hook definitions match implementations\nmake audit\n

    Check that plugin.json lists all hooks correctly — missing hooks silently fail to fire.

    ","path":["Operations","Runbooks","Plugin Release"],"tags":[]},{"location":"operations/runbooks/plugin-release/#step-2-bump-version","level":2,"title":"Step 2: Bump Version","text":"

    Update the version in three places:

    • internal/assets/claude/.claude-plugin/plugin.json
    • .claude-plugin/marketplace.json (two fields)
    • editors/vscode/package.json + package-lock.json (if VS Code extension is affected)

    The Release Script Does This

    If you're running make release, the script bumps these automatically from VERSION. Only bump manually if you're releasing the plugin independently.

    ","path":["Operations","Runbooks","Plugin Release"],"tags":[]},{"location":"operations/runbooks/plugin-release/#step-3-test-against-a-fresh-install","level":2,"title":"Step 3: Test Against a Fresh Install","text":"
    # Clear cached plugin\nmake plugin-reload\n\n# Restart Claude Code, then:\nclaude /plugin list    # verify version\n

    Test the critical paths:

    • /ctx-status works
    • Session hooks fire (ceremonies, context loading)
    • At least one user-facing skill works end-to-end
    • Pre-tool-use hooks block when they should
    ","path":["Operations","Runbooks","Plugin Release"],"tags":[]},{"location":"operations/runbooks/plugin-release/#step-4-test-against-a-clean-project","level":2,"title":"Step 4: Test Against a Clean Project","text":"

    Create a temporary project to verify the plugin works outside the ctx repo:

    mkdir /tmp/test-ctx-plugin && cd /tmp/test-ctx-plugin\ngit init\nctx init\nclaude   # start a session, verify hooks fire\n
    ","path":["Operations","Runbooks","Plugin Release"],"tags":[]},{"location":"operations/runbooks/plugin-release/#step-5-verify-skill-count","level":2,"title":"Step 5: Verify Skill Count","text":"

    The plugin manifest declares all user-invocable skills. Verify the count matches:

    # Count skills in plugin.json\njq '.skills | length' internal/assets/claude/.claude-plugin/plugin.json\n\n# Count skill directories\nls -d internal/assets/claude/skills/ctx-*/ | wc -l\n

    These numbers should match (some skills are not user-invocable and won't appear in both counts).

    ","path":["Operations","Runbooks","Plugin Release"],"tags":[]},{"location":"operations/runbooks/plugin-release/#step-6-commit-and-tag","level":2,"title":"Step 6: Commit and Tag","text":"

    If releasing independently of a binary release:

    git add internal/assets/claude/ .claude-plugin/\ngit commit -m \"chore: release plugin v0.X.Y\"\ngit tag plugin-v0.X.Y\ngit push origin main --tags\n

    If part of a full release, the release checklist handles this.

    ","path":["Operations","Runbooks","Plugin Release"],"tags":[]},{"location":"operations/runbooks/plugin-release/#troubleshooting","level":2,"title":"Troubleshooting","text":"","path":["Operations","Runbooks","Plugin Release"],"tags":[]},{"location":"operations/runbooks/plugin-release/#skills-dont-appear-after-update","level":3,"title":"Skills Don't Appear After Update","text":"

    Claude Code caches plugin files aggressively:

    make plugin-reload    # clears cache\n# restart Claude Code\n
    ","path":["Operations","Runbooks","Plugin Release"],"tags":[]},{"location":"operations/runbooks/plugin-release/#hooks-dont-fire","level":3,"title":"Hooks Don't Fire","text":"

    Check that the hook is registered in plugin.json and that the command it calls exists:

    jq '.hooks' internal/assets/claude/.claude-plugin/plugin.json\n
    ","path":["Operations","Runbooks","Plugin Release"],"tags":[]},{"location":"operations/runbooks/plugin-release/#version-mismatch","level":3,"title":"Version Mismatch","text":"

    If claude /plugin list shows an old version after updating:

    make plugin-reload\n# restart Claude Code\nclaude /plugin list   # should show new version\n
    ","path":["Operations","Runbooks","Plugin Release"],"tags":[]},{"location":"operations/runbooks/release-checklist/","level":1,"title":"Release Checklist","text":"","path":["Operations","Runbooks","Release Checklist"],"tags":[]},{"location":"operations/runbooks/release-checklist/#release-checklist","level":1,"title":"Release Checklist","text":"

    The canonical pre-release sequence. This runbook ties together the audits, tests, and release steps that are otherwise scattered across docs and the operator's head.

    When to run: Before every release. No exceptions.

    Companion: The /_ctx-release skill automates the tag-and-push portion; this checklist covers everything before and after that automation.

    ","path":["Operations","Runbooks","Release Checklist"],"tags":[]},{"location":"operations/runbooks/release-checklist/#pre-release","level":2,"title":"Pre-Release","text":"","path":["Operations","Runbooks","Release Checklist"],"tags":[]},{"location":"operations/runbooks/release-checklist/#1-run-the-codebase-audit","level":3,"title":"1. Run the Codebase Audit","text":"

    Use the codebase audit runbook prompt with your agent. Focus on analyses 1-4 (extractable patterns, documentation drift, maintainability, security). Triage findings into TASKS.md — anything blocking ships before the release.

    ","path":["Operations","Runbooks","Release Checklist"],"tags":[]},{"location":"operations/runbooks/release-checklist/#2-run-the-docs-semantic-audit","level":3,"title":"2. Run the Docs Semantic Audit","text":"

    Use the docs semantic audit runbook prompt. Fix high-severity findings (weak pages, broken narrative arcs). Medium-severity items can be deferred.

    ","path":["Operations","Runbooks","Release Checklist"],"tags":[]},{"location":"operations/runbooks/release-checklist/#3-sanitize-permissions","level":3,"title":"3. Sanitize Permissions","text":"

    Follow the sanitize permissions runbook. Clean up .claude/settings.local.json before it gets committed as part of the release.

    ","path":["Operations","Runbooks","Release Checklist"],"tags":[]},{"location":"operations/runbooks/release-checklist/#4-run-the-full-test-suite","level":3,"title":"4. Run the Full Test Suite","text":"
    make audit    # fmt + vet + lint + drift + docs + test\nmake smoke    # integration smoke tests\n

    All tests must pass. No exceptions.

    ","path":["Operations","Runbooks","Release Checklist"],"tags":[]},{"location":"operations/runbooks/release-checklist/#5-check-context-health","level":3,"title":"5. Check Context Health","text":"
    ctx drift          # broken references, stale patterns\nctx status         # context file health\n/ctx-link-check    # dead links in docs\n

    Fix anything flagged.

    ","path":["Operations","Runbooks","Release Checklist"],"tags":[]},{"location":"operations/runbooks/release-checklist/#6-review-tasksmd","level":3,"title":"6. Review TASKS.md","text":"

    Scan for incomplete tasks tagged as release-blocking. Either finish them or explicitly defer with a reason in the task note.

    ","path":["Operations","Runbooks","Release Checklist"],"tags":[]},{"location":"operations/runbooks/release-checklist/#release","level":2,"title":"Release","text":"","path":["Operations","Runbooks","Release Checklist"],"tags":[]},{"location":"operations/runbooks/release-checklist/#7-bump-version","level":3,"title":"7. Bump Version","text":"
    echo \"0.X.0\" > VERSION\ngit add VERSION\ngit commit -m \"chore: bump version to 0.X.0\"\n
    ","path":["Operations","Runbooks","Release Checklist"],"tags":[]},{"location":"operations/runbooks/release-checklist/#8-generate-release-notes","level":3,"title":"8. Generate Release Notes","text":"

    In Claude Code:

    /_ctx-release-notes\n

    Review dist/RELEASE_NOTES.md. Ensure it captures all user-visible changes.

    ","path":["Operations","Runbooks","Release Checklist"],"tags":[]},{"location":"operations/runbooks/release-checklist/#9-cut-the-release","level":3,"title":"9. Cut the Release","text":"
    make release\n

    Or in Claude Code: /_ctx-release. See Cutting a Release for the full step-by-step.

    ","path":["Operations","Runbooks","Release Checklist"],"tags":[]},{"location":"operations/runbooks/release-checklist/#post-release","level":2,"title":"Post-Release","text":"","path":["Operations","Runbooks","Release Checklist"],"tags":[]},{"location":"operations/runbooks/release-checklist/#10-verify-the-github-release","level":3,"title":"10. Verify the GitHub Release","text":"
    • GitHub Releases shows the new version
    • All 6 binaries are attached
    • SHA256 checksums are attached
    • Release notes render correctly
    ","path":["Operations","Runbooks","Release Checklist"],"tags":[]},{"location":"operations/runbooks/release-checklist/#11-update-the-plugin-marketplace","level":3,"title":"11. Update the Plugin Marketplace","text":"

    If the plugin version changed, verify the marketplace entry:

    claude /plugin list   # shows updated version\n
    ","path":["Operations","Runbooks","Release Checklist"],"tags":[]},{"location":"operations/runbooks/release-checklist/#12-announce","level":3,"title":"12. Announce","text":"

    Post in the project's communication channels. Reference the release notes.

    ","path":["Operations","Runbooks","Release Checklist"],"tags":[]},{"location":"operations/runbooks/release-checklist/#13-clean-up","level":3,"title":"13. Clean Up","text":"
    rm dist/RELEASE_NOTES.md   # consumed by the release script\ngit stash pop              # if you stashed earlier\n
    ","path":["Operations","Runbooks","Release Checklist"],"tags":[]},{"location":"operations/runbooks/sanitize-permissions/","level":1,"title":"Sanitize Permissions","text":"","path":["Operations","Runbooks","Sanitize Permissions"],"tags":[]},{"location":"operations/runbooks/sanitize-permissions/#sanitize-permissions","level":1,"title":"Sanitize Permissions","text":"

    Manual procedure for cleaning up .claude/settings.local.json. The agent may analyze and recommend, but you make every edit.

    ","path":["Operations","Runbooks","Sanitize Permissions"],"tags":[]},{"location":"operations/runbooks/sanitize-permissions/#why-manual-not-automated","level":2,"title":"Why Manual, Not Automated","text":"

    settings.local.json controls what the agent can do without asking. An agent that can edit its own permission file is a self-escalation vector — especially if the skill is auto-accepted. Keep this manual.

    When to run: After busy sessions where you clicked \"Allow\" many times, weekly hygiene (pair with ctx drift), or before committing .claude/settings.local.json.

    ","path":["Operations","Runbooks","Sanitize Permissions"],"tags":[]},{"location":"operations/runbooks/sanitize-permissions/#step-1-snapshot","level":2,"title":"Step 1: Snapshot","text":"
    cp .claude/settings.local.json /tmp/settings-backup-$(date +%Y%m%d).json\n
    ","path":["Operations","Runbooks","Sanitize Permissions"],"tags":[]},{"location":"operations/runbooks/sanitize-permissions/#step-2-extract-the-allow-list","level":2,"title":"Step 2: Extract the Allow List","text":"
    jq '.permissions.allow[]' .claude/settings.local.json | sort\n

    Eyeball it. You're looking for four categories:

    ","path":["Operations","Runbooks","Sanitize Permissions"],"tags":[]},{"location":"operations/runbooks/sanitize-permissions/#step-3-identify-problems","level":2,"title":"Step 3: Identify Problems","text":"","path":["Operations","Runbooks","Sanitize Permissions"],"tags":[]},{"location":"operations/runbooks/sanitize-permissions/#a-garbage-nonsense","level":3,"title":"A. Garbage / Nonsense","text":"

    Entries that are clearly broken or meaningless:

    Bash(done)\nBash(__NEW_LINE_aa838494a90279c4__ echo \"\")\n

    Action: Delete.

    ","path":["Operations","Runbooks","Sanitize Permissions"],"tags":[]},{"location":"operations/runbooks/sanitize-permissions/#b-one-off-commands-session-debris","level":3,"title":"B. One-Off Commands (Session Debris)","text":"

    Entries with hardcoded paths, literal arguments, or exact commands that were accepted during a specific debugging session:

    Bash(git -C /home/jose/WORKSPACE/ctx log --oneline --all -20)\nBash(/home/jose/WORKSPACE/ctx/ctx add decision \"Use PostgreSQL\" --context ...)\n

    Signs of a one-off:

    • Full absolute paths to specific files
    • Literal string arguments (not wildcards)
    • Very specific flag combinations
    • Commands that look like they came from a single task

    Action: Delete unless you want to promote to a wildcard pattern.

    ","path":["Operations","Runbooks","Sanitize Permissions"],"tags":[]},{"location":"operations/runbooks/sanitize-permissions/#c-subsumed-entries-redundant","level":3,"title":"C. Subsumed Entries (Redundant)","text":"

    A narrow entry that's already covered by a broader one:

    # Narrow (redundant):\nBash(ctx journal source)\nBash(git -C /home/jose/WORKSPACE/ctx log --oneline -5)\n\n# Broad (already covers the above):\nBash(ctx journal source:*)\nBash(git -C:*)\n

    To find these, look for entries where removing the specific args would match an existing wildcard entry.

    Action: Delete the narrow entry.

    ","path":["Operations","Runbooks","Sanitize Permissions"],"tags":[]},{"location":"operations/runbooks/sanitize-permissions/#d-duplicate-intent-different-spelling","level":3,"title":"D. Duplicate Intent, Different Spelling","text":"

    Same command with env vars in different order, or slight variations:

    Bash(CGO_ENABLED=0 CTX_SKIP_PATH_CHECK=1 go test:*)\nBash(CTX_SKIP_PATH_CHECK=1 CGO_ENABLED=0 go test:*)\n

    Action: Keep one, delete the other.

    ","path":["Operations","Runbooks","Sanitize Permissions"],"tags":[]},{"location":"operations/runbooks/sanitize-permissions/#step-4-check-for-security-concerns","level":2,"title":"Step 4: Check for Security Concerns","text":"

    While you're in here, also flag:

    Pattern Risk Bash(git push:*) Bypasses block-git-push.sh hook Bash(rm -rf:*) Recursive delete, no confirmation Bash(sudo:*) Privilege escalation Bash(echo:*), Bash(cat:*) Can compose into writes to sensitive files Bash(curl:*), Bash(wget:*) Arbitrary network access Any write to .claude/ paths Agent self-modification

    See the /ctx-permission-sanitize skill for the full threat matrix.

    ","path":["Operations","Runbooks","Sanitize Permissions"],"tags":[]},{"location":"operations/runbooks/sanitize-permissions/#step-5-edit","level":2,"title":"Step 5: Edit","text":"

    Edit .claude/settings.local.json directly in your editor. Remove flagged entries. Keep the JSON valid.

    # Validate JSON after editing\njq . .claude/settings.local.json > /dev/null && echo \"valid\" || echo \"BROKEN\"\n
    ","path":["Operations","Runbooks","Sanitize Permissions"],"tags":[]},{"location":"operations/runbooks/sanitize-permissions/#step-6-verify","level":2,"title":"Step 6: Verify","text":"
    # Compare before/after\ndiff /tmp/settings-backup-$(date +%Y%m%d).json .claude/settings.local.json\n
    ","path":["Operations","Runbooks","Sanitize Permissions"],"tags":[]},{"location":"operations/runbooks/sanitize-permissions/#step-7-optionally-commit","level":2,"title":"Step 7: Optionally Commit","text":"
    git add .claude/settings.local.json\ngit commit -m \"chore: sanitize agent permissions\"\n
    ","path":["Operations","Runbooks","Sanitize Permissions"],"tags":[]},{"location":"operations/runbooks/sanitize-permissions/#asking-the-agent-for-help","level":2,"title":"Asking the Agent for Help","text":"

    You can safely ask the agent to analyze the file:

    \"Look at my settings.local.json and tell me which permissions look like one-offs or are redundant.\"

    The agent can read and report. You do the edits.

    Do not add these to your allow list:

    • Skill(ctx-permission-sanitize)
    • Edit(.claude/settings.local.json)
    • Any Bash(...) pattern that writes to .claude/
    ","path":["Operations","Runbooks","Sanitize Permissions"],"tags":[]},{"location":"operations/runbooks/sanitize-permissions/#history","level":2,"title":"History","text":"
    • 2026-02-15: Created as manual-only procedure after deciding against a self-modifying skill.
    • 2026-04-16: Moved from hack/runbooks/ to docs/operations/runbooks/.
    ","path":["Operations","Runbooks","Sanitize Permissions"],"tags":[]},{"location":"recipes/","level":1,"title":"Recipes","text":"

    Workflow recipes combining ctx commands and skills to solve specific problems.

    ","path":["Recipes"],"tags":[]},{"location":"recipes/#getting-started","level":2,"title":"Getting Started","text":"","path":["Recipes"],"tags":[]},{"location":"recipes/#guide-your-agent","level":3,"title":"Guide Your Agent","text":"

    How commands, skills, and conversational patterns work together. Train your agent to be proactive through ask, guide, reinforce.

    ","path":["Recipes"],"tags":[]},{"location":"recipes/#setup-across-ai-tools","level":3,"title":"Setup across AI Tools","text":"

    Initialize ctx and configure hooks for Claude Code, Cursor, Aider, Copilot, or Windsurf. Includes shell completion, watch mode for non-native tools, and verification.

    Uses: ctx init, ctx setup, ctx agent, ctx completion, ctx watch

    ","path":["Recipes"],"tags":[]},{"location":"recipes/#multilingual-session-parsing","level":3,"title":"Multilingual Session Parsing","text":"

    Parse session journal entries written in other languages. Configure recognized session-header prefixes so the journal pipeline works for Turkish, Japanese, and any other locale.

    Uses: ctx journal source, ctx journal import, session_prefixes in .ctxrc

    ","path":["Recipes"],"tags":[]},{"location":"recipes/#keeping-context-in-a-separate-repo","level":3,"title":"Keeping Context in a Separate Repo","text":"

    Store context files outside the project tree: in a private repo, shared directory, or anywhere else. Useful for open source projects with private context or multi-repo setups.

    Uses: ctx init, --context-dir, --allow-outside-cwd, .ctxrc, /ctx-status

    ","path":["Recipes"],"tags":[]},{"location":"recipes/#sessions","level":2,"title":"Sessions","text":"","path":["Recipes"],"tags":[]},{"location":"recipes/#the-complete-session","level":3,"title":"The Complete Session","text":"

    Walk through a full ctx session from start to finish:

    • Loading context,
    • Picking what to work on,
    • Committing with context,
    • Capturing, reflecting, and saving a snapshot.

    Uses: ctx status, ctx agent, /ctx-remember, /ctx-next, /ctx-commit, /ctx-reflect

    ","path":["Recipes"],"tags":[]},{"location":"recipes/#session-ceremonies","level":3,"title":"Session Ceremonies","text":"

    The two bookend rituals for every session: /ctx-remember at the start to load and confirm context, /ctx-wrap-up at the end to review the session and persist learnings, decisions, and tasks.

    Uses: /ctx-remember, /ctx-wrap-up, /ctx-commit, ctx agent, ctx add

    ","path":["Recipes"],"tags":[]},{"location":"recipes/#browsing-and-enriching-past-sessions","level":3,"title":"Browsing and Enriching Past Sessions","text":"

    Export your AI session history to a browsable journal site. Enrich entries with metadata and search across months of work.

    Uses: ctx journal source/import, ctx journal site, ctx journal obsidian, ctx serve, /ctx-history, /ctx-journal-enrich, /ctx-journal-enrich-all

    ","path":["Recipes"],"tags":[]},{"location":"recipes/#session-reminders","level":3,"title":"Session Reminders","text":"

    Leave a message for your next session. Reminders surface automatically at session start and repeat until dismissed. Date-gate reminders to surface only after a specific date.

    Uses: ctx remind, ctx remind list, ctx remind dismiss, ctx system check-reminders

    ","path":["Recipes"],"tags":[]},{"location":"recipes/#reviewing-session-changes","level":3,"title":"Reviewing Session Changes","text":"

    See what moved since your last session: context file edits, code commits, directories touched. Auto-detects session boundaries from state markers.

    Uses: ctx change, ctx agent, ctx status

    ","path":["Recipes"],"tags":[]},{"location":"recipes/#pausing-context-hooks","level":3,"title":"Pausing Context Hooks","text":"

    Silence all nudge hooks for a quick task that doesn't need ceremony overhead. Session-scoped: other sessions are unaffected. Security hooks still fire.

    Uses: ctx hook pause, ctx hook resume, /ctx-pause, /ctx-resume

    ","path":["Recipes"],"tags":[]},{"location":"recipes/#knowledge-and-tasks","level":2,"title":"Knowledge and Tasks","text":"","path":["Recipes"],"tags":[]},{"location":"recipes/#persisting-decisions-learnings-and-conventions","level":3,"title":"Persisting Decisions, Learnings, and Conventions","text":"

    Record architectural decisions with rationale, capture gotchas and lessons learned, and codify conventions so they survive across sessions and team members.

    Uses: ctx add decision, ctx add learning, ctx add convention, ctx decision reindex, ctx learning reindex, /ctx-decision-add, /ctx-learning-add, /ctx-convention-add, /ctx-reflect

    ","path":["Recipes"],"tags":[]},{"location":"recipes/#tracking-work-across-sessions","level":3,"title":"Tracking Work across Sessions","text":"

    Add, prioritize, complete, snapshot, and archive tasks. Keep TASKS.md focused as your project evolves across dozens of sessions.

    Uses: ctx add task, ctx task complete, ctx task archive, ctx task snapshot, /ctx-task-add, /ctx-archive, /ctx-next

    ","path":["Recipes"],"tags":[]},{"location":"recipes/#using-the-scratchpad","level":3,"title":"Using the Scratchpad","text":"

    Use the encrypted scratchpad for quick notes, working memory, and sensitive values during AI sessions. Natural language in, encrypted storage out.

    Uses: ctx pad, /ctx-pad, ctx pad show, ctx pad edit

    ","path":["Recipes"],"tags":[]},{"location":"recipes/#syncing-scratchpad-notes-across-machines","level":3,"title":"Syncing Scratchpad Notes across Machines","text":"

    Distribute your scratchpad encryption key, push and pull encrypted notes via git, and resolve merge conflicts when two machines edit simultaneously.

    Uses: ctx init, ctx pad, ctx pad resolve, scp

    ","path":["Recipes"],"tags":[]},{"location":"recipes/#bridging-claude-code-auto-memory","level":3,"title":"Bridging Claude Code Auto Memory","text":"

    Mirror Claude Code's auto memory (MEMORY.md) into .context/ for version control, portability, and drift detection. Import entries into structured context files with heuristic classification.

    Uses: ctx memory sync, ctx memory status, ctx memory diff, ctx memory import, ctx memory publish, ctx system check-memory-drift

    ","path":["Recipes"],"tags":[]},{"location":"recipes/#hooks-and-notifications","level":2,"title":"Hooks and Notifications","text":"","path":["Recipes"],"tags":[]},{"location":"recipes/#hook-output-patterns","level":3,"title":"Hook Output Patterns","text":"

    Choose the right output pattern for your Claude Code hooks: VERBATIM relay for user-facing reminders, hard gates for invariants, agent directives for nudges, and five more patterns across the spectrum.

    Uses: ctx plugin hooks, settings.local.json

    ","path":["Recipes"],"tags":[]},{"location":"recipes/#customizing-hook-messages","level":3,"title":"Customizing Hook Messages","text":"

    Customize what hooks say without changing what they do. Override the QA gate for Python (pytest instead of make lint), silence noisy ceremony nudges, or tailor post-commit instructions for your stack.

    Uses: ctx hook message list, ctx hook message show, ctx hook message edit, ctx hook message reset

    ","path":["Recipes"],"tags":[]},{"location":"recipes/#hook-sequence-diagrams","level":3,"title":"Hook Sequence Diagrams","text":"

    Mermaid sequence diagrams for every system hook: entry conditions, state reads, output, throttling, and exit points. Includes throttling summary table and state file reference.

    Uses: All ctx system hooks

    ","path":["Recipes"],"tags":[]},{"location":"recipes/#auditing-system-hooks","level":3,"title":"Auditing System Hooks","text":"

    The 12 system hooks that run invisibly during every session: what each one does, why it exists, and how to verify they're actually firing. Covers webhook-based audit trails, log inspection, and detecting silent hook failures.

    Uses: ctx system, ctx hook notify, .context/logs/, .ctxrc notify.events

    ","path":["Recipes"],"tags":[]},{"location":"recipes/#webhook-notifications","level":3,"title":"Webhook Notifications","text":"

    Get push notifications when loops complete, hooks fire, or agents hit milestones. Webhook URL is encrypted: never stored in plaintext. Works with IFTTT, Slack, Discord, ntfy.sh, or any HTTP endpoint.

    Uses: ctx hook notify setup, ctx hook notify test, ctx hook notify --event, .ctxrc notify.events

    ","path":["Recipes"],"tags":[]},{"location":"recipes/#configuration-profiles","level":3,"title":"Configuration Profiles","text":"

    Switch between dev and base runtime configurations without editing .ctxrc by hand. Verbose logging and webhooks for debugging, clean defaults for normal sessions.

    Uses: ctx config switch, ctx config status, /ctx-config

    ","path":["Recipes"],"tags":[]},{"location":"recipes/#maintenance","level":2,"title":"Maintenance","text":"","path":["Recipes"],"tags":[]},{"location":"recipes/#detecting-and-fixing-drift","level":3,"title":"Detecting and Fixing Drift","text":"

    Keep context files accurate by detecting structural drift (stale paths, missing files, stale file ages) and task staleness.

    Uses: ctx drift, ctx sync, ctx compact, ctx status, /ctx-drift, /ctx-status, /ctx-prompt-audit

    ","path":["Recipes"],"tags":[]},{"location":"recipes/#state-directory-maintenance","level":3,"title":"State Directory Maintenance","text":"

    Clean up session tombstones from .context/state/. Prune old per-session files, identify stale global markers, and keep the state directory lean.

    Uses: ctx prune

    ","path":["Recipes"],"tags":[]},{"location":"recipes/#troubleshooting","level":3,"title":"Troubleshooting","text":"

    Diagnose hook failures, noisy nudges, stale context, and configuration issues. Start with ctx doctor for a structural health check, then use /ctx-doctor for agent-driven analysis of event patterns.

    Uses: ctx doctor, ctx hook event, /ctx-doctor

    ","path":["Recipes"],"tags":[]},{"location":"recipes/#claude-code-permission-hygiene","level":3,"title":"Claude Code Permission Hygiene","text":"

    Keep .claude/settings.local.json clean: recommended safe defaults, what to never pre-approve, and a maintenance workflow for cleaning up session debris.

    Uses: ctx init, /ctx-drift, /ctx-permission-sanitize, ctx permission snapshot, ctx permission restore

    ","path":["Recipes"],"tags":[]},{"location":"recipes/#permission-snapshots","level":3,"title":"Permission Snapshots","text":"

    Capture a known-good permission baseline as a golden image, then restore at session start to automatically drop session-accumulated permissions.

    Uses: ctx permission snapshot, ctx permission restore, /ctx-permission-sanitize

    ","path":["Recipes"],"tags":[]},{"location":"recipes/#turning-activity-into-content","level":3,"title":"Turning Activity into Content","text":"

    Generate blog posts from project activity, write changelog posts from commit ranges, and publish a browsable journal site from your session history.

    The output is generic Markdown, but the skills are tuned for the ctx-style blog artifacts you see on this website.

    Uses: ctx journal site, ctx journal obsidian, ctx serve, ctx journal import, /ctx-blog, /ctx-blog-changelog, /ctx-journal-enrich

    ","path":["Recipes"],"tags":[]},{"location":"recipes/#importing-claude-code-plans","level":3,"title":"Importing Claude Code Plans","text":"

    Import Claude Code plan files (~/.claude/plans/*.md) into specs/ as permanent project specs. Filter by date, select interactively, and optionally create tasks referencing each imported spec.

    Uses: /ctx-plan-import, /ctx-task-add

    ","path":["Recipes"],"tags":[]},{"location":"recipes/#design-before-coding","level":3,"title":"Design Before Coding","text":"

    Front-load design with a four-skill chain: brainstorm the approach, spec the design, task the work, implement step-by-step. Each step produces an artifact that feeds the next.

    Uses: /ctx-brainstorm, /ctx-spec, /ctx-task-add, /ctx-implement, /ctx-decision-add

    ","path":["Recipes"],"tags":[]},{"location":"recipes/#agents-and-automation","level":2,"title":"Agents and Automation","text":"","path":["Recipes"],"tags":[]},{"location":"recipes/#building-project-skills","level":3,"title":"Building Project Skills","text":"

    Encode repeating workflows into reusable skills the agent loads automatically. Covers the full cycle: identify a pattern, create the skill, test with realistic prompts, and iterate until it triggers correctly.

    Uses: /ctx-skill-create, ctx init

    ","path":["Recipes"],"tags":[]},{"location":"recipes/#running-an-unattended-ai-agent","level":3,"title":"Running an Unattended AI Agent","text":"

    Set up a loop where an AI agent works through tasks overnight without you at the keyboard, using ctx for persistent memory between iterations.

    This recipe shows how ctx supports long-running agent loops without losing context or intent.

    Uses: ctx init, ctx loop, ctx watch, ctx load, /ctx-loop, /ctx-implement

    ","path":["Recipes"],"tags":[]},{"location":"recipes/#when-to-use-a-team-of-agents","level":3,"title":"When to Use a Team of Agents","text":"

    Decision framework for choosing between a single agent, parallel worktrees, and a full agent team.

    This recipe covers the file overlap test, when teams make things worse, and what ctx provides at each level.

    Uses: /ctx-worktree, /ctx-next, ctx status

    ","path":["Recipes"],"tags":[]},{"location":"recipes/#parallel-agent-development-with-git-worktrees","level":3,"title":"Parallel Agent Development with Git Worktrees","text":"

    Split a large backlog across 3-4 agents using git worktrees, each on its own branch and working directory. Group tasks by file overlap, work in parallel, merge back.

    Uses: /ctx-worktree, /ctx-next, git worktree, git merge

    ","path":["Recipes"],"tags":[]},{"location":"recipes/#architecture-deep-dive","level":3,"title":"Architecture Deep Dive","text":"

    Three-pass pipeline for understanding a codebase: map what exists, enrich with code intelligence, then hunt for where it will silently fail. Produces architecture docs, quantified dependency data, and ranked failure hypotheses.

    Uses: /ctx-architecture, /ctx-architecture-enrich, /ctx-architecture-failure-analysis

    ","path":["Recipes"],"tags":[]},{"location":"recipes/#writing-steering-files","level":3,"title":"Writing Steering Files","text":"

    Tell your AI assistant how to behave — rule-based prompt injection that fires automatically when prompts match a description. Walks through scaffolding a steering file, previewing matches, and syncing to each AI tool's native format.

    Uses: ctx steering add, ctx steering preview, ctx steering list, ctx steering sync

    ","path":["Recipes"],"tags":[]},{"location":"recipes/#authoring-lifecycle-triggers","level":3,"title":"Authoring Lifecycle Triggers","text":"

    Run executable shell scripts at session-start, pre-tool-use, file-save, and other lifecycle events. Script-based automation (complementary to steering's rule-based prompts), with a security-first workflow: scaffold disabled, test with mock input, enable only after review.

    Uses: ctx trigger add, ctx trigger test, ctx trigger enable, ctx trigger disable, ctx trigger list

    ","path":["Recipes"],"tags":[]},{"location":"recipes/#hub","level":2,"title":"Hub","text":"","path":["Recipes"],"tags":[]},{"location":"recipes/#hub-overview","level":3,"title":"Hub Overview","text":"

    Mental model and three user stories for the ctx Hub. What flows, what doesn't, and when not to use it. Read this before any of the other Hub recipes.

    Uses: ctx hub, ctx connection, ctx add --share

    ","path":["Recipes"],"tags":[]},{"location":"recipes/#ctx-hub-getting-started","level":3,"title":"ctx Hub: Getting Started","text":"

    Stand up a single-node hub on localhost, register two projects, publish a decision from one, and watch it appear in the other. End-to-end in under five minutes.

    Uses: ctx hub start, ctx connection register, ctx connection subscribe, ctx connection sync, ctx connection listen, ctx add --share, ctx agent --include-hub

    ","path":["Recipes"],"tags":[]},{"location":"recipes/#personal-cross-project-brain","level":3,"title":"Personal Cross-Project Brain","text":"

    Story 1 day-to-day workflow: one developer, many projects, one hub on localhost. Records a learning in project A, watches it show up automatically in project B. Walks through a realistic day of using the hub as passive infrastructure — no manual sync, no git push, no ceremony.

    Uses: ctx add --share, ctx connection subscribe, ctx agent --include-hub

    ","path":["Recipes"],"tags":[]},{"location":"recipes/#team-knowledge-bus","level":3,"title":"Team Knowledge Bus","text":"

    Story 2 day-to-day workflow: a small trusted team sharing decisions, learnings, and conventions via a hub on an internal server. Covers the team publishing culture, what belongs on the hub vs. local, token management, and the social rules that make a shared knowledge stream stay signal-rich.

    Uses: ctx add --share, ctx connection status, ctx connection subscribe, ctx hub status

    ","path":["Recipes"],"tags":[]},{"location":"recipes/#ctx-hub-multi-machine","level":3,"title":"ctx Hub: Multi-Machine","text":"

    Run the hub on a LAN host as a daemon and connect from project directories on other workstations. Firewall guidance, TLS via a reverse proxy, and safe daemon restart semantics.

    Uses: ctx hub start --daemon, ctx hub stop, ctx connection register, ctx connection status

    ","path":["Recipes"],"tags":[]},{"location":"recipes/#ctx-hub-ha-cluster","level":3,"title":"ctx Hub: HA Cluster","text":"

    Raft-based leader election across three or more nodes for redundancy. Covers bootstrap, runtime peer management, graceful stepdown, and the Raft-lite durability caveat.

    Uses: ctx hub start --peers, ctx hub status, ctx hub peer add/remove, ctx hub stepdown

    ","path":["Recipes"],"tags":[]},{"location":"recipes/architecture-deep-dive/","level":1,"title":"Architecture Deep Dive","text":"","path":["Architecture Deep Dive"],"tags":[]},{"location":"recipes/architecture-deep-dive/#the-problem","level":2,"title":"The Problem","text":"

    Understanding a codebase at the surface level is easy. Understanding where it will break under real-world conditions takes three passes: mapping what exists, quantifying how it connects, and hunting for where it silently fails. Most teams stop at the first pass.

    ","path":["Architecture Deep Dive"],"tags":[]},{"location":"recipes/architecture-deep-dive/#tldr","level":2,"title":"TL;DR","text":"
    # Pass 1: Map the system\n/ctx-architecture\n\n# Pass 2: Enrich with code intelligence\n/ctx-architecture-enrich\n\n# Pass 3: Hunt for failure modes\n/ctx-architecture-failure-analysis\n

    Each pass builds on the previous one. Run them in order. The output accumulates in .context/ — each pass reads the prior artifacts and extends them.

    ","path":["Architecture Deep Dive"],"tags":[]},{"location":"recipes/architecture-deep-dive/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Tool Type Purpose /ctx-architecture Skill Map modules, dependencies, data flow, patterns /ctx-architecture-enrich Skill Verify blast radius and flows with code intel /ctx-architecture-failure-analysis Skill Generate falsifiable incident hypotheses ctx drift CLI Detect stale paths and broken references ctx status CLI Quick structural overview","path":["Architecture Deep Dive"],"tags":[]},{"location":"recipes/architecture-deep-dive/#the-workflow","level":2,"title":"The Workflow","text":"","path":["Architecture Deep Dive"],"tags":[]},{"location":"recipes/architecture-deep-dive/#pass-1-map-what-exists","level":3,"title":"Pass 1: Map What Exists","text":"
    /ctx-architecture\n

    Produces:

    • ARCHITECTURE.md — succinct project map (< 4000 tokens), loaded at every session start
    • DETAILED_DESIGN*.md — deep per-module reference with exported API, data flow, danger zones, extension points
    • CHEAT-SHEETS.md — lifecycle flow diagrams
    • map-tracking.json — coverage state with confidence scores

    This pass forces deep code reading. No shortcuts, no code intelligence tools — the agent reads every module it analyzes. That forced reading is what makes the subsequent passes useful.

    When to run: First time on a codebase, or after significant structural changes (new packages, moved files, changed dependencies).

    Principal mode: Add principal to get strategic analysis (ARCHITECTURE-PRINCIPAL.md, DANGER-ZONES.md from P4):

    /ctx-architecture principal\n
    ","path":["Architecture Deep Dive"],"tags":[]},{"location":"recipes/architecture-deep-dive/#pass-2-enrich-with-code-intelligence","level":3,"title":"Pass 2: Enrich with Code Intelligence","text":"
    /ctx-architecture-enrich\n

    Takes the Pass 1 artifacts as baseline and layers on verified, graph-backed data from GitNexus:

    • Blast radius numbers for key functions
    • Execution flow traces through hot paths
    • Domain clustering validation
    • Registration site discovery

    This pass does not replace reading — it quantifies what reading found. If Pass 1 says \"module X depends on module Y,\" Pass 2 says \"module X has 47 callers in module Y, and changing function Z would affect 12 downstream consumers.\"

    When to run: After Pass 1, when you need quantified confidence for refactoring decisions or risk assessment.

    Requires: GitNexus MCP server connected.

    ","path":["Architecture Deep Dive"],"tags":[]},{"location":"recipes/architecture-deep-dive/#pass-3-hunt-for-failure-modes","level":3,"title":"Pass 3: Hunt for Failure Modes","text":"
    /ctx-architecture-failure-analysis\n

    The adversarial pass. Reads all prior artifacts, then systematically hunts for correctness bugs across 9 failure categories:

    1. Concurrency (races, deadlocks, goroutine leaks)
    2. Ordering assumptions (init, registration, shutdown)
    3. Cache staleness (TTL-less, read-your-writes, cross-process)
    4. Fan-out amplification (N+1, retry storms)
    5. Ownership and lifecycle (orphans, double-close)
    6. Error handling (silent swallowing, partial failure)
    7. Scaling cliffs (quadratic, unbounded, global locks)
    8. Idempotency failures (duplicate processing, retry mutations)
    9. State machine drift (illegal states, unvalidated transitions)

    Every finding must meet an evidence standard: code path, trigger, failure path, silence reason, and code evidence. A mandatory challenge phase attempts to disprove each finding before it is accepted. Findings carry a confidence level (High/Medium/Low) and explicit risk score.

    Produces DANGER-ZONES.md — a ranked inventory of findings split into Critical and Elevated tiers.

    When to run: Before releases, after major refactors, when investigating incident categories, or when onboarding.

    ","path":["Architecture Deep Dive"],"tags":[]},{"location":"recipes/architecture-deep-dive/#what-you-get","level":2,"title":"What You Get","text":"

    After all three passes, .context/ contains:

    File From Purpose ARCHITECTURE.md Pass 1 System map (session-start context) DETAILED_DESIGN*.md Pass 1 Module-level deep reference CHEAT-SHEETS.md Pass 1 Lifecycle flow diagrams map-tracking.json Pass 1 Coverage and confidence data CONVERGENCE-REPORT.md Pass 1 What's covered, what's not DANGER-ZONES.md Pass 3 Ranked failure hypotheses

    Pass 2 enriches Pass 1 artifacts in-place rather than creating new files.

    ","path":["Architecture Deep Dive"],"tags":[]},{"location":"recipes/architecture-deep-dive/#tips","level":2,"title":"Tips","text":"
    • Run Pass 1 with focus areas if the codebase is large. The skill asks what to go deep on — name the modules you're about to change.
    • You don't need all three passes every time. Pass 1 is the foundation. Pass 2 and 3 are for when you need quantified confidence or adversarial rigor.
    • Re-run Pass 1 incrementally. It tracks coverage in map-tracking.json and only re-analyzes stale modules.
    • Pass 3 is most valuable before releases. The ranked DANGER-ZONES.md is a pre-release checklist.
    • The trilogy maps to a question progression: How does it work? How well does it connect? Where will it break?
    ","path":["Architecture Deep Dive"],"tags":[]},{"location":"recipes/architecture-deep-dive/#see-also","level":2,"title":"See Also","text":"

    See also: Detecting and Fixing Context Drift — keep architecture artifacts fresh between deep-dive sessions.

    See also: Detecting and Fixing Context Drift — structural checks that complement architecture analysis.

    ","path":["Architecture Deep Dive"],"tags":[]},{"location":"recipes/autonomous-loops/","level":1,"title":"Running an Unattended AI Agent","text":"","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#the-problem","level":2,"title":"The Problem","text":"

    You have a project with a clear list of tasks, and you want an AI agent to work through them autonomously: overnight, unattended, without you sitting at the keyboard.

    Each iteration needs to remember what the previous one did, mark tasks as completed, and know when to stop.

    Without persistent memory, every iteration starts fresh and the loop collapses. With ctx, each iteration can pick up where the last one left off, but only if the agent persists its context as part of the work.

    Unattended operation works because the agent treats context persistence as a first-class deliverable, not an afterthought.

    ","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#tldr","level":2,"title":"TL;DR","text":"
    ctx init                                    # 1. init context\n# Edit TASKS.md with phased work items\nctx loop --tool claude --max-iterations 10  # 2. generate loop.sh\n./loop.sh 2>&1 | tee /tmp/loop.log &        # 3. run the loop\nctx watch --log /tmp/loop.log               # 4. process context updates\n# Next morning:\nctx status && ctx load                      # 5. review the results\n

    Read on for permissions, isolation, and completion signals.

    ","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Tool Type Purpose ctx init Command Initialize project context and prompt templates ctx loop Command Generate the loop shell script ctx watch Command Monitor AI output and persist context updates ctx load Command Display assembled context (for debugging) /ctx-loop Skill Generate loop script from inside Claude Code /ctx-implement Skill Execute a plan step-by-step with verification","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#the-workflow","level":2,"title":"The Workflow","text":"","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#step-1-initialize-for-unattended-operation","level":3,"title":"Step 1: Initialize for Unattended Operation","text":"

    Start by creating a .context/ directory configured so the agent can work without human input.

    ctx init\n

    This creates .context/ with the template files (including a loop prompt at .context/loop.md), and seeds Claude Code permissions in .claude/settings.local.json. Install the ctx plugin for hooks and skills.

    ","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#step-2-populate-tasksmd-with-phased-work","level":3,"title":"Step 2: Populate TASKS.md with Phased Work","text":"

    Open .context/TASKS.md and organize your work into phases. The agent works through these systematically, top to bottom, using priority tags to break ties.

    # Tasks\n\n## Phase 1: Foundation\n\n- [ ] Set up project structure and build system `#priority:high`\n- [ ] Configure testing framework `#priority:high`\n- [ ] Create CI pipeline `#priority:medium`\n\n## Phase 2: Core Features\n\n- [ ] Implement user registration `#priority:high`\n- [ ] Add email verification `#priority:high`\n- [ ] Create password reset flow `#priority:medium`\n\n## Phase 3: Hardening\n\n- [ ] Add rate limiting to API endpoints `#priority:medium`\n- [ ] Improve error messages `#priority:low`\n- [ ] Write integration tests `#priority:medium`\n

    Phased organization matters because it gives the agent natural boundaries. Phase 1 tasks should be completable without Phase 2 code existing yet.

    ","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#step-3-configure-the-loop-prompt","level":3,"title":"Step 3: Configure the Loop Prompt","text":"

    The loop prompt at .context/loop.md instructs the agent to operate autonomously:

    1. Read .context/CONSTITUTION.md first (hard rules, never violated)
    2. Load context from .context/ files
    3. Pick one task per iteration
    4. Complete the task and update context files
    5. Commit changes (including .context/)
    6. Signal status with a completion signal

    You can customize .context/loop.md for your project. The critical parts are the one-task-per-iteration discipline, proactive context persistence, and completion signals at the end:

    ## Signal Status\n\nEnd your response with exactly ONE of:\n\n* `SYSTEM_CONVERGED`: All tasks in `TASKS.md` are complete (*this is the\n  signal the loop script detects by default*)\n* `SYSTEM_BLOCKED`: Cannot proceed, need human input (explain why)\n* (*no signal*): More work remains, continue to the next iteration\n\nNote: the loop script only checks for `SYSTEM_CONVERGED` by default.\n`SYSTEM_BLOCKED` is a convention for the human reviewing the log.\n
    ","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#step-4-configure-permissions","level":3,"title":"Step 4: Configure Permissions","text":"

    An unattended agent needs permission to use tools without prompting. By default, Claude Code asks for confirmation on file writes, bash commands, and other operations, which stops the loop and waits for a human who is not there.

    There are two approaches.

    ","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#option-a-explicit-allowlist-recommended","level":4,"title":"Option A: Explicit Allowlist (Recommended)","text":"

    Grant only the permissions the agent needs. In .claude/settings.local.json:

    {\n  \"permissions\": {\n    \"allow\": [\n      \"Bash(make:*)\",\n      \"Bash(go:*)\",\n      \"Bash(git:*)\",\n      \"Bash(ctx:*)\",\n      \"Read\",\n      \"Write\",\n      \"Edit\"\n    ]\n  }\n}\n

    Adjust the Bash patterns for your project's toolchain. The agent can run make, go, git, and ctx commands but cannot run arbitrary shell commands.

    This is recommended even in sandboxed environments because it limits blast radius.

    ","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#option-b-skip-all-permission-checks","level":4,"title":"Option B: Skip All Permission Checks","text":"

    Claude Code supports a --dangerously-skip-permissions flag that disables all permission prompts:

    claude --dangerously-skip-permissions -p \"$(cat .context/loop.md)\"\n

    This Flag Means What It Says

    With --dangerously-skip-permissions, the agent can execute any shell command, write to any file, and make network requests without confirmation.

    Only use this on a sandboxed machine: ideally a virtual machine with no access to host credentials, no SSH keys, and no access to production systems.

    If you would not give an untrusted intern sudo on this machine, do not use this flag.

    ","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#enforce-isolation-at-the-os-level","level":4,"title":"Enforce Isolation at the OS Level","text":"

    The only controls an agent cannot override are the ones enforced by the operating system, the container runtime, or the hypervisor.

    Do Not Skip This Section

    This is not optional hardening:

    An unattended agent with unrestricted OS access is an unattended shell with unrestricted OS access.

    The allowlist above is a strong first layer, but do not rely on a single runtime boundary.

    For unattended runs, enforce isolation at the infrastructure level:

    Layer What to enforce User account Run the agent as a dedicated unprivileged user with no sudo access and no membership in privileged groups (docker, wheel, adm). Filesystem Restrict the project directory via POSIX permissions or ACLs. The agent should have no access to other users' files or system directories. Container Run inside a Docker/Podman sandbox. Mount only the project directory. Drop capabilities (--cap-drop=ALL). Disable network if not needed (--network=none). Never mount the Docker socket and do not run privileged containers. Prefer rootless containers. Virtual machine Prefer a dedicated VM with no shared folders, no host passthrough, and no keys to other machines. Network If the agent does not need the internet, disable outbound access entirely. If it does, restrict to specific domains via firewall rules. Resource limits Apply CPU, memory, and disk limits (cgroups/container limits). A runaway loop should not fill disk or consume all RAM. Self-modification Make instruction files read-only. CLAUDE.md, .claude/settings.local.json, and .context/CONSTITUTION.md should not be writable by the agent user. If using project-local hooks, protect those too.

    A minimal Docker setup for overnight runs:

    docker run --rm \\\n  --network=none \\\n  --cap-drop=ALL \\\n  --memory=4g \\\n  --cpus=2 \\\n  -v /path/to/project:/workspace \\\n  -w /workspace \\\n  your-dev-image \\\n  ./loop.sh 2>&1 | tee /tmp/loop.log\n

    Defense in Depth

    Use multiple layers together: OS-level isolation (the boundary the agent cannot cross), a permission allowlist (what Claude Code will do within that boundary), and CONSTITUTION.md (a soft nudge for the common case).

    ","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#step-5-generate-the-loop-script","level":3,"title":"Step 5: Generate the Loop Script","text":"

    Use ctx loop to generate a loop.sh tailored to your AI tool:

    # Generate for Claude Code with a 10-iteration cap\nctx loop --tool claude --max-iterations 10\n\n# Generate for Aider\nctx loop --tool aider --max-iterations 10\n\n# Custom prompt file and output filename\nctx loop --tool claude --prompt my-prompt.md --output my-loop.sh\n

    The generated script reads .context/loop.md, runs the tool, checks for completion signals, and loops until done or the cap is reached.

    You can also use the /ctx-loop skill from inside Claude Code.

    A Shell Loop Is the Best Practice

    The shell loop approach spawns a fresh AI process each iteration, so the only state that carries between iterations is what lives in .context/ and git.

    Claude Code's built-in /loop runs iterations within the same session, which can allow context window state to leak between iterations. This can be convenient for short runs, but it is less reliable for unattended loops.

    See Shell Loop vs Built-in Loop for details.

    ","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#step-6-run-with-watch-mode","level":3,"title":"Step 6: Run with Watch Mode","text":"

    Open two terminals. In the first, run the loop. In the second, run ctx watch to process context updates from the AI output.

    # Terminal 1: Run the loop\n./loop.sh 2>&1 | tee /tmp/loop.log\n\n# Terminal 2: Watch for context updates\nctx watch --log /tmp/loop.log\n

    The watch command parses XML context-update commands from the AI output and applies them:

    <context-update type=\"complete\">user registration</context-update>\n<context-update type=\"learning\"\n  context=\"Setting up user registration\"\n  lesson=\"Email verification needs SMTP configured\"\n  application=\"Add SMTP setup to deployment checklist\"\n>SMTP Requirement</context-update>\n
    ","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#step-7-completion-signals-end-the-loop","level":3,"title":"Step 7: Completion Signals End the Loop","text":"

    The generated script checks for one completion signal per run. By default this is SYSTEM_CONVERGED. You can change it with the --completion flag:

    ctx loop --tool claude --completion BOOTSTRAP_COMPLETE --max-iterations 5\n

    The following signals are conventions used in .context/loop.md:

    Signal Convention How the script handles it SYSTEM_CONVERGED All tasks in TASKS.md are done Detected by default (--completion default value) SYSTEM_BLOCKED Agent cannot proceed Only detected if you set --completion to this BOOTSTRAP_COMPLETE Initial scaffolding done Only detected if you set --completion to this

    The script uses grep -q on the agent's output, so any string works as a signal. If you need to detect multiple signals in one run, edit the generated loop.sh to add additional grep checks.

    When you return in the morning, check the log and the context files:

    tail -100 /tmp/loop.log\nctx status\nctx load\n
    ","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#step-8-use-ctx-implement-for-plan-execution","level":3,"title":"Step 8: Use /ctx-implement for Plan Execution","text":"

    Within each iteration, the agent can use /ctx-implement to execute multi-step plans with verification between steps. This is useful for complex tasks that touch multiple files.

    The skill breaks a plan into atomic, verifiable steps:

    Step 1/6: Create user model .................. OK\nStep 2/6: Add database migration ............. OK\nStep 3/6: Implement registration handler ..... OK\nStep 4/6: Write unit tests ................... OK\nStep 5/6: Run test suite ..................... FAIL\n  -> Fixed: missing test dependency\n  -> Re-verify ............................... OK\nStep 6/6: Update TASKS.md .................... OK\n

    Each step is verified (build, test, syntax check) before moving to the next.

    ","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#putting-it-all-together","level":2,"title":"Putting It All Together","text":"

    A typical overnight run:

    ctx init\n# Edit TASKS.md and .context/loop.md\n\nctx loop --tool claude --max-iterations 20\n\n./loop.sh 2>&1 | tee /tmp/loop.log &\nctx watch --log /tmp/loop.log\n\n# Next morning:\nctx status\nctx load\n
    ","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#why-autonomous-loops-work-proactive-context-persistence","level":2,"title":"Why Autonomous Loops Work: Proactive Context Persistence","text":"

    The autonomous loop pattern works because the agent persists context as part of the job.

    Without proactive persistence, the loop degrades into disconnected iterations that repeat work, contradict decisions, and lose track of progress. The agent cannot rely on a human to prompt it. It must treat context updates as part of every task, not as a separate step.

    ","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#the-agent-playbook-self-check-drives-autonomy","level":3,"title":"The Agent Playbook Self-Check Drives Autonomy","text":"

    The Agent Playbook defines a self-check prompt the agent applies continuously:

    \"If this session ended right now, would the next session know what happened?\"\n

    In interactive sessions, a human might remind the agent to save context. When the agent runs unattended, nobody is there to remind it.

    The self-check becomes the trigger: after meaningful work, the agent evaluates whether the context files reflect reality and updates them immediately if they do not.

    ","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#what-the-agent-does-proactively-between-iterations","level":3,"title":"What the Agent Does Proactively between Iterations","text":"

    At milestones within an iteration, the agent persists without waiting for instructions:

    Milestone What the agent does proactively Completes a task Marks it [x] in TASKS.md Discovers a gotcha Adds it to LEARNINGS.md Makes a design choice Records it in DECISIONS.md with rationale Identifies follow-up work Creates new tasks in TASKS.md with #added timestamp Hits an unexpected failure Documents the root cause before moving on","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#example-what-proactive-persistence-looks-like","level":3,"title":"Example: What Proactive Persistence Looks Like","text":"

    Here is what the agent does during a single iteration beyond writing code:

    Iteration 4:\n  1. Read TASKS.md -> pick \"Add email verification\" (#priority:high)\n  2. Add #started:2026-01-25-030012 to the task\n  3. Implement the feature (code, tests, docs if needed)\n  4. Tests pass -> mark task [x]\n  5. Add learning: \"SMTP config must be set before verification handler registers. Order matters in init().\"\n  6. Add decision: \"Use token-based verification links (not codes) because links work better in automated tests.\"\n  7. Create follow-up task: \"Add rate limiting to verification endpoint\" #added:...\n  8. Commit all changes including `.context/`\n  9. No signal emitted -> loop continues to iteration 5\n

    Steps 2, 4, 5, 6, and 7 are proactive context persistence:

    The agent was not asked to do any of them.

    ","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#context-persistence-at-milestones","level":3,"title":"Context Persistence at Milestones","text":"

    For long autonomous runs, the agent persists context at natural boundaries, often at phase transitions or after completing a cluster of related tasks. It updates TASKS.md, DECISIONS.md, and LEARNINGS.md as it goes.

    If the loop crashes at 4 AM, the context files tell you exactly where to resume. You can also use ctx journal source to review the session transcripts.

    ","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#the-persistence-contract","level":3,"title":"The Persistence Contract","text":"

    The autonomous loop has an implicit contract:

    1. Every iteration reads context: TASKS.md, DECISIONS.md, LEARNINGS.md
    2. Every iteration writes context: task updates, new learnings, decisions
    3. Every commit includes .context/ so the next iteration sees changes
    4. Context stays current: if the loop stopped right now, nothing important is lost

    Break any part of this contract and the loop degrades.

    ","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#tips","level":2,"title":"Tips","text":"

    Markdown Is Not Enforcement

    Your real guardrails are permissions and isolation, not Markdown. CONSTITUTION.md can nudge the agent, but its effect is probabilistic.

    The permission allowlist and OS isolation are deterministic:

    For unattended runs, trust the sandbox and the allowlist, not the prose.

    • Start with a small iteration cap. Use --max-iterations 5 on your first run.
    • Keep tasks atomic. Each task should be completable in a single iteration.
    • Check signal discipline. If the loop runs forever, the agent is not emitting SYSTEM_CONVERGED or SYSTEM_BLOCKED. Make the signal requirement explicit in .context/loop.md.
    • Commit after context updates. Finish code, update .context/, commit including .context/, then signal.
    • Set up webhook notifications to get notified when the loop completes, hits max iterations, or when hooks fire nudges. The generated loop script includes ctx hook notify calls automatically.
    ","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#next-up","level":2,"title":"Next Up","text":"

    When to Use a Team of Agents →: Decision framework for choosing between a single agent, parallel worktrees, and a full agent team.

    ","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#see-also","level":2,"title":"See Also","text":"
    • Autonomous Loops: loop pattern, prompt templates, troubleshooting
    • CLI Reference: ctx loop: flags and options
    • CLI Reference: ctx watch: watch mode details
    • CLI Reference: ctx init: init flags
    • The Complete Session: interactive workflow
    • Tracking Work Across Sessions: structuring TASKS.md
    ","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/building-skills/","level":1,"title":"Building Project Skills","text":"","path":["Recipes","Agents and Automation","Building Project Skills"],"tags":[]},{"location":"recipes/building-skills/#the-problem","level":2,"title":"The Problem","text":"

    You have workflows your agent needs to repeat across sessions: a deploy checklist, a review protocol, a release process. Each time, you re-explain the steps. The agent gets it mostly right but forgets edge cases you corrected last time.

    Skills solve this by encoding domain knowledge into a reusable document the agent loads automatically when triggered. A skill is not code - it is a structured prompt that captures what took you sessions to learn.

    ","path":["Recipes","Agents and Automation","Building Project Skills"],"tags":[]},{"location":"recipes/building-skills/#tldr","level":2,"title":"TL;DR","text":"
    /ctx-skill-create\n

    The skill-creator walks you through: identify a repeating workflow, draft a skill, test with realistic prompts, iterate until it triggers correctly and produces good output.

    ","path":["Recipes","Agents and Automation","Building Project Skills"],"tags":[]},{"location":"recipes/building-skills/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Tool Type Purpose /ctx-skill-create Skill Interactive skill creation and improvement workflow ctx init Command Deploys template skills to .claude/skills/ on first setup","path":["Recipes","Agents and Automation","Building Project Skills"],"tags":[]},{"location":"recipes/building-skills/#the-workflow","level":2,"title":"The Workflow","text":"","path":["Recipes","Agents and Automation","Building Project Skills"],"tags":[]},{"location":"recipes/building-skills/#step-1-identify-a-repeating-pattern","level":3,"title":"Step 1: Identify a Repeating Pattern","text":"

    Good skill candidates:

    • Checklists you repeat: deploy steps, release prep, code review
    • Decisions the agent gets wrong: if you keep correcting the same behavior, encode the correction
    • Multi-step workflows: anything with a sequence of commands and conditional branches
    • Domain knowledge: project-specific terminology, architecture constraints, or conventions the agent cannot infer from code alone

    Not good candidates: one-off instructions, things the platform already handles (file editing, git operations), or tasks too narrow to reuse.

    ","path":["Recipes","Agents and Automation","Building Project Skills"],"tags":[]},{"location":"recipes/building-skills/#step-2-create-the-skill","level":3,"title":"Step 2: Create the Skill","text":"

    Invoke the skill-creator:

    You: \"I want a skill for our deploy process\"\n\nAgent: [Asks about the workflow: what steps, what tools,\n        what edge cases, what the output should look like]\n

    Or capture a workflow you just did:

    You: \"Turn what we just did into a skill\"\n\nAgent: [Extracts the steps from conversation history,\n        confirms understanding, drafts the skill]\n

    The skill-creator produces a SKILL.md file in .claude/skills/your-skill/.

    ","path":["Recipes","Agents and Automation","Building Project Skills"],"tags":[]},{"location":"recipes/building-skills/#step-3-test-with-realistic-prompts","level":3,"title":"Step 3: Test with Realistic Prompts","text":"

    The skill-creator proposes 2-3 test prompts - the kind of thing a real user would say. It runs each one and shows the result alongside a baseline (same prompt without the skill) so you can compare.

    Agent: \"Here are test prompts I'd try:\n        1. 'Deploy to staging'\n        2. 'Ship the hotfix'\n        3. 'Run the release checklist'\n        Want to adjust these?\"\n
    ","path":["Recipes","Agents and Automation","Building Project Skills"],"tags":[]},{"location":"recipes/building-skills/#step-4-iterate-on-the-description","level":3,"title":"Step 4: Iterate on the Description","text":"

    The description field in frontmatter determines when a skill triggers. Claude tends to undertrigger - descriptions need to be specific and slightly \"pushy\":

    # Weak - too vague, will undertrigger\ndescription: \"Use for deployments\"\n\n# Strong - covers situations and synonyms\ndescription: >-\n  Use when deploying to staging or production, running the release\n  checklist, or when the user says 'ship it', 'deploy this', or\n  'push to prod'. Also use after merging to main when a deploy\n  is expected.\n

    The skill-creator helps you tune this iteratively.

    ","path":["Recipes","Agents and Automation","Building Project Skills"],"tags":[]},{"location":"recipes/building-skills/#step-5-deploy-as-template-optional","level":3,"title":"Step 5: Deploy as Template (Optional)","text":"

    If the skill should be available to all projects (not just this one), place it in internal/assets/claude/skills/ so ctx init deploys it to new projects automatically.

    Most project-specific skills stay in .claude/skills/ and travel with the repo.

    ","path":["Recipes","Agents and Automation","Building Project Skills"],"tags":[]},{"location":"recipes/building-skills/#skill-anatomy","level":2,"title":"Skill Anatomy","text":"
    my-skill/\n  SKILL.md         # Required: frontmatter + instructions (<500 lines)\n  scripts/         # Optional: deterministic code the skill can execute\n  references/      # Optional: detail loaded on demand (not always)\n  assets/          # Optional: output templates, not loaded into context\n

    Key sections in SKILL.md:

    Section Purpose Required? Frontmatter Name, description (trigger) Yes When to Use Positive triggers Yes When NOT to Use Prevents false activations Yes Process Steps and commands Yes Examples Good/bad output pairs Recommended Quality Checklist Verify before reporting completion For complex skills","path":["Recipes","Agents and Automation","Building Project Skills"],"tags":[]},{"location":"recipes/building-skills/#tips","level":2,"title":"Tips","text":"
    • Description is everything. A great skill with a vague description never fires. Spend time on trigger coverage - synonyms, concrete situations, edge cases.
    • Stay under 500 lines. If your skill is growing past this, move detail into references/ files and point to them from SKILL.md.
    • Do not duplicate the platform. If the agent already knows how to do something (edit files, run git commands), do not restate it. Tag paragraphs as Expert/Activation/Redundant and delete Redundant ones.
    • Explain why, not just what. \"Sort by date because users want recent results first\" beats \"ALWAYS sort by date.\" The agent generalizes from reasoning better than from rigid rules.
    • Test negative triggers. Make sure the skill does not fire on unrelated prompts. A skill that activates too broadly becomes noise.
    ","path":["Recipes","Agents and Automation","Building Project Skills"],"tags":[]},{"location":"recipes/building-skills/#next-up","level":2,"title":"Next Up","text":"

    Parallel Agent Development with Git Worktrees →: Split work across multiple agents using git worktrees.

    ","path":["Recipes","Agents and Automation","Building Project Skills"],"tags":[]},{"location":"recipes/building-skills/#see-also","level":2,"title":"See Also","text":"
    • Skills Reference: full listing of all bundled and project-local skills
    • Guide Your Agent: how commands, skills, and conversational patterns work together
    • Design Before Coding: the four-skill chain for front-loading design work
    ","path":["Recipes","Agents and Automation","Building Project Skills"],"tags":[]},{"location":"recipes/claude-code-permissions/","level":1,"title":"Claude Code Permission Hygiene","text":"","path":["Recipes","Maintenance","Claude Code Permission Hygiene"],"tags":[]},{"location":"recipes/claude-code-permissions/#the-problem","level":2,"title":"The Problem","text":"

    Claude Code's .claude/settings.local.json controls what the agent can do without asking. Over time, this file accumulates one-off permissions from individual sessions: Exact commands with hardcoded paths, duplicate entries, and stale skill references.

    A noisy \"allowlist\" makes it harder to spot dangerous permissions and increases the surface area for unintended behavior.

    Since settings.local.json is .gitignored, it drifts independently of your codebase. There is no PR review, no CI check: just whatever you clicked \"Allow\" on.

    This recipe shows what a well-maintained permission file looks like and how to keep it clean.

    ","path":["Recipes","Maintenance","Claude Code Permission Hygiene"],"tags":[]},{"location":"recipes/claude-code-permissions/#tldr","level":2,"title":"TL;DR","text":"
    ctx init                            # seeds safe defaults\n/ctx-drift                          # detects missing/stale permissions\n/ctx-permission-sanitize            # audits for dangerous patterns\n

    See Recommended Defaults for the full list.

    ","path":["Recipes","Maintenance","Claude Code Permission Hygiene"],"tags":[]},{"location":"recipes/claude-code-permissions/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Command/Skill Role in this workflow ctx init Populates default ctx permissions /ctx-drift Detects missing or stale permission entries /ctx-permission-sanitize Audits for dangerous patterns (security-focused)","path":["Recipes","Maintenance","Claude Code Permission Hygiene"],"tags":[]},{"location":"recipes/claude-code-permissions/#recommended-defaults","level":2,"title":"Recommended Defaults","text":"

    After running ctx init, your settings.local.json will have the ctx defaults pre-populated. Here is an opinionated safe starting point for a Go project using ctx:

    {\n  \"permissions\": {\n    \"allow\": [\n      \"Bash(/tmp/ctx-*:*)\",\n      \"Bash(CGO_ENABLED=0 go build:*)\",\n      \"Bash(CGO_ENABLED=0 go test:*)\",\n      \"Bash(ctx:*)\",\n      \"Bash(git add:*)\",\n      \"Bash(git branch:*)\",\n      \"Bash(git check-ignore:*)\",\n      \"Bash(git checkout:*)\",\n      \"Bash(git commit:*)\",\n      \"Bash(git diff:*)\",\n      \"Bash(git log:*)\",\n      \"Bash(git remote:*)\",\n      \"Bash(git restore:*)\",\n      \"Bash(git show:*)\",\n      \"Bash(git stash:*)\",\n      \"Bash(git status:*)\",\n      \"Bash(git tag:*)\",\n      \"Bash(go build:*)\",\n      \"Bash(go fmt:*)\",\n      \"Bash(go test:*)\",\n      \"Bash(go vet:*)\",\n      \"Bash(golangci-lint run:*)\",\n      \"Bash(grep:*)\",\n      \"Bash(ls:*)\",\n      \"Bash(make:*)\",\n      \"Skill(ctx-convention-add)\",\n      \"Skill(ctx-decision-add)\",\n      \"Skill(ctx-learning-add)\",\n      \"Skill(ctx-task-add)\",\n      \"Skill(ctx-agent)\",\n      \"Skill(ctx-archive)\",\n      \"Skill(ctx-blog)\",\n      \"Skill(ctx-blog-changelog)\",\n      \"Skill(absorb)\",\n      \"Skill(ctx-commit)\",\n      \"Skill(ctx-drift)\",\n      \"Skill(ctx-implement)\",\n      \"Skill(ctx-journal-enrich)\",\n      \"Skill(ctx-journal-enrich-all)\",\n      \"Skill(ctx-loop)\",\n      \"Skill(ctx-next)\",\n      \"Skill(ctx-pad)\",\n      \"Skill(ctx-prompt-audit)\",\n      \"Skill(ctx-history)\",\n      \"Skill(ctx-reflect)\",\n      \"Skill(ctx-remember)\",\n      \"Skill(ctx-status)\",\n      \"Skill(ctx-worktree)\",\n      \"WebSearch\"\n    ],\n    \"deny\": [\n      \"Bash(sudo *)\",\n      \"Bash(git push *)\",\n      \"Bash(git push)\",\n      \"Bash(rm -rf /*)\",\n      \"Bash(rm -rf ~*)\",\n      \"Bash(curl *)\",\n      \"Bash(wget *)\",\n      \"Bash(chmod 777 *)\",\n      \"Read(**/.env)\",\n      \"Read(**/.env.*)\",\n      \"Read(**/*credentials*)\",\n      \"Read(**/*secret*)\",\n      \"Read(**/*.pem)\",\n      \"Read(**/*.key)\",\n      
\"Edit(**/.env)\",\n      \"Edit(**/.env.*)\"\n    ]\n  }\n}\n

    This Is a Starting Point, Not a Mandate

    Your project may need more or fewer entries.

    The goal is intentional permissions: Every entry should be there because you decided it belongs, not because you clicked \"Allow\" once during debugging.

    ","path":["Recipes","Maintenance","Claude Code Permission Hygiene"],"tags":[]},{"location":"recipes/claude-code-permissions/#design-principles","level":3,"title":"Design Principles","text":"

    Use wildcards for trusted binaries: If you trust the binary (your own project's CLI, make, go), a single wildcard like Bash(ctx:*) beats twenty subcommand entries. It reduces noise and means new subcommands work without re-prompting.

    Keep git commands granular: Unlike ctx or make, git has both safe commands (git log, git status) and destructive ones (git reset --hard, git clean -f). Listing safe commands individually prevents accidentally pre-approving dangerous ones.

    Pre-approve all ctx- skills: Skills shipped with ctx (Skill(ctx-*)) are safe to pre-approve. They are part of your project and you control their content. This prevents the agent from prompting on every skill invocation.

    ","path":["Recipes","Maintenance","Claude Code Permission Hygiene"],"tags":[]},{"location":"recipes/claude-code-permissions/#default-deny-rules","level":3,"title":"Default Deny Rules","text":"

    ctx init automatically populates permissions.deny with rules that block dangerous operations. Deny rules are evaluated before allow rules: A denied pattern always prompts the user, even if it also matches an allow entry.

    The defaults block:

    Pattern Why Bash(sudo *) Cannot enter password; will hang Bash(git push *) Must be explicit user action Bash(rm -rf /*) etc. Recursive delete of system/home directories Bash(curl *) / wget Arbitrary network requests Bash(chmod 777 *) World-writable permissions Read/Edit(**/.env*) Secrets and credentials Read(**/*.pem, *.key) Private keys

    Read/Edit Deny Rules

    Read() and Edit() deny rules have known upstream enforcement issues (claude-code#6631, #24846).

    They are included as defense-in-depth and intent documentation.

    Blocked by default deny rules: no action needed, ctx init handles these:

    Pattern Risk Bash(git push:*) Must be explicit user action Bash(sudo:*) Privilege escalation Bash(rm -rf:*) Recursive delete with no confirmation Bash(curl:*) / Bash(wget:*) Arbitrary network requests

    Requires manual discipline: Never add these to allow:

    Pattern Risk Bash(git reset:*) Can discard uncommitted work Bash(git clean:*) Deletes untracked files Skill(ctx-permission-sanitize) Edits this file: self-modification vector Skill(release) Runs the release pipeline: high impact","path":["Recipes","Maintenance","Claude Code Permission Hygiene"],"tags":[]},{"location":"recipes/claude-code-permissions/#hooks-regex-safety-net","level":2,"title":"Hooks: Regex Safety Net","text":"

    Deny rules handle prefix-based blocking natively. Hooks complement them by catching patterns that require regex matching: Things deny rules can't express.

    The ctx plugin ships these blocking hooks:

    Hook What it blocks ctx system block-non-path-ctx Running ctx from wrong path

    Project-local hooks (not part of the plugin) catch regex edge cases:

    Hook What it blocks block-dangerous-commands.sh Mid-command sudo/git push (after &&), copies to bin dirs, absolute-path ctx

    Pre-Approved + Hook-Blocked = Silent Block

    If you pre-approve a command that a hook blocks, the user never sees the confirmation dialog. The agent gets a block response and must handle it, which is confusing.

    It's better not to pre-approve commands that hooks are designed to intercept.

    ","path":["Recipes","Maintenance","Claude Code Permission Hygiene"],"tags":[]},{"location":"recipes/claude-code-permissions/#the-maintenance-workflow","level":2,"title":"The Maintenance Workflow","text":"","path":["Recipes","Maintenance","Claude Code Permission Hygiene"],"tags":[]},{"location":"recipes/claude-code-permissions/#after-busy-sessions","level":3,"title":"After Busy Sessions","text":"

    Permissions accumulate fastest during debugging and exploration sessions. After a session where you clicked \"Allow\" many times:

    1. Open .claude/settings.local.json in your editor;
    2. Look for entries at the bottom of the allowlist (new entries append there);
    3. Delete anything that looks session-specific:
      • Exact commands with hardcoded paths,
      • Commands with literal string arguments,
      • Entries that duplicate an existing wildcard.

    See the Sanitize Permissions runbook for a step-by-step procedure.

    ","path":["Recipes","Maintenance","Claude Code Permission Hygiene"],"tags":[]},{"location":"recipes/claude-code-permissions/#periodically","level":3,"title":"Periodically","text":"

    Run /ctx-drift to catch permission drift:

    • Missing Bash(ctx:*) wildcard;
    • Missing Skill(ctx-*) entries for installed skills;
    • Stale Skill(ctx-*) entries for removed skills;
    • Granular Bash(ctx <subcommand>:*) entries that should be consolidated.

    Run /ctx-permission-sanitize to catch security issues:

    • Hook bypass patterns
    • Destructive commands
    • Overly broad permissions
    • Injection vectors
    ","path":["Recipes","Maintenance","Claude Code Permission Hygiene"],"tags":[]},{"location":"recipes/claude-code-permissions/#when-adding-new-skills","level":3,"title":"When Adding New Skills","text":"

    If you create a custom ctx-* skill, add its Skill() entry to the allowlist manually.

    ctx init only populates the default permissions: It won't pick up custom skills.

    ","path":["Recipes","Maintenance","Claude Code Permission Hygiene"],"tags":[]},{"location":"recipes/claude-code-permissions/#golden-image-snapshots","level":3,"title":"Golden Image Snapshots","text":"

    If manual cleanup is too tedious, use a golden image to automate it:

    Snapshot a curated permission set, then restore at session start to automatically drop session-accumulated permissions. See the Permission Snapshots recipe for the full workflow.

    ","path":["Recipes","Maintenance","Claude Code Permission Hygiene"],"tags":[]},{"location":"recipes/claude-code-permissions/#adapting-for-other-languages","level":2,"title":"Adapting for Other Languages","text":"

    The recommended defaults above are Go-specific. For other stacks, swap the build/test tooling:

    Node.js / TypeScript:

    \"Bash(npm run:*)\",\n\"Bash(npm test:*)\",\n\"Bash(npx:*)\",\n\"Bash(node:*)\"\n

    Python:

    \"Bash(pytest:*)\",\n\"Bash(python:*)\",\n\"Bash(pip show:*)\",\n\"Bash(ruff:*)\"\n

    Rust:

    \"Bash(cargo build:*)\",\n\"Bash(cargo test:*)\",\n\"Bash(cargo clippy:*)\",\n\"Bash(cargo fmt:*)\"\n

    The ctx, git, and skill entries remain the same across all stacks.

    ","path":["Recipes","Maintenance","Claude Code Permission Hygiene"],"tags":[]},{"location":"recipes/claude-code-permissions/#next-up","level":2,"title":"Next Up","text":"

    Permission Snapshots →: Save and restore permission baselines for reproducible setups.

    ","path":["Recipes","Maintenance","Claude Code Permission Hygiene"],"tags":[]},{"location":"recipes/claude-code-permissions/#see-also","level":2,"title":"See Also","text":"
    • Setting Up ctx Across AI Tools: full setup recipe including settings.local.json creation
    • Context Health: keeping .context/ files accurate
    • Sanitize Permissions runbook: manual cleanup procedure
    ","path":["Recipes","Maintenance","Claude Code Permission Hygiene"],"tags":[]},{"location":"recipes/configuration-profiles/","level":1,"title":"Configuration Profiles","text":"","path":["Recipes","Maintenance","Configuration Profiles"],"tags":[]},{"location":"recipes/configuration-profiles/#configuration-profiles","level":1,"title":"Configuration Profiles","text":"

    Switch between dev and base runtime configurations without editing .ctxrc by hand. Useful when you want verbose logging and webhook notifications during development, then clean defaults for normal sessions.

    Uses: ctx config switch, ctx config status, /ctx-config

    ","path":["Recipes","Maintenance","Configuration Profiles"],"tags":[]},{"location":"recipes/configuration-profiles/#how-it-works","level":2,"title":"How It Works","text":"

    The ctx repo ships two source profiles committed to git:

    File Profile Description .ctxrc.base base All defaults, notifications off .ctxrc.dev dev Verbose logging, webhook notifications on

    The working copy (.ctxrc) is gitignored. Switching profiles copies the source file over .ctxrc, so your runtime configuration is always a clean snapshot of one of the two sources.

    ","path":["Recipes","Maintenance","Configuration Profiles"],"tags":[]},{"location":"recipes/configuration-profiles/#switching-profiles","level":2,"title":"Switching Profiles","text":"
    # Switch to dev (verbose logging, notifications)\nctx config switch dev\n\n# Switch to base (defaults)\nctx config switch base\n\n# Toggle to the opposite profile\nctx config switch\n\n# \"prod\" is an alias for \"base\"\nctx config switch prod\n

    The detection heuristic checks for an uncommented notify: line in .ctxrc: present means dev, absent means base.

    ","path":["Recipes","Maintenance","Configuration Profiles"],"tags":[]},{"location":"recipes/configuration-profiles/#checking-the-active-profile","level":2,"title":"Checking the Active Profile","text":"
    ctx config status\n

    Output examples:

    active: dev (verbose logging enabled)\nactive: base (defaults)\nactive: none (.ctxrc does not exist)\n
    ","path":["Recipes","Maintenance","Configuration Profiles"],"tags":[]},{"location":"recipes/configuration-profiles/#typical-workflow","level":2,"title":"Typical Workflow","text":"
    1. Start of a debugging session: switch to dev for verbose logging and webhook notifications so you can trace hook activity and get push alerts.
    ctx config switch dev\n
    2. Work through the issue: hooks log verbosely, webhooks fire on key events (commits, ceremony nudges, drift warnings).

    3. Done debugging: switch back to base to silence the noise.

    ctx config switch base\n
    ","path":["Recipes","Maintenance","Configuration Profiles"],"tags":[]},{"location":"recipes/configuration-profiles/#customizing-profiles","level":2,"title":"Customizing Profiles","text":"

    Edit the source files directly:

    • .ctxrc.dev -- add any .ctxrc keys you want active during development (e.g., log_level: debug, notify.events, notify.webhook_url).
    • .ctxrc.base -- keep this minimal. It represents your \"production\" defaults.

    After editing a source file, re-run ctx config switch <profile> to apply the changes to the working copy.

    Commit Your Profiles

    Both .ctxrc.base and .ctxrc.dev should be committed to git so team members share the same profile definitions. The working copy .ctxrc stays gitignored.

    ","path":["Recipes","Maintenance","Configuration Profiles"],"tags":[]},{"location":"recipes/configuration-profiles/#using-the-skill","level":2,"title":"Using the Skill","text":"

    In a Claude Code session, say any of:

    • \"switch to dev mode\"
    • \"switch to base\"
    • \"what profile am I on?\"
    • \"toggle verbose logging\"

    The /ctx-config skill handles the rest.

    See also: ctx config reference, Configuration

    ","path":["Recipes","Maintenance","Configuration Profiles"],"tags":[]},{"location":"recipes/context-health/","level":1,"title":"Detecting and Fixing Drift","text":"","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/context-health/#the-problem","level":2,"title":"The Problem","text":"

    ctx files drift: you rename a package, delete a module, or finish a sprint, and suddenly ARCHITECTURE.md references paths that no longer exist, TASKS.md is 80 percent completed checkboxes, and CONVENTIONS.md describes patterns you stopped using two months ago.

    Stale context is worse than no context:

    An AI tool that trusts outdated references will hallucinate confidently.

    This recipe shows how to detect drift, fix it, and keep your .context/ directory lean and accurate.

    ","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/context-health/#tldr","level":2,"title":"TL;DR","text":"
    ctx drift                      # detect problems\nctx drift --fix                # auto-fix the easy ones\nctx sync --dry-run && ctx sync # reconcile after refactors\nctx compact --archive          # archive old completed tasks\nctx fmt                        # normalize line widths\nctx status                     # verify\n

    Or just ask your agent: \"Is our context clean?\"

    ","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/context-health/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Tool Type Purpose ctx drift Command Detect stale paths, missing files, violations ctx drift --fix Command Auto-fix simple issues ctx sync Command Reconcile context with codebase structure ctx compact Command Archive completed tasks, clean up empty sections ctx fmt Command Normalize context files to 80-char line width ctx status Command Quick health overview /ctx-drift Skill Structural plus semantic drift detection /ctx-architecture Skill Refresh ARCHITECTURE.md from actual codebase /ctx-status Skill In-session context summary /ctx-prompt-audit Skill Audit prompt quality and token efficiency","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/context-health/#the-workflow","level":2,"title":"The Workflow","text":"

    The best way to maintain context health is conversational: Ask your agent, guide it, and let it detect problems, explain them, and fix them with your approval. CLI commands exist for CI pipelines, scripting, and fine-grained control.

    For day-to-day maintenance, talk to your agent.

    Your Questions Reinforce the Pattern

    Asking \"is our context clean?\" does two things:

    • It triggers a drift check right now
    • It reinforces the habit

    This is reinforcement, not enforcement.

    Do not wait for the agent to be proactive on its own:

    Guide your agent, especially in early sessions.

    Over time, you will ask less and the agent will start offering more.

    ","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/context-health/#step-1-ask-your-agent","level":3,"title":"Step 1: Ask Your Agent","text":"

    The simplest way to check context health:

    Is our context clean?\nAnything stale?\nHow healthy are our context files?\n

    Or invoke the skill directly:

    /ctx-drift\n

    The agent performs two layers of analysis:

    Layer 1, structural checks (via ctx drift): Dead paths, missing files, completed task counts, constitution violations. Fast and programmatic.

    Layer 2, semantic analysis (agent-driven): Does CONVENTIONS.md describe patterns the code no longer follows? Does DECISIONS.md contain entries whose rationale no longer applies? Are there learnings about bugs that are now fixed? This is where the agent adds value the CLI cannot: It reads both context files and source code and compares them.

    The agent reports both layers together, explains each finding in plain language, and offers to fix what it can.

    ","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/context-health/#step-2-maintenance-at-session-start","level":3,"title":"Step 2: Maintenance at Session Start","text":"

    You do not need to ask explicitly.

    Using Claude Code

    ctx ships with Claude Code hooks that remind the agent at the right time to take initiative.

    Checking context health at the session start, offering to persist learnings before you quit, and flagging drift when it matters. The agent stays proactive without you having to prompt it:

    Agent: Good morning. I've loaded the context files. A few things\n       before we start:\n\n       - ARCHITECTURE.md references `pkg/auth/` which is now empty\n       - DECISIONS.md hasn't been updated in 40 days\n       - There are 18 completed tasks ready for archival\n\n       Want me to run a quick maintenance pass, or should we jump\n       straight into today's work?\n

    ☝️ This is what persistent, initiative-driven sessions feel like when context is treated as a system instead of a prompt.

    If the agent does not offer this on its own, a gentle nudge is enough:

    Anything stale before we start?\nHow's the context looking?\n

    This turns maintenance from a scheduled chore into a conversation that happens when it matters.

    ","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/context-health/#step-3-real-time-detection-during-work","level":3,"title":"Step 3: Real-Time Detection during Work","text":"

    Agents can notice drift while working: When a mismatch is directly in the path of their current task. If an agent reads ARCHITECTURE.md to find where to add a handler and internal/handlers/ doesn't exist, it will notice because the stale reference blocks its work:

    Agent: ARCHITECTURE.md references `internal/handlers/` but that directory\n       doesn't exist. I'll look at the actual source tree to find where\n       handlers live now.\n

    This happens reliably when the drift intersects the task. What is less reliable is the agent generalizing from one mismatch to \"there might be more stale references; let me run drift detection.\" That leap requires the agent to know /ctx-drift exists and to decide the current task should pause for maintenance.

    If you want that behavior, reinforce it:

    Good catch. Yes, run /ctx-drift and clean up any other stale references.\n

    Over time, agents that have seen this pattern will start offering proactively. But do not expect it from a cold start.

    ","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/context-health/#step-4-archival-and-cleanup","level":3,"title":"Step 4: Archival and Cleanup","text":"

    ctx drift detects when TASKS.md has more than 10 completed items and flags it as a staleness warning. Running ctx drift --fix archives completed tasks automatically.

    You can also run /ctx-archive to compact on demand.

    ","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/context-health/#knowledge-health-flow","level":3,"title":"Knowledge Health Flow","text":"

    Over time, LEARNINGS.md and DECISIONS.md accumulate entries that overlap or partially repeat each other. The check-persistence hook detects when entry counts exceed a configurable threshold and surfaces a nudge:

    \"LEARNINGS.md has 25+ entries. Consider running /ctx-consolidate to merge overlapping items.\"

    The consolidation workflow:

    1. Review: /ctx-consolidate groups entries by keyword similarity and presents candidate merges for your approval.
    2. Merge: Approved groups are combined into single entries that preserve the key information from each original.
    3. Archive: Originals move to .context/archive/, not deleted -- the full history is preserved in git and the archive directory.
    4. Verify: Run ctx drift after consolidation to confirm no cross-references were broken by the merge.

    This replaces ad-hoc cleanup with a repeatable, nudge-driven cycle: detect accumulation, review candidates, merge with approval, archive originals.

    See also: Knowledge Capture for the recording workflow that feeds into this maintenance cycle.

    ","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/context-health/#ctx-doctor-the-superset-check","level":2,"title":"ctx doctor: The Superset Check","text":"

    ctx doctor combines drift detection with hook auditing, configuration checks, event logging status, and token size reporting in a single command. If you want one command that covers structural health, hooks, and state:

    ctx doctor          # everything in one pass\nctx doctor --json   # machine-readable for scripting\n

    Use /ctx-doctor Too

    For agent-driven diagnosis that adds semantic analysis on top of the structural checks, use /ctx-doctor.

    See the Troubleshooting recipe for the full workflow.

    ","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/context-health/#cli-reference","level":2,"title":"CLI Reference","text":"

    The conversational approach above uses CLI commands under the hood. When you need direct control, use the commands directly.

    ","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/context-health/#ctx-drift","level":3,"title":"ctx drift","text":"

    Scan context files for structural problems:

    ctx drift\n

    Sample output:

    Drift Report\n============\n\nWarnings (3):\n  ARCHITECTURE.md:14  path \"internal/api/router.go\" does not exist\n  ARCHITECTURE.md:28  path \"pkg/auth/\" directory is empty\n  CONVENTIONS.md:9    path \"internal/handlers/\" not found\n\nViolations (1):\n  TASKS.md            31 completed tasks (recommend archival)\n\nStaleness:\n  DECISIONS.md        last modified 45 days ago\n  LEARNINGS.md        last modified 32 days ago\n\nExit code: 1 (warnings found)\n
    Level Meaning Action Warning Stale path references, missing files Fix or remove Violation Constitution rule heuristic failures, heavy clutter Fix soon Staleness Files not updated recently Review content

    Exit codes: 0 equals clean, 1 equals warnings, 3 equals violations.

    For CI integration:

    ctx drift --json | jq '.warnings | length'\n
    ","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/context-health/#ctx-drift-fix","level":3,"title":"ctx drift --fix","text":"

    Auto-fix mechanical issues:

    ctx drift --fix\n

    This handles removing dead path references, updating unambiguous renames, clearing empty sections. Issues requiring judgment are flagged but left for you.

    Run ctx drift again afterward to confirm what remains.

    ","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/context-health/#ctx-sync","level":3,"title":"ctx sync","text":"

    After a refactor, reconcile context with the actual codebase structure:

    ctx sync --dry-run   # preview first\nctx sync             # apply\n

    ctx sync scans for structural changes, compares with ARCHITECTURE.md, checks for new dependencies worth documenting, and identifies context referring to code that no longer exists.

    ","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/context-health/#ctx-compact","level":3,"title":"ctx compact","text":"

    Consolidate completed tasks and clean up empty sections:

    ctx compact            # move completed tasks to Completed section,\n                       # remove empty sections\nctx compact --archive  # also archive old tasks to .context/archive/\n
    • Tasks: moves completed items (with all subtasks done) into the Completed section of TASKS.md
    • All files: removes empty sections left behind
    • With --archive: writes tasks older than 7 days to .context/archive/tasks-YYYY-MM-DD.md

    Without --archive, nothing is deleted: Tasks are reorganized in place.

    ","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/context-health/#ctx-fmt","level":3,"title":"ctx fmt","text":"

    Normalize context file line widths:

    ctx fmt              # wrap long lines to 80 chars\nctx fmt --check      # CI: exit 1 if files need formatting\n

    Long task descriptions, decision rationale, and learning entries accumulate as single-line entries. ctx fmt wraps them at word boundaries with 2-space continuation indent for list items. Headings, tables, and comments are preserved.

    Idempotent: safe to run repeatedly.

    ","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/context-health/#ctx-status","level":3,"title":"ctx status","text":"

    Quick health overview:

    ctx status --verbose\n

    Shows file counts, token estimates, modification times, and drift warnings in a single glance.

    ","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/context-health/#ctx-prompt-audit","level":3,"title":"/ctx-prompt-audit","text":"

    Checks whether your context files are readable, compact, and token-efficient for the model.

    /ctx-prompt-audit\n
    ","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/context-health/#putting-it-all-together","level":2,"title":"Putting It All Together","text":"

    Conversational approach (recommended):

    Is our context clean?  -> agent runs structural plus semantic checks\nFix what you can       -> agent auto-fixes and proposes edits\nArchive the done tasks -> agent runs ctx compact --archive\nHow's token usage?     -> agent checks ctx status\n

    CLI approach (for CI, scripts, or direct control):

    ctx drift                      # 1. Detect problems\nctx drift --fix                # 2. Auto-fix the easy ones\nctx sync --dry-run && ctx sync # 3. Reconcile after refactors\nctx compact --archive          # 4. Archive old completed tasks\nctx fmt                        # 5. Normalize line widths\nctx status                     # 6. Verify\n
    ","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/context-health/#tips","level":2,"title":"Tips","text":"

    Agents cross-reference context files with source code during normal work. When drift intersects their current task, they will notice: a renamed package, a deleted directory, a path that doesn't resolve. But they rarely generalize from one mismatch to a full audit on their own. Reinforce the pattern: when an agent mentions a stale reference, ask it to run /ctx-drift. Over time, it starts offering.

    When an agent says \"this reference looks stale,\" it is usually right.

    Semantic drift is more damaging than structural drift: ctx drift catches dead paths. But CONVENTIONS.md describing a pattern your code stopped following three weeks ago is worse. When you ask \"is our context clean?\", the agent can do both checks.

    Use ctx status as a quick check: It shows file counts, token estimates, and drift warnings in a single glance. Good for a fast \"is everything ok?\" before diving into work.

    Drift detection in CI: add ctx drift --json to your CI pipeline and fail on exit code 3 (violations). This catches constitution-level problems before they reach upstream.

    Do not over-compact: Completed tasks have historical value. The --archive flag preserves them in .context/archive/ so you can search past work without cluttering active context.

    Sync is cautious by default: Use --dry-run after large refactors, then apply.

    ","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/context-health/#next-up","level":2,"title":"Next Up","text":"

    Claude Code Permission Hygiene →: Recommended permission defaults and maintenance workflow for Claude Code.

    ","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/context-health/#see-also","level":2,"title":"See Also","text":"
    • Troubleshooting: full diagnostic workflow using ctx doctor, event logs, and /ctx-doctor
    • Tracking Work Across Sessions: task lifecycle and archival
    • Persisting Decisions, Learnings, and Conventions: keeping knowledge files current
    • The Complete Session: where maintenance fits in the daily workflow
    • CLI Reference: full flag documentation for all commands
    • Context Files: structure and purpose of each .context/ file
    ","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/customizing-hook-messages/","level":1,"title":"Customizing Hook Messages","text":"","path":["Recipes","Hooks and Notifications","Customizing Hook Messages"],"tags":[]},{"location":"recipes/customizing-hook-messages/#the-problem","level":2,"title":"The Problem","text":"

    ctx hooks speak ctx's language, not your project's. The QA gate says \"lint the ENTIRE project\" and \"make build,\" but your Python project uses pytest and ruff. The post-commit nudge suggests running lints, but your project uses npm test. You could remove the hook entirely, but then you lose the logic (counting, state tracking, adaptive frequency) just to change the words.

    How do you customize what hooks say without removing what they do?

    ","path":["Recipes","Hooks and Notifications","Customizing Hook Messages"],"tags":[]},{"location":"recipes/customizing-hook-messages/#tldr","level":2,"title":"TL;DR","text":"
    ctx hook message list                     # see all hooks and their messages\nctx hook message show qa-reminder gate    # view the current template\nctx hook message edit qa-reminder gate    # copy default to .context/ for editing\nctx hook message reset qa-reminder gate   # revert to embedded default\n
    ","path":["Recipes","Hooks and Notifications","Customizing Hook Messages"],"tags":[]},{"location":"recipes/customizing-hook-messages/#commands-used","level":2,"title":"Commands Used","text":"Tool Type Purpose ctx hook message list CLI command Show all hook messages with category and override status ctx hook message show CLI command Print the effective message template ctx hook message edit CLI command Copy embedded default to .context/ for editing ctx hook message reset CLI command Delete user override, revert to default","path":["Recipes","Hooks and Notifications","Customizing Hook Messages"],"tags":[]},{"location":"recipes/customizing-hook-messages/#how-it-works","level":2,"title":"How It Works","text":"

    Hook messages use a 3-tier fallback:

    1. User override: .context/hooks/messages/{hook}/{variant}.txt
    2. Embedded default: compiled into the ctx binary
    3. Hardcoded fallback: belt-and-suspenders safety net

    The hook logic (when to fire, counting, state tracking, cooldowns) is unchanged. Only the content (what text gets emitted) comes from the template. You customize what the hook says without touching how it decides to speak.

    ","path":["Recipes","Hooks and Notifications","Customizing Hook Messages"],"tags":[]},{"location":"recipes/customizing-hook-messages/#finding-the-original-templates","level":3,"title":"Finding the Original Templates","text":"

    The default templates live in the ctx source tree at:

    internal/assets/hooks/messages/{hook}/{variant}.txt\n

    You can also browse them on GitHub: internal/assets/hooks/messages/

    Or use ctx hook message show to print any template without digging through source code:

    ctx hook message show qa-reminder gate        # QA gate instructions\nctx hook message show check-persistence nudge  # persistence nudge\nctx hook message show post-commit nudge        # post-commit reminder\n

    The show output includes the template source and available variables -- everything you need to write a replacement.

    ","path":["Recipes","Hooks and Notifications","Customizing Hook Messages"],"tags":[]},{"location":"recipes/customizing-hook-messages/#template-variables","level":3,"title":"Template Variables","text":"

    Some messages use Go text/template variables for dynamic content:

    No context files updated in {{.PromptsSinceNudge}}+ prompts.\nHave you discovered learnings, made decisions,\nestablished conventions, or completed tasks\nworth persisting?\n

    The show and edit commands list available variables for each message. When writing a replacement, keep the same {{.VariableName}} placeholders to preserve dynamic content. Variables that you omit render as <no value>: no error, but the output may look odd.

    ","path":["Recipes","Hooks and Notifications","Customizing Hook Messages"],"tags":[]},{"location":"recipes/customizing-hook-messages/#intentional-silence","level":3,"title":"Intentional Silence","text":"

    An empty template file (0 bytes or whitespace-only) means \"don't emit a message\". The hook still runs its logic but produces no output. This lets you silence specific messages without removing the hook from hooks.json.

    ","path":["Recipes","Hooks and Notifications","Customizing Hook Messages"],"tags":[]},{"location":"recipes/customizing-hook-messages/#example-python-project-qa-gate","level":2,"title":"Example: Python Project QA Gate","text":"

    The default QA gate says \"lint the ENTIRE project\" and references make lint. For a Python project, you want pytest and ruff:

    # See the current default\nctx hook message show qa-reminder gate\n\n# Copy it to .context/ for editing\nctx hook message edit qa-reminder gate\n\n# Edit the override\n

    Replace the content in .context/hooks/messages/qa-reminder/gate.txt:

    HARD GATE! DO NOT COMMIT without completing ALL of these steps first:\n(1) Run the full test suite: pytest -x\n(2) Run the linter: ruff check .\n(3) Verify a clean working tree\nRun tests and linter BEFORE every git commit, no exceptions.\n

    The hook still fires on every Edit call. The logic is identical. Only the instructions changed.

    ","path":["Recipes","Hooks and Notifications","Customizing Hook Messages"],"tags":[]},{"location":"recipes/customizing-hook-messages/#example-silencing-ceremony-nudges","level":2,"title":"Example: Silencing Ceremony Nudges","text":"

    The ceremony check nudges you to use /ctx-remember and /ctx-wrap-up. If your team has a different workflow and finds these noisy:

    ctx hook message edit check-ceremonies both\nctx hook message edit check-ceremonies remember\nctx hook message edit check-ceremonies wrapup\n

    Then empty each file:

    echo -n \"\" > .context/hooks/messages/check-ceremonies/both.txt\necho -n \"\" > .context/hooks/messages/check-ceremonies/remember.txt\necho -n \"\" > .context/hooks/messages/check-ceremonies/wrapup.txt\n

    The hooks still track ceremony usage internally, but they no longer emit any visible output.

    ","path":["Recipes","Hooks and Notifications","Customizing Hook Messages"],"tags":[]},{"location":"recipes/customizing-hook-messages/#example-javascript-project-post-commit","level":2,"title":"Example: JavaScript Project Post-Commit","text":"

    The default post-commit nudge mentions generic \"lints and tests.\" For a JavaScript project:

    ctx hook message edit post-commit nudge\n

    Replace with:

    Commit succeeded. 1. Offer context capture to the user: Decision (design\nchoice?), Learning (gotcha?), or Neither. 2. Ask the user: \"Want me to\nrun npm test and eslint before you push?\" Do NOT push. The user pushes\nmanually.\n
    ","path":["Recipes","Hooks and Notifications","Customizing Hook Messages"],"tags":[]},{"location":"recipes/customizing-hook-messages/#the-two-categories","level":2,"title":"The Two Categories","text":"

    Not all messages are equal. The list command shows each message's category:

    ","path":["Recipes","Hooks and Notifications","Customizing Hook Messages"],"tags":[]},{"location":"recipes/customizing-hook-messages/#customizable-17-messages","level":3,"title":"Customizable (17 Messages)","text":"

    Messages that are opinions: project-specific wording that benefits from customization. These are the primary targets for override.

    Hook Variant Description check-backup-age warning Backup staleness warning check-freshness stale Technology constant freshness warning check-ceremonies both Both ceremonies missing check-ceremonies remember Start-of-session ceremony check-ceremonies wrapup End-of-session ceremony check-context-size checkpoint Context capacity warning check-context-size oversize Injection oversize nudge check-context-size window Context window usage warning (>80%) check-journal both Unimported sessions + unenriched entries check-journal unenriched Unenriched journal entries check-journal unimported Unimported sessions check-knowledge warning Knowledge file growth check-map-staleness stale Architecture map staleness check-persistence nudge Context persistence nudge post-commit nudge Post-commit context capture qa-reminder gate Pre-commit QA gate","path":["Recipes","Hooks and Notifications","Customizing Hook Messages"],"tags":[]},{"location":"recipes/customizing-hook-messages/#ctx-specific-10-messages","level":3,"title":"ctx-Specific (10 Messages)","text":"

    Messages specific to ctx's own development workflow. You can customize them, but edit will warn you first.

    Hook Variant Description block-dangerous-commands cp-to-bin Block copy to bin dirs block-dangerous-commands install-to-local-bin Block copy to ~/.local/bin block-dangerous-commands mid-git-push Block git push block-dangerous-commands mid-sudo Block sudo block-non-path-ctx absolute-path Block absolute path invocation block-non-path-ctx dot-slash Block ./ctx invocation block-non-path-ctx go-run Block go run invocation check-reminders reminders Pending reminders relay check-resources alert Resource pressure alert check-version key-rotation Key rotation nudge check-version mismatch Version mismatch","path":["Recipes","Hooks and Notifications","Customizing Hook Messages"],"tags":[]},{"location":"recipes/customizing-hook-messages/#template-variables-reference","level":2,"title":"Template Variables Reference","text":"Hook Variant Variables check-backup-age warning {{.Warnings}} check-freshness stale {{.StaleFiles}} check-context-size checkpoint (none) check-context-size oversize {{.TokenCount}} check-context-size window {{.TokenCount}}, {{.Percentage}} check-ceremonies both, remember, wrapup (none) check-journal both {{.UnimportedCount}}, {{.UnenrichedCount}} check-journal unenriched {{.UnenrichedCount}} check-journal unimported {{.UnimportedCount}} check-knowledge warning {{.FileWarnings}} check-map-staleness stale {{.LastRefreshDate}}, {{.ModuleCount}} check-persistence nudge {{.PromptsSinceNudge}} check-reminders reminders {{.ReminderList}} check-resources alert {{.AlertMessages}} check-version key-rotation {{.KeyAgeDays}} check-version mismatch {{.BinaryVersion}}, {{.PluginVersion}} post-commit nudge (none) qa-reminder gate (none) block-dangerous-commands all variants (none) block-non-path-ctx all variants (none)

    Templates that reference undefined variables render <no value>: no error, graceful degradation.

    ","path":["Recipes","Hooks and Notifications","Customizing Hook Messages"],"tags":[]},{"location":"recipes/customizing-hook-messages/#tips","level":2,"title":"Tips","text":"
    • Override files are version-controlled: they live in .context/ alongside your other context files. Team members get the same customized messages.
    • Start with show: always check the current default before editing. The embedded template is the baseline your override replaces.
    • Use reset to undo: if a customization causes confusion, reset reverts to the embedded default instantly.
    • Empty file = silence: you don't need to delete the hook. An empty override file silences the message while preserving the hook's logic.
    • JSON output for scripting: ctx hook message list --json returns structured data for automation.
    ","path":["Recipes","Hooks and Notifications","Customizing Hook Messages"],"tags":[]},{"location":"recipes/customizing-hook-messages/#see-also","level":2,"title":"See Also","text":"
    • Hook Output Patterns: understanding VERBATIM relays, agent directives, and hard gates
    • Auditing System Hooks: verifying hooks are running and auditing their output
    • Configuration: project-level settings via .ctxrc
    ","path":["Recipes","Hooks and Notifications","Customizing Hook Messages"],"tags":[]},{"location":"recipes/design-before-coding/","level":1,"title":"Design Before Coding","text":"","path":["Recipes","Knowledge and Tasks","Design Before Coding"],"tags":[]},{"location":"recipes/design-before-coding/#the-problem","level":2,"title":"The Problem","text":"

    You start coding a feature. Halfway through, you realize the approach doesn't handle a key edge case. You refactor. Then you discover the CLI interface doesn't fit the existing patterns. More refactoring.

    The design work happened during implementation, mixed in with debugging and trial-and-error. The result works, but the spec was never written down, the trade-offs were never recorded, and the next session has no idea why things are shaped this way.

    How do you front-load design so the implementation is straightforward?

    ","path":["Recipes","Knowledge and Tasks","Design Before Coding"],"tags":[]},{"location":"recipes/design-before-coding/#tldr","level":2,"title":"TL;DR","text":"
    /ctx-brainstorm          # explore the design space\n/ctx-spec                # write the spec document\n/ctx-task-add            # break it into tasks\n/ctx-implement           # execute step-by-step\n

    Four skills, used in sequence. Each produces an artifact that feeds the next.

    ","path":["Recipes","Knowledge and Tasks","Design Before Coding"],"tags":[]},{"location":"recipes/design-before-coding/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Tool Type Purpose /ctx-brainstorm Skill Structured design dialogue: explore approaches /ctx-spec Skill Scaffold and fill out a spec from the template /ctx-task-add Skill Add implementation tasks to TASKS.md /ctx-implement Skill Execute a plan step-by-step with verification /ctx-decision-add Skill Record design choices made during brainstorming","path":["Recipes","Knowledge and Tasks","Design Before Coding"],"tags":[]},{"location":"recipes/design-before-coding/#the-workflow","level":2,"title":"The Workflow","text":"","path":["Recipes","Knowledge and Tasks","Design Before Coding"],"tags":[]},{"location":"recipes/design-before-coding/#step-1-brainstorm-the-design","level":3,"title":"Step 1: Brainstorm the Design","text":"

    Start with /ctx-brainstorm when you have a rough idea but haven't committed to an approach. The skill follows a gated process:

    1. Understand context: what exists today, what's the gap
    2. Clarify the idea: one question at a time, not a wall of questions
    3. Surface non-functional requirements: performance, security, compatibility
    4. Lock understanding: confirm before exploring solutions
    5. Explore 2-3 approaches: with trade-offs for each
    6. Stress-test: poke holes in the chosen approach
    7. Present the design: detailed enough to implement from
    You: \"I want to add RSS feed generation to the blog.\"\n\nAgent: \"Before we design, let me understand: is this for the journal\n  site, the docs site, or both?\"\n\nYou: \"Journal site only.\"\n\nAgent: [asks about update frequency, content scope, existing infra...]\n       [presents 3 approaches with trade-offs]\n\nYou: \"Option 2 - generate at build time.\"\n\nAgent: \"Let me stress-test that. What happens when...\"\n       [probes edge cases]\n       \"Design looks solid. Want me to record the approach as a\n       decision and write a spec?\"\n

    The brainstorm produces a validated design in conversation. It doesn't produce a file - that's what the spec is for.

    ","path":["Recipes","Knowledge and Tasks","Design Before Coding"],"tags":[]},{"location":"recipes/design-before-coding/#step-2-write-the-spec","level":3,"title":"Step 2: Write the Spec","text":"

    Once the design is clear, use /ctx-spec to produce the written artifact. The skill reads the project's spec template (specs/tpl/spec-template.md) and walks through each section:

    You: /ctx-spec (rss feed generation)\n\nAgent: \"Creating specs/rss-feed.md from template.\"\n\n       \"Problem: What user-visible problem does this solve? Why now?\"\n\nYou: \"Users can't subscribe to blog updates. They have to check\n      the site manually.\"\n\nAgent: [writes the Problem section, moves to Approach]\n\n       \"Approach: High-level, how does this work?\"\n\nYou: \"Generate feed.xml during ctx journal site. Atom format.\"\n\nAgent: [writes Approach, moves to Happy Path, then Edge Cases...]\n

    The skill spends extra time on Edge Cases and Error Handling: These are where specs earn their value. The skill pushes for at least three edge cases and won't accept \"none\" without a challenge.

    Sections that don't apply can be skipped. The result is a complete spec at specs/{feature-name}.md.

    ","path":["Recipes","Knowledge and Tasks","Design Before Coding"],"tags":[]},{"location":"recipes/design-before-coding/#step-3-break-into-tasks","level":3,"title":"Step 3: Break into Tasks","text":"

    After the spec is written, the skill offers to create tasks:

    Agent: \"Spec written to specs/rss-feed.md. Want me to break this\n  into tasks in TASKS.md?\"\n\nYou: \"Yes.\"\n\nAgent: [creates a Phase section referencing the spec, adds tasks]\n       \"Added Phase 0.8: RSS/Atom Feed Generation with 4 tasks:\n         - [ ] Implement feed.xml generation in ctx journal site\n         - [ ] Add --feed flag and .ctxrc feed config\n         - [ ] Write tests for feed generation\n         - [ ] Add recipe and update docs\"\n

    Each task is independently completable and references the spec via Spec: specs/rss-feed.md in the Phase header.

    You can also invoke /ctx-task-add directly at any point to add individual tasks.

    ","path":["Recipes","Knowledge and Tasks","Design Before Coding"],"tags":[]},{"location":"recipes/design-before-coding/#step-4-implement-step-by-step","level":3,"title":"Step 4: Implement Step-by-Step","text":"

    Pick up tasks with /ctx-implement. The skill loads the spec, breaks work into atomic steps, and checkpoints after every 3-5 steps with a build and test verification:

    You: /ctx-implement (specs/rss-feed.md)\n\nAgent: \"Loaded spec. Breaking into steps:\n         1. Add FeedEntry type to journal package\n         2. Implement feed.xml template\n         3. Wire into site generation pipeline\n         [builds and tests after step 3]\n         4. Add --feed flag to ctx journal site\n         5. Add .ctxrc feed configuration\n         [builds and tests after step 5]\n         ...\"\n

    If a build or test fails, the agent stops, diagnoses, and fixes before continuing.

    ","path":["Recipes","Knowledge and Tasks","Design Before Coding"],"tags":[]},{"location":"recipes/design-before-coding/#when-to-skip-steps","level":2,"title":"When to Skip Steps","text":"

    Not every feature needs all four steps. Use your judgment:

    Situation Start at Vague idea, multiple valid approaches Step 1: Brainstorm Clear approach, need to document it Step 2: Spec Spec already exists, need to plan work Step 3: Tasks Tasks exist, ready to code Step 4: Implement

    A brainstorm without a spec is fine for small decisions. A spec without a brainstorm is fine when the design is obvious. The full chain is for features complex enough to warrant front-loaded design.

    ","path":["Recipes","Knowledge and Tasks","Design Before Coding"],"tags":[]},{"location":"recipes/design-before-coding/#conversational-approach","level":2,"title":"Conversational Approach","text":"

    You don't need skill names. Natural language works:

    You say What happens \"Let's think through this feature\" /ctx-brainstorm \"Spec this out\" /ctx-spec \"Write a design doc for...\" /ctx-spec \"Break this into tasks\" /ctx-task-add \"Implement the spec\" /ctx-implement \"Let's design before we build\" Starts at brainstorm","path":["Recipes","Knowledge and Tasks","Design Before Coding"],"tags":[]},{"location":"recipes/design-before-coding/#tips","level":2,"title":"Tips","text":"
    • Brainstorm first when uncertain. If you can articulate the approach in two sentences, skip to spec. If you can't, brainstorm.
    • Specs prevent scope creep. The Non-Goals section is as important as the approach. Writing down what you won't do keeps implementation focused.
    • Edge cases are the point. A spec that only describes the happy path isn't a spec - it's a wish. The /ctx-spec skill pushes for at least 3 edge cases because that's where designs break.
    • Record decisions during brainstorming. When you choose between approaches, the agent offers to persist the trade-off via /ctx-decision-add. Accept - future sessions need to know why, not just what.
    • Specs are living documents. Update them when implementation reveals new constraints. A spec that diverges from reality is worse than no spec.
    • The spec template is customizable. Edit specs/tpl/spec-template.md to match your project's needs. The /ctx-spec skill reads whatever template it finds there.
    ","path":["Recipes","Knowledge and Tasks","Design Before Coding"],"tags":[]},{"location":"recipes/design-before-coding/#see-also","level":2,"title":"See Also","text":"
    • Skills Reference: /ctx-brainstorm: structured design dialogue
    • Skills Reference: /ctx-spec: spec scaffolding from template
    • Skills Reference: /ctx-implement: step-by-step execution with verification
    • Tracking Work Across Sessions: task lifecycle and archival
    • Importing Claude Code Plans: turning ephemeral plans into permanent specs
    • Persisting Decisions, Learnings, and Conventions: capturing design trade-offs
    ","path":["Recipes","Knowledge and Tasks","Design Before Coding"],"tags":[]},{"location":"recipes/external-context/","level":1,"title":"Keeping Context in a Separate Repo","text":"","path":["Recipes","Getting Started","Keeping Context in a Separate Repo"],"tags":[]},{"location":"recipes/external-context/#the-problem","level":2,"title":"The Problem","text":"

    ctx files contain project-specific decisions, learnings, conventions, and tasks. By default, they live in .context/ inside the project tree, and that works well when the context can be public.

    But sometimes you need the context outside the project:

    • Open-source projects with private context: Your architectural notes, internal task lists, and scratchpad entries shouldn't ship with the public repo.
    • Compliance or IP concerns: Context files reference sensitive design rationale that belongs in a separate access-controlled repository.
    • Personal preference: You want a single context repo that covers multiple projects, or you just prefer keeping notes separate from code.

    ctx supports this through three configuration methods. This recipe shows how to set them up and how to tell your AI assistant where to find the context.

    ","path":["Recipes","Getting Started","Keeping Context in a Separate Repo"],"tags":[]},{"location":"recipes/external-context/#tldr","level":2,"title":"TL;DR","text":"

    First, initialize ctx with --allow-outside-cwd from your project:

    mkdir ~/repos/myproject-context && cd ~/repos/myproject-context && git init\ncd ~/repos/myproject\nctx --context-dir ~/repos/myproject-context --allow-outside-cwd init\n

    Then, create a .ctxrc in your project root to specify the new .context folder location:

    context_dir: ~/repos/myproject-context\nallow_outside_cwd: true\n

    All ctx commands now use the external directory automatically.

    ","path":["Recipes","Getting Started","Keeping Context in a Separate Repo"],"tags":[]},{"location":"recipes/external-context/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Tool Type Purpose ctx init CLI command Initialize context directory --context-dir Global flag Point ctx at a non-default directory --allow-outside-cwd Global flag Permit context outside the project root .ctxrc Config file Persist the context directory setting CTX_DIR Env variable Override context directory per-session /ctx-status Skill Verify context is loading correctly","path":["Recipes","Getting Started","Keeping Context in a Separate Repo"],"tags":[]},{"location":"recipes/external-context/#the-workflow","level":2,"title":"The Workflow","text":"","path":["Recipes","Getting Started","Keeping Context in a Separate Repo"],"tags":[]},{"location":"recipes/external-context/#step-1-create-the-private-context-repo","level":3,"title":"Step 1: Create the Private Context Repo","text":"

    Create a separate repository for your context files. This can live anywhere: a private GitHub repo, a shared drive, a sibling directory:

    # Create the context repo\nmkdir ~/repos/myproject-context\ncd ~/repos/myproject-context\ngit init\n
    ","path":["Recipes","Getting Started","Keeping Context in a Separate Repo"],"tags":[]},{"location":"recipes/external-context/#step-2-initialize-ctx-pointing-at-it","level":3,"title":"Step 2: Initialize ctx Pointing at It","text":"

    From your project root, initialize ctx with --context-dir pointing to the external location. Because the directory is outside your project tree, you also need --allow-outside-cwd:

    cd ~/repos/myproject\nctx --context-dir ~/repos/myproject-context \\\n    --allow-outside-cwd \\\n    init\n

    This creates the full .context/-style file set inside ~/repos/myproject-context/ instead of ~/repos/myproject/.context/.

    Boundary Validation

    ctx validates that the .context directory is within the current working directory.

    If your external directory is truly outside the project root:

    • Either every ctx command needs --allow-outside-cwd,
    • or you can persist the setting in .ctxrc (next step).
    ","path":["Recipes","Getting Started","Keeping Context in a Separate Repo"],"tags":[]},{"location":"recipes/external-context/#step-3-make-it-stick","level":3,"title":"Step 3: Make It Stick","text":"

    Typing --context-dir and --allow-outside-cwd on every command is tedious. Pick one of these methods to make the configuration permanent.

    ","path":["Recipes","Getting Started","Keeping Context in a Separate Repo"],"tags":[]},{"location":"recipes/external-context/#option-a-ctxrc-recommended","level":4,"title":"Option A: .ctxrc (Recommended)","text":"

    Create a .ctxrc file in your project root:

    # .ctxrc: committed to the project repo\ncontext_dir: ~/repos/myproject-context\nallow_outside_cwd: true\n

    ctx reads .ctxrc automatically. Every command now uses the external directory without extra flags:

    ctx status          # reads from ~/repos/myproject-context\nctx add learning \"Redis MULTI doesn't roll back on error\" \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n

    Commit .ctxrc

    .ctxrc belongs in the project repo. It contains no secrets: It's just a path and a boundary override.

    .ctxrc lets teammates share the same configuration.

    ","path":["Recipes","Getting Started","Keeping Context in a Separate Repo"],"tags":[]},{"location":"recipes/external-context/#option-b-ctx_dir-environment-variable","level":4,"title":"Option B: CTX_DIR Environment Variable","text":"

    Good for CI pipelines, temporary overrides, or when you don't want to commit a .ctxrc:

    # In your shell profile (~/.bashrc, ~/.zshrc)\nexport CTX_DIR=~/repos/myproject-context\n

    Or for a single session:

    CTX_DIR=~/repos/myproject-context ctx status\n
    ","path":["Recipes","Getting Started","Keeping Context in a Separate Repo"],"tags":[]},{"location":"recipes/external-context/#option-c-shell-alias","level":4,"title":"Option C: Shell Alias","text":"

    If you prefer a shell alias over .ctxrc:

    # ~/.bashrc or ~/.zshrc\nalias ctx='ctx --context-dir ~/repos/myproject-context --allow-outside-cwd'\n
    ","path":["Recipes","Getting Started","Keeping Context in a Separate Repo"],"tags":[]},{"location":"recipes/external-context/#priority-order","level":4,"title":"Priority Order","text":"

    When multiple methods are set, ctx resolves the context directory in this order (highest priority first):

    1. --context-dir flag
    2. CTX_DIR environment variable
    3. context_dir in .ctxrc
    4. Default: .context/
    ","path":["Recipes","Getting Started","Keeping Context in a Separate Repo"],"tags":[]},{"location":"recipes/external-context/#step-4-agent-auto-discovery-via-bootstrap","level":3,"title":"Step 4: Agent Auto-Discovery via Bootstrap","text":"

    When context lives outside the project tree, your AI assistant needs to know where to find it. The ctx system bootstrap command resolves the configured context directory and communicates it to the agent automatically:

    $ ctx system bootstrap\nctx system bootstrap\n====================\n\ncontext_dir: /home/user/repos/myproject-context\n\nFiles:\n  CONSTITUTION.md, TASKS.md, DECISIONS.md, ...\n

    The CLAUDE.md template generated by ctx init already instructs the agent to run ctx system bootstrap at session start. Because .ctxrc is in the project root, your agent inherits the external path automatically via the ctx system bootstrap call instruction.

    Here is the relevant section from CLAUDE.md for reference:

    <!-- CLAUDE.md -->\n1. **Run `ctx system bootstrap`**: CRITICAL, not optional.\n   This tells you where the context directory is. If it fails or returns\n   no context_dir, STOP and warn the user.\n

    Moreover, every nudge (context checkpoint, persistence reminder, etc.) also includes a Context: /home/user/repos/myproject-context footer, so the agent remains anchored to the correct directory even in long sessions.

    If you use CTX_DIR instead of .ctxrc, export it in your shell profile so the hook process inherits it:

    export CTX_DIR=~/repos/myproject-context\n
    ","path":["Recipes","Getting Started","Keeping Context in a Separate Repo"],"tags":[]},{"location":"recipes/external-context/#step-5-share-with-teammates","level":3,"title":"Step 5: Share with Teammates","text":"

    Teammates clone both repos and set up .ctxrc:

    # Clone the project\ngit clone git@github.com:org/myproject.git\ncd myproject\n\n# Clone the private context repo\ngit clone git@github.com:org/myproject-context.git ~/repos/myproject-context\n

    If .ctxrc is already committed to the project, they're done: ctx commands will find the external context automatically.

    If teammates use different paths, each developer sets their own CTX_DIR:

    export CTX_DIR=~/my-own-path/myproject-context\n

    For encryption key distribution across the team, see the Syncing Scratchpad Notes recipe.

    ","path":["Recipes","Getting Started","Keeping Context in a Separate Repo"],"tags":[]},{"location":"recipes/external-context/#step-6-day-to-day-sync","level":3,"title":"Step 6: Day-to-Day Sync","text":"

    The external context repo has its own git history. Treat it like any other repo: Commit and push after sessions:

    cd ~/repos/myproject-context\n\n# After a session\ngit add -A\ngit commit -m \"Session: refactored auth module, added rate-limit learning\"\ngit push\n

    Your AI assistant can do this too. When ending a session:

    You: \"Save what we learned and push the context repo.\"\n\nAgent: [runs ctx add learning, then commits and pushes the context repo]\n

    You can also set up a post-session habit: project code gets committed to the project repo, context gets committed to the context repo.

    ","path":["Recipes","Getting Started","Keeping Context in a Separate Repo"],"tags":[]},{"location":"recipes/external-context/#conversational-approach","level":2,"title":"Conversational Approach","text":"

    You don't need to remember the flags; simply ask your assistant:

    ","path":["Recipes","Getting Started","Keeping Context in a Separate Repo"],"tags":[]},{"location":"recipes/external-context/#set-up-your-system-using-natural-language","level":3,"title":"Set Up Your System Using Natural Language","text":"
    You: \"Set up ctx to use ~/repos/myproject-context as the context directory.\"\n\nAgent: \"I'll create a .ctxrc in the project root pointing to that path.\n       I'll also update CLAUDE.md so future sessions know where to find\n       context. Want me to initialize the context files there too?\"\n
    ","path":["Recipes","Getting Started","Keeping Context in a Separate Repo"],"tags":[]},{"location":"recipes/external-context/#configure-separate-repo-for-context-folder-using-natural-language","level":3,"title":"Configure Separate Repo for .context Folder Using Natural Language","text":"
    You: \"My context is in a separate repo. Can you load it?\"\n\nAgent: [reads .ctxrc, finds the path, loads context from the external dir]\n       \"Loaded. You have 3 pending tasks, last session was about the auth\n       refactor.\"\n
    ","path":["Recipes","Getting Started","Keeping Context in a Separate Repo"],"tags":[]},{"location":"recipes/external-context/#tips","level":2,"title":"Tips","text":"
    • Start simple. If you don't need external context yet, don't set it up. The default .context/ in-tree is the easiest path. Move to an external repo when you have a concrete reason.
    • One context repo per project. Sharing a single context directory across multiple projects creates confusion. Keep the mapping 1:1.
    • Use .ctxrc over env vars when the path is stable. It's committed, documented, and works for the whole team without per-developer shell setup.
    • Don't forget the boundary flag. The most common error is Error: context directory is outside the project root. Set allow_outside_cwd: true in .ctxrc or pass --allow-outside-cwd.
    • Commit both repos at session boundaries. Context without code history (or code without context history) loses half the value.
    ","path":["Recipes","Getting Started","Keeping Context in a Separate Repo"],"tags":[]},{"location":"recipes/external-context/#next-up","level":2,"title":"Next Up","text":"

    The Complete Session →: Walk through a full ctx session from start to finish.

    ","path":["Recipes","Getting Started","Keeping Context in a Separate Repo"],"tags":[]},{"location":"recipes/external-context/#see-also","level":2,"title":"See Also","text":"
    • Setting Up ctx Across AI Tools: initial setup recipe
    • Syncing Scratchpad Notes Across Machines: distribute encryption keys when context is shared
    • CLI Reference: all global flags including --context-dir and --allow-outside-cwd
    ","path":["Recipes","Getting Started","Keeping Context in a Separate Repo"],"tags":[]},{"location":"recipes/guide-your-agent/","level":1,"title":"Guide Your Agent","text":"

    Commands vs. Skills

    Commands (ctx status, ctx add task) run in your terminal.

    Skills (/ctx-reflect, /ctx-next) run inside your AI coding assistant.

    Recipes combine both.

    Think of commands as structure and skills as behavior.

    ","path":["Recipes","Getting Started","Guide Your Agent"],"tags":[]},{"location":"recipes/guide-your-agent/#proactive-behavior","level":2,"title":"Proactive Behavior","text":"

    These recipes show explicit commands and skills, but agents trained on the ctx playbook are proactive: They offer to save learnings after debugging, record decisions after trade-offs, create follow-up tasks after completing work, and suggest what to work on next.

    Your questions train the agent. Asking \"what have we learned?\" or \"is our context clean?\" does two things:

    • It triggers the workflow right now,
    • and it reinforces the pattern.

    The more you guide, the more the agent internalizes the behavior and begins offering on its own.

    Each recipe includes a Conversational Approach section showing these natural-language patterns.

    Tip

    Don't wait passively for proactive behavior: especially in early sessions.

    Ask, guide, reinforce. Over time, you ask less and the agent offers more.

    ","path":["Recipes","Getting Started","Guide Your Agent"],"tags":[]},{"location":"recipes/guide-your-agent/#next-up","level":2,"title":"Next Up","text":"

    Setup Across AI Tools →: Initialize ctx and configure hooks for Claude Code, Cursor, Aider, Copilot, or Windsurf.

    ","path":["Recipes","Getting Started","Guide Your Agent"],"tags":[]},{"location":"recipes/guide-your-agent/#see-also","level":2,"title":"See Also","text":"
    • The Complete Session: full session lifecycle from start to finish
    • Prompting Guide: general tips for working effectively with AI coding assistants
    ","path":["Recipes","Getting Started","Guide Your Agent"],"tags":[]},{"location":"recipes/hook-output-patterns/","level":1,"title":"Hook Output Patterns","text":"","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-output-patterns/#the-problem","level":2,"title":"The Problem","text":"

    Claude Code hooks can output text, JSON, or nothing at all. But the format of that output determines who sees it and who acts on it.

    Choose the wrong pattern, and your carefully crafted warning gets silently absorbed by the agent, or your agent-directed nudge gets dumped on the user as noise.

    This recipe catalogs the known hook output patterns and explains when to use each one.

    ","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-output-patterns/#tldr","level":2,"title":"TL;DR","text":"

    Eight patterns span the spectrum from full control to full invisibility. The key anchor points:

    • hard gate (exit 2),
    • VERBATIM relay (agent MUST show),
    • agent directive (context injection),
    • and silent side-effect (background work).

    Most hooks belong in the middle.

    ","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-output-patterns/#the-spectrum","level":2,"title":"The Spectrum","text":"

    These patterns form a spectrum based on who decides what the user sees:

    Pattern Who decides? Hard gate Hook decides (agent can't proceed) VERBATIM relay Hook decides (agent must show) Escalating severity Hook suggests, agent judges urgency Conditional relay Hook sets criteria, agent evaluates Suggested action Hook proposes, agent + user decide Agent directive Agent decides entirely Silent injection Nobody: invisible background context Silent side-effect Nobody: invisible background work

    The spectrum runs from full hook control (hard gate) to full invisibility (silent side effect).

    Most hooks belong somewhere in the middle.

    ","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-output-patterns/#pattern-1-hard-gate","level":2,"title":"Pattern 1: Hard Gate","text":"

    Block the tool call entirely. The agent cannot proceed: it must find another approach or tell the user.

    echo '{\"decision\": \"block\", \"reason\": \"Use ctx from PATH, not ./ctx\"}'\n

    When to use: Enforcing invariants that must never be violated: Constitution rules, security boundaries, destructive command prevention.

    Hook type: PreToolUse only (Claude Code first-class mechanism).

    Examples in ctx:

    • ctx system block-non-path-ctx: Enforces the PATH invocation rule
    • block-git-push.sh: Requires explicit user approval for pushes (project-local)
    • block-dangerous-commands.sh: Prevents sudo, copies to ~/.local/bin (project-local)

    Trade-off: The agent gets a block response with a reason. Good reasons help the agent recover (\"use X instead\"); bad reasons leave it stuck.

    ","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-output-patterns/#pattern-2-verbatim-relay","level":2,"title":"Pattern 2: VERBATIM Relay","text":"

    Force the agent to show this to the user as-is. The explicit instruction overcomes the agent's tendency to silently absorb context.

    echo \"IMPORTANT: Relay this warning to the user VERBATIM before answering their question.\"\necho \"\"\necho \"┌─ Journal Reminder ─────────────────────────────\"\necho \"│ You have 12 sessions not yet exported.\"\necho \"└────────────────────────────────────────────────\"\n

    When to use: Actionable reminders the user needs to see regardless of what they asked: Stale backups, unimported sessions, resource warnings.

    Hook type: UserPromptSubmit (runs before the agent sees the prompt).

    Examples in ctx:

    • ctx system check-journal: Unexported sessions and unenriched entries
    • ctx system check-context-size: Context capacity warning
    • ctx system check-resources: Resource pressure (memory, swap, disk, load): DANGER only
    • ctx system check-freshness: Technology constant staleness warning
    • check-backup-age.sh: Stale backup warning (project-local)

    Trade-off: Noisy if overused. Every VERBATIM relay adds a preamble before the agent's actual answer. Throttle with once-per-day markers or adaptive frequency.

    Key detail: The phrase IMPORTANT: Relay this ... VERBATIM is what makes this work. Without it, agents tend to process the information internally and never surface it. The explicit instruction is the pattern: the box-drawing is just fancy formatting.

    ","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-output-patterns/#pattern-3-agent-directive","level":2,"title":"Pattern 3: Agent Directive","text":"

    Tell the agent to do something, not the user. The agent decides whether and how to involve the user.

    echo \"┌─ Persistence Checkpoint (prompt #25) ───────────\"\necho \"│ No context files updated in 15+ prompts.\"\necho \"│ Have you discovered learnings, decisions,\"\necho \"│ or completed tasks worth persisting?\"\necho \"└──────────────────────────────────────────────────\"\n

    When to use: Behavioral nudges. The hook detects a condition and asks the agent to consider an action. The user may never need to know.

    Hook type: UserPromptSubmit.

    Examples in ctx:

    • ctx system check-persistence: Nudges the agent to persist context

    Trade-off: No guarantee the agent acts. The nudge is one signal among many in the context window. Strong phrasing helps (\"Have you...?\" is better than \"Consider...\"), but ultimately the agent decides.

    ","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-output-patterns/#pattern-4-silent-context-injection","level":2,"title":"Pattern 4: Silent Context Injection","text":"

    Load context with no visible output. The agent gets enriched without either party noticing.

    ctx agent --budget 4000 >/dev/null || true\n

    When to use: Background context loading that should be invisible. The agent benefits from the information, but neither it, nor the user needs to know it happened.

    Hook type: PreToolUse with .* matcher (runs on every tool call).

    Examples in ctx:

    • The ctx agent PreToolUse hook: injects project context silently

    Trade-off: Adds latency to every tool call. Keep the injected content small and fast to generate.

    ","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-output-patterns/#pattern-5-silent-side-effect","level":2,"title":"Pattern 5: Silent Side-Effect","text":"

    Do work, produce no output: Housekeeping that needs no acknowledgment.

    find \"$CTX_TMPDIR\" -type f -mtime +15 -delete\n

    When to use: Cleanup, log rotation, temp file management. Anything where the action is the point and nobody needs to know it happened.

    Hook type: Any hook where output is irrelevant.

    Examples in ctx:

    • Log rotation, marker file cleanup, state directory maintenance

    Trade-off: None, if the action is truly invisible. If it can fail in a way that matters, consider logging.

    ","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-output-patterns/#pattern-6-conditional-relay","level":3,"title":"Pattern 6: Conditional Relay","text":"

    Tell the agent to relay only if a condition holds in context.

    echo \"If the user's question involves modifying .context/ files,\"\necho \"relay this warning VERBATIM:\"\necho \"\"\necho \"┌─ Context Integrity ─────────────────────────────\"\necho \"│ CONSTITUTION.md has not been verified in 7 days.\"\necho \"└────────────────────────────────────────────────\"\necho \"\"\necho \"Otherwise, proceed normally.\"\n

    When to use: Warnings that only matter in certain contexts. Avoids noise when the user is doing unrelated work.

    Trade-off: Depends on the agent's judgment about when the condition holds. More fragile than VERBATIM relay, but less noisy.

    ","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-output-patterns/#pattern-7-suggested-action","level":3,"title":"Pattern 7: Suggested Action","text":"

    Give the agent a specific command to propose to the user.

    echo \"┌─ Stale Dependencies ──────────────────────────\"\necho \"│ go.sum is 30+ days newer than go.mod.\"\necho \"│ Suggested: run \\`go mod tidy\\`\"\necho \"│ Ask the user before proceeding.\"\necho \"└───────────────────────────────────────────────\"\n

    When to use: The hook detects a fixable condition and knows the fix. Goes beyond a nudge: Gives the agent a concrete next step. The agent still asks for permission but knows exactly what to propose.

    Trade-off: The suggestion might be wrong or outdated. The \"ask the user before proceeding\" part is critical.

    ","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-output-patterns/#pattern-8-escalating-severity","level":3,"title":"Pattern 8: Escalating Severity","text":"

    Different urgency tiers with different relay expectations.

    # INFO: agent processes silently, mentions if relevant\necho \"INFO: Last test run was 3 days ago.\"\n\n# WARN: agent should mention to user at next natural pause\necho \"WARN: 12 uncommitted changes across 3 branches.\"\n\n# CRITICAL: agent must relay immediately, before any other work\necho \"CRITICAL: Relay VERBATIM before answering. Disk usage at 95%.\"\n

    When to use: When you have multiple hooks producing output and need to avoid overwhelming the user. INFO gets absorbed, WARN gets mentioned, CRITICAL interrupts.

    Examples in ctx:

    • ctx system check-resources: Uses two tiers (WARNING/DANGER) internally but only fires the VERBATIM relay at DANGER level: WARNING is silent. See ctx system for the user-facing command that shows both tiers.

    Trade-off: Requires agent training or convention to recognize the tiers. Without a shared protocol, the prefixes are just text.

    ","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-output-patterns/#choosing-a-pattern","level":2,"title":"Choosing a Pattern","text":"
    Is the agent about to do something forbidden?\n  └─ Yes → Hard gate\n\nDoes the user need to see this regardless of what they asked?\n  └─ Yes → VERBATIM relay\n  └─ Sometimes → Conditional relay\n\nShould the agent consider an action?\n  └─ Yes, with a specific fix → Suggested action\n  └─ Yes, open-ended → Agent directive\n\nIs this background context the agent should have?\n  └─ Yes → Silent injection\n\nIs this housekeeping?\n  └─ Yes → Silent side-effect\n
    ","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-output-patterns/#design-tips","level":2,"title":"Design Tips","text":"

    Throttle aggressively: VERBATIM relays that fire every prompt will be ignored or resented. Use once-per-day markers (touch $REMINDED), adaptive frequency (every Nth prompt), or staleness checks (only fire if condition persists).

    Include actionable commands: \"You have 12 unimported sessions\" is less useful than \"You have 12 unimported sessions. Run: ctx journal import --all.\" Give the user (or agent) the exact next step.

    Use box-drawing for visual structure: The ┌─ ─┐ │ └─ ─┘ pattern makes hook output visually distinct from agent prose. It also signals \"this is machine-generated, not agent opinion.\"

    Test the silence path: Most hook runs should produce no output (the condition isn't met). Make sure the common case is fast and silent.

    ","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-output-patterns/#common-pitfalls","level":2,"title":"Common Pitfalls","text":"

    Lessons from 19 days of hook debugging in ctx. Every one of these was encountered, debugged, and fixed in production.

    ","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-output-patterns/#silent-misfire-wrong-key-name","level":3,"title":"Silent Misfire: Wrong Key Name","text":"
    { \"PreToolUseHooks\": [ ... ] }\n

    The key is PreToolUse, not PreToolUseHooks. Claude Code validates silently: A misspelled key means the hook is ignored with no error. Always test with a debug echo first to confirm the hook fires before adding real logic.

    ","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-output-patterns/#json-escaping-breaks-shell-commands","level":3,"title":"JSON Escaping Breaks Shell Commands","text":"

    Go's json.Marshal escapes >, <, and & as Unicode sequences (\\u003e) by default. This breaks shell commands in generated config:

    \"command\": \"ctx agent 2\\u003e/dev/null\"\n

    Fix: use json.Encoder with SetEscapeHTML(false) when generating hook configuration.

    ","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-output-patterns/#stdin-not-environment-variables","level":3,"title":"stdin, Not Environment Variables","text":"

    Hook input arrives as JSON via stdin, not environment variables:

    # Wrong:\nCOMMAND=\"$CLAUDE_TOOL_INPUT\"\n\n# Right:\nHOOK_INPUT=$(cat)\nCOMMAND=$(echo \"$HOOK_INPUT\" | jq -r '.tool_input.command // empty')\n
    ","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-output-patterns/#regex-overfitting","level":3,"title":"Regex Overfitting","text":"

    A regex meant to catch ctx as a binary will also match ctx as a directory component:

    # Too broad: blocks: git -C /home/jose/WORKSPACE/ctx status\n(/home/|/tmp/|/var/)[^ ]*ctx[^ ]*\n\n# Narrow to binary only:\n(/home/|/tmp/|/var/)[^ ]*/ctx( |$)\n

    Test hook regexes against paths that contain the target string as a substring, not just as the final component.

    ","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-output-patterns/#repetition-fatigue","level":3,"title":"Repetition Fatigue","text":"

    Injecting context on every tool call sounds safe. In practice, after seeing the same context injection fifteen times, the agent treats it as background noise: Conventions stated in the injected context get violated because salience has been destroyed by repetition.

    Fix: cooldowns. ctx agent --session $PPID --cooldown 10m injects at most once per ten minutes per session using a tombstone file in /tmp/. This is not an optimization; it is a correction for a design flaw. Every injection consumes attention budget: 50 tool calls at 4,000 tokens each means 200,000 tokens of repeated context, most of it wasted.

    ","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-output-patterns/#hardcoded-paths","level":3,"title":"Hardcoded Paths","text":"

    A username rename (parallels to jose) broke every hook at once. Use $CLAUDE_PROJECT_DIR instead of absolute paths:

    \"command\": \"\\\"$CLAUDE_PROJECT_DIR\\\"/.claude/hooks/block-git-push.sh\"\n

    If the platform provides a runtime variable for paths, always use it.

    ","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-output-patterns/#next-up","level":2,"title":"Next Up","text":"

    Webhook Notifications →: Get push notifications when loops complete, hooks fire, or agents hit milestones.

    ","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-output-patterns/#see-also","level":2,"title":"See Also","text":"
    • Customizing Hook Messages: override what hooks say without changing what they do
    • Claude Code Permission Hygiene: how permissions and hooks work together
    • Defense in Depth: why hooks matter for agent security
    ","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/","level":1,"title":"Hook Sequence Diagrams","text":"","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#hook-lifecycle","level":2,"title":"Hook Lifecycle","text":"

    This page documents the ctx system hooks — the built-in ctx system * subcommands that Claude Code invokes via .claude/hooks.json at lifecycle events. These are owned by ctx itself, not authored by users.

    Not to Be Confused with ctx trigger

    ctx has three distinct hook-like layers:

    • ctx system hooks (this page) — built-in, owned by ctx, wired into Claude Code via internal/assets/claude/hooks/hooks.json.
    • ctx trigger — user-authored shell scripts in .context/hooks/<type>/*.sh. See ctx trigger reference and the trigger authoring recipe.
    • Claude Code hooks configured directly in .claude/settings.local.json — tool-specific, not portable across AI tools.

    This page is only about the first category.

    Every ctx system hook is a Go binary invoked by Claude Code at one of three lifecycle events: PreToolUse (before a tool runs, can block), PostToolUse (after a tool completes), or UserPromptSubmit (on every user prompt, before any tools run). Hooks receive JSON on stdin and emit JSON or plain text on stdout.

    ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#pretooluse-hooks","level":2,"title":"PreToolUse Hooks","text":"

    These fire before a tool executes. They can block, gate, or inject context.

    ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#context-load-gate","level":3,"title":"Context-Load-Gate","text":"

    Matcher: .* (all tools)

    Injects the full context packet on first tool use of a session. One-shot per session.

    sequenceDiagram\n    participant CC as Claude Code\n    participant Hook as context-load-gate\n    participant State as .context/state/\n    participant Ctx as .context/ files\n    participant Git as git log\n\n    CC->>Hook: stdin {command, session_id}\n    Hook->>Hook: Check initialized\n    alt not initialized\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Hook: Check paused\n    alt paused\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>State: Check ctx-loaded-{session} marker\n    alt marker exists\n        Hook-->>CC: (silent exit, already fired)\n    end\n    Hook->>State: Create marker (one-shot guard)\n    Hook->>State: Prune stale session files\n    loop Each file in ReadOrder\n        alt GLOSSARY or TASK\n            Note over Hook: Skip (Task mentioned in footer only)\n        else DECISION or LEARNING\n            Hook->>Ctx: Extract index table only\n        else other files\n            Hook->>Ctx: Read full content\n        end\n        Hook->>Hook: Estimate tokens per file\n    end\n    Hook->>Git: Detect changes since last session\n    Hook->>Hook: Build injection (files + changes + token counts)\n    Hook-->>CC: JSON {additionalContext: injection}\n    Hook->>Hook: Send webhook (metadata only)\n    Hook->>State: Write oversize flag if tokens > threshold
    ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#block-non-path-ctx","level":3,"title":"Block-Non-Path-ctx","text":"

    Matcher: Bash

    Blocks ./ctx, go run ./cmd/ctx, or absolute-path ctx invocations. Constitutionally enforced.

    sequenceDiagram\n    participant CC as Claude Code\n    participant Hook as block-non-path-ctx\n    participant Tpl as Message Template\n\n    CC->>Hook: stdin {command, session_id}\n    Hook->>Hook: Extract command\n    alt command empty\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Hook: Test regex: relative-path, go-run, absolute-path\n    alt no match\n        Hook-->>CC: (silent exit)\n    end\n    alt absolute-path + test exception\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Tpl: LoadMessage(hook, variant, fallback)\n    Hook-->>CC: JSON {decision: BLOCK, reason + constitution suffix}\n    Hook->>Hook: NudgeAndRelay(message)
    ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#qa-reminder","level":3,"title":"Qa-Reminder","text":"

    Matcher: Bash

    Gate nudge before any git command. Reminds agent to lint/test.

    sequenceDiagram\n    participant CC as Claude Code\n    participant Hook as qa-reminder\n    participant Tpl as Message Template\n\n    CC->>Hook: stdin {command, session_id}\n    Hook->>Hook: Check initialized + HookPreamble\n    alt not initialized or paused\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Hook: Check command contains \"git\"\n    alt no git command\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Tpl: LoadMessage(hook, gate, fallback)\n    Hook->>Hook: AppendDir(message)\n    Hook-->>CC: JSON {additionalContext: QA gate}\n    Hook->>Hook: Relay(message)
    ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#specs-nudge","level":3,"title":"Specs-Nudge","text":"

    Matcher: EnterPlanMode

    Nudges agent to save plans/specs when new implementation detected.

    sequenceDiagram\n    participant CC as Claude Code\n    participant Hook as specs-nudge\n    participant Tpl as Message Template\n\n    CC->>Hook: stdin {command, session_id}\n    Hook->>Hook: Check initialized + HookPreamble\n    alt not initialized or paused\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Tpl: LoadMessage(hook, nudge, fallback)\n    Hook->>Hook: AppendDir(message)\n    Hook-->>CC: JSON {additionalContext: specs nudge}\n    Hook->>Hook: Relay(message)
    ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#posttooluse-hooks","level":2,"title":"PostToolUse Hooks","text":"

    These fire after a tool completes. They observe, nudge, and track state.

    ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#post-commit","level":3,"title":"Post-Commit","text":"

    Matcher: Bash

    Fires after git commit (not amend). Nudges for context capture and checks version drift.

    sequenceDiagram\n    participant CC as Claude Code\n    participant Hook as post-commit\n    participant Tpl as Message Template\n\n    CC->>Hook: stdin {command, session_id}\n    Hook->>Hook: Check initialized + HookPreamble\n    alt not initialized or paused\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Hook: Regex: command contains \"git commit\"?\n    alt not a git commit\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Hook: Regex: command contains \"--amend\"?\n    alt is amend\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Tpl: LoadMessage(hook, nudge, fallback)\n    Hook->>Hook: AppendDir(message)\n    Hook-->>CC: JSON {additionalContext: post-commit nudge}\n    Hook->>Hook: Relay(message)\n    Hook->>Hook: CheckVersionDrift()
    ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#check-task-completion","level":3,"title":"Check-Task-Completion","text":"

    Matcher: Edit, Write

    Configurable-interval nudge after edits. Per-session counter resets after firing.

    sequenceDiagram\n    participant CC as Claude Code\n    participant Hook as check-task-completion\n    participant State as .context/state/\n    participant RC as .ctxrc\n    participant Tpl as Message Template\n\n    CC->>Hook: stdin {session_id}\n    Hook->>Hook: Check initialized + HookPreamble\n    alt not initialized or paused\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>RC: Read task nudge interval\n    alt interval <= 0 (disabled)\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>State: Read per-session counter\n    Hook->>Hook: Increment counter\n    alt counter < interval\n        Hook->>State: Write counter\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>State: Reset counter to 0\n    Hook->>Tpl: LoadMessage(hook, nudge, fallback)\n    Hook-->>CC: JSON {additionalContext: task nudge}\n    Hook->>Hook: Relay(message)
    ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#userpromptsubmit-hooks","level":2,"title":"UserPromptSubmit Hooks","text":"

    These fire on every user prompt, before any tools run. They perform health checks, track state, and nudge for housekeeping.

    ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#check-context-size","level":3,"title":"Check-Context-Size","text":"

    Adaptive context window monitoring. Fires checkpoints, window warnings, and billing alerts based on prompt count and token usage.

    sequenceDiagram\n    participant CC as Claude Code\n    participant Hook as check-context-size\n    participant State as .context/state/\n    participant Session as Session JSONL\n    participant Tpl as Message Template\n\n    CC->>Hook: stdin {session_id}\n    Hook->>Hook: Check initialized\n    Hook->>Hook: Read input, resolve session ID\n    Hook->>Hook: Check paused\n    alt paused\n        Hook-->>CC: Pause acknowledgment message\n    end\n    Hook->>State: Increment session prompt counter\n    Hook->>Session: Read token info (tokens, model, window)\n\n    rect rgb(255, 240, 240)\n        Note over Hook: Billing check (independent, never suppressed)\n        alt tokens >= billing threshold (one-shot)\n            Hook->>Tpl: LoadMessage(hook, billing, vars)\n            Hook-->>CC: Billing warning nudge box\n            Hook->>Hook: NudgeAndRelay(billing message)\n        end\n    end\n\n    Hook->>State: Check wrap-up marker\n    alt wrapped up recently (< 2h)\n        Hook->>State: Write stats (event: suppressed)\n        Hook-->>CC: (silent exit)\n    end\n\n    rect rgb(240, 248, 255)\n        Note over Hook: Adaptive frequency check\n        alt count > 30 and count % 3 == 0\n            Note over Hook: High frequency trigger\n        else count > 15 and count % 5 == 0\n            Note over Hook: Medium frequency trigger\n        else\n            Hook->>State: Write stats (event: silent)\n            Hook-->>CC: (silent exit)\n        end\n    end\n\n    alt context window >= 80%\n        Hook->>Tpl: LoadMessage(hook, window, vars)\n        Hook-->>CC: Window warning nudge box\n        Hook->>Hook: NudgeAndRelay(window message)\n    else checkpoint trigger\n        Hook->>Tpl: LoadMessage(hook, checkpoint)\n        Hook-->>CC: Checkpoint nudge box\n        Hook->>Hook: NudgeAndRelay(checkpoint message)\n    end\n    Hook->>State: Write session stats
    ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#check-ceremonies","level":3,"title":"Check-Ceremonies","text":"

    Daily check for /ctx-remember and /ctx-wrap-up usage in recent journal entries.

    sequenceDiagram\n    participant CC as Claude Code\n    participant Hook as check-ceremonies\n    participant State as .context/state/\n    participant Journal as Journal files\n    participant Tpl as Message Template\n\n    CC->>Hook: stdin {session_id}\n    Hook->>Hook: Check initialized + HookPreamble\n    alt not initialized or paused\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>State: Check daily throttle marker\n    alt throttled\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Journal: Read recent files (lookback window)\n    alt no journal files\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Journal: Scan for /ctx-remember and /ctx-wrap-up\n    alt both ceremonies present\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Tpl: LoadMessage(hook, variant, fallback)\n    Note over Hook: variant: both | remember | wrapup\n    Hook-->>CC: Nudge box (missing ceremonies)\n    Hook->>Hook: NudgeAndRelay(message)\n    Hook->>State: Touch throttle marker
    ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#check-freshness","level":3,"title":"Check-Freshness","text":"

    Daily check for technology-dependent constants that may need review.

    sequenceDiagram\n    participant CC as Claude Code\n    participant Hook as check-freshness\n    participant State as .context/state/\n    participant FS as Filesystem\n    participant Tpl as Message Template\n\n    CC->>Hook: stdin {session_id}\n    Hook->>Hook: Check initialized + HookPreamble\n    alt not initialized or paused\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>State: Check daily throttle marker\n    alt throttled\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>FS: Stat tracked files (5 source files)\n    alt all files modified within 6 months\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Tpl: LoadMessage(hook, stale, {StaleFiles})\n    Hook-->>CC: Nudge box (stale file list + review URL)\n    Hook->>Hook: NudgeAndRelay(message)\n    Hook->>State: Touch throttle marker
    ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#check-journal","level":3,"title":"Check-Journal","text":"

    Daily check for unimported sessions and unenriched journal entries.

    sequenceDiagram\n    participant CC as Claude Code\n    participant Hook as check-journal\n    participant State as .context/state/\n    participant Journal as Journal dir\n    participant Claude as Claude projects dir\n    participant Tpl as Message Template\n\n    CC->>Hook: stdin {session_id}\n    Hook->>Hook: Check initialized + HookPreamble\n    alt not initialized or paused\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>State: Check daily throttle marker\n    alt throttled\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Journal: Check dir exists\n    Hook->>Claude: Check dir exists\n    alt either dir missing\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Journal: Get newest entry mtime\n    Hook->>Claude: Count .jsonl files newer than journal\n    Hook->>Journal: Count unenriched entries\n    alt unimported == 0 and unenriched == 0\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Tpl: LoadMessage(hook, variant, {counts})\n    Note over Hook: variant: both | unimported | unenriched\n    Hook-->>CC: Nudge box (counts)\n    Hook->>Hook: NudgeAndRelay(message)\n    Hook->>State: Touch throttle marker
    ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#check-knowledge","level":3,"title":"Check-Knowledge","text":"

    Daily check for knowledge file entry/line counts exceeding configured thresholds.

    sequenceDiagram\n    participant CC as Claude Code\n    participant Hook as check-knowledge\n    participant State as .context/state/\n    participant Ctx as .context/ files\n    participant RC as .ctxrc\n    participant Tpl as Message Template\n\n    CC->>Hook: stdin {session_id}\n    Hook->>Hook: Check initialized + HookPreamble\n    alt not initialized or paused\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>State: Check daily throttle marker\n    alt throttled\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>RC: Read thresholds (decisions, learnings, conventions)\n    alt all thresholds disabled (0)\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Ctx: Parse DECISIONS.md entry count\n    Hook->>Ctx: Parse LEARNINGS.md entry count\n    Hook->>Ctx: Count CONVENTIONS.md lines\n    Hook->>Hook: Compare against thresholds\n    alt all within limits\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Tpl: LoadMessage(hook, warning, {FileWarnings})\n    Hook-->>CC: Nudge box (file warnings)\n    Hook->>Hook: NudgeAndRelay(message)\n    Hook->>State: Touch throttle marker
    ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#check-map-staleness","level":3,"title":"Check-Map-Staleness","text":"

    Daily check for architecture map age and relevant code changes.

    sequenceDiagram\n    participant CC as Claude Code\n    participant Hook as check-map-staleness\n    participant State as .context/state/\n    participant Tracking as map-tracking.json\n    participant Git as git log\n    participant Tpl as Message Template\n\n    CC->>Hook: stdin {session_id}\n    Hook->>Hook: Check initialized + HookPreamble\n    alt not initialized or paused\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>State: Check daily throttle marker\n    alt throttled\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Tracking: Read map-tracking.json\n    alt missing, invalid, or opted out\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Hook: Parse LastRun date\n    alt map not stale (< N days)\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Git: Count commits touching internal/ since LastRun\n    alt no relevant commits\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Tpl: LoadMessage(hook, stale, {date, count})\n    Hook-->>CC: Nudge box (last refresh + commit count)\n    Hook->>Hook: NudgeAndRelay(message)\n    Hook->>State: Touch throttle marker
    ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#check-memory-drift","level":3,"title":"Check-Memory-Drift","text":"

    Per-session check for MEMORY.md changes since last sync.

    sequenceDiagram\n    participant CC as Claude Code\n    participant Hook as check-memory-drift\n    participant State as .context/state/\n    participant Mem as memory.Discover\n    participant Tpl as Message Template\n\n    CC->>Hook: stdin {session_id}\n    Hook->>Hook: Check initialized + HookPreamble\n    alt not initialized or paused\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>State: Check session tombstone\n    alt already nudged this session\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Mem: DiscoverMemoryPath(projectRoot)\n    alt auto memory not active\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Mem: HasDrift(contextDir, sourcePath)\n    alt no drift\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Tpl: LoadMessage(hook, nudge, fallback)\n    Hook-->>CC: Nudge box (drift reminder)\n    Hook->>Hook: NudgeAndRelay(message)\n    Hook->>State: Touch session tombstone
    ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#check-persistence","level":3,"title":"Check-Persistence","text":"

    Tracks context file modification and nudges when edits happen without persisting context. Adaptive threshold based on prompt count.

    sequenceDiagram\n    participant CC as Claude Code\n    participant Hook as check-persistence\n    participant State as .context/state/\n    participant Ctx as .context/ files\n    participant Tpl as Message Template\n\n    CC->>Hook: stdin {session_id}\n    Hook->>Hook: Check initialized + HookPreamble\n    alt not initialized or paused\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>State: Read persistence state {Count, LastNudge, LastMtime}\n    alt first prompt (no state)\n        Hook->>State: Initialize state {Count:1, LastNudge:0, LastMtime:now}\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Hook: Increment Count\n    Hook->>Ctx: Get current context mtime\n    alt context modified since LastMtime\n        Hook->>State: Reset LastNudge = Count, update LastMtime\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Hook: sinceNudge = Count - LastNudge\n    Hook->>Hook: PersistenceNudgeNeeded(Count, sinceNudge)?\n    alt threshold not reached\n        Hook->>State: Write state\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Tpl: LoadMessage(hook, nudge, vars)\n    Hook-->>CC: Nudge box (prompt count, time since last persist)\n    Hook->>Hook: NudgeAndRelay(message)\n    Hook->>State: Update LastNudge = Count, write state
    ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#check-reminders","level":3,"title":"Check-Reminders","text":"

    Per-prompt check for due reminders. No throttle.

    sequenceDiagram\n    participant CC as Claude Code\n    participant Hook as check-reminders\n    participant Store as Reminders store\n    participant Tpl as Message Template\n\n    CC->>Hook: stdin {session_id}\n    Hook->>Hook: Check initialized + HookPreamble\n    alt not initialized or paused\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Store: ReadReminders()\n    alt load error\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Hook: Filter by due date (After <= today)\n    alt no due reminders\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Tpl: LoadMessage(hook, reminders, {list})\n    Hook-->>CC: Nudge box (reminder list + dismiss hints)\n    Hook->>Hook: NudgeAndRelay(message)
    ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#check-resources","level":3,"title":"Check-Resources","text":"

    Checks system resources (memory, swap, disk, load). Fires on every prompt. No initialization required.

    sequenceDiagram\n    participant CC as Claude Code\n    participant Hook as check-resources\n    participant Sys as sysinfo\n    participant Tpl as Message Template\n\n    CC->>Hook: stdin {command, session_id}\n    Hook->>Hook: HookPreamble (parse input, check pause)\n    alt paused\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Sys: Collect snapshot (memory, swap, disk, load)\n    Hook->>Sys: Evaluate thresholds per metric\n    alt max severity < Danger\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Hook: Filter alerts to Danger level only\n    Hook->>Hook: Build alertMessages from danger alerts\n    Hook->>Tpl: LoadMessage(hook, alert, {alertMessages}, fallback)\n    Hook-->>CC: Nudge box (danger alerts)\n    Hook->>Hook: NudgeAndRelay(message)
    ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#check-version","level":3,"title":"Check-Version","text":"

    Daily binary-vs-plugin version comparison with piggybacked key rotation check.

    sequenceDiagram\n    participant CC as Claude Code\n    participant Hook as check-version\n    participant State as .context/state/\n    participant Config as Binary + Plugin version\n    participant Tpl as Message Template\n\n    CC->>Hook: stdin {session_id}\n    Hook->>Hook: Check initialized + HookPreamble\n    alt not initialized or paused\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>State: Check daily throttle marker\n    alt throttled\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Config: Read binary version\n    alt dev build\n        Hook->>State: Touch throttle\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Config: Read plugin version\n    alt plugin version not found or parse error\n        Hook->>State: Touch throttle\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Hook: Compare major.minor\n    alt versions match\n        Hook->>State: Touch throttle\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Tpl: LoadMessage(hook, mismatch, {versions})\n    Hook-->>CC: Nudge box (version mismatch)\n    Hook->>Hook: NudgeAndRelay(message)\n    Hook->>State: Touch throttle\n    Hook->>Hook: CheckKeyAge() (piggybacked)
    ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#heartbeat","level":3,"title":"Heartbeat","text":"

    Silent per-prompt pulse. Tracks prompt count, context modification, and token usage. The agent never sees this hook's output.

    sequenceDiagram\n    participant CC as Claude Code\n    participant Hook as heartbeat\n    participant State as .context/state/\n    participant Ctx as .context/ files\n    participant Notify as Webhook + EventLog\n\n    CC->>Hook: stdin {session_id}\n    Hook->>Hook: Check initialized + HookPreamble\n    alt not initialized or paused\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>State: Increment heartbeat counter\n    Hook->>Ctx: Get latest context file mtime\n    Hook->>State: Compare with last recorded mtime\n    Hook->>State: Update mtime record\n    Hook->>State: Read session token info\n    Hook->>Notify: Send heartbeat notification\n    Hook->>Notify: Append to event log\n    Hook->>State: Write heartbeat log entry\n    Note over Hook: No stdout - agent never sees this
    ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#project-local-hooks","level":2,"title":"Project-Local Hooks","text":"

    These hooks are configured in settings.local.json and are not shipped with ctx. They are specific to individual developer setups.

    ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#block-dangerous-commands","level":3,"title":"Block-Dangerous-Commands","text":"

    Lifecycle: PreToolUse. Matcher: Bash

    Blocks dangerous shell patterns (sudo, git push, cp to bin). No initialization or pause checks: always active.

    sequenceDiagram\n    participant CC as Claude Code\n    participant Hook as block-dangerous-commands\n    participant Tpl as Message Template\n\n    CC->>Hook: stdin {command, session_id}\n    Hook->>Hook: Extract command\n    alt command empty\n        Hook-->>CC: (silent exit)\n    end\n    Note over Hook: Cascade: first matching regex wins\n    Hook->>Hook: Test MidSudo regex\n    alt match\n        Hook->>Hook: variant = sudo\n    end\n    Hook->>Hook: Test MidGitPush regex (if no variant)\n    alt match\n        Hook->>Hook: variant = git-push\n    end\n    Hook->>Hook: Test CpMvToBin regex (if no variant)\n    alt match\n        Hook->>Hook: variant = cp-to-bin\n    end\n    Hook->>Hook: Test InstallToLocalBin regex (if no variant)\n    alt match\n        Hook->>Hook: variant = install-to-bin\n    end\n    alt no variant matched\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Tpl: LoadMessage(hook, variant, fallback)\n    Hook-->>CC: JSON {decision: BLOCK, reason}\n    Hook->>Hook: NudgeAndRelay(message)
    ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#check-backup-age","level":3,"title":"Check-Backup-Age","text":"

    Lifecycle: UserPromptSubmit.

    Daily check for SMB mount and backup freshness.

    sequenceDiagram\n    participant CC as Claude Code\n    participant Hook as check-backup-age\n    participant State as .context/state/\n    participant FS as Filesystem\n    participant Tpl as Message Template\n\n    CC->>Hook: stdin {session_id}\n    Hook->>Hook: Check initialized + HookPreamble\n    alt not initialized or paused\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>State: Check daily throttle marker\n    alt throttled\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>FS: Check SMB mount (if env var set)\n    Hook->>FS: Check backup marker file age\n    alt no warnings\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Tpl: LoadMessage(hook, warning, {Warnings})\n    Hook-->>CC: Nudge box (warnings)\n    Hook->>Hook: NudgeAndRelay(message)\n    Hook->>State: Touch throttle marker
    ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#throttling-summary","level":2,"title":"Throttling Summary","text":"Hook Lifecycle Throttle Type Scope context-load-gate PreToolUse One-shot marker Per session block-non-path-ctx PreToolUse None Every match qa-reminder PreToolUse None Every git command specs-nudge PreToolUse None Every prompt post-commit PostToolUse None Every git commit check-task-completion PostToolUse Configurable interval Per session check-context-size UserPromptSubmit Adaptive counter Per session check-ceremonies UserPromptSubmit Daily marker Once per day check-freshness UserPromptSubmit Daily marker Once per day check-journal UserPromptSubmit Daily marker Once per day check-knowledge UserPromptSubmit Daily marker Once per day check-map-staleness UserPromptSubmit Daily marker Once per day check-memory-drift UserPromptSubmit Session tombstone Once per session check-persistence UserPromptSubmit Adaptive counter Per session check-reminders UserPromptSubmit None Every prompt check-resources UserPromptSubmit None Every prompt check-version UserPromptSubmit Daily marker Once per day heartbeat UserPromptSubmit None Every prompt block-dangerous-commands PreToolUse * None Every match check-backup-age UserPromptSubmit * Daily marker Once per day

    * Project-local hook (settings.local.json), not shipped with ctx.

    ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#state-file-reference","level":2,"title":"State File Reference","text":"

    All state files live in .context/state/.

    File Pattern Hook Purpose ctx-loaded-{session} context-load-gate One-shot injection marker ctx-paused-{session} (all) Session pause marker ctx-wrapped-up check-context-size Suppress nudges after wrap-up (2h expiry) freshness-checked check-freshness Daily throttle backup-reminded check-backup-age Daily throttle ceremony-reminded check-ceremonies Daily throttle journal-reminded check-journal Daily throttle knowledge-reminded check-knowledge Daily throttle map-staleness-reminded check-map-staleness Daily throttle version-checked check-version Daily throttle memory-drift-nudged-{session} check-memory-drift Per-session tombstone ctx-context-count-{session} check-context-size Prompt counter stats-{session}.jsonl check-context-size Session stats log persist-{session} check-persistence Counter + mtime state ctx-task-count-{session} check-task-completion Prompt counter heartbeat-count-{session} heartbeat Prompt counter heartbeat-mtime-{session} heartbeat Last context mtime","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hub-cluster/","level":1,"title":"HA Cluster","text":"","path":["Recipes","Hub","HA Cluster"],"tags":[]},{"location":"recipes/hub-cluster/#ctx-hub-high-availability-cluster","level":1,"title":"ctx Hub: High-Availability Cluster","text":"

    Run multiple hub nodes with Raft-based leader election for redundancy. Any follower can take over if the leader dies.

    This recipe assumes you've read the ctx Hub overview and the Multi-machine setup. HA only makes sense in the \"small trusted team\" story — a personal cross-project brain on one workstation does not need three Raft peers.

    Raft-Lite

    ctx uses Raft only for leader election, not for data consensus. Entry replication happens via sequence-based gRPC sync on the append-only JSONL store. This is simpler than full Raft log replication and is possible because the store is append-only and clients are idempotent. The implication: a write accepted by the leader is durable on the leader immediately; followers catch up asynchronously. If the leader crashes between accepting a write and replicating it, that write can be lost. Do not use the hub as a bank ledger.

    ","path":["Recipes","Hub","HA Cluster"],"tags":[]},{"location":"recipes/hub-cluster/#topology","level":2,"title":"Topology","text":"

    A minimum HA cluster is three nodes. Two is worse than one — it doubles failure probability without providing quorum.

             +-------------+\n         |  client(s)  |\n         +------+------+\n                |\n    +-----------+-----------+\n    |           |           |\n+---v---+   +---v---+   +---v---+\n| hub A |   | hub B |   | hub C |\n| :9900 |   | :9900 |   | :9900 |\n+-------+   +-------+   +-------+\n    ^           ^           ^\n    +-----------+-----------+\n        Raft (leader election)\n        gRPC (data sync)\n
    ","path":["Recipes","Hub","HA Cluster"],"tags":[]},{"location":"recipes/hub-cluster/#step-1-bootstrap-the-first-node","level":2,"title":"Step 1 — Bootstrap the First Node","text":"
    ctx hub start --daemon \\\n  --port 9900 \\\n  --peers hub-b.lan:9900,hub-c.lan:9900\n

    The node starts a Raft election as soon as it sees its peers.

    ","path":["Recipes","Hub","HA Cluster"],"tags":[]},{"location":"recipes/hub-cluster/#step-2-start-the-other-nodes","level":2,"title":"Step 2 — Start the Other Nodes","text":"

    On hub-b.lan:

    ctx hub start --daemon \\\n  --port 9900 \\\n  --peers hub-a.lan:9900,hub-c.lan:9900\n

    On hub-c.lan:

    ctx hub start --daemon \\\n  --port 9900 \\\n  --peers hub-a.lan:9900,hub-b.lan:9900\n

    After a few seconds, one node wins the election and becomes the leader. The other two are followers.

    ","path":["Recipes","Hub","HA Cluster"],"tags":[]},{"location":"recipes/hub-cluster/#step-3-verify-cluster-state","level":2,"title":"Step 3 — Verify Cluster State","text":"

    From any node:

    ctx hub status\n

    Expected output:

    role:       leader\npeers:      hub-a.lan:9900 (leader)\n            hub-b.lan:9900 (follower, in-sync)\n            hub-c.lan:9900 (follower, in-sync)\nentries:    1248\nuptime:     3h42m\n
    ","path":["Recipes","Hub","HA Cluster"],"tags":[]},{"location":"recipes/hub-cluster/#step-4-register-clients-with-failover-peers","level":2,"title":"Step 4 — Register Clients with Failover Peers","text":"

    When registering a client, give it the full peer list:

    ctx connection register hub-a.lan:9900 \\\n  --token ctx_adm_... \\\n  --peers hub-b.lan:9900,hub-c.lan:9900\n

    If the leader becomes unreachable, the client reconnects to the next peer. Followers redirect to the current leader, so writes always land on the right node.

    ","path":["Recipes","Hub","HA Cluster"],"tags":[]},{"location":"recipes/hub-cluster/#runtime-membership-changes","level":2,"title":"Runtime Membership Changes","text":"

    Add a new peer without downtime:

    ctx hub peer add hub-d.lan:9900\n

    Remove a decommissioned peer:

    ctx hub peer remove hub-c.lan:9900\n
    ","path":["Recipes","Hub","HA Cluster"],"tags":[]},{"location":"recipes/hub-cluster/#planned-maintenance","level":2,"title":"Planned Maintenance","text":"

    Before taking a leader offline, hand off leadership:

    ssh hub-a.lan 'ctx hub stepdown'\n

    stepdown triggers a new election among the remaining followers before the leader goes offline. In-flight clients briefly pause, then reconnect to the new leader.

    ","path":["Recipes","Hub","HA Cluster"],"tags":[]},{"location":"recipes/hub-cluster/#failure-modes-at-a-glance","level":2,"title":"Failure Modes at a Glance","text":"Event What happens Leader crashes New election; clients reconnect to new leader Follower crashes No write impact; catches up on restart Network partition (majority) Majority side keeps serving; minority read-only Network partition (split) No quorum; all nodes read-only Disk full on leader Writes rejected; read traffic continues

    For the full list, see Hub failure modes.

    ","path":["Recipes","Hub","HA Cluster"],"tags":[]},{"location":"recipes/hub-cluster/#see-also","level":2,"title":"See Also","text":"
    • Multi-machine recipe — single-node deployment
    • Hub operations — backup and maintenance
    • Hub security model — TLS, tokens
    ","path":["Recipes","Hub","HA Cluster"],"tags":[]},{"location":"recipes/hub-getting-started/","level":1,"title":"Getting Started","text":"","path":["Recipes","Hub","Getting Started"],"tags":[]},{"location":"recipes/hub-getting-started/#ctx-hub-getting-started","level":1,"title":"ctx Hub: Getting Started","text":"

    Stand up a single-node ctx Hub on localhost, register two projects, publish a decision from one, and see it appear in the other — all in under five minutes.

    Read This First

    If you haven't already, skim the ctx Hub overview. It explains the mental model, names the two user stories (personal vs small team), and — importantly — lists what the hub does not do. This recipe assumes you already know you want the feature.

    ","path":["Recipes","Hub","Getting Started"],"tags":[]},{"location":"recipes/hub-getting-started/#what-youll-get-out-of-this-recipe","level":2,"title":"What You'll Get out of This Recipe","text":"

    By the end, you will have:

    1. A local hub process running on port 9900.
    2. Two project directories both registered with the ctx Hub.
    3. A decision published from project alpha that appears automatically in project beta's .context/hub/ and in ctx agent --include-hub output.

    Concretely, the payoff this unlocks: a lesson you record in one project becomes visible to your agent the next time you open another project — without touching local files in the second project or opening another editor window.

    ","path":["Recipes","Hub","Getting Started"],"tags":[]},{"location":"recipes/hub-getting-started/#what-this-recipe-does-not-cover","level":2,"title":"What This Recipe Does Not Cover","text":"
    • Sharing .context/journal/, .context/pad, or any other local state. The hub only fans out decision, learning, convention, and task entries. Everything else stays local.
    • Multi-user attribution. The hub identifies projects, not people.
    • Running over a LAN — see Multi-machine setup.
    • Redundancy — see HA cluster.
    ","path":["Recipes","Hub","Getting Started"],"tags":[]},{"location":"recipes/hub-getting-started/#prerequisites","level":2,"title":"Prerequisites","text":"
    • ctx installed and on PATH
    • Two project directories, each already initialized with ctx init
    ","path":["Recipes","Hub","Getting Started"],"tags":[]},{"location":"recipes/hub-getting-started/#step-1-start-the-hub","level":2,"title":"Step 1 — Start the Hub","text":"

    In a dedicated terminal:

    ctx hub start\n

    On first run, the hub generates an admin token and prints it to stdout. Copy it — you'll need it for each project registration:

    ctx hub listening on :9900\nadmin token: ctx_adm_7f3a1c2d...\ndata dir: ~/.ctx/hub-data/\n

    The admin token is written to ~/.ctx/hub-data/admin.token so you can recover it later. Treat it like a password.

    ","path":["Recipes","Hub","Getting Started"],"tags":[]},{"location":"recipes/hub-getting-started/#step-2-register-the-first-project","level":2,"title":"Step 2 — Register the First Project","text":"
    cd ~/projects/alpha\nctx connection register localhost:9900 --token ctx_adm_7f3a1c2d...\n

    This stores an encrypted connection config in .context/.connect.enc. The admin token is exchanged for a per-project client token; the admin token itself is never persisted in the project.

    ","path":["Recipes","Hub","Getting Started"],"tags":[]},{"location":"recipes/hub-getting-started/#step-3-choose-what-to-receive","level":2,"title":"Step 3 — Choose What to Receive","text":"
    ctx connection subscribe decision learning convention\n

    Only the entry types you subscribe to will be delivered by sync and listen.

    ","path":["Recipes","Hub","Getting Started"],"tags":[]},{"location":"recipes/hub-getting-started/#step-4-publish-a-decision","level":2,"title":"Step 4 — Publish a Decision","text":"

    Either use ctx add --share to write locally and push to the ctx Hub:

    ctx add decision \"Use UTC timestamps everywhere\" --share \\\n  --context \"We had timezone drift between the API and journal\" \\\n  --rationale \"Single source of truth avoids conversion bugs\" \\\n  --consequence \"The UI does conversion at render time\"\n

    Or publish an existing entry directly:

    ctx connection publish decision \"Use UTC timestamps everywhere\"\n
    ","path":["Recipes","Hub","Getting Started"],"tags":[]},{"location":"recipes/hub-getting-started/#step-5-register-a-second-project-and-sync","level":2,"title":"Step 5 — Register a Second Project and Sync","text":"
    cd ~/projects/beta\nctx connection register localhost:9900 --token ctx_adm_7f3a1c2d...\nctx connection subscribe decision learning convention\nctx connection sync\n

    The decision from alpha now appears in ~/projects/beta/.context/hub/decisions.md with an origin tag and timestamp.

    ","path":["Recipes","Hub","Getting Started"],"tags":[]},{"location":"recipes/hub-getting-started/#step-6-watch-entries-arrive-live","level":2,"title":"Step 6 — Watch Entries Arrive Live","text":"

    Instead of re-running sync, stream new entries as they land:

    ctx connection listen\n

    Leave this running in a terminal; every --share publish from any registered project will appear in .context/hub/ immediately.

    ","path":["Recipes","Hub","Getting Started"],"tags":[]},{"location":"recipes/hub-getting-started/#step-7-feed-shared-knowledge-into-the-agent","level":2,"title":"Step 7 — Feed Shared Knowledge into the Agent","text":"

    Once entries exist in .context/hub/, include them in the agent context packet:

    ctx agent --include-hub\n

    Shared entries are added as a dedicated tier in the budget-aware assembly, scored by recency and type relevance.

    ","path":["Recipes","Hub","Getting Started"],"tags":[]},{"location":"recipes/hub-getting-started/#auto-sync-on-session-start","level":2,"title":"Auto-Sync on Session Start","text":"

    After register, the check-hub-sync hook pulls new entries at the start of each session (daily throttled). Most users never need to call ctx connection sync manually.

    ","path":["Recipes","Hub","Getting Started"],"tags":[]},{"location":"recipes/hub-getting-started/#where-to-go-next","level":2,"title":"Where to Go Next","text":"
    • Multi-machine hub: run the hub on a LAN host and connect from other workstations.
    • HA cluster: Raft-based leader election for high availability.
    • Hub operations: daemon mode, backup, log rotation, JSONL store layout.
    • Hub security model: token lifecycle, encryption at rest, threat model.
    • ctx connect reference and ctx hub start reference.
    ","path":["Recipes","Hub","Getting Started"],"tags":[]},{"location":"recipes/hub-multi-machine/","level":1,"title":"Multi-Machine","text":"","path":["Recipes","Hub","Multi-Machine"],"tags":[]},{"location":"recipes/hub-multi-machine/#ctx-hub-multi-machine","level":1,"title":"ctx Hub: Multi-Machine","text":"

    Run the hub on a LAN host and connect from project directories on other workstations. This recipe is the Story 2 (\"small trusted team\") shape described in the ctx Hub overview — read that first if you haven't, especially the trust-model warnings.

    This recipe assumes you've already walked through Getting Started and understand what flows through the hub (decisions, learnings, conventions, tasks — not journals, scratchpad, or raw context files).

    ","path":["Recipes","Hub","Multi-Machine"],"tags":[]},{"location":"recipes/hub-multi-machine/#topology","level":2,"title":"Topology","text":"
    +------------------+        +------------------+\n| workstation A    |        | workstation B    |\n|  ~/projects/x    |        |  ~/projects/y    |\n|  ctx connection  |        |  ctx connection  |\n+---------+--------+        +---------+--------+\n          |                           |\n          +-----------+   +-----------+\n                      v   v\n              +-------------------+\n              | LAN host \"nexus\"  |\n              | ctx hub start     |\n              | --daemon          |\n              | :9900             |\n              +-------------------+\n
    ","path":["Recipes","Hub","Multi-Machine"],"tags":[]},{"location":"recipes/hub-multi-machine/#step-1-start-the-daemon-on-the-lan-host","level":2,"title":"Step 1 — Start the Daemon on the LAN Host","text":"

    On the machine that will hold the hub (call it nexus):

    ctx hub start --daemon --port 9900\n

    The daemon writes a PID file to ~/.ctx/hub-data/hub.pid. Stop it later with:

    ctx hub stop\n
    ","path":["Recipes","Hub","Multi-Machine"],"tags":[]},{"location":"recipes/hub-multi-machine/#step-2-firewall-and-port","level":2,"title":"Step 2 — Firewall and Port","text":"

    Open port 9900/tcp on nexus to the LAN only — never expose the hub to the public internet without a reverse proxy and TLS in front of it (see Hub security model).

    Typical LAN allowlist rules:

    firewalldufwnftables
    sudo firewall-cmd --zone=internal \\\n  --add-port=9900/tcp --permanent\nsudo firewall-cmd --reload\n
    sudo ufw allow from 192.168.1.0/24 to any port 9900 proto tcp\n
    sudo nft add rule inet filter input ip saddr 192.168.1.0/24 \\\n  tcp dport 9900 accept\n
    ","path":["Recipes","Hub","Multi-Machine"],"tags":[]},{"location":"recipes/hub-multi-machine/#step-3-retrieve-the-admin-token","level":2,"title":"Step 3 — Retrieve the Admin Token","text":"

    The daemon prints the admin token to stdout on first run. When running as a daemon, that output goes to the log instead:

    cat ~/.ctx/hub-data/admin.token\n

    Copy the token over a trusted channel (SSH, password manager, or an encrypted note). Do not email it or put it in chat.

    ","path":["Recipes","Hub","Multi-Machine"],"tags":[]},{"location":"recipes/hub-multi-machine/#step-4-register-projects-from-each-workstation","level":2,"title":"Step 4 — Register Projects from Each Workstation","text":"

    On workstation A:

    cd ~/projects/x\nctx connection register nexus.local:9900 --token ctx_adm_...\nctx connection subscribe decision learning convention\n

    On workstation B:

    cd ~/projects/y\nctx connection register nexus.local:9900 --token ctx_adm_...\nctx connection subscribe decision learning convention\n

    Each registration exchanges the admin token for a per-project client token. Only the client token is persisted in .context/.connect.enc, encrypted with the same AES-256-GCM scheme ctx uses for notification credentials.

    ","path":["Recipes","Hub","Multi-Machine"],"tags":[]},{"location":"recipes/hub-multi-machine/#step-5-verify","level":2,"title":"Step 5 — Verify","text":"

    From either workstation:

    ctx connection status\n

    You should see the ctx Hub address, role (leader for single-node), subscription filters, and the sequence number you're synced to.

    ","path":["Recipes","Hub","Multi-Machine"],"tags":[]},{"location":"recipes/hub-multi-machine/#tls-recommended","level":2,"title":"TLS (Recommended)","text":"

    For anything beyond a trusted home LAN, terminate TLS in front of the hub. The hub speaks gRPC, so the reverse proxy must speak HTTP/2:

    server {\n    listen 443 ssl http2;\n    server_name nexus.example.com;\n\n    ssl_certificate     /etc/letsencrypt/live/nexus.example.com/fullchain.pem;\n    ssl_certificate_key /etc/letsencrypt/live/nexus.example.com/privkey.pem;\n\n    location / {\n        grpc_pass grpc://127.0.0.1:9900;\n    }\n}\n

    Point ctx connection register at the public hostname and port 443.

    ","path":["Recipes","Hub","Multi-Machine"],"tags":[]},{"location":"recipes/hub-multi-machine/#handling-daemon-restarts","level":2,"title":"Handling Daemon Restarts","text":"

    The hub is append-only JSONL — restarts are safe. Clients keep their last-seen sequence in .context/hub/.sync-state.json and pick up exactly where they left off on the next sync or listen reconnect.

    ","path":["Recipes","Hub","Multi-Machine"],"tags":[]},{"location":"recipes/hub-multi-machine/#see-also","level":2,"title":"See Also","text":"
    • HA cluster recipe — for redundancy
    • Hub operations — backup, rotation
    • Hub failure modes
    • Hub security model
    ","path":["Recipes","Hub","Multi-Machine"],"tags":[]},{"location":"recipes/hub-overview/","level":1,"title":"Overview","text":"","path":["Recipes","Hub","Overview"],"tags":[]},{"location":"recipes/hub-overview/#ctx-hub-overview","level":1,"title":"ctx Hub: Overview","text":"

    Start here before the other hub recipes. This page answers what the hub is, who it's for, why you'd run one, and — equally important — what it is not.

    ","path":["Recipes","Hub","Overview"],"tags":[]},{"location":"recipes/hub-overview/#mental-model-in-one-paragraph","level":2,"title":"Mental Model in One Paragraph","text":"

    The hub is a fan-out channel for structured knowledge entries across projects. When you publish a decision, learning, convention, or task with --share, the hub stores it in an append-only log and delivers it to every other project subscribed to that type. The next time your agent loads context in any of those projects, shared entries can be included in the context packet alongside local ones.

    That's the whole feature. It is a project-to-project knowledge bus for a small, curated set of entry types. It is not a shared memory, a shared journal, or a multi-user database.

    ","path":["Recipes","Hub","Overview"],"tags":[]},{"location":"recipes/hub-overview/#what-flows-through-the-hub","level":2,"title":"What Flows through the Hub","text":"

    Only four entry types:

    Type What it is decision Architectural decisions with rationale learning Gotchas, lessons, surprising behaviors convention Coding patterns and standards task Work items worth sharing across projects

    Each entry is an immutable record with a content blob, the publishing project's name as Origin, a timestamp, and a hub-assigned sequence number. Once published, entries are never rewritten.

    ","path":["Recipes","Hub","Overview"],"tags":[]},{"location":"recipes/hub-overview/#what-does-not-flow-through-the-hub","level":2,"title":"What Does Not Flow through the Hub","text":"

    This is the part new users get wrong most often:

    • Session journals (~/.claude/ logs, .context/journal/) stay local. The hub does not sync your AI session history.
    • Scratchpad (.context/pad) stays local. Encrypted notes never leave the machine they were written on.
    • Local context files as a whole — TASKS.md, DECISIONS.md, LEARNINGS.md, CONVENTIONS.md — are not mirrored wholesale. Only entries you explicitly --share, or publish later with ctx connection publish, cross the boundary.
    • Anything under .context/ that isn't one of the four entry types above. Configuration, state, logs, memory, journal metadata — all local.

    If you were expecting \"now my agent in project B can see everything my agent did in project A,\" that's not this feature. Local session history still lives on the local machine.

    ","path":["Recipes","Hub","Overview"],"tags":[]},{"location":"recipes/hub-overview/#two-user-stories","level":2,"title":"Two User Stories","text":"

    The hub makes sense in two different shapes. Pick the one that matches your situation — the mechanics are identical but the trust model and threat surface are very different.

    ","path":["Recipes","Hub","Overview"],"tags":[]},{"location":"recipes/hub-overview/#story-1-personal-cross-project-brain","level":3,"title":"Story 1: Personal Cross-Project Brain","text":"

    One developer, many projects, one hub — usually on localhost.

    You're working across several projects on the same machine (or a handful of machines you own). You want a lesson learned debugging project A to show up when you open project B a week later, without re-discovering it. You want a convention you codified in one project to be visible as-you-type in another.

    Concrete payoff:

    • ctx add learning --share \"...\" in project A → ctx agent --include-hub in project B shows that learning in the next context packet.
    • A decision recorded in your personal \"dotfiles\" project is instantly visible to every other project on your workstation.
    • Cross-project conventions (e.g., \"use UTC timestamps everywhere\") live in one place and propagate.

    Trust model: high — you trust every participant because every participant is you. Run the hub on localhost or on your own LAN, use the default single-node setup, don't worry about TLS.

    Start here: Getting Started for the one-time setup, then Personal cross-project brain for the day-to-day workflow.

    ","path":["Recipes","Hub","Overview"],"tags":[]},{"location":"recipes/hub-overview/#story-2-small-trusted-team","level":3,"title":"Story 2: Small Trusted Team","text":"

    A few teammates, projects they each own, one hub on a LAN host they all trust.

    Your team has a handful of services and you want a shared \"things we've learned the hard way\" stream. Someone on the platform team records a convention about timestamp handling; everyone else's agents see it the next session. An on-call engineer records a learning from a 3 AM incident; the rest of the team inherits the lesson without needing to read the postmortem.

    Concrete payoff:

    • Team conventions propagate without needing a wiki or chat.
    • Lessons from one team member become available to everyone else's agent context packets automatically.
    • Cross-project decisions (shared libraries, deployment patterns, naming rules) live in a single log the whole team reads.

    Trust model: the hub assumes everyone holding a client token is friendly. There is no per-user attribution you can rely on, no read ACL beyond the subscription filter, and Origin is self-asserted by the publishing client. Treat the hub like a team wiki: useful because everyone can write to it, not because it can prove who wrote what.

    Operational shape: run the hub on a LAN host (or a three-node HA cluster for redundancy), put TLS in front of it for anything beyond a home LAN, distribute client tokens over a trusted channel.

    Start here: Multi-machine setup for the deployment, Team knowledge bus for the day-to-day team workflow, then HA cluster if you need redundancy.

    ","path":["Recipes","Hub","Overview"],"tags":[]},{"location":"recipes/hub-overview/#identity-projects-not-users","level":2,"title":"Identity: Projects, Not Users","text":"

    The hub has no concept of users. Its unit of identity is the project. ctx connection register binds a hub token to a project directory, not to a person. Two developers working on the same project share either:

    • The same .connect.enc, copied between machines over a trusted channel, or
    • Different project names (alpha@laptop-a, alpha@laptop-b), because the hub rejects duplicate registrations of the same project name.

    Either works; neither gives you per-human attribution. If you need \"who wrote this,\" the hub is the wrong tool.

    ","path":["Recipes","Hub","Overview"],"tags":[]},{"location":"recipes/hub-overview/#when-not-to-use-it","level":2,"title":"When Not to Use It","text":"
    • Solo, single-project work. Local .context/ files are enough. The hub adds operational surface for no payoff.
    • Untrusted participants. The hub assumes everyone with a client token is friendly. It is not hardened against hostile insiders or compromised tokens.
    • Compliance-sensitive environments. There is no audit trail that can prove who published what, only which project published what, and Origin is self-asserted.
    • Secrets or PII. Entry content is stored plaintext on the hub and fanned out to every subscribed client. Don't publish anything you wouldn't paste in a team chat.
    • Wholesale journal sharing. See \"what does not flow\" above. If that's what you want, this feature won't provide it — talk to us in the issue tracker about what would.
    ","path":["Recipes","Hub","Overview"],"tags":[]},{"location":"recipes/hub-overview/#how-entries-reach-your-agent","level":2,"title":"How Entries Reach Your Agent","text":"

    Once a project is registered and subscribed, entries arrive by three mechanisms:

    1. ctx connection sync — an on-demand pull, replays everything new since the last sequence you saw.
    2. ctx connection listen — a long-lived gRPC stream that writes new entries to .context/hub/ as they arrive.
    3. check-hub-sync hook — runs at session start, throttled to once per day, so most users never call sync manually.

    Once entries exist in .context/hub/, ctx agent --include-hub adds a dedicated tier to the budget-aware context packet, scored by recency and type relevance. That's the end of the pipeline.

    ","path":["Recipes","Hub","Overview"],"tags":[]},{"location":"recipes/hub-overview/#where-to-go-next","level":2,"title":"Where to Go Next","text":"If you're… Read Trying it for yourself on one machine Getting Started A solo developer using the hub day-to-day Personal cross-project brain Setting up for a small team on a LAN Multi-machine setup A small team using the hub day-to-day Team knowledge bus Running redundant nodes HA cluster Operating a hub in production Operations Assessing the security posture Security model Debugging a hub in trouble Failure modes Just reading the commands ctx connect, ctx serve, ctx hub","path":["Recipes","Hub","Overview"],"tags":[]},{"location":"recipes/hub-personal/","level":1,"title":"Personal Cross-Project Brain","text":"","path":["Recipes","Hub","Personal Cross-Project Brain"],"tags":[]},{"location":"recipes/hub-personal/#personal-cross-project-brain","level":1,"title":"Personal Cross-Project Brain","text":"

    This recipe shows how one developer uses a ctx Hub across their own projects day-to-day — the \"Story 1\" shape from the Hub overview. You're not setting up infrastructure for a team; you're making a lesson you learned last Tuesday in project A automatically surface when you open project B next Thursday.

    Prerequisites: a working ctx Hub on localhost (see Getting Started for the ~5-minute setup). This recipe assumes the hub is already running and you've registered at least two projects.

    ","path":["Recipes","Hub","Personal Cross-Project Brain"],"tags":[]},{"location":"recipes/hub-personal/#the-core-loop","level":2,"title":"The Core Loop","text":"

    Every day, the same three verbs matter:

    1. Record — notice a decision, learning, or convention and capture it with ctx add --share.
    2. Subscribe — every project you care about is subscribed to the types you want delivered (set once with ctx connection subscribe).
    3. Load — your agent picks up shared entries on next session start via the auto-sync hook, or explicitly via ctx agent --include-hub.

    That's the whole workflow. The rest of this recipe fills in the concrete moments where each verb matters.

    ","path":["Recipes","Hub","Personal Cross-Project Brain"],"tags":[]},{"location":"recipes/hub-personal/#a-realistic-day","level":2,"title":"A Realistic Day","text":"

    You have three projects on your workstation:

    • ~/projects/api — a Go service you're actively developing
    • ~/projects/cli — a companion CLI that consumes the API
    • ~/projects/dotfiles — your personal conventions and cross-project learnings

    All three are registered with a single hub running on localhost:9900 (started once at boot, or via a systemd user unit — see Hub operations). All three subscribe to decision, learning, and convention.

    ","path":["Recipes","Hub","Personal Cross-Project Brain"],"tags":[]},{"location":"recipes/hub-personal/#0900-start-work-on-api","level":3,"title":"09:00 — Start Work on api","text":"

    You cd ~/projects/api and start a Claude Code session. Behind the scenes, the plugin's PreToolUse hook calls ctx agent --budget 8000 --include-hub before the first tool call. The agent loads:

    • Local .context/ (TASKS, DECISIONS, LEARNINGS, etc.)
    • Foundation steering files (always-inclusion)
    • Everything you've shared from the other two projects

    So the \"use UTC timestamps everywhere\" decision you recorded in dotfiles last week is already in Claude's context for this session, without any manual sync.

    ","path":["Recipes","Hub","Personal Cross-Project Brain"],"tags":[]},{"location":"recipes/hub-personal/#1030-you-discover-a-gotcha","level":3,"title":"10:30 — You Discover a Gotcha","text":"

    While debugging, you find that the API's retry loop silently drops the last error when the transport times out. This is the kind of thing you'd normally add to LEARNINGS.md in api/. But it's useful across every Go service you'll ever write, not just this one. So:

    ctx add learning --share \\\n  --context \"Go http.Client retries mask the final error\" \\\n  --lesson  \"Transport timeouts don't surface as errors when the retry loop re-assigns err without wrapping. Check for context.DeadlineExceeded on the request context instead.\" \\\n  --application \"Any retry loop over http.Client.Do that uses a per-attempt timeout\"\n

    The --share flag does two things:

    1. Writes the learning to api/.context/LEARNINGS.md locally (as a normal ctx add learning would).
    2. Publishes the same entry to the ctx Hub, which stores it in the append-only JSONL and fans it out to every subscribed client.

    Within seconds, cli/.context/hub/learnings.md and dotfiles/.context/hub/learnings.md both contain a copy of this learning (the ctx connection listen daemon picks it up from the ctx Hub's Listen stream).

    ","path":["Recipes","Hub","Personal Cross-Project Brain"],"tags":[]},{"location":"recipes/hub-personal/#1200-you-switch-to-cli","level":3,"title":"12:00 — You Switch to cli","text":"

    cd ~/projects/cli, open a new session. The agent packet for cli now includes the learning you just recorded in api, because cli is subscribed to learning and the entry has already been synced into cli/.context/hub/learnings.md.

    You don't have to re-explain the retry-loop gotcha. Claude already sees it.

    ","path":["Recipes","Hub","Personal Cross-Project Brain"],"tags":[]},{"location":"recipes/hub-personal/#1400-you-codify-a-convention","level":3,"title":"14:00 — You Codify a Convention","text":"

    You've been writing error messages in api and decided you want a consistent pattern: lowercase start, no trailing period, single-sentence. This is a convention, not a decision — it applies to every Go project you touch. Record it in dotfiles (since that's your \"personal standards\" project), and share it:

    cd ~/projects/dotfiles\nctx add convention --share \\\n  \"Error messages: lowercase start, no trailing period, single sentence (follows Go's stdlib style)\"\n

    The convention lands in dotfiles/.context/CONVENTIONS.md locally and fans out to api and cli via the hub. The next Claude Code session in either project gets the convention injected into the steering-adjacent slot of the agent packet.

    ","path":["Recipes","Hub","Personal Cross-Project Brain"],"tags":[]},{"location":"recipes/hub-personal/#1630-end-of-day","level":3,"title":"16:30 — End of Day","text":"

    You didn't run ctx connection sync once. You didn't git push anything between projects. You didn't remember to tell your agent about the retry-loop gotcha in the new project. The hub did all of it for you.

    ","path":["Recipes","Hub","Personal Cross-Project Brain"],"tags":[]},{"location":"recipes/hub-personal/#what-the-workflow-actually-looks-like","level":2,"title":"What the Workflow Actually Looks Like","text":"

    Stripped of prose, the day's commands were:

    # Morning: nothing. Agent loads --include-hub automatically.\n\n# Mid-morning: record a learning that should cross projects\nctx add learning --share \\\n  --context \"...\" --lesson \"...\" --application \"...\"\n\n# Afternoon: codify a convention in the \"standards\" project\nctx add convention --share \"...\"\n\n# Evening: nothing. Everything's already propagated.\n

    The hub is passive infrastructure. You never talk to it directly — you talk through it by using --share on commands you were already running.

    ","path":["Recipes","Hub","Personal Cross-Project Brain"],"tags":[]},{"location":"recipes/hub-personal/#tips-for-solo-use","level":2,"title":"Tips for Solo Use","text":"

    Pick a \"standards\" project. One of your projects should play the role of \"canonical source for rules you want everywhere.\" Your dotfiles, a personal scratch repo, or a dedicated ctx-standards project all work. Record cross-cutting conventions there and let the hub propagate them to everything else.

    Subscribe to task only if you want cross-project todos. The four subscribable types are decision, learning, convention, task. Tasks are usually project-local; subscribing makes every hub-shared task from every project show up in every other project's agent packet. That's probably not what you want. Skip task in ctx connection subscribe unless you have a specific reason.

    Run the hub as a user-level daemon so you don't have to remember to start it. On Linux with systemd:

    # ~/.config/systemd/user/ctx-hub.service\n[Unit]\nDescription=ctx Hub (personal)\n\n[Service]\nType=simple\nExecStart=/usr/local/bin/ctx hub start\nRestart=on-failure\n\n[Install]\nWantedBy=default.target\n
    systemctl --user enable --now ctx-hub.service\n

    Don't overthink subscription filters. For personal use, subscribe every project to all four types at first (or three, if you skip task). Tune later if the context packets get noisy.

    Local storage is fine; no TLS needed. The hub runs on localhost. No one else is on the network. Skip the TLS setup from the Multi-machine recipe — it's relevant when the hub is on a LAN host serving multiple workstations, not when it's a personal daemon.

    ","path":["Recipes","Hub","Personal Cross-Project Brain"],"tags":[]},{"location":"recipes/hub-personal/#what-this-recipe-is-not","level":2,"title":"What This Recipe Is Not","text":"

    Not a setup guide. For the one-time hub install and project registration, use Getting Started.

    Not a team guide. If you're sharing across humans, not just across your own projects, read Team knowledge bus instead — the trust model and operational concerns are different.

    Not production operations. For backup, log rotation, failure recovery, and HA, see Hub operations and Hub failure modes.

    ","path":["Recipes","Hub","Personal Cross-Project Brain"],"tags":[]},{"location":"recipes/hub-personal/#see-also","level":2,"title":"See Also","text":"
    • Hub overview — when to use the Hub and when not to.
    • Team knowledge bus — the multi-human companion recipe.
    • ctx connect — the client-side commands used above (subscribe, publish, sync, listen, status).
    • ctx add — the --share flag reference.
    • ctx hub — operator commands for starting, stopping, and inspecting the hub.
    ","path":["Recipes","Hub","Personal Cross-Project Brain"],"tags":[]},{"location":"recipes/hub-team/","level":1,"title":"Team Knowledge Bus","text":"","path":["Recipes","Hub","Team Knowledge Bus"],"tags":[]},{"location":"recipes/hub-team/#team-knowledge-bus","level":1,"title":"Team Knowledge Bus","text":"

    This recipe shows how a small trusted team uses a ctx Hub as a shared knowledge bus — the \"Story 2\" shape from the Hub overview. You're not building a wiki, you're not replacing your issue tracker, and you're not running a multi-tenant service. You're connecting 3-10 developers who trust each other so that lessons, decisions, and conventions flow between them without ceremony.

    Prerequisites:

    • A running ctx Hub on a LAN host or internal server everyone on the team can reach. See Multi-machine setup for the deployment guide.
    • Each team member has ctx installed and has ctx connection register-ed their working projects with the hub.
    ","path":["Recipes","Hub","Team Knowledge Bus"],"tags":[]},{"location":"recipes/hub-team/#trust-model-read-this-first","level":2,"title":"Trust Model — Read This First","text":"

    The hub assumes everyone holding a client token is friendly. There's no per-user attribution you can rely on, no read ACL beyond subscription filters, and Origin is self-asserted by the publishing client. Treat the hub like a team wiki: useful because everyone can write to it, not because it can prove who wrote what.

    If your team is:

    • ✅ 3-10 engineers, all known to each other, all trusted with production access
    • ✅ On a single internal network or behind a VPN
    • ✅ Comfortable with \"the hub assumes friendly participants\"

    …this recipe fits. If your team is:

    • ❌ Larger than ~15, with turnover
    • ❌ Includes contractors, untrusted agents, or compromised-workstation concerns
    • ❌ Needs audit trails that prove who published what
    • ❌ Requires per-team-member isolation

    …you're in \"Story 3\" territory, which the hub does not support today. Use a wiki or a dedicated knowledge platform instead.

    ","path":["Recipes","Hub","Team Knowledge Bus"],"tags":[]},{"location":"recipes/hub-team/#the-teams-three-verbs","level":2,"title":"The Team's Three Verbs","text":"

    Everyone on the team does three things, same as in the personal recipe, but with different social expectations:

    1. Record — when you learn something that would save a teammate time, capture it with ctx add --share.
    2. Subscribe — every engineer's project directories subscribe to the types the team cares about.
    3. Load — agents pick up shared entries automatically via the auto-sync hook and the --include-hub flag in the PreToolUse hook pipeline.

    The operational shape is identical to solo use. What's different is the culture around publishing: when do you --share, and what belongs on the hub vs. in your local .context/.

    ","path":["Recipes","Hub","Team Knowledge Bus"],"tags":[]},{"location":"recipes/hub-team/#what-goes-on-the-hub-team-rules-of-thumb","level":2,"title":"What Goes on the Hub (Team Rules of Thumb)","text":"

    Share it if it's true for more than one person. The central question: \"would the next teammate who hits this problem save time if they already knew this?\" If yes, --share. If no, record it locally and move on.

    Decisions:

    • ✅ Cross-service decisions (database choice, auth model, deployment pattern, monitoring stack).
    • ✅ Policy decisions that apply to all services (naming, API versioning, error-message format).
    • ❌ Internal implementation decisions inside a single service (\"chose a map over a slice here because lookups dominate\").
    • ❌ One-off tactical calls for a specific PR.

    Learnings:

    • ✅ Gotchas, surprising behavior, flaky infrastructure quirks — anything you'd tell a teammate over coffee with \"watch out for X\".
    • ✅ Lessons from incidents — right after the postmortem is the highest-value time to share.
    • ❌ Internal debugging notes that only make sense with context from your current branch.

    Conventions:

    • ✅ Repo layout, commit message format, pre-commit hooks, review expectations.
    • ✅ Language-level style decisions that apply across services.
    • ❌ Per-service idioms (\"in billing/ we prefer…\").

    Tasks: almost always project-local. Don't subscribe to task unless the team has a specific reason (e.g., a cross-cutting migration you want visible everywhere).

    ","path":["Recipes","Hub","Team Knowledge Bus"],"tags":[]},{"location":"recipes/hub-team/#a-realistic-week","level":2,"title":"A Realistic Week","text":"

    Monday — 3 AM incident, shared learning

    On-call engineer Alice gets paged: the payment service starts returning 500s after a dependency update. After an hour she finds the culprit — a breaking change in a transitive gRPC dep that only manifests under high concurrency. Postmortem on Tuesday, but right now she records the learning:

    ctx add learning --share \\\n  --context \"Payment service 3 AM incident, 2026-04-03\" \\\n  --lesson  \"grpc-go v1.62+ changes DialContext behavior under high \\\n  concurrency: connections from a single channel can deadlock if the \\\n  server emits GOAWAY mid-stream. Symptom: 500 errors cluster in \\\n  30s bursts, no error in grpc client logs.\" \\\n  --application \"Any service on grpc-go. Pin to v1.61 or patch with \\\n  keepalive: https://github.com/grpc/grpc-go/issues/...\" \n

    By Tuesday morning, every other engineer's agent context packet contains this learning. When Bob starts work on the ledger service (which also uses grpc-go), his Claude Code session already knows about the gotcha without Bob having to read the incident channel.

    Wednesday — cross-service decision

    The team agrees on a new pattern for API versioning — header-based instead of URL-based. Platform lead Carol records the decision:

    ctx add decision --share \\\n  --context \"Need consistent API versioning across all 6 services. \\\n  Current URL-based /v1/ isn't working for gradual rollouts.\" \\\n  --rationale \"Header-based versioning lets us route by header at the \\\n  edge, which makes canary rollouts trivial. URL-based versioning \\\n  forces clients to update their paths.\" \\\n  --consequence \"All new endpoints use X-API-Version header. \\\n  Existing /v1/ endpoints stay. Deprecation schedule in q3.\" \\\n  \"Use header-based API versioning for new endpoints\"\n

    Every engineer's next session knows about this decision automatically. When Dave starts adding endpoints to the inventory service on Thursday, Claude already prompts him for the header pattern instead of defaulting to /v1/.

    Friday — convention drift caught at review

    Dave notices that his PR auto-formatted some error messages to end with periods. He recalls the team convention is \"no trailing period\" but can't remember where it was documented. He runs ctx connection status, sees the hub is healthy, greps his local .context/hub/conventions.md, and finds:

    ## [2026-03-12] Error message format\nLowercase start, no trailing period, single sentence.\n

    He fixes the PR. No lookup on the wiki, no question in chat, no context-switch penalty.

    ","path":["Recipes","Hub","Team Knowledge Bus"],"tags":[]},{"location":"recipes/hub-team/#workflow-tips-for-teams","level":2,"title":"Workflow Tips for Teams","text":"

    Designate a \"champion\" for decisions. The team lead or platform engineer should be the person who explicitly --shares cross-cutting decisions. Other team members share learnings freely but should ask \"should this be a decision?\" in review before --share-ing a decision. This keeps the decision stream signal-rich.

    Publish postmortem learnings immediately, not after the meeting. The postmortem itself is a document; the actionable rules that come out of it belong on the hub, and they should land within an hour of the incident. \"Share fast, edit later\" is the rule.

    Delete noisy entries, don't tolerate them. The hub is append-only, but the .context/hub/ mirror on each client is just markdown. If a shared learning turns out to be wrong or obsolete, remove it from local mirrors and stop the hub daemon to truncate entries.jsonl (see Hub operations). Noisy shared feeds lose trust fast.

    Don't subscribe every project to every type. For backend engineers, subscribing to decision + learning + convention is usually right. For platform or DevOps projects, adding task makes sense. For a prototype or experiment project, subscribing only to convention might be enough.

    Run a single hub, not one per team. If two teams need to share knowledge, they should share a hub. Splitting hubs by team creates silos — which is often exactly the thing you were trying to solve.

    ","path":["Recipes","Hub","Team Knowledge Bus"],"tags":[]},{"location":"recipes/hub-team/#operational-concerns","level":2,"title":"Operational Concerns","text":"

    The team recipe assumes someone owns the hub host. That person (or a small group) is responsible for:

    • Uptime: the hub is infrastructure; treat it like any other internal service you run. See Hub operations.
    • Backups: entries.jsonl is the source of truth. Snapshot it to the same backup tier as your other internal data.
    • Upgrades: cadence the team agrees on. Major upgrades may require everyone to re-register, so do them at natural breaks.
    • Failures: see Hub failure modes for the standard oncall playbook.

    Optional but recommended: run a 3-node Raft cluster so the hub survives individual node failures. See HA cluster. For teams under 10 people, a single-node hub with daily backups is usually fine.

    ","path":["Recipes","Hub","Team Knowledge Bus"],"tags":[]},{"location":"recipes/hub-team/#token-management","level":2,"title":"Token Management","text":"

    Every team member has a client token stored in their .context/.connect.enc. Rules of thumb:

    • One token per engineer per project. Not one token per team; not one shared token. Each engineer registers each of their working projects separately.
    • Token compromise = revoke immediately. When an engineer leaves, their tokens should be removed from clients.json on the hub. This is a manual operation today; see Hub security for the revocation steps.
    • No checked-in tokens. .context/.connect.enc is encrypted with the local machine key, but don't push it to shared repos — it's per-workstation.
    ","path":["Recipes","Hub","Team Knowledge Bus"],"tags":[]},{"location":"recipes/hub-team/#what-this-recipe-is-not","level":2,"title":"What This Recipe Is Not","text":"

    Not a wiki replacement. The hub is for structured entries, not prose. Put your architecture overviews, onboarding docs, and design discussions in a real wiki.

    Not an audit log. Origin on the hub is self-asserted. If compliance requires provenance, the hub is the wrong tool.

    Not a ticket system. Task sharing works, but mature teams already have Jira/Linear/GitHub Issues. Don't try to replace those with hub tasks — use the hub for lightweight cross-project todos that your existing tracker doesn't capture well.

    Not a production service for end users. This is internal team infrastructure. Do not expose the hub to customers, partners, or the open internet.

    ","path":["Recipes","Hub","Team Knowledge Bus"],"tags":[]},{"location":"recipes/hub-team/#see-also","level":2,"title":"See Also","text":"
    • Hub overview — when to use the hub and when not to.
    • Personal cross-project brain — the single-developer companion recipe.
    • Multi-machine setup — standing up the hub on a LAN host.
    • HA cluster — optional redundancy for larger teams.
    • Hub operations — backup, rotation, monitoring.
    • Hub security — threat model and hardening checklist.
    ","path":["Recipes","Hub","Team Knowledge Bus"],"tags":[]},{"location":"recipes/import-plans/","level":1,"title":"Importing Claude Code Plans","text":"","path":["Recipes","Knowledge and Tasks","Importing Claude Code Plans"],"tags":[]},{"location":"recipes/import-plans/#the-problem","level":2,"title":"The Problem","text":"

    Claude Code plan files (~/.claude/plans/*.md) are ephemeral: they have structured context, approach, and file lists, but they're orphaned after the session ends. The filenames are UUIDs, so you can't tell what's in them without opening each one.

    How do you turn a useful plan into a permanent project spec?

    ","path":["Recipes","Knowledge and Tasks","Importing Claude Code Plans"],"tags":[]},{"location":"recipes/import-plans/#tldr","level":2,"title":"TL;DR","text":"
    You: /ctx-plan-import\nAgent: [lists plans with dates and titles]\n       1. 2026-02-28  Add authentication middleware\n       2. 2026-02-27  Refactor database connection pool\nYou: \"import 1\"\nAgent: [copies to specs/add-authentication-middleware.md]\n

    Plans are copied (not moved) to specs/, slugified by their H1 heading.

    ","path":["Recipes","Knowledge and Tasks","Importing Claude Code Plans"],"tags":[]},{"location":"recipes/import-plans/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Tool Type Purpose /ctx-plan-import Skill List, filter, and import plan files to specs /ctx-task-add Skill Optionally add a task referencing the spec","path":["Recipes","Knowledge and Tasks","Importing Claude Code Plans"],"tags":[]},{"location":"recipes/import-plans/#the-workflow","level":2,"title":"The Workflow","text":"","path":["Recipes","Knowledge and Tasks","Importing Claude Code Plans"],"tags":[]},{"location":"recipes/import-plans/#step-1-list-available-plans","level":3,"title":"Step 1: List Available Plans","text":"

    Invoke the skill and it lists plans with modification dates and titles:

    You: /ctx-plan-import\n\nAgent: Found 3 plan files:\n         1. 2026-02-28  Add authentication middleware\n         2. 2026-02-27  Refactor database connection pool\n         3. 2026-02-25  Import plans skill\n       Which plans would you like to import?\n
    ","path":["Recipes","Knowledge and Tasks","Importing Claude Code Plans"],"tags":[]},{"location":"recipes/import-plans/#step-2-filter-optional","level":3,"title":"Step 2: Filter (Optional)","text":"

    You can narrow the list with arguments:

    Argument Effect --today Only plans modified today --since YYYY-MM-DD Only plans modified on or after the date --all Import everything without prompting (none) Interactive selection
    You: /ctx-plan-import --today\nYou: /ctx-plan-import --since 2026-02-27\nYou: /ctx-plan-import --all\n
    ","path":["Recipes","Knowledge and Tasks","Importing Claude Code Plans"],"tags":[]},{"location":"recipes/import-plans/#step-3-select-and-import","level":3,"title":"Step 3: Select and Import","text":"

    Pick one or more plans by number:

    You: \"import 1 and 3\"\n\nAgent: Imported 2 plan(s):\n         ~/.claude/plans/abc123.md -> specs/add-authentication-middleware.md\n         ~/.claude/plans/ghi789.md -> specs/import-plans-skill.md\n       Want me to add tasks referencing these specs?\n

    The agent reads the H1 heading from each plan and slugifies it for the filename. If a plan has no H1 heading, the original filename (minus extension) is used as the slug.

    ","path":["Recipes","Knowledge and Tasks","Importing Claude Code Plans"],"tags":[]},{"location":"recipes/import-plans/#step-4-add-follow-up-tasks-optional","level":3,"title":"Step 4: Add Follow-Up Tasks (Optional)","text":"

    If you say yes, the agent creates tasks in TASKS.md that reference the imported specs:

    You: \"yes, add tasks\"\n\nAgent: [runs /ctx-task-add for each spec]\n       Added:\n         - [ ] Implement authentication middleware (spec: specs/add-authentication-middleware.md)\n         - [ ] Import plans skill (spec: specs/import-plans-skill.md)\n
    ","path":["Recipes","Knowledge and Tasks","Importing Claude Code Plans"],"tags":[]},{"location":"recipes/import-plans/#conversational-approach","level":2,"title":"Conversational Approach","text":"

    You don't need to remember the exact skill name:

    You say What happens \"import my plans\" /ctx-plan-import (interactive) \"save today's plans as specs\" /ctx-plan-import --today \"import all plans from this week\" /ctx-plan-import --since ... \"turn that plan into a spec\" /ctx-plan-import (filtered)","path":["Recipes","Knowledge and Tasks","Importing Claude Code Plans"],"tags":[]},{"location":"recipes/import-plans/#tips","level":2,"title":"Tips","text":"
    • Plans are copied, not moved: The originals stay in ~/.claude/plans/. Claude Code manages that directory; ctx doesn't delete from it.
    • Conflict handling: If specs/{slug}.md already exists, the agent asks whether to overwrite or pick a different name.
    • Specs are project memory: Once imported, specs are tracked in git and available to future sessions. Reference them from TASKS.md phase headers with Spec: specs/slug.md.
    • Pair with /ctx-implement: After importing a plan as a spec, use /ctx-implement to execute it step-by-step with verification.
    ","path":["Recipes","Knowledge and Tasks","Importing Claude Code Plans"],"tags":[]},{"location":"recipes/import-plans/#see-also","level":2,"title":"See Also","text":"
    • Skills Reference: /ctx-plan-import: full skill description
    • The Complete Session: where plan import fits in the session flow
    • Tracking Work Across Sessions: managing tasks that reference imported specs
    ","path":["Recipes","Knowledge and Tasks","Importing Claude Code Plans"],"tags":[]},{"location":"recipes/knowledge-capture/","level":1,"title":"Persisting Decisions, Learnings, and Conventions","text":"","path":["Recipes","Knowledge and Tasks","Persisting Decisions, Learnings, and Conventions"],"tags":[]},{"location":"recipes/knowledge-capture/#the-problem","level":2,"title":"The Problem","text":"

    You debug a subtle issue, discover the root cause, and move on.

    Three weeks later, a different session hits the same issue. The knowledge existed briefly in one session's memory but was never written down.

    Architectural decisions suffer the same fate: you weigh trade-offs, pick an approach, and six sessions later the AI suggests the alternative you already rejected.

    How do you make sure important context survives across sessions?

    Prefer Skills to Raw Commands

    Use /ctx-decision-add and /ctx-learning-add instead of raw ctx add commands. The agent automatically picks up session ID, branch, and commit hash from its context — no manual flags needed.

    ","path":["Recipes","Knowledge and Tasks","Persisting Decisions, Learnings, and Conventions"],"tags":[]},{"location":"recipes/knowledge-capture/#tldr","level":2,"title":"TL;DR","text":"
    /ctx-reflect               # surface items worth persisting\n/ctx-decision-add \"Title\"  # record with context/rationale/consequence\n/ctx-learning-add \"Title\"  # record with context/lesson/application\n

    Or just tell your agent: \"What have we learned this session?\"

    ","path":["Recipes","Knowledge and Tasks","Persisting Decisions, Learnings, and Conventions"],"tags":[]},{"location":"recipes/knowledge-capture/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Tool Type Purpose ctx add decision Command Record an architectural decision ctx add learning Command Record a gotcha, tip, or lesson ctx add convention Command Record a coding pattern or standard ctx reindex Command Rebuild both quick-reference indices ctx decision reindex Command Rebuild the DECISIONS.md index ctx learning reindex Command Rebuild the LEARNINGS.md index /ctx-decision-add Skill AI-guided decision capture with validation /ctx-learning-add Skill AI-guided learning capture with validation /ctx-convention-add Skill AI-guided convention recording with placement /ctx-reflect Skill Surface items worth persisting at breakpoints","path":["Recipes","Knowledge and Tasks","Persisting Decisions, Learnings, and Conventions"],"tags":[]},{"location":"recipes/knowledge-capture/#the-workflow","level":2,"title":"The Workflow","text":"

    Decision, Learning, or Convention?

    • If you chose between alternatives, it is a decision.
    • If you discovered something surprising, it is a learning.
    • If you are codifying a repeated pattern, it is a convention.
    ","path":["Recipes","Knowledge and Tasks","Persisting Decisions, Learnings, and Conventions"],"tags":[]},{"location":"recipes/knowledge-capture/#step-1-understand-what-to-persist","level":3,"title":"Step 1: Understand What to Persist","text":"

    Three context files serve different purposes:

    Decisions (DECISIONS.md) answer \"why is it this way?\" They record trade-offs between alternatives with structured fields: context, rationale, and consequences. Decisions prevent re-debating settled questions.

    Learnings (LEARNINGS.md) answer \"what did we discover the hard way?\" They record gotchas and debugging insights specific to this project with structured fields: context, lesson, and application.

    Learnings prevent repeating past mistakes.

    Conventions (CONVENTIONS.md) answer \"how do we do things here?\" They record patterns and standards. No structured fields required: just a name, a rule, and an example. Conventions keep code consistent across sessions.

    ","path":["Recipes","Knowledge and Tasks","Persisting Decisions, Learnings, and Conventions"],"tags":[]},{"location":"recipes/knowledge-capture/#step-2-record-decisions","level":3,"title":"Step 2: Record Decisions","text":"

    Decisions have three structured fields:

    • context,
    • rationale,
    • and consequences.

    The easiest way to record one is to tell your agent:

    You: \"Save that as a decision: we picked file-based cooldown\n      tokens over env vars.\"\n\nAgent: \"Recorded in DECISIONS.md:\n  - Decision: Use file-based cooldown tokens instead of env vars\n  - Context: Hook subprocesses cannot persist env vars to parent shell\n  - Rationale: File tokens survive across processes. Simpler than\n    IPC. Cleanup is automatic via TTL.\n  - Consequence: Tombstone files accumulate in /tmp. Cannot share\n    state across machines.\"\n

    The agent was present for the trade-off discussion. It fills in the structured fields from what it observed, which is hard to reconstruct later as a terminal one-liner.

    When multiple options were considered, the agent includes rejected alternatives in the rationale automatically.

    The /ctx-decision-add skill also guides you through the fields interactively. For quick decisions, it supports a Y-statement:

    \"In the context of [situation], facing [constraint], we decided for [choice] and against [alternatives], to achieve [benefit], accepting that [trade-off].\"

    CLI Command for Scripting and Automation

    When no agent is in the loop (CI pipelines, shell scripts, hooks), use the CLI directly with structured flags:

    ctx add decision \"Use file-based cooldown tokens instead of env vars\" \\\n  --context \"Hook subprocesses cannot persist env vars to parent shell\" \\\n  --rationale \"File tokens survive across processes. Simpler than IPC. Cleanup is automatic via TTL.\" \\\n  --consequence \"Tombstone files accumulate in /tmp. Cannot share state across machines.\" \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n
    ","path":["Recipes","Knowledge and Tasks","Persisting Decisions, Learnings, and Conventions"],"tags":[]},{"location":"recipes/knowledge-capture/#step-3-record-learnings","level":3,"title":"Step 3: Record Learnings","text":"

    Learnings have three structured fields: context, lesson, and application.

    Again, your agent is the best tool for this. It was there when you discovered the gotcha and can express it more completely than a backslash-escaped terminal command:

    You: \"That subprocess thing was painful. Save it as a learning.\"\n\nAgent: \"Added to LEARNINGS.md:\n  - Learning: Claude Code hooks run in a subprocess\n  - Context: Set env var in PreToolUse hook, but it was not visible\n    in the main session\n  - Lesson: Hook scripts execute in a child process. Env changes do\n    not propagate to parent.\n  - Application: Use tombstone files for hook-to-session\n    communication. Never rely on hook env vars.\"\n

    You said one sentence. The agent produced a structured entry with all three fields filled from the session context.

    That is the point: the agent has the full conversation and can be far more expressive than what you would type at a command prompt.

    The /ctx-learning-add skill applies three quality filters:

    1. Could someone Google this in 5 minutes?
    2. Is it specific to this codebase?
    3. Did it take real effort to discover?

    All three must pass.

    Learnings capture principles and heuristics, not code snippets.

    CLI Command for Scripting and Automation

    When no agent is in the loop:

    ctx add learning \"Claude Code hooks run in a subprocess\" \\\n  --context \"Set env var in PreToolUse hook, but it was not visible in the main session\" \\\n  --lesson \"Hook scripts execute in a child process. Env changes do not propagate to parent.\" \\\n  --application \"Use tombstone files for hook-to-session communication. Never rely on hook env vars.\" \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n
    ","path":["Recipes","Knowledge and Tasks","Persisting Decisions, Learnings, and Conventions"],"tags":[]},{"location":"recipes/knowledge-capture/#step-4-record-conventions","level":3,"title":"Step 4: Record Conventions","text":"

    Conventions are simpler: just a name, a rule, and optionally a section.

    These are short enough that either approach works:

    You: \"We've been using kebab-case for every CLI flag. Codify that.\"\n\nAgent: \"Added to CONVENTIONS.md under Naming:\n  'Use kebab-case for all CLI flag names.'\"\n

    Or from the terminal:

    ctx add convention \"Use kebab-case for all CLI flag names\" --section \"Naming\"\n

    Conventions work best for rules that come up repeatedly. Codify a pattern the third time you see it, not the first.

    ","path":["Recipes","Knowledge and Tasks","Persisting Decisions, Learnings, and Conventions"],"tags":[]},{"location":"recipes/knowledge-capture/#step-5-reindex-after-manual-edits","level":3,"title":"Step 5: Reindex After Manual Edits","text":"

    DECISIONS.md and LEARNINGS.md maintain a quick-reference index at the top: a compact table of date and title for each entry. The index updates automatically via ctx add, but falls out of sync after hand edits.

    ctx reindex\n

    This single command regenerates both indices. You can also reindex individually with ctx decision reindex or ctx learning reindex.

    Run reindex after any manual edit. The index lets AI tools scan all entries without reading the full file, which matters when token budgets are tight.

    ","path":["Recipes","Knowledge and Tasks","Persisting Decisions, Learnings, and Conventions"],"tags":[]},{"location":"recipes/knowledge-capture/#step-6-use-ctx-reflect-to-surface-what-to-capture","level":3,"title":"Step 6: Use /ctx-reflect to Surface What to Capture","text":"

    Keep It Conversational

    /ctx-reflect is not the only way to trigger reflection.

    Agents trained on the ctx playbook naturally surface persist-worthy items at breakpoints, even without invoking the skill explicitly.

    A conversational prompt like \"anything worth saving?\" or \"let's wrap up\" can trigger the same review.

    The skill provides a structured checklist, but the behavior is available through natural conversation.

    At natural breakpoints (after completing a feature, fixing a bug, or before ending a session) use /ctx-reflect to identify items worth persisting.

    /ctx-reflect\n

    The skill walks through learnings, decisions, tasks, and session notes, skipping categories with nothing to report. The output includes specific commands for each suggested persist:

    This session implemented file-based cooldown for `ctx agent` and\ndiscovered that hook subprocesses cannot set env vars in the parent.\n\nI'd suggest persisting:\n- **Learning**: Hook subprocesses cannot propagate env vars\n  `ctx add learning \"...\" --context \"...\" --lesson \"...\" --application \"...\" --session-id abc12345 --branch main --commit 68fbc00a`\n- **Decision**: File-based cooldown tokens over env vars\n  `ctx add decision \"...\" --context \"...\" --rationale \"...\" --consequence \"...\" --session-id abc12345 --branch main --commit 68fbc00a`\n\nWant me to persist any of these?\n

    The skill always asks before persisting.

    ","path":["Recipes","Knowledge and Tasks","Persisting Decisions, Learnings, and Conventions"],"tags":[]},{"location":"recipes/knowledge-capture/#step-7-the-conversational-approach","level":3,"title":"Step 7: The Conversational Approach","text":"

    The commands shown in Steps 2 to 4 are precise, but you rarely need to type them yourself. When working with an agent that has loaded the ctx playbook, natural language triggers the same operations.

    You ask, the agent acts:

    You say What the agent does \"What have we learned this session?\" Reviews recent work, identifies learnings, offers to persist \"Save that as a decision\" Captures the trade-off you just discussed with structured fields \"Any gotchas worth remembering?\" Scans recent work for non-obvious lessons \"Record that convention\" Adds the pattern you just agreed on to CONVENTIONS.md \"Let's wrap up\" Runs through the reflect checklist conversationally

    Example exchange:

    You: \"What did we learn from that debugging session?\"\n\nAgent: \"We discovered three things worth persisting:\n  1. Hook subprocesses can't propagate env vars to the parent. We\n     spent 20 minutes on this before switching to tombstone files.\n  2. Tombstone files need a TTL or they accumulate in /tmp.\n  3. The cooldown logic must check file mtime, not just existence.\n  Want me to save these as learnings?\"\n\nYou: \"Yes, all of them.\"\n\nAgent: \"Done. Added all three to LEARNINGS.md with structured fields.\"\n

    The agent can also be proactive without being asked. The playbook encourages agents to persist context at natural milestones.

    You may see:

    • After fixing a tricky bug: \"I noticed this was a non-obvious issue. Want me to save it as a learning?\"
    • After choosing between approaches: \"We just made a trade-off between file-based tokens and IPC. Should I record this as a decision?\"
    • At session milestones: the agent checks what is worth persisting and offers a brief summary.

    When agents run unattended (no human in the loop), they may persist directly:

    • \"I've added the subprocess env var gotcha to LEARNINGS.md so we don't hit it again.\"
    • \"Recorded the decision to use file-based tokens over env vars in DECISIONS.md.\"
    • \"Marked the cooldown task done and added a follow-up for TTL cleanup.\"

    This is behavior by design.

    The playbook's self-check prompt, \"If this session ended right now, would the next session know what happened?\" drives agents to persist early and often rather than waiting for explicit instructions.

    ","path":["Recipes","Knowledge and Tasks","Persisting Decisions, Learnings, and Conventions"],"tags":[]},{"location":"recipes/knowledge-capture/#putting-it-all-together","level":2,"title":"Putting It All Together","text":"","path":["Recipes","Knowledge and Tasks","Persisting Decisions, Learnings, and Conventions"],"tags":[]},{"location":"recipes/knowledge-capture/#command-line-approach-scripting-and-automation","level":3,"title":"Command-Line Approach (Scripting and Automation)","text":"
    # Decision: record the trade-off\nctx add decision \"Use PostgreSQL over SQLite\" \\\n  --context \"Need concurrent multi-user access\" \\\n  --rationale \"SQLite locks on writes; Postgres handles concurrency\" \\\n  --consequence \"Requires a database server; team needs Postgres training\" \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n\n# Learning: record the gotcha\nctx add learning \"SQL migrations must be idempotent\" \\\n  --context \"Deploy failed when migration ran twice after rollback\" \\\n  --lesson \"CREATE TABLE without IF NOT EXISTS fails on retry\" \\\n  --application \"Always use IF NOT EXISTS guards in migrations\" \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n\n# Convention: record the pattern\nctx add convention \"API handlers return structured errors\" --section \"API\"\n\n# Reindex after manual edits\nctx reindex\n\n# Reflect at breakpoints (from AI assistant)\n# /ctx-reflect\n
    ","path":["Recipes","Knowledge and Tasks","Persisting Decisions, Learnings, and Conventions"],"tags":[]},{"location":"recipes/knowledge-capture/#conversational-approach-interactive-sessions","level":3,"title":"Conversational Approach (Interactive Sessions)","text":"
    You: \"We just picked Postgres over SQLite for the concurrency\n      reasons. Save that?\"\n\nAgent: \"Recorded. Added to DECISIONS.md:\n  - Decision: Use PostgreSQL over SQLite\n  - Context: Need concurrent multi-user access\n  - Rationale: SQLite locks on writes; Postgres handles concurrency\n  - Consequence: Requires a database server; team needs Postgres\n    training\"\n\nYou: \"Also that migration thing. The deploy blew up when the\n      migration ran twice.\"\n\nAgent: \"Got it. Added to LEARNINGS.md:\n  - Learning: SQL migrations must be idempotent\n  - Lesson: CREATE TABLE without IF NOT EXISTS fails on retry\n  - Application: Always use IF NOT EXISTS guards in migrations\"\n\nYou: \"Good. Anything else worth saving before we wrap up?\"\n\nAgent: \"We've been using structured error returns in every API\n  handler this session. Want me to codify that as a convention?\"\n\nYou: \"Do it.\"\n\nAgent: \"Done. Added 'API handlers return structured errors' to\n  CONVENTIONS.md under the API section.\"\n

    Both approaches produce the same structured entries in the same context files.

    • The conversational approach is the natural fit for interactive sessions;
    • the CLI commands are better suited for scripts, hooks, and automation pipelines.
    ","path":["Recipes","Knowledge and Tasks","Persisting Decisions, Learnings, and Conventions"],"tags":[]},{"location":"recipes/knowledge-capture/#tips","level":2,"title":"Tips","text":"
    • Record decisions at the moment of choice. The alternatives you considered and the reasons you rejected them fade quickly. Capture trade-offs while they are fresh.
    • Learnings should fail the Google test. If someone could find it in a 5-minute Google search, it does not belong in LEARNINGS.md.
    • Conventions earn their place through repetition. Add a convention the third time you see a pattern, not the first.
    • Use /ctx-reflect at natural breakpoints. The checklist catches items you might otherwise lose.
    • Keep the entries self-contained. Each entry should make sense on its own. A future session may load only one due to token budget constraints.
    • Reindex after every hand edit. It takes less than a second. A stale index causes AI tools to miss entries.
    • Prefer the structured fields. The verbosity forces clarity. A decision without a rationale is just a fact. A learning without an application is just a story.
    • Talk to your agent, do not type commands. In interactive sessions, the conversational approach is the recommended way to capture knowledge. Say \"save that as a learning\" or \"any decisions worth recording?\" and let the agent handle the structured fields. Reserve the CLI commands for scripting, automation, and CI/CD pipelines where there is no agent in the loop.
    • Trust the agent's proactive instincts. Agents trained on the ctx playbook will offer to persist context at milestones. A brief \"want me to save this?\" is cheaper than re-discovering the same lesson three sessions later.
    • Relax provenance per-project if --session-id, --branch, or --commit are impractical (e.g., manual notes outside an AI session). Add to .ctxrc:

      provenance_required:\n  session_id: false   # allow entries without --session-id\n  branch: true        # still require --branch\n  commit: true        # still require --commit\n

      Default is all three required. Only humans can relax this via config: agents cannot bypass it, and that's by design.

    ","path":["Recipes","Knowledge and Tasks","Persisting Decisions, Learnings, and Conventions"],"tags":[]},{"location":"recipes/knowledge-capture/#next-up","level":2,"title":"Next Up","text":"

    Tracking Work Across Sessions →: Add, prioritize, complete, and archive tasks across sessions.

    ","path":["Recipes","Knowledge and Tasks","Persisting Decisions, Learnings, and Conventions"],"tags":[]},{"location":"recipes/knowledge-capture/#see-also","level":2,"title":"See Also","text":"
    • Tracking Work Across Sessions: managing the tasks that decisions and learnings support
    • The Complete Session: full session lifecycle including reflection and context persistence
    • Detecting and Fixing Drift: keeping knowledge files accurate as the codebase evolves
    • CLI Reference: full documentation for ctx add, ctx decision, ctx learning
    • Context Files: format and conventions for DECISIONS.md, LEARNINGS.md, and CONVENTIONS.md
    ","path":["Recipes","Knowledge and Tasks","Persisting Decisions, Learnings, and Conventions"],"tags":[]},{"location":"recipes/memory-bridge/","level":1,"title":"Bridging Claude Code Auto Memory","text":"","path":["Recipes","Knowledge and Tasks","Bridging Claude Code Auto Memory"],"tags":[]},{"location":"recipes/memory-bridge/#the-problem","level":2,"title":"The Problem","text":"

    Claude Code maintains per-project auto memory at ~/.claude/projects/<slug>/memory/MEMORY.md. This file is:

    • Outside the repo - not version-controlled, not portable
    • Machine-specific - tied to one ~/.claude/ directory
    • Invisible to ctx - context loading and hooks don't read it

    Meanwhile, ctx maintains structured context files (DECISIONS.md, LEARNINGS.md, CONVENTIONS.md) that are git-tracked, portable, and token-budgeted - but Claude Code doesn't automatically write to them.

    The two systems hold complementary knowledge with no bridge between them.

    ","path":["Recipes","Knowledge and Tasks","Bridging Claude Code Auto Memory"],"tags":[]},{"location":"recipes/memory-bridge/#tldr","level":2,"title":"TL;DR","text":"
    ctx memory sync          # Mirror MEMORY.md into .context/memory/mirror.md\nctx memory status        # Check for drift\nctx memory diff          # See what changed since last sync\n

    The check-memory-drift hook nudges automatically when MEMORY.md changes - you don't need to remember to sync manually.

    ","path":["Recipes","Knowledge and Tasks","Bridging Claude Code Auto Memory"],"tags":[]},{"location":"recipes/memory-bridge/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Tool Type Purpose ctx memory sync CLI command Copy MEMORY.md to mirror, archive previous ctx memory status CLI command Show drift, timestamps, line counts ctx memory diff CLI command Show changes since last sync ctx memory import CLI command Classify and promote entries to .context/ files ctx memory publish CLI command Push curated .context/ content to MEMORY.md ctx memory unpublish CLI command Remove published block from MEMORY.md ctx system check-memory-drift Hook Nudge when MEMORY.md has changed (once/session)","path":["Recipes","Knowledge and Tasks","Bridging Claude Code Auto Memory"],"tags":[]},{"location":"recipes/memory-bridge/#how-it-works","level":2,"title":"How It Works","text":"","path":["Recipes","Knowledge and Tasks","Bridging Claude Code Auto Memory"],"tags":[]},{"location":"recipes/memory-bridge/#discovery","level":3,"title":"Discovery","text":"

    Claude Code encodes project paths as directory names under ~/.claude/projects/. The encoding replaces / with - and prefixes with -:

    /home/jose/WORKSPACE/ctx  →  ~/.claude/projects/-home-jose-WORKSPACE-ctx/\n

    ctx memory uses this encoding to locate MEMORY.md automatically from your project root - no configuration needed.

    ","path":["Recipes","Knowledge and Tasks","Bridging Claude Code Auto Memory"],"tags":[]},{"location":"recipes/memory-bridge/#mirroring","level":3,"title":"Mirroring","text":"

    When you run ctx memory sync:

    1. The previous mirror is archived to .context/memory/archive/mirror-<timestamp>.md
    2. MEMORY.md is copied to .context/memory/mirror.md
    3. Sync state is updated in .context/state/memory-import.json

    The mirror is git-tracked, so it travels with the project. Archives provide a fallback for projects that don't use git.

    ","path":["Recipes","Knowledge and Tasks","Bridging Claude Code Auto Memory"],"tags":[]},{"location":"recipes/memory-bridge/#drift-detection","level":3,"title":"Drift Detection","text":"

    The check-memory-drift hook compares MEMORY.md's modification time against the mirror. When drift is detected, the agent sees:

    ┌─ Memory Drift ────────────────────────────────────────────────\n│ MEMORY.md has changed since last sync.\n│ Run: ctx memory sync\n│ Context: .context\n└────────────────────────────────────────────────────────────────\n

    The nudge fires once per session to avoid noise.

    ","path":["Recipes","Knowledge and Tasks","Bridging Claude Code Auto Memory"],"tags":[]},{"location":"recipes/memory-bridge/#typical-workflow","level":2,"title":"Typical Workflow","text":"","path":["Recipes","Knowledge and Tasks","Bridging Claude Code Auto Memory"],"tags":[]},{"location":"recipes/memory-bridge/#at-session-start","level":3,"title":"At Session Start","text":"

    If the hook fires a drift nudge, sync before diving into work:

    ctx memory diff     # Review what changed\nctx memory sync     # Mirror the changes\n
    ","path":["Recipes","Knowledge and Tasks","Bridging Claude Code Auto Memory"],"tags":[]},{"location":"recipes/memory-bridge/#periodic-check","level":3,"title":"Periodic Check","text":"
    ctx memory status\n# Memory Bridge Status\n#   Source:      ~/.claude/projects/.../memory/MEMORY.md\n#   Mirror:      .context/memory/mirror.md\n#   Last sync:   2026-03-05 14:30 (2 hours ago)\n#\n#   MEMORY.md:  47 lines\n#   Mirror:     32 lines\n#   Drift:      detected (source is newer)\n#   Archives:   3 snapshots in .context/memory/archive/\n
    ","path":["Recipes","Knowledge and Tasks","Bridging Claude Code Auto Memory"],"tags":[]},{"location":"recipes/memory-bridge/#dry-run","level":3,"title":"Dry Run","text":"

    Preview what sync would do without writing:

    ctx memory sync --dry-run\n
    ","path":["Recipes","Knowledge and Tasks","Bridging Claude Code Auto Memory"],"tags":[]},{"location":"recipes/memory-bridge/#storage-layout","level":2,"title":"Storage Layout","text":"
    .context/\n├── memory/\n│   ├── mirror.md                          # Raw copy of MEMORY.md (often git-tracked)\n│   └── archive/\n│       ├── mirror-2026-03-05-143022.md    # Timestamped pre-sync snapshots\n│       └── mirror-2026-03-04-220015.md\n├── state/\n│   └── memory-import.json                 # Sync tracking state\n
    ","path":["Recipes","Knowledge and Tasks","Bridging Claude Code Auto Memory"],"tags":[]},{"location":"recipes/memory-bridge/#edge-cases","level":2,"title":"Edge Cases","text":"Scenario Behavior Auto memory not active sync exits 1 with message. status reports \"not active\". Hook skips silently. First sync (no mirror) Creates mirror without archiving. MEMORY.md is empty Syncs to empty mirror (valid). Not initialized Init guard rejects (same as all ctx commands).","path":["Recipes","Knowledge and Tasks","Bridging Claude Code Auto Memory"],"tags":[]},{"location":"recipes/memory-bridge/#importing-entries","level":2,"title":"Importing Entries","text":"

    Once you've synced, you can classify and promote entries into structured .context/ files:

    ctx memory import --dry-run    # Preview classification\nctx memory import              # Actually promote entries\n

    Each entry is classified by keyword heuristics:

    Keywords Target always use, prefer, never use, standard CONVENTIONS.md decided, chose, trade-off, approach DECISIONS.md gotcha, learned, watch out, bug, caveat LEARNINGS.md todo, need to, follow up TASKS.md Everything else Skipped

    Entries that don't match any pattern are skipped - they stay in the mirror for manual review. Deduplication (hash-based) prevents re-importing the same entry on subsequent runs.

    Review Before Importing

    Use --dry-run first. The heuristic classifier is deliberately simple - it may misclassify ambiguous entries. Review the plan, then import.

    ","path":["Recipes","Knowledge and Tasks","Bridging Claude Code Auto Memory"],"tags":[]},{"location":"recipes/memory-bridge/#full-workflow","level":3,"title":"Full Workflow","text":"
    ctx memory sync                # 1. Mirror MEMORY.md\nctx memory import --dry-run    # 2. Preview what would be imported\nctx memory import              # 3. Promote entries to .context/ files\n
    ","path":["Recipes","Knowledge and Tasks","Bridging Claude Code Auto Memory"],"tags":[]},{"location":"recipes/memory-bridge/#publishing-context-to-memorymd","level":2,"title":"Publishing Context to MEMORY.md","text":"

    Push curated .context/ content back into MEMORY.md so Claude Code sees structured project context on session start - without needing hooks.

    ctx memory publish --dry-run    # Preview what would be published\nctx memory publish              # Write to MEMORY.md\nctx memory publish --budget 40  # Tighter line budget\n

    Published content is wrapped in markers:

    <!-- ctx:published -->\n# Project Context (managed by ctx)\n\n## Pending Tasks\n- [ ] Implement feature X\n...\n<!-- ctx:end -->\n

    Rules:

    • ctx owns everything between the markers
    • Claude owns everything outside the markers
    • ctx memory import reads only outside the markers
    • ctx memory publish replaces only inside the markers

    To remove the published block entirely:

    ctx memory unpublish\n

    Publish at Wrap-Up, Not on Commit

    The best time to publish is during session wrap-up, after persisting decisions and learnings. Never auto-publish - give yourself a chance to review what's going into MEMORY.md.

    ","path":["Recipes","Knowledge and Tasks","Bridging Claude Code Auto Memory"],"tags":[]},{"location":"recipes/memory-bridge/#full-bidirectional-workflow","level":3,"title":"Full Bidirectional Workflow","text":"
    ctx memory sync                 # 1. Mirror MEMORY.md\nctx memory import --dry-run     # 2. Check what Claude wrote\nctx memory import               # 3. Promote entries to .context/\nctx memory publish --dry-run    # 4. Check what would be published\nctx memory publish              # 5. Push context to MEMORY.md\n
    ","path":["Recipes","Knowledge and Tasks","Bridging Claude Code Auto Memory"],"tags":[]},{"location":"recipes/multi-tool-setup/","level":1,"title":"Setup Across AI Tools","text":"","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multi-tool-setup/#the-problem","level":2,"title":"The Problem","text":"

    You have installed ctx and want to set it up with your AI coding assistant so that context persists across sessions. Different tools have different integration depths. For example:

    • Claude Code supports native hooks that load and save context automatically.
    • Cursor injects context via its system prompt.
    • Aider reads context files through its --read flag.

    This recipe walks through the complete setup for each tool, from initialization through verification, so you end up with a working memory layer regardless of which AI tool you use.

    ","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multi-tool-setup/#tldr","level":2,"title":"TL;DR","text":"
    cd your-project\nctx init                      # creates .context/\nsource <(ctx completion zsh)  # shell completion (or bash/fish)\n\n# ## Claude Code (automatic after plugin install) ##\nclaude /plugin marketplace add ActiveMemory/ctx\nclaude /plugin install ctx@activememory-ctx\n\n# ## Cursor / Aider / Copilot / Windsurf ##\nctx setup cursor # or: aider, copilot, windsurf\n\n# ## Companion tools (highly recommended) ##\nnpx gitnexus analyze          # code knowledge graph\n# Add Gemini Search MCP server for grounded web search\n

    Create a .ctxrc in your project root to configure token budgets, context directory, drift thresholds, and more.

    Then start your AI tool and ask: \"Do you remember?\"

    ","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multi-tool-setup/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Command/Skill Role in this workflow ctx init Create .context/ directory, templates, and permissions ctx setup Generate integration configuration for a specific AI tool ctx agent Print a token-budgeted context packet for AI consumption ctx load Output assembled context in read order (for manual pasting) ctx watch Auto-apply context updates from AI output (non-native tools) ctx completion Generate shell autocompletion for bash, zsh, or fish ctx journal import Import sessions to editable journal Markdown","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multi-tool-setup/#the-workflow","level":2,"title":"The Workflow","text":"","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multi-tool-setup/#step-1-initialize-ctx","level":3,"title":"Step 1: Initialize ctx","text":"

    Run ctx init in your project root. This creates the .context/ directory with all template files and seeds ctx permissions in settings.local.json.

    cd your-project\nctx init\n

    This produces the following structure:

    .context/\n  CONSTITUTION.md     # Hard rules the AI must never violate\n  TASKS.md            # Current and planned work\n  CONVENTIONS.md      # Code patterns and standards\n  ARCHITECTURE.md     # System overview\n  DECISIONS.md        # Architectural decisions with rationale\n  LEARNINGS.md        # Lessons learned, gotchas, tips\n  GLOSSARY.md         # Domain terms and abbreviations\n  AGENT_PLAYBOOK.md   # How AI tools should use this system\n

    Using a Different .context Directory

    The .context/ directory doesn't have to live inside your project. You can point ctx to an external folder via .ctxrc, the CTX_DIR environment variable, or the --context-dir CLI flag.

    This is useful for monorepos or shared context across repositories.

    See Configuration for details and External Context for a full recipe.

    For Claude Code, install the ctx plugin to get hooks and skills:

    claude /plugin marketplace add ActiveMemory/ctx\nclaude /plugin install ctx@activememory-ctx\n

    If you only need the core files (useful for lightweight setups), use the --minimal flag:

    ctx init --minimal\n

    This creates only TASKS.md, DECISIONS.md, and CONSTITUTION.md.

    ","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multi-tool-setup/#step-2-generate-tool-specific-hooks","level":3,"title":"Step 2: Generate Tool-Specific Hooks","text":"

    If you are using a tool other than Claude Code (which is configured automatically by ctx init), generate its integration configuration:

    # For Cursor\nctx setup cursor\n\n# For Aider\nctx setup aider\n\n# For GitHub Copilot\nctx setup copilot\n\n# For Windsurf\nctx setup windsurf\n

    Each command prints the configuration you need. How you apply it depends on the tool.

    ","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multi-tool-setup/#claude-code","level":4,"title":"Claude Code","text":"

    No action needed. Just install ctx from the Marketplace as ActiveMemory/ctx.

    Claude Code Is a First-Class Citizen

    With the ctx plugin installed, Claude Code gets hooks and skills automatically. The PreToolUse hook runs ctx agent --budget 4000 on every tool call (with a 10-minute cooldown so it only fires once per window).

    ","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multi-tool-setup/#cursor","level":4,"title":"Cursor","text":"

    Add the system prompt snippet to .cursor/settings.json:

    {\n  \"ai.systemPrompt\": \"Read .context/TASKS.md and .context/CONVENTIONS.md before responding. Follow rules in .context/CONSTITUTION.md.\"\n}\n

    Context files appear in Cursor's file tree. You can also paste a context packet directly into chat:

    ctx agent --budget 4000 | xclip    # Linux\nctx agent --budget 4000 | pbcopy   # macOS\n
    ","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multi-tool-setup/#aider","level":4,"title":"Aider","text":"

    Create .aider.conf.yml so context files are loaded on every session:

    read:\n  - .context/CONSTITUTION.md\n  - .context/TASKS.md\n  - .context/CONVENTIONS.md\n  - .context/DECISIONS.md\n

    Then start Aider normally:

    aider\n

    Or specify files on the command line:

    aider --read .context/TASKS.md --read .context/CONVENTIONS.md\n
    ","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multi-tool-setup/#step-3-set-up-shell-completion","level":3,"title":"Step 3: Set Up Shell Completion","text":"

    Shell completion lets you tab-complete ctx subcommands and flags, which is especially useful while learning the CLI.

    # Bash (add to ~/.bashrc)\nsource <(ctx completion bash)\n\n# Zsh (add to ~/.zshrc)\nsource <(ctx completion zsh)\n\n# Fish\nctx completion fish > ~/.config/fish/completions/ctx.fish\n

    After sourcing, typing ctx a<TAB> completes to ctx agent, and ctx journal <TAB> shows list, show, and export.

    ","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multi-tool-setup/#step-4-verify-the-setup-works","level":3,"title":"Step 4: Verify the Setup Works","text":"

    Start a fresh session in your AI tool and ask:

    \"Do you remember?\"

    A correctly configured tool responds with specific context: current tasks from TASKS.md, recent decisions, and previous session topics. It should not say \"I don't have memory\" or \"Let me search for files.\"

    This question checks the passive side of memory. A properly set-up agent is also proactive: it treats context maintenance as part of its job:

    • After a debugging session, it offers to save a learning.
    • After a trade-off discussion, it asks whether to record the decision.
    • After completing a task, it suggests follow-up items.

    The \"do you remember?\" check verifies both halves: recall and responsibility.

    For example, after resolving a tricky bug, a proactive agent might say:

    That Redis timeout issue was subtle. Want me to save this as a *learning*\nso we don't hit it again?\n

    If you see behavior like this, the setup is working end to end.

    In Claude Code, you can also invoke the /ctx-status skill:

    /ctx-status\n

    This prints a summary of all context files, token counts, and recent activity, confirming that hooks are loading context.

    If context is not loading, check the basics:

    Symptom Fix ctx: command not found Ensure ctx is in your PATH: which ctx Hook errors Verify plugin is installed: claude /plugin list Context not refreshing Cooldown may be active; wait 10 minutes or set --cooldown 0","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multi-tool-setup/#step-5-enable-watch-mode-for-non-native-tools","level":3,"title":"Step 5: Enable Watch Mode for Non-Native Tools","text":"

    Tools like Aider, Copilot, and Windsurf do not support native hooks for saving context automatically. For these, run ctx watch alongside your AI tool.

    Pipe the AI tool's output through ctx watch:

    # Terminal 1: Run Aider with output logged\naider 2>&1 | tee /tmp/aider.log\n\n# Terminal 2: Watch the log for context updates\nctx watch --log /tmp/aider.log\n

    Or for any generic tool:

    your-ai-tool 2>&1 | tee /tmp/ai.log &\nctx watch --log /tmp/ai.log\n

    When the AI emits structured update commands, ctx watch parses and applies them automatically:

    <context-update type=\"learning\"\n  context=\"Debugging rate limiter\"\n  lesson=\"Redis MULTI/EXEC does not roll back on error\"\n  application=\"Wrap rate-limit checks in Lua scripts instead\"\n>Redis Transaction Behavior</context-update>\n

    To preview changes without modifying files:

    ctx watch --dry-run --log /tmp/ai.log\n
    ","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multi-tool-setup/#step-6-import-session-transcripts-optional","level":3,"title":"Step 6: Import Session Transcripts (Optional)","text":"

    If you want to browse past session transcripts, import them to the journal:

    ctx journal import --all\n

    This converts raw session data into editable Markdown files in .context/journal/. You can then enrich them with metadata using /ctx-journal-enrich-all inside your AI assistant.

    ","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multi-tool-setup/#putting-it-all-together","level":2,"title":"Putting It All Together","text":"

    Here is the condensed setup for all three tools:

    # ## Common (run once per project) ##\ncd your-project\nctx init\nsource <(ctx completion zsh)       # or bash/fish\n\n# ## Claude Code (automatic, just verify) ##\n# Start Claude Code, then ask: \"Do you remember?\"\n\n# ## Cursor ##\nctx setup cursor\n# Add the system prompt to .cursor/settings.json\n# Paste context: ctx agent --budget 4000 | pbcopy\n\n# ## Aider ##\nctx setup aider\n# Create .aider.conf.yml with read: paths\n# Run watch mode alongside: ctx watch --log /tmp/aider.log\n\n# ## Verify Any Tool ##\n# Ask your AI: \"Do you remember?\"\n# Expect: specific tasks, decisions, recent context\n
    ","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multi-tool-setup/#tips","level":2,"title":"Tips","text":"
    • Start with ctx init (not --minimal) for your first project. The full template set gives the agent more to work with, and you can always delete files later.
    • For Claude Code, the token budget is configured in the plugin's hooks.json. To customize, adjust the --budget flag in the ctx agent hook command.
    • The --session $PPID flag isolates cooldowns per Claude Code process, so parallel sessions do not suppress each other.
    • Commit your .context/ directory to version control. Several ctx features (journals, changelogs, blog generation) rely on git history.
    • For Cursor and Copilot, keep CONVENTIONS.md visible. These tools treat open files as higher-priority context.
    • Run ctx drift periodically to catch stale references before they confuse the agent.
    • The agent playbook instructs the agent to persist context at natural milestones (completed tasks, decisions, gotchas). In practice, this works best when you reinforce the habit: a quick \"anything worth saving?\" after a debugging session goes a long way.
    ","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multi-tool-setup/#companion-tools-highly-recommended","level":2,"title":"Companion Tools (Highly Recommended)","text":"

    ctx skills can leverage external MCP servers for web search and code intelligence. ctx works without them, but they significantly improve agent behavior across sessions — the investment is small and the benefits compound. Skills like /ctx-code-review, /ctx-explain, and /ctx-refactor all become noticeably better with these tools connected.

    ","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multi-tool-setup/#gemini-search","level":3,"title":"Gemini Search","text":"

    Provides grounded web search with citations. Used by skills and the agent playbook as the preferred search backend (faster and more accurate than built-in web search).

    Setup: Add the Gemini Search MCP server to your Claude Code settings. See the Gemini Search MCP documentation for installation.

    Verification:

    # The agent checks this automatically during /ctx-remember\n# Manual test: ask the agent to search for something\n

    ","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multi-tool-setup/#gitnexus","level":3,"title":"GitNexus","text":"

    Provides a code knowledge graph with symbol resolution, blast radius analysis, and domain clustering. Used by skills like /ctx-refactor (impact analysis) and /ctx-code-review (dependency awareness).

    Setup: Add the GitNexus MCP server to your Claude Code settings, then index your project:

    npx gitnexus analyze\n

    Verification:

    # The agent checks this automatically during /ctx-remember\n# If the index is stale, it will suggest rehydrating\n

    ","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multi-tool-setup/#suppressing-the-check","level":3,"title":"Suppressing the Check","text":"

    If you don't use companion tools and want to skip the availability check at session start, add to .ctxrc:

    companion_check: false\n
    ","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multi-tool-setup/#future-direction","level":3,"title":"Future Direction","text":"

    The companion tool integration is evolving toward a pluggable model: bring your own search engine, bring your own code intelligence. The current integration is MCP-based and limited to Gemini Search and GitNexus. If you use a different search or code intelligence tool, skills will degrade gracefully to built-in capabilities.

    ","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multi-tool-setup/#next-up","level":2,"title":"Next Up","text":"

    Keeping Context in a Separate Repo →: Store context files outside the project tree for multi-repo or open source setups.

    ","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multi-tool-setup/#see-also","level":2,"title":"See Also","text":"
    • The Complete Session: full session lifecycle recipe
    • Multilingual Session Parsing: configure session header prefixes for other languages
    • CLI Reference: all commands and flags
    • Integrations: detailed per-tool integration docs
    ","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multilingual-sessions/","level":1,"title":"Multilingual Session Parsing","text":"","path":["Recipes","Getting Started","Multilingual Session Parsing"],"tags":[]},{"location":"recipes/multilingual-sessions/#the-problem","level":2,"title":"The Problem","text":"

    Your team works across languages. Session files written by AI tools might use headers like # Oturum: 2026-01-15 - API Düzeltme (Turkish) or # セッション: 2026-01-15 - テスト (Japanese) instead of # Session: 2026-01-15 - Fix API.

    By default, ctx only recognizes Session: as a session header prefix. Files with other prefixes are silently skipped during journal import and journal generation: They look like regular Markdown, not sessions.

    ","path":["Recipes","Getting Started","Multilingual Session Parsing"],"tags":[]},{"location":"recipes/multilingual-sessions/#tldr","level":2,"title":"TL;DR","text":"

    Add recognized prefixes to .ctxrc:

    session_prefixes:\n  - \"Session:\"      # English (include to keep default)\n  - \"Oturum:\"       # Turkish\n  - \"セッション:\"     # Japanese\n

    Restart your session. All configured prefixes are now recognized.

    ","path":["Recipes","Getting Started","Multilingual Session Parsing"],"tags":[]},{"location":"recipes/multilingual-sessions/#how-it-works","level":2,"title":"How It Works","text":"

    The Markdown session parser detects session files by looking for an H1 header that starts with a known prefix followed by a date:

    # Session: 2026-01-15 - Fix API Rate Limiting\n# Oturum: 2026-01-15 - API Düzeltme\n# セッション: 2026-01-15 - テスト\n

    The list of recognized prefixes comes from session_prefixes in .ctxrc. When the key is absent or empty, ctx falls back to the built-in default: [\"Session:\"].

    Date-only headers (# 2026-01-15 - Morning Work) are always recognized regardless of prefix configuration.

    ","path":["Recipes","Getting Started","Multilingual Session Parsing"],"tags":[]},{"location":"recipes/multilingual-sessions/#configuration","level":2,"title":"Configuration","text":"","path":["Recipes","Getting Started","Multilingual Session Parsing"],"tags":[]},{"location":"recipes/multilingual-sessions/#adding-a-language","level":3,"title":"Adding a Language","text":"

    Add the prefix with a trailing colon to your .ctxrc:

    session_prefixes:\n  - \"Session:\"\n  - \"Sesión:\"       # Spanish\n

    Include Session: Explicitly

    When you override session_prefixes, the default is replaced, not extended. If you still want English headers recognized, include \"Session:\" in your list.

    ","path":["Recipes","Getting Started","Multilingual Session Parsing"],"tags":[]},{"location":"recipes/multilingual-sessions/#team-setup","level":3,"title":"Team Setup","text":"

    Commit .ctxrc to the repo so all team members share the same prefix list. This ensures ctx journal import and journal generation pick up sessions from all team members regardless of language.

    ","path":["Recipes","Getting Started","Multilingual Session Parsing"],"tags":[]},{"location":"recipes/multilingual-sessions/#common-prefixes","level":3,"title":"Common Prefixes","text":"Language Prefix English Session: Turkish Oturum: Spanish Sesión: French Session: German Sitzung: Japanese セッション: Korean 세션: Portuguese Sessão: Chinese 会话:","path":["Recipes","Getting Started","Multilingual Session Parsing"],"tags":[]},{"location":"recipes/multilingual-sessions/#verifying","level":3,"title":"Verifying","text":"

    After configuring, test with ctx journal source. Sessions with the new prefixes should appear in the output.

    ","path":["Recipes","Getting Started","Multilingual Session Parsing"],"tags":[]},{"location":"recipes/multilingual-sessions/#what-this-does-not-do","level":2,"title":"What This Does NOT Do","text":"
    • Change the interface language: ctx output is always English. This setting only controls which session files ctx can parse.
    • Generate headers: ctx never writes session headers. The prefix list is recognition-only (input, not output).
    • Affect JSONL sessions: Claude Code JSONL transcripts don't use header prefixes. This only applies to Markdown session files in .context/sessions/.
    ","path":["Recipes","Getting Started","Multilingual Session Parsing"],"tags":[]},{"location":"recipes/multilingual-sessions/#see-also","level":2,"title":"See Also","text":"

    See also: Setup Across AI Tools - complete multi-tool setup including Markdown session configuration.

    See also: CLI Reference - full .ctxrc field reference including session_prefixes.

    ","path":["Recipes","Getting Started","Multilingual Session Parsing"],"tags":[]},{"location":"recipes/parallel-worktrees/","level":1,"title":"Parallel Agent Development with Git Worktrees","text":"","path":["Recipes","Agents and Automation","Parallel Agent Development with Git Worktrees"],"tags":[]},{"location":"recipes/parallel-worktrees/#the-problem","level":2,"title":"The Problem","text":"

    You have a large backlog (10, 20, 30 open tasks) and many of them are independent: docs work that doesn't touch Go code, a new package that doesn't overlap with existing ones, test coverage for a stable module.

    Running one agent at a time means serial execution. You want 3-4 agents working in parallel, each on its own track, without stepping on each other's files.

    Git worktrees solve this.

    Each worktree is a separate working directory with its own branch, but they share the same .git object database. Combined with ctx's persistent context, each agent session picks up the full project state and works independently.

    ","path":["Recipes","Agents and Automation","Parallel Agent Development with Git Worktrees"],"tags":[]},{"location":"recipes/parallel-worktrees/#tldr","level":2,"title":"TL;DR","text":"
    /ctx-worktree                                   # 1. group tasks by file overlap\ngit worktree add ../myproject-docs -b work/docs # 2. create worktrees\ncd ../myproject-docs && claude                  # 3. launch agents (one per track)\n/ctx-worktree teardown docs                     # 4. merge back and clean up\n

    TASKS.md will conflict on merge: Accept all [x] completions from both sides.

    ","path":["Recipes","Agents and Automation","Parallel Agent Development with Git Worktrees"],"tags":[]},{"location":"recipes/parallel-worktrees/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Tool Type Purpose /ctx-worktree Skill Create, list, and tear down worktrees /ctx-next Skill Pick tasks from the backlog for each track git worktree Command Underlying git worktree management git merge Command Merge completed tracks back to main","path":["Recipes","Agents and Automation","Parallel Agent Development with Git Worktrees"],"tags":[]},{"location":"recipes/parallel-worktrees/#the-workflow","level":2,"title":"The Workflow","text":"","path":["Recipes","Agents and Automation","Parallel Agent Development with Git Worktrees"],"tags":[]},{"location":"recipes/parallel-worktrees/#step-1-assess-the-backlog","level":3,"title":"Step 1: Assess the Backlog","text":"

    Start in your main checkout. Ask the agent to analyze your tasks and group them by blast radius: which files and directories each task touches.

    /ctx-worktree\nLook at TASKS.md and group the pending tasks into 2-3 independent\ntracks based on which files they'd touch. Show me the grouping\nbefore creating anything.\n

    The agent reads TASKS.md, estimates file overlap, and proposes groups:

    Proposed worktree groups:\n\n  work/docs   # recipe updates, blog post (touches: docs/)\n  work/crypto # scratchpad encryption infra (touches: internal/crypto/)\n  work/tests  # journal test coverage (touches: internal/cli/journal/)\n
    ","path":["Recipes","Agents and Automation","Parallel Agent Development with Git Worktrees"],"tags":[]},{"location":"recipes/parallel-worktrees/#step-2-create-the-worktrees","level":3,"title":"Step 2: Create the Worktrees","text":"

    Once you approve the grouping, the agent creates worktrees as sibling directories:

    Create the worktrees for those three groups.\n

    Behind the scenes:

    git worktree add ../myproject-docs -b work/docs\ngit worktree add ../myproject-crypto -b work/crypto\ngit worktree add ../myproject-tests -b work/tests\n

    Each worktree is a full working copy on its own branch.

    ","path":["Recipes","Agents and Automation","Parallel Agent Development with Git Worktrees"],"tags":[]},{"location":"recipes/parallel-worktrees/#step-3-launch-agents","level":3,"title":"Step 3: Launch Agents","text":"

    Open a separate terminal (or editor window) for each worktree and start a Claude Code session:

    # Terminal 1\ncd ../myproject-docs\nclaude\n\n# Terminal 2\ncd ../myproject-crypto\nclaude\n\n# Terminal 3\ncd ../myproject-tests\nclaude\n

    Each agent sees the full project, including .context/, and can work independently.

    Do Not Initialize Context in Worktrees

    Do not run ctx init in worktrees: The .context/ directory is already tracked in git.

    ","path":["Recipes","Agents and Automation","Parallel Agent Development with Git Worktrees"],"tags":[]},{"location":"recipes/parallel-worktrees/#step-4-work","level":3,"title":"Step 4: Work","text":"

    Each agent works through its assigned tasks. They can read TASKS.md to know what's assigned to their track, use /ctx-next to pick the next item, and commit normally on their work/* branch.

    ","path":["Recipes","Agents and Automation","Parallel Agent Development with Git Worktrees"],"tags":[]},{"location":"recipes/parallel-worktrees/#step-5-merge-back","level":3,"title":"Step 5: Merge Back","text":"

    As each track finishes, return to the main checkout and merge:

    /ctx-worktree teardown docs\n

    The agent checks for uncommitted changes, merges work/docs into your current branch, removes the worktree, and deletes the branch.

    ","path":["Recipes","Agents and Automation","Parallel Agent Development with Git Worktrees"],"tags":[]},{"location":"recipes/parallel-worktrees/#step-6-handle-tasksmd-conflicts","level":3,"title":"Step 6: Handle TASKS.md Conflicts","text":"

    TASKS.md will almost always conflict when merging: Multiple agents will mark different tasks as [x]. This is expected and easy to resolve:

    Accept all completions from both sides. No task should go from [x] back to [ ]. The merge resolution is always additive.

    ","path":["Recipes","Agents and Automation","Parallel Agent Development with Git Worktrees"],"tags":[]},{"location":"recipes/parallel-worktrees/#step-7-cleanup","level":3,"title":"Step 7: Cleanup","text":"

    After all tracks are merged, verify everything is clean:

    /ctx-worktree list\n

    Should show only the main working tree. All work/* branches should be gone.

    ","path":["Recipes","Agents and Automation","Parallel Agent Development with Git Worktrees"],"tags":[]},{"location":"recipes/parallel-worktrees/#conversational-approach","level":2,"title":"Conversational Approach","text":"

    You don't have to use the skill directly for every step. These natural prompts work:

    • \"I have a big backlog. Can we split it across worktrees?\"
    • \"Which of these tasks can run in parallel without conflicts?\"
    • \"Merge the docs track back in.\"
    • \"Clean up all the worktrees, we're done.\"
    ","path":["Recipes","Agents and Automation","Parallel Agent Development with Git Worktrees"],"tags":[]},{"location":"recipes/parallel-worktrees/#what-works-differently-in-worktrees","level":2,"title":"What Works Differently in Worktrees","text":"

    The encryption key lives at ~/.ctx/.ctx.key (user-level, outside the project). Because all worktrees on the same machine share this path, ctx pad and ctx hook notify work in worktrees automatically — no special setup needed.

    One thing to watch:

    • Journal enrichment: ctx journal import and ctx journal enrich write files relative to the current working directory. Enrichments created in a worktree stay there and are discarded on teardown. Enrich journals on the main branch after merging: the JSONL session logs are always intact, and you don't lose any data.

    Context Files Will Merge Just Fine

    Tracked context files (TASKS.md, DECISIONS.md, LEARNINGS.md, CONVENTIONS.md) work normally; git handles them.

    ","path":["Recipes","Agents and Automation","Parallel Agent Development with Git Worktrees"],"tags":[]},{"location":"recipes/parallel-worktrees/#tips","level":2,"title":"Tips","text":"
    • 3-4 worktrees max. Beyond that, merge complexity outweighs the parallelism benefit. The skill enforces this limit.
    • Group by package or directory, not by priority. Two high-priority tasks that touch the same files must be in the same track.
    • TASKS.md will conflict on merge. This is normal. Accept all [x] completions: The resolution is always additive.
    • Don't run ctx init in worktrees. The .context/ directory is tracked in git. Running init overwrites shared context files.
    • Name worktrees by concern, not by number. work/docs and work/crypto are more useful than work/track-1 and work/track-2.
    • Commit frequently in each worktree. Smaller commits make merge conflicts easier to resolve.
    ","path":["Recipes","Agents and Automation","Parallel Agent Development with Git Worktrees"],"tags":[]},{"location":"recipes/parallel-worktrees/#next-up","level":2,"title":"Next Up","text":"

    Back to the beginning: Guide Your Agent →

    Or explore the full recipe list.

    ","path":["Recipes","Agents and Automation","Parallel Agent Development with Git Worktrees"],"tags":[]},{"location":"recipes/parallel-worktrees/#see-also","level":2,"title":"See Also","text":"
    • Running an Unattended AI Agent: for serial autonomous loops instead of parallel tracks
    • Tracking Work Across Sessions: managing the task backlog that feeds into parallelization
    • The Complete Session: the complete session workflow end-to-end, with examples
    ","path":["Recipes","Agents and Automation","Parallel Agent Development with Git Worktrees"],"tags":[]},{"location":"recipes/permission-snapshots/","level":1,"title":"Permission Snapshots","text":"","path":["Recipes","Maintenance","Permission Snapshots"],"tags":[]},{"location":"recipes/permission-snapshots/#the-problem","level":2,"title":"The Problem","text":"

    Claude Code's .claude/settings.local.json accumulates one-off permissions every time you click \"Allow\". After busy sessions, the file is full of session-specific entries that expand the agent's surface area beyond your intent.

    Since settings.local.json is .gitignored, there is no PR review or CI check. The file drifts independently on every machine, and there is no built-in way to reset to a known-good state.

    ","path":["Recipes","Maintenance","Permission Snapshots"],"tags":[]},{"location":"recipes/permission-snapshots/#tldr","level":2,"title":"TL;DR","text":"
    /ctx-permission-sanitize    # audit for dangerous patterns\nctx permission snapshot     # save golden image\n# ... sessions accumulate cruft ...\nctx permission restore      # reset to golden state\n
    ","path":["Recipes","Maintenance","Permission Snapshots"],"tags":[]},{"location":"recipes/permission-snapshots/#the-solution","level":2,"title":"The Solution","text":"

    Save a curated settings.local.json as a golden image, then restore from it to drop session-accumulated permissions. The golden file (.claude/settings.golden.json) is committed to version control and shared with the team.

    ","path":["Recipes","Maintenance","Permission Snapshots"],"tags":[]},{"location":"recipes/permission-snapshots/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Command/Skill Role in this workflow ctx permission snapshot Save settings.local.json as golden image ctx permission restore Reset settings.local.json from golden image /ctx-permission-sanitize Audit for dangerous patterns before snapshotting","path":["Recipes","Maintenance","Permission Snapshots"],"tags":[]},{"location":"recipes/permission-snapshots/#step-by-step","level":2,"title":"Step by Step","text":"","path":["Recipes","Maintenance","Permission Snapshots"],"tags":[]},{"location":"recipes/permission-snapshots/#1-curate-your-permissions","level":3,"title":"1. Curate Your Permissions","text":"

    Start with a clean settings.local.json. Optionally run /ctx-permission-sanitize to remove dangerous patterns first.

    Review the file manually. Every entry should be there because you decided it belongs, not because you clicked \"Allow\" once during debugging.

    See the Permission Hygiene recipe for recommended defaults.

    ","path":["Recipes","Maintenance","Permission Snapshots"],"tags":[]},{"location":"recipes/permission-snapshots/#2-take-a-snapshot","level":3,"title":"2. Take a Snapshot","text":"
    ctx permission snapshot\n# Saved golden image: .claude/settings.golden.json\n

    This creates a byte-for-byte copy. No re-encoding, no indent changes.

    ","path":["Recipes","Maintenance","Permission Snapshots"],"tags":[]},{"location":"recipes/permission-snapshots/#3-commit-the-golden-file","level":3,"title":"3. Commit the Golden File","text":"
    git add .claude/settings.golden.json\ngit commit -m \"Add permission golden image\"\n

    The golden file is not gitignored (unlike settings.local.json). This is intentional: it becomes a team-shared baseline.

    ","path":["Recipes","Maintenance","Permission Snapshots"],"tags":[]},{"location":"recipes/permission-snapshots/#4-auto-restore-at-the-session-start","level":3,"title":"4. Auto-Restore at the Session Start","text":"

    Add this instruction to your CLAUDE.md:

    ## On Session Start\n\nRun `ctx permission restore` to reset permissions to the golden image.\n

    The agent will restore the golden image at the start of every session, automatically dropping any permissions accumulated during previous sessions.

    ","path":["Recipes","Maintenance","Permission Snapshots"],"tags":[]},{"location":"recipes/permission-snapshots/#5-update-when-intentional-changes-are-made","level":3,"title":"5. Update When Intentional Changes Are Made","text":"

    When you add a new permanent permission (not a one-off debugging entry):

    # Edit settings.local.json with the new permission\n# Then update the golden image:\nctx permission snapshot\ngit add .claude/settings.golden.json\ngit commit -m \"Update permission golden image: add cargo test\"\n
    ","path":["Recipes","Maintenance","Permission Snapshots"],"tags":[]},{"location":"recipes/permission-snapshots/#conversational-approach","level":2,"title":"Conversational Approach","text":"

    You don't need to remember exact commands. These natural-language prompts work with agents trained on the ctx playbook:

    What you say What happens \"Save my current permissions as baseline\" Agent runs ctx permission snapshot \"Reset permissions to the golden image\" Agent runs ctx permission restore \"Clean up my permissions\" Agent runs /ctx-permission-sanitize then snapshot \"What permissions did I accumulate?\" Agent diffs local vs golden","path":["Recipes","Maintenance","Permission Snapshots"],"tags":[]},{"location":"recipes/permission-snapshots/#next-up","level":2,"title":"Next Up","text":"

    Turning Activity into Content →: Generate blog posts, changelogs, and journal sites from your project activity.

    ","path":["Recipes","Maintenance","Permission Snapshots"],"tags":[]},{"location":"recipes/permission-snapshots/#see-also","level":2,"title":"See Also","text":"
    • Permission Hygiene: recommended defaults and maintenance workflow
    • CLI Reference: ctx permission: full command documentation
    ","path":["Recipes","Maintenance","Permission Snapshots"],"tags":[]},{"location":"recipes/publishing/","level":1,"title":"Turning Activity into Content","text":"","path":["Recipes","Maintenance","Turning Activity into Content"],"tags":[]},{"location":"recipes/publishing/#the-problem","level":2,"title":"The Problem","text":"

    Your .context/ directory is full of decisions, learnings, and session history.

    Your git log tells the story of a project evolving.

    But none of this is visible to anyone outside your terminal.

    You want to turn this raw activity into:

    • a browsable journal site,
    • blog posts,
    • changelog posts.
    ","path":["Recipes","Maintenance","Turning Activity into Content"],"tags":[]},{"location":"recipes/publishing/#tldr","level":2,"title":"TL;DR","text":"
    ctx journal import --all             # 1. import sessions to markdown\n\n/ctx-journal-enrich-all             # 2. add metadata and tags\n\nctx journal site --serve            # 3. build and serve the journal\n\n/ctx-blog about the caching layer   # 4. draft a blog post\n/ctx-blog-changelog v0.1.0 \"v0.2\"   # 5. write a changelog post\n

    Read on for details on each stage.

    ","path":["Recipes","Maintenance","Turning Activity into Content"],"tags":[]},{"location":"recipes/publishing/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Tool Type Purpose ctx journal import Command Import session JSONL to editable markdown ctx journal site Command Generate a static site from journal entries ctx journal obsidian Command Generate an Obsidian vault from journal entries ctx serve Command Serve any zensical directory (default: journal) ctx site feed Command Generate Atom feed from finalized blog posts make journal Makefile Shortcut for import + site rebuild /ctx-journal-enrich-all Skill Full pipeline: import if needed, then batch-enrich (recommended) /ctx-journal-enrich Skill Add metadata, summaries, and tags to one entry /ctx-blog Skill Draft a blog post from recent project activity /ctx-blog-changelog Skill Write a themed post from a commit range","path":["Recipes","Maintenance","Turning Activity into Content"],"tags":[]},{"location":"recipes/publishing/#the-workflow","level":2,"title":"The Workflow","text":"","path":["Recipes","Maintenance","Turning Activity into Content"],"tags":[]},{"location":"recipes/publishing/#step-1-import-sessions-to-markdown","level":3,"title":"Step 1: Import Sessions to Markdown","text":"

    Raw session data lives as JSONL files in Claude Code's internal storage. The first step is converting these into readable, editable markdown.

    # Import all sessions from the current project\nctx journal import --all\n\n# Import from all projects (if you work across multiple repos)\nctx journal import --all --all-projects\n\n# Import a single session by ID or slug\nctx journal import abc123\nctx journal import gleaming-wobbling-sutherland\n

    Imported files land in .context/journal/ as individual Markdown files with session metadata and the full conversation transcript.

    --all is safe by default: Only new sessions are imported. Existing files are skipped. Use --regenerate to re-import existing files (YAML frontmatter is preserved). Use --regenerate --keep-frontmatter=false -y to regenerate everything including frontmatter.

    ","path":["Recipes","Maintenance","Turning Activity into Content"],"tags":[]},{"location":"recipes/publishing/#step-2-enrich-entries-with-metadata","level":3,"title":"Step 2: Enrich Entries with Metadata","text":"

    Raw entries have timestamps and conversations but lack the structured metadata that makes a journal searchable. Use /ctx-journal-enrich-all to process your entire backlog at once:

    /ctx-journal-enrich-all\n

    The skill finds all unenriched entries, filters out noise (suggestion sessions, very short sessions, multipart continuations), and processes each one by extracting titles, topics, technologies, and summaries from the conversation.

    For large backlogs (20+ entries), it can spawn subagents to process entries in parallel.

    To enrich a single entry instead:

    /ctx-journal-enrich twinkly-stirring-kettle\n/ctx-journal-enrich 2026-01-24\n

    After enrichment, an entry gains YAML frontmatter:

    ---\ntitle: \"Implement Redis caching for API endpoints\"\ndate: 2026-01-24\ntype: feature\noutcome: completed\ntopics:\n  - caching\n  - api-performance\ntechnologies:\n  - go\n  - redis\nkey_files:\n  - internal/api/middleware/cache.go\n  - internal/cache/redis.go\n---\n

    This metadata powers better navigation in the journal site:

    • titles replace slugs,
    • summaries appear in the index,
    • and search covers topics and technologies.
    ","path":["Recipes","Maintenance","Turning Activity into Content"],"tags":[]},{"location":"recipes/publishing/#step-3-generate-the-journal-site","level":3,"title":"Step 3: Generate the Journal Site","text":"

    With entries exported and enriched, generate the static site:

    # Generate site files\nctx journal site\n\n# Generate and build static HTML\nctx journal site --build\n\n# Generate and serve locally (opens at http://localhost:8000)\nctx journal site --serve\n\n# Custom output directory\nctx journal site --output ~/my-journal\n

    The site is generated in .context/journal-site/ by default. It uses zensical for static site generation (pipx install zensical).

    Or use the Makefile shortcut that combines export and rebuild:

    make journal\n

    This runs ctx journal import --all followed by ctx journal site --build, then reminds you to enrich before rebuilding. To serve the built site, use make journal-serve or ctx serve (serve-only, no regeneration).

    ","path":["Recipes","Maintenance","Turning Activity into Content"],"tags":[]},{"location":"recipes/publishing/#alternative-export-to-obsidian-vault","level":3,"title":"Alternative: Export to Obsidian Vault","text":"

    If you use Obsidian for knowledge management, generate a vault instead of (or alongside) the static site:

    ctx journal obsidian\nctx journal obsidian --output ~/vaults/ctx-journal\n

    This produces an Obsidian-ready directory with wikilinks, MOC (Map of Content) pages for topics/files/types, and a \"Related Sessions\" footer on each entry for graph connectivity. Open the output directory in Obsidian as a vault.

    The vault uses the same enriched source entries as the static site. Both outputs can coexist: The static site goes to .context/journal-site/, the vault to .context/journal-obsidian/.

    ","path":["Recipes","Maintenance","Turning Activity into Content"],"tags":[]},{"location":"recipes/publishing/#step-4-draft-blog-posts-from-activity","level":3,"title":"Step 4: Draft Blog Posts from Activity","text":"

    When your project reaches a milestone worth sharing, use /ctx-blog to draft a post from recent activity. The skill gathers context from multiple sources: git log, DECISIONS.md, LEARNINGS.md, completed tasks, and journal entries.

    /ctx-blog about the caching layer we just built\n/ctx-blog last week's refactoring work\n/ctx-blog lessons learned from the migration\n

    The skill gathers recent commits, decisions, and learnings; identifies a narrative arc; drafts an outline for approval; writes the full post; and saves it to docs/blog/YYYY-MM-DD-slug.md.

    Posts are written in first person with code snippets, commit references, and an honest discussion of what went wrong.

    The Output Is zensical-Flavored Markdown

    The blog skills produce Markdown tuned for a zensical site: topics: frontmatter (zensical's tag field), a docs/blog/ output path, and a banner image reference.

    The content is still standard Markdown and can be adapted to other static site generators, but the defaults assume a zensical project structure.

    ","path":["Recipes","Maintenance","Turning Activity into Content"],"tags":[]},{"location":"recipes/publishing/#step-5-write-changelog-posts-from-commit-ranges","level":3,"title":"Step 5: Write Changelog Posts from Commit Ranges","text":"

    For release notes or \"what changed\" posts, /ctx-blog-changelog takes a starting commit and a theme, then analyzes everything that changed:

    /ctx-blog-changelog 040ce99 \"building the journal system\"\n/ctx-blog-changelog HEAD~30 \"what's new in v0.2.0\"\n/ctx-blog-changelog v0.1.0 \"the road to v0.2.0\"\n

    The skill diffs the commit range, identifies the most-changed files, and constructs a narrative organized by theme rather than chronology, including a key commits table and before/after comparisons.

    ","path":["Recipes","Maintenance","Turning Activity into Content"],"tags":[]},{"location":"recipes/publishing/#step-6-generate-the-blog-feed","level":3,"title":"Step 6: Generate the Blog Feed","text":"

    After publishing blog posts, generate the Atom feed so readers and automation can discover new content:

    ctx site feed\n

    This scans docs/blog/ for finalized posts (reviewed_and_finalized: true), extracts title, date, author, topics, and summary, and writes a valid Atom 1.0 feed to site/feed.xml. The feed is also generated automatically as part of make site.

    The feed is available at ctx.ist/feed.xml.

    ","path":["Recipes","Maintenance","Turning Activity into Content"],"tags":[]},{"location":"recipes/publishing/#the-conversational-approach","level":2,"title":"The Conversational Approach","text":"

    You can also drive your publishing anytime with natural language:

    \"write about what we did this week\"\n\"turn today's session into a blog post\"\n\"make a changelog post covering everything since the last release\"\n\"enrich the last few journal entries\"\n

    The agent has full visibility into your .context/ state (tasks completed, decisions recorded, learnings captured), so its suggestions are grounded in what actually happened.

    ","path":["Recipes","Maintenance","Turning Activity into Content"],"tags":[]},{"location":"recipes/publishing/#putting-it-all-together","level":2,"title":"Putting It All Together","text":"

    The full pipeline from raw transcripts to published content:

    # 1. Import all sessions\nctx journal import --all\n\n# 2. In Claude Code: enrich all entries with metadata\n/ctx-journal-enrich-all\n\n# 3. Build and serve the journal site\nmake journal\nmake journal-serve\n\n# 3b. Or generate an Obsidian vault\nctx journal obsidian\n\n# 4. In Claude Code: draft a blog post\n/ctx-blog about the features we shipped this week\n\n# 5. In Claude Code: write a changelog post\n/ctx-blog-changelog v0.1.0 \"what's new in v0.2.0\"\n

    The journal pipeline is idempotent at every stage. You can rerun ctx journal import --all without losing enrichment. You can rebuild the site as many times as you want.

    ","path":["Recipes","Maintenance","Turning Activity into Content"],"tags":[]},{"location":"recipes/publishing/#tips","level":2,"title":"Tips","text":"
    • Import regularly. Run ctx journal import --all after each session to keep your journal current. Only new sessions are imported: Existing files are skipped by default.
    • Use batch enrichment. /ctx-journal-enrich-all filters noise (suggestion sessions, trivial sessions, multipart continuations) so you do not have to decide what is worth enriching.
    • Keep journal files in .gitignore. Session journals can contain sensitive data: file contents, commands, internal discussions, and error messages with stack traces. Add .context/journal/ and .context/journal-site/ to .gitignore.
    • Use /ctx-blog for narrative posts and /ctx-blog-changelog for release posts. One finds a story in recent activity, the other explains a commit range by theme.
    • Edit the drafts. These skills produce drafts, not final posts. Review the narrative, add your perspective, and remove anything that does not serve the reader.
    ","path":["Recipes","Maintenance","Turning Activity into Content"],"tags":[]},{"location":"recipes/publishing/#next-up","level":2,"title":"Next Up","text":"

    Running an Unattended AI Agent →: Set up an AI agent that works through tasks overnight without you at the keyboard.

    ","path":["Recipes","Maintenance","Turning Activity into Content"],"tags":[]},{"location":"recipes/publishing/#see-also","level":2,"title":"See Also","text":"
    • Session Journal: journal system, enrichment schema
    • CLI Reference: ctx journal: import, list, show session history
    • CLI Reference: ctx journal site: static site generation
    • CLI Reference: ctx journal obsidian: Obsidian vault export
    • CLI Reference: ctx serve: serve-only (no regeneration)
    • Browsing and Enriching Past Sessions: journal browsing workflow
    • The Complete Session: capturing context during a session
    ","path":["Recipes","Maintenance","Turning Activity into Content"],"tags":[]},{"location":"recipes/scratchpad-sync/","level":1,"title":"Syncing Scratchpad Notes Across Machines","text":"","path":["Recipes","Knowledge and Tasks","Syncing Scratchpad Notes Across Machines"],"tags":[]},{"location":"recipes/scratchpad-sync/#the-problem","level":2,"title":"The Problem","text":"

    You work from multiple machines: a desktop and a laptop, or a local machine and a remote dev server.

    The scratchpad entries are encrypted. The ciphertext (.context/scratchpad.enc) travels with git, but the encryption key lives outside the project at ~/.ctx/.ctx.key and is never committed. Without the key on each machine, you cannot read or write entries.

    How do you distribute the key and keep the scratchpad in sync?

    ","path":["Recipes","Knowledge and Tasks","Syncing Scratchpad Notes Across Machines"],"tags":[]},{"location":"recipes/scratchpad-sync/#tldr","level":2,"title":"TL;DR","text":"
    ctx init                                                  # 1. generates key\nscp ~/.ctx/.ctx.key user@machine-b:~/.ctx/.ctx.key        # 2. copy key\nchmod 600 ~/.ctx/.ctx.key                                 # 3. secure it\n# Normal git push/pull syncs the encrypted scratchpad.enc\n# On conflict: ctx pad resolve → rebuild → git add + commit\n

    Finding Your Key File

    The key is always at ~/.ctx/.ctx.key — the same fixed path on every machine.

    Treat the Key like a Password

    The scratchpad key is the only thing protecting your encrypted entries.

    Store a backup in a secure enclave such as a password manager, and treat it with the same care you would give passwords, certificates, or API tokens.

    Anyone with the key can decrypt every scratchpad entry.

    ","path":["Recipes","Knowledge and Tasks","Syncing Scratchpad Notes Across Machines"],"tags":[]},{"location":"recipes/scratchpad-sync/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Tool Type Purpose ctx init CLI command Initialize context (generates the key automatically) ctx pad add CLI command Add a scratchpad entry ctx pad rm CLI command Remove entries by stable ID (supports ranges) ctx pad edit CLI command Edit a scratchpad entry ctx pad resolve CLI command Show both sides of a merge conflict ctx pad merge CLI command Merge entries from other scratchpad files ctx pad import CLI command Bulk-import lines from a file ctx pad export CLI command Export blob entries to a directory scp Shell Copy the key file between machines git push / git pull Shell Sync the encrypted file via git/ctx-pad Skill Natural language interface to pad commands","path":["Recipes","Knowledge and Tasks","Syncing Scratchpad Notes Across Machines"],"tags":[]},{"location":"recipes/scratchpad-sync/#the-workflow","level":2,"title":"The Workflow","text":"","path":["Recipes","Knowledge and Tasks","Syncing Scratchpad Notes Across Machines"],"tags":[]},{"location":"recipes/scratchpad-sync/#step-1-initialize-on-machine-a","level":3,"title":"Step 1: Initialize on Machine A","text":"

    Run ctx init on your first machine. The key is created automatically at ~/.ctx/.ctx.key:

    ctx init\n# ...\n# Created ~/.ctx/.ctx.key (0600)\n# Created .context/scratchpad.enc\n

    The key lives outside the project directory and is never committed. The .enc file is tracked in git.

    Key Folder Change (v0.7.0+)

    If you built ctx from source or upgraded past v0.6.0, the key location changed to ~/.ctx/.ctx.key. Check these legacy folders and copy your key manually:

    # Old locations (pick whichever exists)\nls ~/.local/ctx/keys/        # pre-v0.7.0 user-level\nls .context/.ctx.key         # pre-v0.6.0 project-local\n\n# Copy to the new location\nmkdir -p ~/.ctx && chmod 700 ~/.ctx\ncp <old-key-path> ~/.ctx/.ctx.key\nchmod 600 ~/.ctx/.ctx.key\n
    ","path":["Recipes","Knowledge and Tasks","Syncing Scratchpad Notes Across Machines"],"tags":[]},{"location":"recipes/scratchpad-sync/#step-2-copy-the-key-to-machine-b","level":3,"title":"Step 2: Copy the Key to Machine B","text":"

    Use any secure transfer method. The key is always at ~/.ctx/.ctx.key:

    # scp - create the target directory first\nssh user@machine-b \"mkdir -p ~/.ctx && chmod 700 ~/.ctx\"\nscp ~/.ctx/.ctx.key user@machine-b:~/.ctx/.ctx.key\n\n# Or use a password manager, USB drive, etc.\n

    Set permissions on Machine B:

    chmod 600 ~/.ctx/.ctx.key\n

    Secure the Transfer

    The key is a raw 256-bit AES key. Anyone with the key can decrypt the scratchpad. Use an encrypted channel (SSH, password manager, vault).

    Never paste it in plaintext over email or chat.

    ","path":["Recipes","Knowledge and Tasks","Syncing Scratchpad Notes Across Machines"],"tags":[]},{"location":"recipes/scratchpad-sync/#step-3-normal-pushpull-workflow","level":3,"title":"Step 3: Normal Push/Pull Workflow","text":"

    The encrypted file is committed, so standard git sync works:

    # Machine A: add entries and push\nctx pad add \"staging API key: sk-test-abc123\"\ngit add .context/scratchpad.enc\ngit commit -m \"Update scratchpad\"\ngit push\n\n# Machine B: pull and read\ngit pull\nctx pad\n#   1. staging API key: sk-test-abc123\n

    Both machines have the same key, so both can decrypt the same .enc file.

    ","path":["Recipes","Knowledge and Tasks","Syncing Scratchpad Notes Across Machines"],"tags":[]},{"location":"recipes/scratchpad-sync/#step-4-read-and-write-from-either-machine","level":3,"title":"Step 4: Read and Write from Either Machine","text":"

    Once the key is distributed, all ctx pad commands work identically on both machines. Entries added on Machine A are visible on Machine B after a git pull, and vice versa.

    ","path":["Recipes","Knowledge and Tasks","Syncing Scratchpad Notes Across Machines"],"tags":[]},{"location":"recipes/scratchpad-sync/#step-5-handle-merge-conflicts","level":3,"title":"Step 5: Handle Merge Conflicts","text":"

    If both machines add entries between syncs, pulling will create a merge conflict on .context/scratchpad.enc. Git cannot merge binary (encrypted) content automatically.

    The fastest approach is ctx pad merge: It reads both conflict sides, deduplicates, and writes the union:

    # Extract theirs to a temp file, then merge it in\ngit show :3:.context/scratchpad.enc > /tmp/theirs.enc\ngit checkout --ours .context/scratchpad.enc\nctx pad merge /tmp/theirs.enc\n\n# Done: Commit the resolved scratchpad:\ngit add .context/scratchpad.enc\ngit commit -m \"Resolve scratchpad merge conflict\"\n

    Alternatively, use ctx pad resolve to inspect both sides manually:

    ctx pad resolve\n# === Ours (this machine) ===\n#   1. staging API key: sk-test-abc123\n#   2. check DNS after deploy\n#\n# === Theirs (incoming) ===\n#   1. staging API key: sk-test-abc123\n#   2. new endpoint: api.example.com/v2\n

    Then reconstruct the merged scratchpad:

    # Start fresh with all entries from both sides\nctx pad add \"staging API key: sk-test-abc123\"\nctx pad add \"check DNS after deploy\"\nctx pad add \"new endpoint: api.example.com/v2\"\n\n# Mark the conflict resolved\ngit add .context/scratchpad.enc\ngit commit -m \"Resolve scratchpad merge conflict\"\n
    ","path":["Recipes","Knowledge and Tasks","Syncing Scratchpad Notes Across Machines"],"tags":[]},{"location":"recipes/scratchpad-sync/#merge-conflict-walkthrough","level":2,"title":"Merge Conflict Walkthrough","text":"

    Here's a full scenario showing how conflicts arise and how to resolve them:

    1. Both machines start in sync (1 entry):

    Machine A: 1. staging API key: sk-test-abc123\nMachine B: 1. staging API key: sk-test-abc123\n

    2. Both add entries independently:

    Machine A adds: \"check DNS after deploy\"\nMachine B adds: \"new endpoint: api.example.com/v2\"\n

    3. Machine A pushes first. Machine B pulls and gets a conflict:

    git pull\n# CONFLICT (content): Merge conflict in .context/scratchpad.enc\n

    4. Machine B runs ctx pad resolve:

    ctx pad resolve\n# === Ours ===\n#   1. staging API key: sk-test-abc123\n#   2. new endpoint: api.example.com/v2\n#\n# === Theirs ===\n#   1. staging API key: sk-test-abc123\n#   2. check DNS after deploy\n

    5. Rebuild with entries from both sides and commit:

    # Clear and rebuild (or use the skill to guide you)\nctx pad add \"staging API key: sk-test-abc123\"\nctx pad add \"check DNS after deploy\"\nctx pad add \"new endpoint: api.example.com/v2\"\n\ngit add .context/scratchpad.enc\ngit commit -m \"Merge scratchpad: keep entries from both machines\"\n
    ","path":["Recipes","Knowledge and Tasks","Syncing Scratchpad Notes Across Machines"],"tags":[]},{"location":"recipes/scratchpad-sync/#conversational-approach","level":3,"title":"Conversational Approach","text":"

    When working with an AI assistant, you can resolve conflicts naturally:

    You: \"I have a scratchpad merge conflict. Can you resolve it?\"\n\nAgent: \"Let me extract theirs and merge it in.\"\n       [runs git show :3:.context/scratchpad.enc > /tmp/theirs.enc]\n       [runs git checkout --ours .context/scratchpad.enc]\n       [runs ctx pad merge /tmp/theirs.enc]\n       \"Merged 2 new entries (1 duplicate skipped). Want me to\n       commit the resolution?\"\n
    ","path":["Recipes","Knowledge and Tasks","Syncing Scratchpad Notes Across Machines"],"tags":[]},{"location":"recipes/scratchpad-sync/#tips","level":2,"title":"Tips","text":"
    • Back up the key: If you lose it, you lose access to all encrypted entries. Store a copy in your password manager.
    • One key per project: Each ctx init generates a unique key. Don't reuse keys across projects.
    • Keys work in worktrees: Because the key lives at ~/.ctx/.ctx.key (outside the project), git worktrees on the same machine share the key automatically. No special setup needed.
    • Plaintext fallback for non-sensitive projects: If encryption adds friction and you have nothing sensitive, set scratchpad_encrypt: false in .ctxrc. Merge conflicts become trivial text merges.
    • Never commit the key: The key is stored outside the project at ~/.ctx/.ctx.key and should never be copied into the repository.
    ","path":["Recipes","Knowledge and Tasks","Syncing Scratchpad Notes Across Machines"],"tags":[]},{"location":"recipes/scratchpad-sync/#next-up","level":2,"title":"Next Up","text":"

    Hook Output Patterns →: Choose the right output pattern for your Claude Code hooks.

    ","path":["Recipes","Knowledge and Tasks","Syncing Scratchpad Notes Across Machines"],"tags":[]},{"location":"recipes/scratchpad-sync/#see-also","level":2,"title":"See Also","text":"
    • Scratchpad: feature overview, all commands, when to use scratchpad vs context files
    • Persisting Decisions, Learnings, and Conventions: for structured knowledge that outlives the scratchpad
    ","path":["Recipes","Knowledge and Tasks","Syncing Scratchpad Notes Across Machines"],"tags":[]},{"location":"recipes/scratchpad-with-claude/","level":1,"title":"Using the Scratchpad","text":"","path":["Recipes","Knowledge and Tasks","Using the Scratchpad"],"tags":[]},{"location":"recipes/scratchpad-with-claude/#the-problem","level":2,"title":"The Problem","text":"

    During a session you accumulate quick notes, reminders, intermediate values, and sometimes sensitive tokens. They don't fit TASKS.md (not work items) or DECISIONS.md (not decisions). They don't have the structured fields that LEARNINGS.md requires.

    Without somewhere to put them, they get lost between sessions.

    How do you capture working memory that persists across sessions without polluting your structured context files?

    ","path":["Recipes","Knowledge and Tasks","Using the Scratchpad"],"tags":[]},{"location":"recipes/scratchpad-with-claude/#tldr","level":2,"title":"TL;DR","text":"
    ctx pad add \"check DNS propagation after deploy\"\nctx pad         # list entries\nctx pad show 1  # print entry (pipe-friendly)\n

    Entries are encrypted at rest and travel with git.

    Use the /ctx-pad skill to manage entries from inside your AI session.

    ","path":["Recipes","Knowledge and Tasks","Using the Scratchpad"],"tags":[]},{"location":"recipes/scratchpad-with-claude/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Tool Type Purpose ctx pad CLI command List all scratchpad entries ctx pad show N CLI command Output raw text of entry N (pipe-friendly) ctx pad add CLI command Add a new entry ctx pad edit CLI command Replace, append to, or prepend to an entry ctx pad add --file CLI command Ingest a file as a blob entry ctx pad show N --out CLI command Extract a blob entry to a file ctx pad rm CLI command Remove entries by stable ID (supports ranges) ctx pad normalize CLI command Reassign entry IDs as 1..N ctx pad mv CLI command Reorder entries ctx pad --tag CLI command Filter entries by tag ctx pad tags CLI command List all tags with counts ctx pad import CLI command Bulk-import lines from a file (or stdin) ctx pad export CLI command Export all blob entries to a directory /ctx-pad Skill Natural language interface to all pad commands","path":["Recipes","Knowledge and Tasks","Using the Scratchpad"],"tags":[]},{"location":"recipes/scratchpad-with-claude/#the-workflow","level":2,"title":"The Workflow","text":"","path":["Recipes","Knowledge and Tasks","Using the Scratchpad"],"tags":[]},{"location":"recipes/scratchpad-with-claude/#step-1-add-a-note-naturally","level":3,"title":"Step 1: Add a Note Naturally","text":"

    You don't need to remember any syntax. Just tell your assistant what to jot down:

    You: \"jot down: check DNS propagation after deploy\"\n\nAgent: \"Added to scratchpad:\n  1. check DNS propagation after deploy\"\n

    Behind the scenes, the agent runs ctx pad add \"check DNS propagation after deploy\".

    ","path":["Recipes","Knowledge and Tasks","Using the Scratchpad"],"tags":[]},{"location":"recipes/scratchpad-with-claude/#step-2-view-your-scratchpad","level":3,"title":"Step 2: View Your Scratchpad","text":"
    You: \"show my scratchpad\"\n\nAgent: \"Your scratchpad has 3 entries:\n  1. check DNS propagation after deploy\n  2. staging API endpoint: api.staging.example.com/v2\n  3. retry limit should be 5, not 3\"\n
    ","path":["Recipes","Knowledge and Tasks","Using the Scratchpad"],"tags":[]},{"location":"recipes/scratchpad-with-claude/#step-3-edit-an-entry","level":3,"title":"Step 3: Edit an Entry","text":"
    You: \"update entry 2 to include the new endpoint\"\n\nAgent: \"Updated entry 2:\n  was: staging API endpoint: api.staging.example.com/v2\n  now: staging API endpoint: api.prod.example.com/v2\"\n

    For partial edits, the agent can use --append or --prepend:

    You: \"append the port number to entry 2\"\n\nAgent: \"Updated entry 2:\n  staging API endpoint: api.prod.example.com/v2 :8443\"\n
    ","path":["Recipes","Knowledge and Tasks","Using the Scratchpad"],"tags":[]},{"location":"recipes/scratchpad-with-claude/#step-4-compose-entries","level":3,"title":"Step 4: Compose Entries","text":"

    Use ctx pad show N to pipe entry content into other operations:

    You: \"append the content of entry 3 to entry 1\"\n\nAgent: [runs ctx pad edit 1 --append \"$(ctx pad show 3)\"]\n       \"Done. Entry 1 is now:\n        check DNS propagation after deploy retry limit should be 5, not 3\"\n
    ","path":["Recipes","Knowledge and Tasks","Using the Scratchpad"],"tags":[]},{"location":"recipes/scratchpad-with-claude/#step-5-reorder-by-priority","level":3,"title":"Step 5: Reorder by Priority","text":"
    You: \"move entry 4 to the top\"\n\nAgent: [runs ctx pad mv 4 1]\n       \"Moved entry 4 to position 1. Scratchpad reordered.\"\n
    ","path":["Recipes","Knowledge and Tasks","Using the Scratchpad"],"tags":[]},{"location":"recipes/scratchpad-with-claude/#step-6-clean-up","level":3,"title":"Step 6: Clean Up","text":"
    You: \"remove entry 2, it's done\"\n\nAgent: [runs ctx pad rm 2]\n       \"Removed entry 2. 3 entries remaining.\n        (IDs are stable — remaining entries keep their IDs.)\"\n
    ","path":["Recipes","Knowledge and Tasks","Using the Scratchpad"],"tags":[]},{"location":"recipes/scratchpad-with-claude/#step-7-store-a-file-as-a-blob","level":3,"title":"Step 7: Store a File as a Blob","text":"

    The scratchpad can hold small files (up to 64 KB) as encrypted blob entries. The file is base64-encoded and stored alongside a label you provide:

    # Ingest a file: the first argument is the label\nctx pad add \"deploy config\" --file ./deploy.yaml\n\n# List shows the label with a [BLOB] marker\nctx pad\n#   1. check DNS propagation after deploy\n#   2. deploy config [BLOB]\n
    ","path":["Recipes","Knowledge and Tasks","Using the Scratchpad"],"tags":[]},{"location":"recipes/scratchpad-with-claude/#step-8-extract-a-blob","level":3,"title":"Step 8: Extract a Blob","text":"

    Use show --out to write the decoded file back to disk:

    # Write blob entry to a file\nctx pad show 2 --out ./recovered-deploy.yaml\n\n# Or print to stdout (for piping)\nctx pad show 2 | head -5\n

    Blob entries are encrypted identically to text entries: They're just base64-encoded before encryption. The --out flag decodes and writes the raw bytes.

    ","path":["Recipes","Knowledge and Tasks","Using the Scratchpad"],"tags":[]},{"location":"recipes/scratchpad-with-claude/#step-9-bulk-import-notes","level":3,"title":"Step 9: Bulk Import Notes","text":"

    When you have a file with many notes (one per line), import them in bulk instead of adding one at a time:

    # Import from a file: Each non-empty line becomes an entry\nctx pad import notes.txt\n\n# Or pipe from stdin\ngrep TODO *.go | ctx pad import -\n

    All entries are written in a single encrypt/write cycle, regardless of how many lines the file contains.

    ","path":["Recipes","Knowledge and Tasks","Using the Scratchpad"],"tags":[]},{"location":"recipes/scratchpad-with-claude/#step-10-export-blobs-to-disk","level":3,"title":"Step 10: Export Blobs to Disk","text":"

    Export all blob entries to a directory as individual files. Each blob's label becomes the filename:

    # Export to a directory (created if needed)\nctx pad export ./ideas\n\n# Preview what would be exported\nctx pad export --dry-run ./ideas\n\n# Force overwrite existing files\nctx pad export --force ./backup\n

    When a file already exists, a Unix timestamp is prepended to the filename to avoid collisions. Use --force to overwrite instead.

    ","path":["Recipes","Knowledge and Tasks","Using the Scratchpad"],"tags":[]},{"location":"recipes/scratchpad-with-claude/#step-11-tag-entries-for-organization","level":3,"title":"Step 11: Tag Entries for Organization","text":"

    Tags let you categorize entries without any structure beyond a #word token in the text. Add them when creating or editing entries:

    You: \"jot down: check DNS propagation #later\"\nYou: \"tag entry 2 as urgent\"\n\nAgent: [runs ctx pad edit 2 --tag urgent]\n       \"Updated entry 2.\"\n

    Filter your scratchpad by tag:

    You: \"show me everything tagged later\"\n\nAgent: [runs ctx pad --tag later]\n       \"  1. check DNS propagation #later\n        3. review PR feedback #later #ci\"\n

    Entry IDs are stable — they don't shift when other entries are deleted, so ctx pad rm 3 always targets the same entry regardless of deletions or active filters. Use ctx pad normalize to reassign IDs as 1..N.

    Exclude a tag with ~:

    ctx pad --tag ~later         # everything NOT tagged #later\nctx pad --tag later --tag ci # entries with BOTH tags (AND logic)\n

    See what tags you're using:

    You: \"what tags do I have?\"\n\nAgent: [runs ctx pad tags]\n       \"ci       1\n        later    2\n        urgent   1\"\n

    Tags work on blob entries too — they're extracted from the label:

    ctx pad add \"deploy config #prod\" --file ./deploy.yaml\nctx pad --tag prod\n#   1. deploy config #prod [BLOB]\n
    ","path":["Recipes","Knowledge and Tasks","Using the Scratchpad"],"tags":[]},{"location":"recipes/scratchpad-with-claude/#using-ctx-pad-in-a-session","level":2,"title":"Using /ctx-pad in a Session","text":"

    Invoke the /ctx-pad skill first, then describe what you want in natural language. Without the skill prefix, the agent may route your request to TASKS.md or another context file instead of the scratchpad.

    You: /ctx-pad jot down: check DNS after deploy\nYou: /ctx-pad show my scratchpad\nYou: /ctx-pad delete entry 3\n

    Once the skill is active, it translates intent into commands:

    You say (after /ctx-pad) What the agent does \"jot down: check DNS after deploy\" ctx pad add \"check DNS after deploy\" \"remember this: retry limit is 5\" ctx pad add \"retry limit is 5\" \"show my scratchpad\" / \"what's on my pad\" ctx pad \"show me entry 3\" ctx pad show 3 \"delete the third one\" / \"remove entry 3\" ctx pad rm 3 \"remove entries 3 through 5\" ctx pad rm 3-5 \"renumber my scratchpad\" ctx pad normalize \"change entry 2 to ...\" ctx pad edit 2 \"new text\" \"append ' +important' to entry 3\" ctx pad edit 3 --append \" +important\" \"prepend 'URGENT:' to entry 1\" ctx pad edit 1 --prepend \"URGENT: \" \"prioritize entry 4\" / \"move to the top\" ctx pad mv 4 1 \"import my notes from notes.txt\" ctx pad import notes.txt \"export all blobs to ./ideas\" ctx pad export ./ideas \"show entries tagged later\" ctx pad --tag later \"show everything except later\" ctx pad --tag ~later \"what tags do I have\" ctx pad tags \"tag entry 5 as urgent\" ctx pad edit 5 --tag urgent

    When in Doubt, Use the CLI Directly

    The ctx pad commands work the same whether you run them yourself or let the skill invoke them.

    If the agent misroutes a request, fall back to ctx pad add \"...\" in your terminal.

    ","path":["Recipes","Knowledge and Tasks","Using the Scratchpad"],"tags":[]},{"location":"recipes/scratchpad-with-claude/#when-to-use-scratchpad-vs-context-files","level":2,"title":"When to Use Scratchpad vs Context Files","text":"Situation Use Temporary reminders (\"check X after deploy\") Scratchpad Session-start reminders (\"remind me next session\") ctx remind Working values during debugging (ports, endpoints, counts) Scratchpad Sensitive tokens or API keys (short-term storage) Scratchpad Quick notes that don't fit anywhere else Scratchpad Work items with completion tracking TASKS.md Trade-offs between alternatives with rationale DECISIONS.md Reusable lessons with context/lesson/application LEARNINGS.md Codified patterns and standards CONVENTIONS.md

    Decision Guide

    • If it has structured fields (context, rationale, lesson, application), it belongs in a context file like DECISIONS.md or LEARNINGS.md.
    • If it's a work item you'll mark done, it belongs in TASKS.md.
    • If you want a message relayed VERBATIM at the next session start, it belongs in ctx remind.
    • If it's a quick note, reminder, or working value (especially if it's sensitive or ephemeral), it belongs on the scratchpad.

    Scratchpad Is Not a Junk Drawer

    The scratchpad is for working memory, not long-term storage.

    If a note is still relevant after several sessions, promote it:

    A persistent reminder becomes a task, a recurring value becomes a convention, a hard-won insight becomes a learning.

    ","path":["Recipes","Knowledge and Tasks","Using the Scratchpad"],"tags":[]},{"location":"recipes/scratchpad-with-claude/#tips","level":2,"title":"Tips","text":"
    • Entries persist across sessions: The scratchpad is committed (encrypted) to git, so entries survive session boundaries. Pick up where you left off.
    • Entries are numbered and reorderable: Use ctx pad mv to put high-priority items at the top.
    • ctx pad show N enables Unix piping: Outputs raw entry text with no numbering prefix. Compose with --append, --prepend, or other shell tools.
    • Never mention the key file contents to the AI: The agent knows how to use ctx pad commands but should never read or print the encryption key (~/.ctx/.ctx.key) directly.
    • Encryption is transparent: You interact with plaintext; the encryption/decryption happens automatically on every read/write.
    ","path":["Recipes","Knowledge and Tasks","Using the Scratchpad"],"tags":[]},{"location":"recipes/scratchpad-with-claude/#next-up","level":2,"title":"Next Up","text":"

    Syncing Scratchpad Notes Across Machines →: Distribute encryption keys and scratchpad data across environments.

    ","path":["Recipes","Knowledge and Tasks","Using the Scratchpad"],"tags":[]},{"location":"recipes/scratchpad-with-claude/#see-also","level":2,"title":"See Also","text":"
    • Scratchpad: feature overview, all commands, encryption details, plaintext override
    • Persisting Decisions, Learnings, and Conventions: for structured knowledge that outlives the scratchpad
    • The Complete Session: full session lifecycle showing how the scratchpad fits into the broader workflow
    ","path":["Recipes","Knowledge and Tasks","Using the Scratchpad"],"tags":[]},{"location":"recipes/session-archaeology/","level":1,"title":"Browsing and Enriching Past Sessions","text":"","path":["Recipes","Sessions","Browsing and Enriching Past Sessions"],"tags":[]},{"location":"recipes/session-archaeology/#the-problem","level":2,"title":"The Problem","text":"

    After weeks of AI-assisted development you have dozens of sessions scattered across JSONL files in ~/.claude/projects/. Finding the session where you debugged the Redis connection pool, or remembering what you decided about the caching strategy three Tuesdays ago, often means grepping raw JSON.

    There is no table of contents, no search, and no summaries.

    This recipe shows how to turn that raw session history into a browsable, searchable, and enriched journal site you can navigate in your browser.

    ","path":["Recipes","Sessions","Browsing and Enriching Past Sessions"],"tags":[]},{"location":"recipes/session-archaeology/#tldr","level":2,"title":"TL;DR","text":"

    Export and Generate

    ctx journal import --all\nctx journal site --serve\n

    Enrich

    /ctx-journal-enrich-all\n

    Rebuild

    ctx journal site --serve\n

    Read on for what each stage does and why.

    ","path":["Recipes","Sessions","Browsing and Enriching Past Sessions"],"tags":[]},{"location":"recipes/session-archaeology/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Tool Type Purpose ctx journal source Command List parsed sessions with metadata ctx journal source --show Command Inspect a specific session in detail ctx journal import Command Import sessions to editable journal Markdown ctx journal site Command Generate a static site from journal entries ctx journal obsidian Command Generate an Obsidian vault from journal entries ctx journal schema check Command Validate JSONL files and report schema drift ctx journal schema dump Command Print the embedded JSONL schema definition ctx serve Command Serve any zensical directory (default: journal) /ctx-history Skill Browse sessions inside your AI assistant /ctx-journal-enrich Skill Add frontmatter metadata to a single entry /ctx-journal-enrich-all Skill Full pipeline: import if needed, then batch-enrich","path":["Recipes","Sessions","Browsing and Enriching Past Sessions"],"tags":[]},{"location":"recipes/session-archaeology/#the-workflow","level":2,"title":"The Workflow","text":"

    The session journal follows a four-stage pipeline.

    Each stage is idempotent and safe to re-run:

    By default, each stage skips entries that have already been processed.

    import -> enrich -> rebuild\n
    Stage Tool What it does Skips if Where Import ctx journal import --all Converts session JSONL to Markdown File already exists (safe default) CLI or agent Enrich /ctx-journal-enrich-all Adds frontmatter, summaries, topic tags Frontmatter already present Agent only Rebuild ctx journal site --build Generates browsable static HTML N/A CLI only Obsidian ctx journal obsidian Generates Obsidian vault with wikilinks N/A CLI only

    Where Do You Run Each Stage?

    Import (Steps 1 to 3) works equally well from the terminal or inside your AI assistant via /ctx-history. The CLI is fine here: the agent adds no special intelligence, it just runs the same command.

    Enrich (Step 4) requires the agent: it reads conversation content and produces structured metadata.

    Rebuild and serve (Step 5) is a terminal operation that starts a long-running server.

    ","path":["Recipes","Sessions","Browsing and Enriching Past Sessions"],"tags":[]},{"location":"recipes/session-archaeology/#step-1-list-your-sessions","level":3,"title":"Step 1: List Your Sessions","text":"

    Start by seeing what sessions exist for the current project:

    ctx journal source\n

    Sample output:

    Sessions (newest first)\n=======================\n\n  Slug                           Project   Date         Duration  Turns  Tokens\n  gleaming-wobbling-sutherland   ctx       2026-02-07   1h 23m    47     82,341\n  twinkly-stirring-kettle        ctx       2026-02-06   0h 45m    22     38,102\n  bright-dancing-hopper          ctx       2026-02-05   2h 10m    63     124,500\n  quiet-flowing-dijkstra         ctx       2026-02-04   0h 18m    11     15,230\n  ...\n

    Slugs Look Cryptic?

    These auto-generated slugs (gleaming-wobbling-sutherland) are hard to recognize later.

    Use /ctx-journal-enrich to add human-readable titles, topic tags, and summaries to exported journal entries, making them easier to find.

    Filter by project or tool if you work across multiple codebases:

    ctx journal source --project ctx --limit 10\nctx journal source --tool claude-code\nctx journal source --all-projects\n
    ","path":["Recipes","Sessions","Browsing and Enriching Past Sessions"],"tags":[]},{"location":"recipes/session-archaeology/#step-2-inspect-a-specific-session","level":3,"title":"Step 2: Inspect a Specific Session","text":"

    Before exporting everything, inspect a single session to see its metadata and conversation summary:

    ctx journal source --show --latest\n

    Or look up a specific session by its slug, partial ID, or UUID:

    ctx journal source --show gleaming-wobbling-sutherland\nctx journal source --show twinkly\nctx journal source --show abc123\n

    Add --full to see the complete message content instead of the summary view:

    ctx journal source --show --latest --full\n

    This is useful for checking what happened before deciding whether to export and enrich it.

    ","path":["Recipes","Sessions","Browsing and Enriching Past Sessions"],"tags":[]},{"location":"recipes/session-archaeology/#step-3-import-sessions-to-the-journal","level":3,"title":"Step 3: Import Sessions to the Journal","text":"

    Import converts raw session data into editable Markdown files in .context/journal/:

    # Import all sessions from the current project\nctx journal import --all\n\n# Import a single session\nctx journal import gleaming-wobbling-sutherland\n\n# Include sessions from all projects\nctx journal import --all --all-projects\n

    --keep-frontmatter=false Discards Enrichments

    --keep-frontmatter=false discards enriched YAML frontmatter during regeneration.

    Back up your journal before using this flag.

    Each imported file contains session metadata (date, time, duration, model, project, git branch), a tool usage summary, and the full conversation transcript.

    Re-importing is safe. Running ctx journal import --all only imports new sessions: Existing files are never touched. Use --dry-run to preview what would be imported without writing anything.

    To re-import existing files (e.g., after a format improvement), use --regenerate: Conversation content is regenerated while preserving any YAML frontmatter you or the enrichment skill has added. You'll be prompted before any files are overwritten.

    --regenerate Replaces the Markdown Body

    --regenerate preserves YAML frontmatter but replaces the entire Markdown body with freshly generated content from the source JSONL.

    If you manually edited the conversation transcript (added notes, redacted sensitive content, restructured sections), those edits will be lost.

    BACK UP YOUR JOURNAL FIRST.

    To protect entries you've hand-edited, you can explicitly lock them:

    ctx journal lock <pattern>\n

    Locked entries are always skipped, regardless of flags.

    If you prefer to add locked: true directly in frontmatter during enrichment, run ctx journal sync to propagate the lock state to .state.json:

    ctx journal sync\n

    See ctx journal lock --help and ctx journal sync --help for details.

    ","path":["Recipes","Sessions","Browsing and Enriching Past Sessions"],"tags":[]},{"location":"recipes/session-archaeology/#step-4-enrich-with-metadata","level":3,"title":"Step 4: Enrich with Metadata","text":"

    Raw imports have timestamps and transcripts but lack the semantic metadata that makes sessions searchable: topics, technology tags, outcome status, and summaries. The /ctx-journal-enrich* skills add this structured frontmatter.

    Locked entries are skipped by enrichment skills, just as they are by import. Lock entries you want to protect before running batch enrichment.

    Batch enrichment (recommended):

    /ctx-journal-enrich-all\n

    The skill finds all unenriched entries, filters out noise (suggestion sessions, very short sessions, multipart continuations), and processes each one by extracting titles, topics, technologies, and summaries from the conversation.

    It shows you a grouped summary before applying changes so you can scan quickly rather than reviewing one by one.

    For large backlogs (20+ entries), the skill can spawn subagents to process entries in parallel.

    Single-entry enrichment:

    /ctx-journal-enrich twinkly\n/ctx-journal-enrich 2026-02-06\n

    Each enriched entry gets YAML frontmatter like this:

    ---\ntitle: \"Implement Redis caching middleware\"\ndate: 2026-02-06\ntype: feature\noutcome: completed\ntopics:\n  - caching\n  - api-performance\ntechnologies:\n  - go\n  - redis\nlibraries:\n  - go-redis/redis\nkey_files:\n  - internal/cache/redis.go\n  - internal/api/middleware/cache.go\n---\n

    The skill also generates a summary and can extract decisions, learnings, and tasks mentioned during the session.

    ","path":["Recipes","Sessions","Browsing and Enriching Past Sessions"],"tags":[]},{"location":"recipes/session-archaeology/#step-5-generate-and-serve-the-site","level":3,"title":"Step 5: Generate and Serve the Site","text":"

    With imported and enriched journal files, generate the static site:

    # Generate site structure only\nctx journal site\n\n# Generate and build static HTML\nctx journal site --build\n\n# Generate, build, and serve locally\nctx journal site --serve\n

    Then open http://localhost:8000 to browse.

    The site includes a date-sorted index, individual session pages with full conversations, search (press /), dark mode, and enriched titles in the navigation when frontmatter exists.

    You can also serve an already-generated site without regenerating using ctx serve (serve-only, no regeneration).

    The site generator requires zensical (pipx install zensical).

    ","path":["Recipes","Sessions","Browsing and Enriching Past Sessions"],"tags":[]},{"location":"recipes/session-archaeology/#where-does-agent-add-value","level":2,"title":"Where Does the Agent Add Value?","text":"

    Export, list, and show are mechanical: The agent runs the same CLI commands you would, so you can stay in your terminal for those.

    The agent earns its keep in enrich. It reads conversation content, understands what happened, and produces structured metadata. That is agent work, not CLI work.

    You can also ask your agent to browse sessions conversationally instead of remembering flags:

    What did we work on last week?\nShow me the session about Redis.\nImport everything to the journal.\n

    This is convenient but not required: ctx journal source gives you the same inventory.

    Where the agent genuinely helps is chaining the pipeline:

    You:   What happened last Tuesday?\nAgent: Last Tuesday you worked on two sessions:\n       - bright-dancing-hopper (2h 10m): refactored the middleware\n         pipeline and added Redis caching\n       - quiet-flowing-dijkstra (18m): quick fix for a nil pointer\n         in the config loader\n       Want me to export and enrich them?\nYou:   Yes, do it.\nAgent: Exports both, enriches, then proposes frontmatter.\n

    The value is staying in one context while the agent runs import -> enrich without you manually switching tools.

    ","path":["Recipes","Sessions","Browsing and Enriching Past Sessions"],"tags":[]},{"location":"recipes/session-archaeology/#putting-it-all-together","level":2,"title":"Putting It All Together","text":"

    A typical pipeline from raw sessions to a browsable site:

    # Terminal: import and generate\nctx journal import --all\nctx journal site --serve\n
    # AI assistant: enrich\n/ctx-journal-enrich-all\n
    # Terminal: rebuild with enrichments\nctx journal site --serve\n

    If your project includes Makefile.ctx (deployed by ctx init), use make journal to combine import and rebuild stages. Then enrich inside Claude Code, then make journal again to pick up enrichments.

    ","path":["Recipes","Sessions","Browsing and Enriching Past Sessions"],"tags":[]},{"location":"recipes/session-archaeology/#session-retention-and-cleanup","level":2,"title":"Session Retention and Cleanup","text":"

    Claude Code does not keep JSONL transcripts forever. Understanding its cleanup behavior helps you avoid losing session history.

    ","path":["Recipes","Sessions","Browsing and Enriching Past Sessions"],"tags":[]},{"location":"recipes/session-archaeology/#default-behavior","level":3,"title":"Default Behavior","text":"

    Claude Code retains session transcripts for approximately 30 days. After that, JSONL files are automatically deleted during cleanup. Once deleted, ctx journal can no longer see those sessions - the data is gone.

    ","path":["Recipes","Sessions","Browsing and Enriching Past Sessions"],"tags":[]},{"location":"recipes/session-archaeology/#the-cleanupperioddays-setting","level":3,"title":"The cleanupPeriodDays Setting","text":"

    Claude Code exposes a cleanupPeriodDays setting in its configuration (~/.claude/settings.json) that controls retention:

    Value Behavior 30 (default) Transcripts older than 30 days are deleted 60, 90, etc. Extends the retention window 0 Disables writing new transcripts entirely - not \"keep forever\"

    Setting cleanupPeriodDays To 0

    Setting this to 0 does not mean \"never delete.\" It disables transcript creation altogether. No new JSONL files are written, which means ctx journal sees nothing new. This is rarely what you want.

    ","path":["Recipes","Sessions","Browsing and Enriching Past Sessions"],"tags":[]},{"location":"recipes/session-archaeology/#why-journal-import-matters","level":3,"title":"Why Journal Import Matters","text":"

    The journal import pipeline (Steps 1-4 above) is your archival mechanism. Imported Markdown files in .context/journal/ persist independently of Claude Code's cleanup cycle. Even after the source JSONL files are deleted, your journal entries remain.

    Recommendation: import regularly - weekly, or after any session worth revisiting. A quick ctx journal import --all takes seconds and ensures nothing falls through the 30-day window.

    ","path":["Recipes","Sessions","Browsing and Enriching Past Sessions"],"tags":[]},{"location":"recipes/session-archaeology/#quick-archival-checklist","level":3,"title":"Quick Archival Checklist","text":"
    1. Run ctx journal import --all at least weekly
    2. Enrich high-value sessions with /ctx-journal-enrich before the details fade from your own memory
    3. Lock enriched entries (ctx journal lock <pattern>) to protect them from accidental regeneration
    4. Rebuild the journal site periodically to keep it current
    ","path":["Recipes","Sessions","Browsing and Enriching Past Sessions"],"tags":[]},{"location":"recipes/session-archaeology/#tips","level":2,"title":"Tips","text":"
    • Start with /ctx-history inside your AI assistant. If you want to quickly check what happened in a recent session without leaving your editor, /ctx-history lets you browse interactively without importing.
    • Large sessions may be split automatically. Sessions with 200+ messages can be split into multiple parts (session-abc123.md, session-abc123-p2.md, session-abc123-p3.md) with navigation links between them. The site generator can handle this.
    • Suggestion sessions can be separated. Claude Code can generate short suggestion sessions for autocomplete. These may appear under a separate section in the site index, so they do not clutter your main session list.
    • Your agent is a good session browser. You do not need to remember slugs, dates, or flags. Ask \"what did we do yesterday?\" or \"find the session about Redis\" and it can map the question to recall commands.

    Journal Files Are Sensitive

    Journal files MUST be .gitignored.

    Session transcripts can contain sensitive data such as file contents, commands, error messages with stack traces, and potentially API keys.

    Add .context/journal/, .context/journal-site/, and .context/journal-obsidian/ to your .gitignore.

    ","path":["Recipes","Sessions","Browsing and Enriching Past Sessions"],"tags":[]},{"location":"recipes/session-archaeology/#next-up","level":2,"title":"Next Up","text":"

    Persisting Decisions, Learnings, and Conventions →: Record decisions, learnings, and conventions so they survive across sessions.

    ","path":["Recipes","Sessions","Browsing and Enriching Past Sessions"],"tags":[]},{"location":"recipes/session-archaeology/#see-also","level":2,"title":"See Also","text":"
    • The Complete Session: where session saving fits in the daily workflow
    • Turning Activity into Content: generating blog posts from session history
    • Session Journal: full documentation of the journal system
    • CLI Reference: ctx journal: all journal subcommands and flags
    • CLI Reference: ctx serve: serve-only (no regeneration)
    • Context Files: the .context/ directory structure
    ","path":["Recipes","Sessions","Browsing and Enriching Past Sessions"],"tags":[]},{"location":"recipes/session-ceremonies/","level":1,"title":"Session Ceremonies","text":"","path":["Recipes","Sessions","Session Ceremonies"],"tags":[]},{"location":"recipes/session-ceremonies/#the-problem","level":2,"title":"The Problem","text":"

    Sessions have two critical moments: the start and the end.

    • At the start, you need the agent to load context and confirm it knows what is going on.
    • At the end, you need to capture whatever the session produced before the conversation disappears.

    Most ctx skills work conversationally: \"jot down: check DNS after deploy\" is as good as /ctx-pad add \"check DNS after deploy\". But session boundaries are different. They are well-defined moments with specific requirements, and partial execution is costly.

    If the agent only half-loads context at the start, it works from stale assumptions. If it only half-persists at the end, learnings and decisions are lost.

    This Is One of the Few Times Being Explicit Matters

    Session ceremonies are the two bookend skills that mark these boundaries.

    They are the exception to the conversational rule:

    Invoke /ctx-remember and /ctx-wrap-up explicitly as slash commands.

    ","path":["Recipes","Sessions","Session Ceremonies"],"tags":[]},{"location":"recipes/session-ceremonies/#tldr","level":2,"title":"TL;DR","text":"

    Start: /ctx-remember: load context, get a structured readback.

    End: /ctx-wrap-up: review session, propose candidates, persist approved items.

    Use the slash commands, not conversational triggers, for completeness.

    ","path":["Recipes","Sessions","Session Ceremonies"],"tags":[]},{"location":"recipes/session-ceremonies/#explicit-invocation-matters","level":2,"title":"Explicit Invocation Matters","text":"

    Most ctx skills encourage natural language. These two are different:

    Well-defined moments: Sessions have clear boundaries. A slash command marks the boundary unambiguously.

    Ambiguity risk: \"Do you remember?\" could mean many things. /ctx-remember means exactly one thing: load context and present a structured readback.

    Completeness: Conversational triggers risk partial execution. The agent might load some files but skip the session history, or persist one learning but forget to check for uncommitted changes. The slash command runs the full ceremony.

    Muscle memory: Typing /ctx-remember at session start and /ctx-wrap-up at session end becomes a habit, like opening and closing braces.

    ","path":["Recipes","Sessions","Session Ceremonies"],"tags":[]},{"location":"recipes/session-ceremonies/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Tool Type Purpose /ctx-remember Skill Load context and present structured readback /ctx-wrap-up Skill Gather session signal, propose and persist context /ctx-commit Skill Commit with context capture (offered by wrap-up) ctx agent CLI Load token-budgeted context packet ctx journal source CLI List recent sessions ctx add CLI Persist learnings, decisions, conventions, tasks","path":["Recipes","Sessions","Session Ceremonies"],"tags":[]},{"location":"recipes/session-ceremonies/#session-start-ctx-remember","level":2,"title":"Session Start: /ctx-remember","text":"

    Invoke at the beginning of every session:

    /ctx-remember\n

    The skill silently:

    1. Loads the context packet via ctx agent --budget 4000
    2. Reads TASKS.md, DECISIONS.md, LEARNINGS.md
    3. Checks recent sessions via ctx journal source --limit 3

    Then presents a structured readback with four sections:

    • Last session: topic, date, what was accomplished
    • Active work: pending and in-progress tasks
    • Recent context: 1-2 relevant decisions or learnings
    • Next step: suggestion or question about what to focus on

    The readback should feel like recall, not a file system tour. If the agent says \"Let me check if there are files...\" instead of a confident summary, the skill is not working correctly.

    What about 'do you remember?'

    The conversational trigger still works. But /ctx-remember guarantees the full ceremony runs:

    • context packet,
    • file reads,
    • session history,
    • and all four readback sections.

    The conversational version may cut corners.

    ","path":["Recipes","Sessions","Session Ceremonies"],"tags":[]},{"location":"recipes/session-ceremonies/#session-end-ctx-wrap-up","level":2,"title":"Session End: /ctx-wrap-up","text":"

    Invoke before ending a session where meaningful work happened:

    /ctx-wrap-up\n

    The skill runs four phases:

    ","path":["Recipes","Sessions","Session Ceremonies"],"tags":[]},{"location":"recipes/session-ceremonies/#phase-1-gather-signal","level":3,"title":"Phase 1: Gather Signal","text":"

    Silently checks git diff --stat, recent commits, and scans the conversation for themes: architectural choices, gotchas, patterns established, follow-up work identified.

    ","path":["Recipes","Sessions","Session Ceremonies"],"tags":[]},{"location":"recipes/session-ceremonies/#phase-2-propose-candidates","level":3,"title":"Phase 2: Propose Candidates","text":"

    Presents a structured list grouped by type:

    ## Session Wrap-Up\n\n### Learnings (2 candidates)\n1. **PyMdownx details extension breaks pre/code rendering**\n   - Context: Journal site showed broken code blocks inside details tags\n   - Lesson: details extension wraps content in <details> HTML, which\n     interferes with <pre><code> rendering\n   - Application: Use fenced code blocks instead of indented code inside\n     admonitions when details extension is active\n\n2. **Hook subprocesses cannot propagate env vars**\n   - Context: Set env var in PreToolUse hook, invisible in main session\n   - Lesson: Hooks execute in child processes; env changes don't propagate\n   - Application: Use tombstone files for hook-to-session communication\n\n### Decisions (1 candidate)\n1. **File-based cooldown tokens over env vars**\n   - Context: Need session-scoped cooldown for ctx agent auto-loading\n   - Rationale: File tokens survive across processes, simpler than IPC\n   - Consequence: Tombstone files accumulate in /tmp; need TTL cleanup\n\nPersist all? Or select which to keep?\n

    Each candidate has complete structured fields, not just a title. Empty categories are omitted.

    ","path":["Recipes","Sessions","Session Ceremonies"],"tags":[]},{"location":"recipes/session-ceremonies/#phase-3-persist","level":3,"title":"Phase 3: Persist","text":"

    After you approve (all, some, or modified), the skill runs the appropriate ctx add commands and reports results.

    ","path":["Recipes","Sessions","Session Ceremonies"],"tags":[]},{"location":"recipes/session-ceremonies/#nudge-suppression","level":3,"title":"Nudge Suppression","text":"

    After persisting, the skill marks the session as wrapped up via ctx system mark-wrapped-up. This suppresses context checkpoint nudges for 2 hours so the wrap-up ceremony itself does not trigger noisy reminders.

    ","path":["Recipes","Sessions","Session Ceremonies"],"tags":[]},{"location":"recipes/session-ceremonies/#phase-4-commit-offer","level":3,"title":"Phase 4: Commit Offer","text":"

    If there are uncommitted changes, offers to run /ctx-commit. Does not auto-commit.

    ","path":["Recipes","Sessions","Session Ceremonies"],"tags":[]},{"location":"recipes/session-ceremonies/#when-to-skip","level":2,"title":"When to Skip","text":"

    Not every session needs ceremonies.

    Skip /ctx-remember when:

    • You are doing a quick one-off lookup (reading a file, checking a value)
    • Context was already loaded this session via /ctx-agent
    • You are continuing immediately after a previous session and context is still fresh

    Skip /ctx-wrap-up when:

    • Nothing meaningful happened (only read files, answered a question)
    • You already persisted everything manually during the session
    • The session was trivial (typo fix, quick config change)

    A good heuristic: if the session produced something a future session should know about, run /ctx-wrap-up. If not, just close.

    ","path":["Recipes","Sessions","Session Ceremonies"],"tags":[]},{"location":"recipes/session-ceremonies/#quick-reference","level":2,"title":"Quick Reference","text":"
    # Session start\n/ctx-remember\n\n# ... do work ...\n\n# Session end\n/ctx-wrap-up\n

    That is the complete ceremony. Two commands, bookending your session.

    ","path":["Recipes","Sessions","Session Ceremonies"],"tags":[]},{"location":"recipes/session-ceremonies/#relationship-to-other-skills","level":2,"title":"Relationship to Other Skills","text":"Skill When Purpose /ctx-remember Session start Load and confirm context /ctx-reflect Mid-session breakpoints Checkpoint at milestones /ctx-wrap-up Session end Full session review and persist /ctx-commit After completing work Commit with context capture

    /ctx-reflect is for mid-session checkpoints. /ctx-wrap-up is for end-of-session: it is more thorough, covers the full session arc, and includes the commit offer. If you already ran /ctx-reflect recently, /ctx-wrap-up avoids proposing the same candidates again.

    ","path":["Recipes","Sessions","Session Ceremonies"],"tags":[]},{"location":"recipes/session-ceremonies/#tips","level":2,"title":"Tips","text":"
    • Make it a habit: The value of ceremonies compounds over sessions. Each /ctx-wrap-up makes the next /ctx-remember richer.
    • Trust the candidates: The agent scans the full conversation. It often catches learnings you forgot about.
    • Edit before approving: If a proposed candidate is close but not quite right, tell the agent what to change. Do not settle for a vague learning when a precise one is possible.
    • Do not force empty ceremonies: If /ctx-wrap-up finds nothing worth persisting, that is fine. A session that only read files and answered questions does not need artificial learnings.
    ","path":["Recipes","Sessions","Session Ceremonies"],"tags":[]},{"location":"recipes/session-ceremonies/#next-up","level":2,"title":"Next Up","text":"

    Browsing and Enriching Past Sessions →: Export session history to a browsable journal and enrich entries with metadata.

    ","path":["Recipes","Sessions","Session Ceremonies"],"tags":[]},{"location":"recipes/session-ceremonies/#see-also","level":2,"title":"See Also","text":"
    • The Complete Session: the full session workflow that ceremonies bookend
    • Persisting Decisions, Learnings, and Conventions: deep dive on what gets persisted during wrap-up
    • Detecting and Fixing Drift: keeping context files accurate between ceremonies
    • Pausing Context Hooks: skip ceremonies entirely for quick tasks that don't need them
    ","path":["Recipes","Sessions","Session Ceremonies"],"tags":[]},{"location":"recipes/session-changes/","level":1,"title":"Reviewing Session Changes","text":"","path":["Reviewing Session Changes"],"tags":[]},{"location":"recipes/session-changes/#what-changed-while-you-were-away","level":2,"title":"What Changed While You Were Away?","text":"

    Between sessions, teammates commit code, context files get updated, and decisions pile up. ctx change gives you a single-command summary of everything that moved since your last session.

    ","path":["Reviewing Session Changes"],"tags":[]},{"location":"recipes/session-changes/#quick-start","level":2,"title":"Quick Start","text":"
    # Auto-detects your last session and shows what changed\nctx change\n\n# Check what changed in the last 48 hours\nctx change --since 48h\n\n# Check since a specific date\nctx change --since 2026-03-10\n
    ","path":["Reviewing Session Changes"],"tags":[]},{"location":"recipes/session-changes/#how-reference-time-works","level":2,"title":"How Reference Time Works","text":"

    ctx change needs a reference point to compare against. It tries these sources in order:

    1. --since flag: explicit duration (24h, 72h) or date (2026-03-10, RFC3339 timestamp)
    2. Session markers: ctx-loaded-* files in .context/state/; picks the second-most-recent (your previous session start)
    3. Event log: last context-load-gate event from .context/state/events.jsonl
    4. Fallback: 24 hours ago

    The marker-based detection means ctx change usually just works without any flags: it knows when you last loaded context and shows everything after that.

    ","path":["Reviewing Session Changes"],"tags":[]},{"location":"recipes/session-changes/#what-it-reports","level":2,"title":"What It Reports","text":"","path":["Reviewing Session Changes"],"tags":[]},{"location":"recipes/session-changes/#context-file-changes","level":3,"title":"Context File Changes","text":"

    Any .md file in .context/ modified after the reference time:

    ### Context File Changes\n- `TASKS.md` - modified 2026-03-11 14:30\n- `DECISIONS.md` - modified 2026-03-11 09:15\n
    ","path":["Reviewing Session Changes"],"tags":[]},{"location":"recipes/session-changes/#code-changes","level":3,"title":"Code Changes","text":"

    Git activity since the reference time:

    ### Code Changes\n- **12 commits** since reference point\n- **Latest**: Fix journal enrichment ordering\n- **Directories touched**: internal, docs, specs\n- **Authors**: jose, claude\n
    ","path":["Reviewing Session Changes"],"tags":[]},{"location":"recipes/session-changes/#integrating-into-session-start","level":2,"title":"Integrating into Session Start","text":"

    Pair ctx change with the /ctx-remember ceremony for a complete session-start picture:

    # 1. Load context (this also creates the session marker)\nctx agent --budget 4000\n\n# 2. See what changed since your last session\nctx change\n

    Or script it:

    # .context/hooks/session-start.sh\nctx agent --budget 4000\necho \"---\"\nctx change\n
    ","path":["Reviewing Session Changes"],"tags":[]},{"location":"recipes/session-changes/#team-workflows","level":2,"title":"Team Workflows","text":"

    When multiple people share a .context/ directory, ctx change shows who changed what:

    # After pulling from remote\ngit pull\nctx change --since 72h\n

    This surfaces context file changes from teammates that you might otherwise miss in the commit log.

    ","path":["Reviewing Session Changes"],"tags":[]},{"location":"recipes/session-changes/#tips","level":2,"title":"Tips","text":"
    • No changes? If nothing shows up, the reference time might be wrong. Use --since 48h to widen the window.
    • Works without git. Context file changes are detected by filesystem mtime, not git. Code changes require git.
    • Hook integration. The context-load-gate hook writes the session marker that ctx change uses for auto-detection. If you're not using the ctx plugin, markers won't exist and it falls back to the event log or 24h window.
    ","path":["Reviewing Session Changes"],"tags":[]},{"location":"recipes/session-lifecycle/","level":1,"title":"The Complete Session","text":"","path":["Recipes","Sessions","The Complete Session"],"tags":[]},{"location":"recipes/session-lifecycle/#the-problem","level":2,"title":"The Problem","text":"

    \"What does a full ctx session look like from start to finish?\"

    You have ctx installed and your .context/ directory initialized, but the individual commands and skills feel disconnected.

    How do they fit together into a coherent workflow?

    This recipe walks through a complete session, from opening your editor to persisting context before you close it, so you can see how each piece connects.

    ","path":["Recipes","Sessions","The Complete Session"],"tags":[]},{"location":"recipes/session-lifecycle/#tldr","level":2,"title":"TL;DR","text":"
    1. Load: /ctx-remember: load context, get structured readback.
    2. Orient: /ctx-status: check file health and token usage.
    3. Pick: /ctx-next: choose what to work on.
    4. Work: implement, test, iterate.
    5. Commit: /ctx-commit: commit and capture decisions/learnings.
    6. Reflect: /ctx-reflect: identify what to persist (at milestones)
    7. Wrap up: /ctx-wrap-up: end-of-session ceremony.

    Read on for the full walkthrough with examples.

    What Is a Readback?

    A readback is a structured summary where the agent plays back what it knows:

    • last session,
    • active tasks,
    • recent decisions.

    This way, you can confirm it loaded the right context.

    The term \"readback\" comes from aviation, where pilots repeat instructions back to air traffic control to confirm they heard correctly.

    Same idea in ctx: The agent tells you what it \"thinks\" is going on, and you correct anything that's off before the work begins.

    • Last session: topic, date, what was accomplished
    • Active work: pending and in-progress tasks
    • Recent context: 1-2 decisions or learnings that matter now
    • Next step: suggestion or question about what to focus on
    ","path":["Recipes","Sessions","The Complete Session"],"tags":[]},{"location":"recipes/session-lifecycle/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Tool Type Purpose ctx status CLI command Quick health check on context files ctx agent CLI command Load token-budgeted context packet ctx journal source CLI command List previous sessions ctx journal source --show CLI command Inspect a specific session in detail /ctx-remember Skill Recall project context with structured readback /ctx-agent Skill Load full context packet inside the assistant /ctx-status Skill Show context summary with commentary /ctx-next Skill Suggest what to work on with rationale /ctx-commit Skill Commit code and prompt for context capture /ctx-reflect Skill Structured reflection checkpoint /ctx-history Skill Browse session history inside your AI assistant","path":["Recipes","Sessions","The Complete Session"],"tags":[]},{"location":"recipes/session-lifecycle/#the-workflow","level":2,"title":"The Workflow","text":"

    The session lifecycle has seven steps. You will not always use every step (for example, a quick bugfix might skip reflection, and a research session might skip committing), but the full arc looks like this:

    Load context > Orient > Pick a Task > Work > Commit > Reflect > Wrap Up

    ","path":["Recipes","Sessions","The Complete Session"],"tags":[]},{"location":"recipes/session-lifecycle/#step-1-load-context","level":3,"title":"Step 1: Load Context","text":"

    Start every session by loading what you know. The fastest way is a single prompt:

    Do you remember what we were working on?\n

    This triggers the /ctx-remember skill. Behind the scenes, the assistant runs ctx agent --budget 4000, reads the files listed in the context packet (TASKS.md, DECISIONS.md, LEARNINGS.md, CONVENTIONS.md), checks ctx journal source --limit 3 for recent sessions, and then presents a structured readback.

    The readback should feel like a recall, not a file system tour. If you see \"Let me check if there are files...\" instead of a confident summary, the context system is not loaded properly.

    As an alternative, if you want raw data instead of a readback, run ctx status in your terminal or invoke /ctx-status for a summarized health check showing file counts, token usage, and recent activity.

    ","path":["Recipes","Sessions","The Complete Session"],"tags":[]},{"location":"recipes/session-lifecycle/#step-2-orient","level":3,"title":"Step 2: Orient","text":"

    After loading context, verify you understand the current state.

    /ctx-status\n

    The status output shows which context files are populated, how many tokens they consume, and which files were recently modified. Look for:

    • Empty core files: TASKS.md or CONVENTIONS.md with no content means the context is sparse
    • High token count (over 30k): the context is bloated and might need ctx compact
    • No recent activity: files may be stale and need updating

    If the status looks healthy and the readback from Step 1 gave you enough context, skip ahead.

    If something seems off (stale tasks, missing decisions...), spend a minute reading the relevant file before proceeding.

    ","path":["Recipes","Sessions","The Complete Session"],"tags":[]},{"location":"recipes/session-lifecycle/#step-3-pick-what-to-work-on","level":3,"title":"Step 3: Pick What to Work On","text":"

    With context loaded, choose a task. You can pick one yourself, or ask the assistant to recommend:

    /ctx-next\n

    The skill reads TASKS.md, checks recent sessions to avoid re-suggesting completed work, and presents 1-3 ranked recommendations with rationale.

    It prioritizes in-progress tasks over new starts (finishing is better than starting), respects explicit priority tags, and favors momentum: continuing a thread from a recent session is cheaper than context-switching.

    If you already know what you want to work on, state it directly:

    Let's work on the session enrichment feature.\n
    ","path":["Recipes","Sessions","The Complete Session"],"tags":[]},{"location":"recipes/session-lifecycle/#step-4-do-the-work","level":3,"title":"Step 4: Do the Work","text":"

    This is the main body of the session: write code, fix bugs, refactor, research: whatever the task requires.

    During this phase, a few ctx-specific patterns help:

    Check decisions before choosing: when you face a design choice, check if a prior decision covers it.

    Is this consistent with our decisions?\n

    Constrain scope: keep the assistant focused on the task at hand.

    Only change files in internal/cli/session/. Nothing else.\n

    Use /ctx-implement for multistep plans: if the task has multiple steps, this skill executes them one at a time with build/test verification between each step.

    Context monitoring runs automatically: the check-context-size hook monitors context capacity at adaptive intervals. Early in a session it stays silent. After 16+ prompts it starts monitoring, and past 30 prompts it checks frequently. If context capacity is running high, it will suggest saving unsaved work. No manual invocation is needed.

    ","path":["Recipes","Sessions","The Complete Session"],"tags":[]},{"location":"recipes/session-lifecycle/#step-5-commit-with-context","level":3,"title":"Step 5: Commit with Context","text":"

    When the work is ready, use the context-aware commit instead of raw git commit:

    /ctx-commit\n

    The Agent May Recommend Committing

    You do not always need to invoke /ctx-commit explicitly.

    After a commit, the agent may proactively offer to capture context:

    \"We just made a trade-off there. Want me to record it as a decision?\"

    This is normal: The Agent Playbook encourages persisting at milestones, and a commit is a natural milestone.

    As an alternative, you can ask the assistant \"can we commit this?\" and it will pick up the /ctx-commit skill for you.

    The skill runs a pre-commit build check (for Go projects, go build), reviews the staged changes, drafts a commit message focused on \"why\" rather than \"what\", and then commits.

    After the commit succeeds, it prompts you:

    **Any context to capture?**\n\n- **Decision**: Did you make a design choice or trade-off?\n- **Learning**: Did you hit a gotcha or discover something?\n- **Neither**: No context to capture; we are done.\n

    If you made a decision, the skill records it with ctx add decision. If you learned something, it records it with ctx add learning including context, lesson, and application fields. This is the bridge between committing code and remembering why the code looks the way it does.

    If source code changed in areas that affect documentation, the skill also offers to check for doc drift.

    ","path":["Recipes","Sessions","The Complete Session"],"tags":[]},{"location":"recipes/session-lifecycle/#step-6-reflect","level":3,"title":"Step 6: Reflect","text":"

    At natural breakpoints (after finishing a feature, resolving a complex bug, or before switching tasks) pause to reflect:

    /ctx-reflect\n

    Agents Reflect at Milestones

    Agents often reflect without explicit invocation.

    After completing a significant piece of work, the agent may naturally surface items worth persisting:

    \"We discovered that $PPID resolves differently inside hooks. Should I save that as a learning?\"

    This is the agent following the Work-Reflect-Persist cycle from the Agent Playbook.

    You do not need to say /ctx-reflect for this to happen; the agent treats milestones as reflection triggers on its own.

    The skill works through a checklist: learnings discovered, decisions made, tasks completed or created, and whether there are items worth persisting. It then presents a summary with specific items to persist, each with the exact command to run:

    I would suggest persisting:\n\n- **Learning**: `$PPID` in PreToolUse hooks resolves to the Claude Code PID\n  `ctx add learning --context \"...\" --lesson \"...\" --application \"...\" --session-id abc12345 --branch main --commit 68fbc00a`\n- **Task**: mark \"Add cooldown to ctx agent\" as done\n- **Decision**: tombstone-based cooldown with 10m default\n  `ctx add decision \"...\" --session-id abc12345 --branch main --commit 68fbc00a`\n\nWant me to persist any of these?\n

    The skill asks before persisting anything. You choose what to keep.

    Not every commit needs reflection. A typo fix does not. But when you have been debugging for an hour and finally understand the root cause, that is worth a reflection checkpoint.

    ","path":["Recipes","Sessions","The Complete Session"],"tags":[]},{"location":"recipes/session-lifecycle/#step-7-persist-before-ending","level":3,"title":"Step 7: Persist Before Ending","text":"

    Before ending the session, run the wrap-up ceremony to capture outstanding learnings, decisions, conventions, and tasks:

    /ctx-wrap-up\n

    Ceremony Skills

    /ctx-remember at session start and /ctx-wrap-up at session end are ceremony skills: Invoke them as explicit slash commands for completeness, not conversationally.

    See Session Ceremonies for details on why these two skills are the exception to the conversational rule.

    The skill gathers signal from git diff, recent commits, and the conversation, then proposes structured candidates for your approval. After you select which to keep, it persists them via ctx add and offers /ctx-commit if uncommitted changes remain.

    Session transcripts are automatically captured by Claude Code and can be browsed later with ctx journal source and ctx journal source --show.

    ","path":["Recipes","Sessions","The Complete Session"],"tags":[]},{"location":"recipes/session-lifecycle/#conversational-session-flow","level":2,"title":"Conversational Session Flow","text":"

    The steps above describe the mechanics. In practice, a session with a ctx-aware agent feels more like a conversation than a command sequence.

    The agent knows the lifecycle and moves through it naturally, often without explicit invocation. Here is a realistic example:

    You: Do you remember what we were working on?

    Agent (loads context silently, presents structured readback):

    Last session (Feb 7, caching-layer): we implemented the in-memory cache with\nTTL expiration and wrote tests for the eviction policy.\n\nYou have 3 pending tasks; the biggest one is adding Redis as a backend.\n\nRecent decision:\nwe chose LRU over LFU because the access pattern is mostly recency-based.\n\nWant to continue with the Redis backend, or tackle something else?\n

    You: Let's work on the Redis caching backend.

    Agent (picks up the task, begins implementation):

    Got it. I see the task in TASKS.md: \"Add Redis backend for cache layer.\"\n\nI will start with the connection pool setup. Only touching\n`internal/cache/redis/` for now.\n

    (The agent works... writes code, runs tests, iterates.)

    Agent (after implementing the connection pool and choosing a retry strategy):

    The connection pool is working. I went with exponential backoff with jitter\nfor retries instead of fixed intervals; it handles thundering herd better\nunder reconnection storms. That is a design choice worth recording. \n\nWant me to save it as a decision?\n

    You: Yes, go ahead.

    Agent:

    Done. Added to DECISIONS.md: \"Exponential backoff with jitter for Redis\nreconnection, to avoid thundering herd on pool recovery.\"\n

    (More work happens... the agent implements the cache read/write paths.)

    You: What else did we learn during this session?

    Agent:

    A few things worth capturing:\n\n* **Learning**: Redis `SET` with `NX` and `PX` flags handles atomic\n  set-if-not-exists with TTL in a single call. No need for a separate\n  `EXPIRE` command. We initially did it in two calls and hit a race condition.\n* **Learning**: The `go-redis` library's `PoolSize` default is 10 per CPU.\n  For our workload, explicit sizing at 20 was more predictable.\n* **Task update**: The connection pool subtask is done; the read/write path\n  is implemented but needs integration tests.\n\nWant me to persist these?\n

    You: Yes. And let's wrap up.

    Agent (reflects and persists... all in one flow):

    Persisted:\n\n* 2 learnings added to LEARNINGS.md\n* Marked \"connection pool setup\" done in TASKS.md\n* Added \"write integration tests for Redis cache\" as a new task\n\nContext is up to date. You are good to close.\n

    Notice What Happened

    In the above workflow, the user never typed /ctx-reflect or ctx add learning.

    The agent moved through Load, Orient, Pick, Work, Commit, and Reflect driven by natural conversation.

    \"Let's wrap up\" was enough to trigger the full reflect-and-persist flow.

    The agent surfaced persist-worthy items at milestones — after a design choice, after discovering a gotcha — without waiting to be asked.

    This is the intended experience.

    The commands and skills still exist for when you want precise control, but the agent is a proactive partner in the lifecycle, not a passive executor of slash commands.

    ","path":["Recipes","Sessions","The Complete Session"],"tags":[]},{"location":"recipes/session-lifecycle/#putting-it-all-together","level":2,"title":"Putting It All Together","text":"

    Quick-reference checklist for a complete session:

    • Load: /ctx-remember: load context and confirm readback
    • Orient: /ctx-status: check file health and token usage
    • Pick: /ctx-next: choose what to work on
    • Work: implement, test, iterate (scope with \"only change X\")
    • Commit: /ctx-commit: commit and capture decisions/learnings
    • Reflect: /ctx-reflect: identify what to persist (at milestones)
    • Wrap up: /ctx-wrap-up: end-of-session ceremony

    Conversational equivalents: you can drive the same lifecycle with plain language:

    Step Slash command Natural language Load /ctx-remember \"Do you remember?\" / \"What were we working on?\" Orient /ctx-status \"How's our context looking?\" Pick /ctx-next \"What should we work on?\" / \"Let's do the caching task\" Work -- \"Only change files in internal/cache/\" Commit /ctx-commit \"Commit this\" / \"Ship it\" Reflect /ctx-reflect \"What did we learn?\" / (agent offers at milestones) Wrap up /ctx-wrap-up (use the slash command for completeness)

    The agent understands both columns.

    In practice, most sessions use a mix:

    • Explicit Commands when you want precision;
    • Natural Language when you want flow and agentic autonomy.

    The agent will also initiate steps on its own (particularly \"Reflect\") when it recognizes a milestone.

    Short sessions (quick bugfix) might only use: Load, Work, Commit.

    Long sessions should Reflect after each major milestone and persist learnings and decisions before ending.

    ","path":["Recipes","Sessions","The Complete Session"],"tags":[]},{"location":"recipes/session-lifecycle/#tips","level":2,"title":"Tips","text":"

    Persist early if context is running low. A hook monitors context capacity and notifies you when it gets high, but do not wait for the notification. If you have been working for a while and have unpersisted learnings, persist proactively.

    Browse previous sessions by topic. If you need context from a prior session, ctx journal source --show auth will match by keyword. You do not need to remember the exact date or slug.

    Reflection is optional but valuable. You can skip /ctx-reflect for small changes, but always persist learnings and decisions before ending a session where you did meaningful work. These are what the next session loads.

    Let the hook handle context loading. The PreToolUse hook runs ctx agent automatically with a cooldown, so context loads on first tool use without you asking. The /ctx-remember prompt at session start is for your benefit (to get a readback), not because the assistant needs it.

    The agent is a proactive partner, not a passive tool. A ctx-aware agent follows the Agent Playbook: it watches for milestones (completed tasks, design decisions, discovered gotchas) and offers to persist them without being asked. If you finish a tricky debugging session, it may say \"That root cause is worth saving as a learning. Want me to record it?\" before you think to ask. This is by design.

    ","path":["Recipes","Sessions","The Complete Session"],"tags":[]},{"location":"recipes/session-lifecycle/#next-up","level":2,"title":"Next Up","text":"

    Session Ceremonies →: The two bookend rituals for every session: /ctx-remember at the start, /ctx-wrap-up at the end.

    ","path":["Recipes","Sessions","The Complete Session"],"tags":[]},{"location":"recipes/session-lifecycle/#see-also","level":2,"title":"See Also","text":"
    • Session Ceremonies: why /ctx-remember and /ctx-wrap-up are explicit slash commands, not conversational
    • CLI Reference: full documentation for all ctx commands
    • Prompting Guide: effective prompts for ctx-enabled projects
    • Tracking Work Across Sessions: deep dive on task management
    • Persisting Decisions, Learnings, and Conventions: deep dive on knowledge capture
    • Detecting and Fixing Drift: keeping context files accurate
    • Pausing Context Hooks: shortcut the full lifecycle for quick tasks that don't need ceremony overhead
    ","path":["Recipes","Sessions","The Complete Session"],"tags":[]},{"location":"recipes/session-pause/","level":1,"title":"Pausing Context Hooks","text":"","path":["Recipes","Sessions","Pausing Context Hooks"],"tags":[]},{"location":"recipes/session-pause/#the-problem","level":2,"title":"The Problem","text":"

    Not every session needs the full ceremony. Quick investigations, one-off questions, small fixes unrelated to active project work: These tasks don't benefit from persistence nudges, ceremony reminders, or knowledge checks. Every hook still fires, consuming tokens and attention on work that won't produce learnings or decisions worth capturing.

    ","path":["Recipes","Sessions","Pausing Context Hooks"],"tags":[]},{"location":"recipes/session-pause/#tldr","level":2,"title":"TL;DR","text":"Command What it does ctx hook pause or /ctx-pause Silence all nudge hooks for this session ctx hook resume or /ctx-resume Restore normal hook behavior

    Pause is session-scoped: It only affects the current session. Other sessions (same project, different terminal) are unaffected.

    ","path":["Recipes","Sessions","Pausing Context Hooks"],"tags":[]},{"location":"recipes/session-pause/#what-gets-paused","level":2,"title":"What Gets Paused","text":"

    All nudge and reminder hooks go silent:

    • Context size checkpoints
    • Ceremony adoption nudges
    • Persistence reminders
    • Journal maintenance reminders
    • Knowledge growth nudges
    • Map staleness nudges
    • Version update nudges
    • Resource pressure warnings
    • QA reminders
    • Post-commit nudges
    • Specs nudges
    • Backup age warnings
    • Context load gate
    • Pending reminders relay
    ","path":["Recipes","Sessions","Pausing Context Hooks"],"tags":[]},{"location":"recipes/session-pause/#what-still-fires","level":2,"title":"What Still Fires","text":"

    Security hooks always run, even when paused:

    • block-non-path-ctx: prevents ./ctx invocations
    • block-dangerous-commands: blocks sudo, force push, etc.
    ","path":["Recipes","Sessions","Pausing Context Hooks"],"tags":[]},{"location":"recipes/session-pause/#workflow","level":2,"title":"Workflow","text":"
    # 1. Session starts: Context loads normally.\n\n# 2. You realize this is a quick task\nctx hook pause\n\n# 3. Work without interruption: hooks are silent\n\n# 4. Session evolves into real work? Resume first\nctx hook resume\n\n# 5. Now wrap up normally\n# /ctx-wrap-up\n
    ","path":["Recipes","Sessions","Pausing Context Hooks"],"tags":[]},{"location":"recipes/session-pause/#graduated-reminder","level":2,"title":"Graduated Reminder","text":"

    Paused hooks aren't completely invisible. A minimal indicator appears so you always know the state:

    Paused turns What you see 1-5 ctx:paused 6+ ctx:paused (N turns): resume with /ctx-resume

    This prevents the \"forgot I paused\" problem during long sessions.

    ","path":["Recipes","Sessions","Pausing Context Hooks"],"tags":[]},{"location":"recipes/session-pause/#tips","level":2,"title":"Tips","text":"
    • Resume before wrapping up. If your quick task turns into real work, resume hooks before running /ctx-wrap-up. The wrap-up ceremony needs active hooks to capture learnings properly.

    • Initial context load is unaffected. The ~8k token startup injection (CLAUDE.md, playbook, constitution) happens before any command runs. Pause only affects hooks that fire during the session.

    • Use for quick investigations. Debugging a stack trace? Checking a git log? Answering a colleague's question? Pause, do the work, close the session. No ceremony needed.

    • Don't use for real work. If you're implementing features, fixing bugs, or making decisions: keep hooks active. The nudges exist to prevent context loss.

    ","path":["Recipes","Sessions","Pausing Context Hooks"],"tags":[]},{"location":"recipes/session-pause/#see-also","level":2,"title":"See Also","text":"

    See also: Session Ceremonies: the bookend rituals that pause lets you skip when they aren't needed.

    See also: Customizing Hook Messages: if you want to change what hooks say rather than silencing them entirely.

    See also: The Complete Session: the full session workflow that pause shortcuts for quick tasks.

    ","path":["Recipes","Sessions","Pausing Context Hooks"],"tags":[]},{"location":"recipes/session-reminders/","level":1,"title":"Session Reminders","text":"","path":["Recipes","Sessions","Session Reminders"],"tags":[]},{"location":"recipes/session-reminders/#the-problem","level":2,"title":"The Problem","text":"

    You're deep in a session and realize: \"I need to refactor the swagger definitions next time.\" You could add a task, but this isn't a work item: it's a note to future-you. You could jot it on the scratchpad, but scratchpad entries don't announce themselves.

    How do you leave a message that your next session opens with?

    ","path":["Recipes","Sessions","Session Reminders"],"tags":[]},{"location":"recipes/session-reminders/#tldr","level":2,"title":"TL;DR","text":"
    ctx remind \"refactor the swagger definitions\"\nctx remind list\nctx remind dismiss 1       # or batch: ctx remind dismiss 1 3-5\n

    Reminders surface automatically at session start: VERBATIM, every session, until you dismiss them.

    ","path":["Recipes","Sessions","Session Reminders"],"tags":[]},{"location":"recipes/session-reminders/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Tool Type Purpose ctx remind CLI command Add a reminder (default action) ctx remind list CLI command Show all pending reminders ctx remind dismiss CLI command Remove a reminder by ID (or --all) /ctx-remind Skill Natural language interface to reminders","path":["Recipes","Sessions","Session Reminders"],"tags":[]},{"location":"recipes/session-reminders/#the-workflow","level":2,"title":"The Workflow","text":"","path":["Recipes","Sessions","Session Reminders"],"tags":[]},{"location":"recipes/session-reminders/#step-1-leave-a-reminder","level":3,"title":"Step 1: Leave a Reminder","text":"

    Tell your agent what to remember, or run it directly:

    You: \"remind me to refactor the swagger definitions\"\n\nAgent: [runs ctx remind \"refactor the swagger definitions\"]\n       \"Reminder set:\n         + [1] refactor the swagger definitions\"\n

    Or from the terminal:

    ctx remind \"refactor the swagger definitions\"\n
    ","path":["Recipes","Sessions","Session Reminders"],"tags":[]},{"location":"recipes/session-reminders/#step-2-set-a-date-gate-optional","level":3,"title":"Step 2: Set a Date Gate (Optional)","text":"

    If the reminder shouldn't fire until a specific date:

    You: \"remind me to check the deploy logs after Tuesday\"\n\nAgent: [runs ctx remind \"check the deploy logs\" --after 2026-02-25]\n       \"Reminder set:\n         + [2] check the deploy logs  (after 2026-02-25)\"\n

    The reminder stays silent until that date, then fires every session.

    The agent converts natural language dates (\"tomorrow\", \"next week\", \"after the release on Friday\") to YYYY-MM-DD. If it's ambiguous, it asks.

    ","path":["Recipes","Sessions","Session Reminders"],"tags":[]},{"location":"recipes/session-reminders/#step-3-start-a-new-session","level":3,"title":"Step 3: Start a New Session","text":"

    Next session, the reminder appears automatically before anything else:

    ┌─ Reminders ──────────────────────────────────────\n│  [1] refactor the swagger definitions\n│\n│ Dismiss: ctx remind dismiss <id>\n│ Dismiss all: ctx remind dismiss --all\n└──────────────────────────────────────────────────\n

    No action needed: The check-reminders hook fires on UserPromptSubmit and the agent relays the box verbatim.

    ","path":["Recipes","Sessions","Session Reminders"],"tags":[]},{"location":"recipes/session-reminders/#step-4-dismiss-when-done","level":3,"title":"Step 4: Dismiss When Done","text":"

    After you've acted on a reminder (or decided to skip it):

    You: \"dismiss reminder 1\"\n\nAgent: [runs ctx remind dismiss 1]\n       \"Dismissed:\n         - [1] refactor the swagger definitions\"\n\n# Batch dismiss also works:\n# \"dismiss reminders 3, 5 through 7\"\n# → ctx remind dismiss 3 5-7\n

    Or clear everything:

    ctx remind dismiss --all\n
    ","path":["Recipes","Sessions","Session Reminders"],"tags":[]},{"location":"recipes/session-reminders/#step-5-check-whats-pending","level":3,"title":"Step 5: Check What's Pending","text":"
    ctx remind list\n
      [1] refactor the swagger definitions\n  [3] review auth token expiry logic\n  [4] check deploy logs  (after 2026-02-25, not yet due)\n

    Date-gated reminders that haven't reached their date show (not yet due).

    ","path":["Recipes","Sessions","Session Reminders"],"tags":[]},{"location":"recipes/session-reminders/#using-ctx-remind-in-a-session","level":2,"title":"Using /ctx-remind in a Session","text":"

    Invoke the /ctx-remind skill, then describe what you want:

    You: /ctx-remind remind me to update the API docs\nYou: /ctx-remind what reminders do I have?\nYou: /ctx-remind dismiss reminder 3\n
    You say (after /ctx-remind) What the agent does \"remind me to update the API docs\" ctx remind \"update the API docs\" \"remind me next week to check staging\" ctx remind \"check staging\" --after 2026-03-02 \"what reminders do I have?\" ctx remind list \"dismiss reminder 3\" ctx remind dismiss 3 \"dismiss reminders 3, 5 through 7\" ctx remind dismiss 3 5-7 \"clear all reminders\" ctx remind dismiss --all","path":["Recipes","Sessions","Session Reminders"],"tags":[]},{"location":"recipes/session-reminders/#reminders-vs-scratchpad-vs-tasks","level":2,"title":"Reminders vs Scratchpad vs Tasks","text":"You want to... Use Leave a note that announces itself next session ctx remind Jot down a quick value or sensitive token ctx pad Track work with status and completion TASKS.md Record a decision or lesson for all sessions Context files

    Decision guide:

    • If it should announce itself at session start → ctx remind
    • If it's a quiet note you'll check manually → ctx pad
    • If it's a work item you'll mark done → TASKS.md

    Reminders Are Sticky Notes, Not Tasks

    A reminder has no status, no priority, no lifecycle. It's a message to \"future you\" that fires until dismissed.

    If you need tracking, use a task in TASKS.md.

    ","path":["Recipes","Sessions","Session Reminders"],"tags":[]},{"location":"recipes/session-reminders/#tips","level":2,"title":"Tips","text":"
    • Reminders fire every session: Unlike nudges (which throttle to once per day), reminders repeat until you dismiss them. This is intentional: You asked to be reminded.
    • Date gating is session-scoped, not clock-scoped: --after 2026-02-25 means \"don't show until sessions on or after Feb 25.\" It does not mean \"alarm at midnight on Feb 25.\"
    • The agent handles date parsing: Say \"next week\" or \"after Friday\": The agent converts it to YYYY-MM-DD. The CLI only accepts the explicit date format.
    • Reminders are committed to git: They travel with the repo. If you switch machines, your reminders follow.
    • IDs never reuse: After dismissing reminder 3, the next reminder gets ID 4 (or higher). No confusion from recycled numbers.
    ","path":["Recipes","Sessions","Session Reminders"],"tags":[]},{"location":"recipes/session-reminders/#next-up","level":2,"title":"Next Up","text":"

    Using the Scratchpad →: For quiet notes and sensitive values that don't need session-start announcements.

    ","path":["Recipes","Sessions","Session Reminders"],"tags":[]},{"location":"recipes/session-reminders/#see-also","level":2,"title":"See Also","text":"
    • CLI Reference: ctx remind: full command syntax and flags
    • The Complete Session: how reminders fit into the session lifecycle
    • Managing Tasks: for work items that need status tracking
    ","path":["Recipes","Sessions","Session Reminders"],"tags":[]},{"location":"recipes/state-maintenance/","level":1,"title":"State Directory Maintenance","text":"","path":["State Directory Maintenance"],"tags":[]},{"location":"recipes/state-maintenance/#the-problem","level":2,"title":"The Problem","text":"

    Every session creates tombstone files in .context/state/ - small markers that suppress repeat hook nudges (\"already checked context size\", \"already sent persistence reminder\"). Over days and weeks, these accumulate into hundreds of files from long-dead sessions.

    The files are harmless individually, but the clutter makes it harder to reason about state, and stale global tombstones can suppress nudges across sessions entirely.

    ","path":["State Directory Maintenance"],"tags":[]},{"location":"recipes/state-maintenance/#tldr","level":2,"title":"TL;DR","text":"
    ctx prune --dry-run     # preview what would be removed\nctx prune               # prune files older than 7 days\nctx prune --days 1      # more aggressive: keep only today\n
    ","path":["State Directory Maintenance"],"tags":[]},{"location":"recipes/state-maintenance/#commands-used","level":2,"title":"Commands Used","text":"Tool Type Purpose ctx prune Command Remove old per-session state files ctx status Command Quick health overview including state dir","path":["State Directory Maintenance"],"tags":[]},{"location":"recipes/state-maintenance/#understanding-state-files","level":2,"title":"Understanding State Files","text":"

    State files fall into two categories:

    Session-scoped (contain a UUID in the filename): Created per-session to suppress repeat nudges. Safe to prune once the session ends. Examples:

    context-check-11e94c1d-1639-4c04-bf77-63dcf1f50ec7\nheartbeat-11e94c1d-1639-4c04-bf77-63dcf1f50ec7\npersistence-nudge-11e94c1d-1639-4c04-bf77-63dcf1f50ec7\n

    Global (no UUID): Persist across sessions. ctx prune preserves these automatically. Some are legitimate state (events.jsonl, memory-import.json); others may be stale tombstones that need manual review.

    ","path":["State Directory Maintenance"],"tags":[]},{"location":"recipes/state-maintenance/#the-workflow","level":2,"title":"The Workflow","text":"","path":["State Directory Maintenance"],"tags":[]},{"location":"recipes/state-maintenance/#step-1-preview","level":3,"title":"Step 1: Preview","text":"

    Always dry-run first to see what would be removed:

    ctx prune --dry-run\n

    The output shows each file, its age, and a summary:

      would prune: context-check-abc123... (age: 3d)\n  would prune: heartbeat-abc123... (age: 3d)\n\nDry run - would prune 150 files (skip 70 recent, preserve 14 global)\n
    ","path":["State Directory Maintenance"],"tags":[]},{"location":"recipes/state-maintenance/#step-2-prune","level":3,"title":"Step 2: Prune","text":"

    Choose an age threshold. The default is 7 days:

    ctx prune               # older than 7 days\nctx prune --days 3      # older than 3 days\nctx prune --days 1      # older than 1 day (aggressive)\n
    ","path":["State Directory Maintenance"],"tags":[]},{"location":"recipes/state-maintenance/#step-3-review-global-files","level":3,"title":"Step 3: Review Global Files","text":"

    After pruning, check what prune preserved:

    ls .context/state/ | grep -v '[0-9a-f]\\{8\\}-[0-9a-f]\\{4\\}'\n

    Legitimate global files (keep):

    • events.jsonl - event log
    • memory-import.json - import tracking state

    Stale global tombstones (safe to delete):

    • Files like backup-reminded, ceremony-reminded, version-checked with no session UUID are one-shot markers. If they are from a previous session, they are stale and can be removed manually.
    rm .context/state/backup-reminded .context/state/ceremony-reminded\n
    ","path":["State Directory Maintenance"],"tags":[]},{"location":"recipes/state-maintenance/#step-4-verify","level":3,"title":"Step 4: Verify","text":"
    ls .context/state/ | wc -l    # should be manageable\n
    ","path":["State Directory Maintenance"],"tags":[]},{"location":"recipes/state-maintenance/#when-to-prune","level":2,"title":"When to Prune","text":"
    • Weekly: ctx prune with default 7-day threshold
    • After heavy parallel work: Multiple concurrent sessions create many tombstones. Prune with --days 1 afterward.
    • When state directory exceeds ~100 files: A sign that pruning hasn't run recently
    ","path":["State Directory Maintenance"],"tags":[]},{"location":"recipes/state-maintenance/#tips","level":2,"title":"Tips","text":"

    Pruning active sessions is safe but noisy: If you prune a file belonging to a still-running session, the corresponding hook will re-fire its nudge on the next prompt. Minor UX annoyance, not data loss.

    No context files are stored in state: The state directory contains only tombstones, counters, and diagnostic data. Nothing in .context/state/ affects your decisions, learnings, tasks, or conventions.

    Test artifacts sneak in: Files like context-check-statstest or heartbeat-unknown are artifacts from development or testing. They lack UUIDs so prune preserves them. Delete manually.

    ","path":["State Directory Maintenance"],"tags":[]},{"location":"recipes/state-maintenance/#see-also","level":2,"title":"See Also","text":"
    • Detecting and Fixing Drift: broader context maintenance including drift detection and archival
    • Troubleshooting: diagnostic workflow using ctx doctor and event logs
    • CLI Reference: system: full flag documentation for ctx prune and related commands
    ","path":["State Directory Maintenance"],"tags":[]},{"location":"recipes/steering/","level":1,"title":"Writing Steering Files","text":"","path":["Recipes","Agents and Automation","Writing Steering Files"],"tags":[]},{"location":"recipes/steering/#writing-steering-files","level":1,"title":"Writing Steering Files","text":"

    Steering files tell your AI assistant how to behave, not what was decided or how the codebase is written. This recipe walks through writing a steering file from scratch, validating which prompts will trigger it, and syncing it out to your configured AI tools.

    Before You Start

    If you're unsure whether a rule belongs in steering/, DECISIONS.md, or CONVENTIONS.md, read the \"Steering vs decisions vs conventions\" admonition on the ctx steering reference page. The short version: if the rule is \"the AI should always do X when asked about Y,\" that's steering. Otherwise it's probably a decision or convention.

    ","path":["Recipes","Agents and Automation","Writing Steering Files"],"tags":[]},{"location":"recipes/steering/#start-here-customize-the-foundation-files","level":2,"title":"Start Here — Customize the Foundation Files","text":"

    ctx init scaffolds four foundation steering files for you the first time you initialize a project:

    File Purpose .context/steering/product.md Product context, goals, target users .context/steering/tech.md Tech stack, constraints, key dependencies .context/steering/structure.md Directory layout, naming conventions .context/steering/workflow.md Branch strategy, commit rules, pre-commit

    Each file opens with an inline HTML comment that explains the three inclusion modes, what priority means, and the tools scope. The comment is invisible in rendered markdown but visible when you edit the file. Delete it once the file is yours.

    All four default to inclusion: always and priority: 10 — they fire on every AI tool call until you customize them. If you're reading this recipe and haven't touched them yet, open each one now and replace the placeholder bullet list with actual rules for your project. That's the highest-leverage five minutes you can spend in a new ctx setup.

    What to fill in, by file:

    product.md — The elevator pitch plus hard scope:

    • One-sentence product description.
    • Primary users and their top job-to-be-done.
    • Two or three \"this is explicitly out of scope\" items so the AI doesn't wander.

    tech.md — Technology and constraints:

    • Languages and versions (Go 1.22, Node 20, etc.).
    • Frameworks and key libraries.
    • Runtime and deployment target.
    • Hard constraints: \"no CGO\", \"no network at test time\", \"no external DB for unit tests\". These are the things that burn agents when they don't know them.

    structure.md — Layout and naming:

    • Top-level directories and their purpose.
    • Where new files should go (and where they should NOT).
    • Naming conventions for packages, files, types.

    workflow.md — Process rules:

    • Branch strategy (main-only, trunk-based, feature branches).
    • Commit message format, signed-off-by requirement.
    • Pre-commit and pre-push checks.
    • Review expectations.

    After editing, the next AI tool call in Claude Code will pick up the new rules automatically via the plugin's PreToolUse hook — no sync step, no restart. Other tools (Cursor, Cline, Kiro) need ctx steering sync to export into their native format.

    Prefer a Bare .context/steering/ Directory?

    Re-run ctx init --no-steering-init and delete the scaffolded files. ctx init leaves existing files alone, so the flag is only needed if you want to opt out of the initial scaffold.

    The rest of this recipe walks through creating an additional, scenario-specific steering file beyond the four foundation defaults.

    ","path":["Recipes","Agents and Automation","Writing Steering Files"],"tags":[]},{"location":"recipes/steering/#scenario","level":2,"title":"Scenario","text":"

    You're working on a project with a strict input-validation policy: every new API handler must validate request bodies before touching the database. You want the AI to flag this concern automatically whenever it's asked to write an HTTP handler, without you having to remind it every session.

    Claude Code Users: Pick always, Not auto

    This walkthrough uses inclusion: auto because the scenario is a scoped rule that matches a specific kind of prompt. That works natively on Cursor, Cline, and Kiro (they resolve the description keyword match themselves).

    On Claude Code, auto does not fire through the plugin's PreToolUse hook — the hook passes an empty prompt to ctx agent, so only always files match. Claude can still reach an auto file by calling the ctx_steering_get MCP tool, but that requires Claude to decide to call it; there's no automatic injection.

    If Claude Code is your tool, set inclusion: always in Step 2 instead of auto. The rule will fire on every tool call regardless of topic. You may want to narrow the rule body so the extra tokens per turn aren't wasted on unrelated work.

    See the ctx steering reference \"Prefer inclusion: always for Claude Code\" section for the full trade-off.

    ","path":["Recipes","Agents and Automation","Writing Steering Files"],"tags":[]},{"location":"recipes/steering/#step-1-scaffold-the-file","level":2,"title":"Step 1 — Scaffold the File","text":"
    ctx steering add api-validation\n

    That creates .context/steering/api-validation.md with default frontmatter:

    ---\nname: api-validation\ndescription:\ninclusion: manual\ntools: []\npriority: 50\n---\n

    The defaults are deliberately conservative: inclusion: manual means the file won't be applied until you opt in, which keeps the rules out of the prompt until you've reviewed them.

    ","path":["Recipes","Agents and Automation","Writing Steering Files"],"tags":[]},{"location":"recipes/steering/#step-2-fill-in-the-rule","level":2,"title":"Step 2 — Fill in the Rule","text":"

    Open the file and write the rule body plus a focused description. The description is what inclusion: auto matches against later.

    ---\nname: api-validation\ndescription: HTTP handler input validation and request parsing\ninclusion: auto\ntools: []\npriority: 20\n---\n\n# API request validation\n\nEvery new HTTP handler MUST:\n\n1. Parse request bodies into typed structs, never `map[string]any`.\n2. Validate required fields before any database call.\n3. Return 400 with a machine-readable error for validation failures.\n4. Use `context.Context` from the request for all downstream calls.\n\nPrefer existing validation helpers in `internal/validate/`\nrather than inline checks.\n

    Notes on the choices:

    • inclusion: auto — this rule should fire automatically on HTTP-handler-shaped prompts, not always.
    • priority: 20 — lower than the default, so this rule appears near the top of the prompt alongside other high-priority rules.
    • Description is keyword-rich: \"HTTP handler input validation and request parsing\" — the auto matcher scores prompts against these words.
    ","path":["Recipes","Agents and Automation","Writing Steering Files"],"tags":[]},{"location":"recipes/steering/#step-3-preview-which-prompts-match","level":2,"title":"Step 3 — Preview Which Prompts Match","text":"

    Before committing the file, validate your description catches the prompts you care about:

    ctx steering preview \"add an endpoint for updating user email\"\n

    Expected output:

    Steering files matching prompt \"add an endpoint for updating user email\":\n  api-validation       inclusion=auto     priority=20  tools=all\n

    Good — the prompt matches. Try a negative case:

    ctx steering preview \"fix a bug in the JSON renderer\"\n

    Expected: empty match (or whatever else is currently auto). If api-validation incorrectly fires for unrelated prompts, tighten the description. If it misses prompts it should catch, add more keywords.

    ","path":["Recipes","Agents and Automation","Writing Steering Files"],"tags":[]},{"location":"recipes/steering/#step-4-list-to-confirm-metadata","level":2,"title":"Step 4 — List to Confirm Metadata","text":"
    ctx steering list\n

    Should show api-validation alongside any other files, with its inclusion mode and priority. If the list is wrong, check the frontmatter for typos.

    ","path":["Recipes","Agents and Automation","Writing Steering Files"],"tags":[]},{"location":"recipes/steering/#step-5-get-the-rules-in-front-of-the-ai","level":2,"title":"Step 5 — Get the Rules in Front of the AI","text":"

    Steering files are authored once in .context/steering/, but how they reach the AI depends on which tool you use. There are two delivery mechanisms:

    ","path":["Recipes","Agents and Automation","Writing Steering Files"],"tags":[]},{"location":"recipes/steering/#path-a-native-rules-tools-cursor-cline-kiro","level":3,"title":"Path A — Native-Rules Tools (Cursor, Cline, Kiro)","text":"

    These tools read a specific directory for rules. ctx steering sync exports your files into that directory with tool-specific frontmatter:

    ctx steering sync\n

    Depending on the active tool in .ctxrc or --tool:

    Tool Target Cursor .cursor/rules/ Cline .clinerules/ Kiro .kiro/steering/

    The sync is idempotent — unchanged files are skipped. Run it whenever you edit a steering file.

    ","path":["Recipes","Agents and Automation","Writing Steering Files"],"tags":[]},{"location":"recipes/steering/#path-b-claude-code-and-codex-hook-mcp","level":3,"title":"Path B — Claude Code and Codex (Hook + MCP)","text":"

    Claude Code and Codex have no native rules primitive, so ctx steering sync is a no-op for them — it deliberately skips both. Instead, steering reaches these tools through two non-sync channels:

    1. PreToolUse hook (automatic). The ctx setup claude-code plugin installs a hook that runs ctx agent --budget 8000 before each tool call. ctx agent loads your steering files, filters them against the active prompt, and includes matching bodies as Tier 6 of the context packet. The packet gets injected into Claude's context automatically.

    2. ctx_steering_get MCP tool (on-demand). Claude can call this MCP tool mid-task to fetch matching steering files for a specific prompt. Automatic activation comes from Claude's judgment, not a hook.

    Both channels activate when you run:

    ctx setup claude-code --write\n

    That installs the plugin, wires the hook, and registers the MCP server. After that, steering files you edit are picked up on the next tool call — no sync step needed.

    Running ctx steering sync with Claude Code

    It won't error — it will simply report that Claude and Codex aren't sync targets and skip them. If Claude Code is your only tool, you never need to run sync. If you use both Claude Code and (say) Cursor, run sync to keep Cursor up to date; the Claude pipeline takes care of itself via the hook.

    ","path":["Recipes","Agents and Automation","Writing Steering Files"],"tags":[]},{"location":"recipes/steering/#step-6-verify-the-ai-sees-it","level":2,"title":"Step 6 — Verify the AI Sees It","text":"

    Open your AI tool and ask it something the rule should fire on:

    \"Add a POST /users endpoint that accepts email and name.\"

    If the rule is working, the AI's first response should mention input validation, typed structs, and the internal/validate/ package — because that's what the steering file told it to do.

    If nothing happens, the fix depends on which path you're on:

    Path A — Cursor/Cline/Kiro:

    1. Re-run ctx steering preview with the literal prompt to confirm the match.
    2. Run ctx steering list and verify inclusion is auto, not manual.
    3. Check the tool's own config directory (e.g. .cursor/rules/) — the file should be there after ctx steering sync.

    Path B — Claude Code:

    1. Re-run ctx steering preview with the literal prompt to confirm the match.
    2. Verify the plugin is installed: cat .claude/hooks.json should include ctx agent --budget 8000 under PreToolUse. If not, re-run ctx setup claude-code --write.
    3. Run ctx agent --budget 8000 manually and grep the output for your rule body. If it's there, the data is fine; if it's missing, the inclusion mode or description is at fault.
    4. As a last resort, ask Claude directly: \"Call the ctx_steering_get MCP tool with my prompt and show me the result.\" If the MCP tool returns your rule, Claude has access but isn't pulling it into the initial context packet — tighten the description keywords.
    ","path":["Recipes","Agents and Automation","Writing Steering Files"],"tags":[]},{"location":"recipes/steering/#common-mistakes","level":2,"title":"Common Mistakes","text":"

    Too-generic descriptions. description: general coding will match almost every prompt and flood the context window. Keep descriptions specific to the scenario the rule applies to.

    Overlapping rules. If two steering files match the same prompt and contradict each other, the result is confusing. Use priority to resolve, but better: merge the files or narrow the descriptions so they don't overlap.

    Putting decisions in steering. \"We decided to use PostgreSQL\" is a decision, not a rule for the AI to follow on every prompt. Record decisions with ctx add decision, not ctx steering add.

    Committing inclusion: always without thinking. Rules marked always fire on every prompt, consuming tier-6 budget permanently. Only use always for true invariants (security, safety, licensing). Everything else should be auto or manual.

    ","path":["Recipes","Agents and Automation","Writing Steering Files"],"tags":[]},{"location":"recipes/steering/#see-also","level":2,"title":"See Also","text":"
    • ctx steering reference — full command, flag, and frontmatter reference.
    • ctx setup — configure which tools the steering sync writes to.
    • Authoring triggers — if you want script-based automation, not rule-based prompt injection.
    ","path":["Recipes","Agents and Automation","Writing Steering Files"],"tags":[]},{"location":"recipes/system-hooks-audit/","level":1,"title":"Auditing System Hooks","text":"","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#the-problem","level":2,"title":"The Problem","text":"

    ctx runs 14 system hooks behind the scenes: nudging your agent to persist context, warning about resource pressure, gating commits on QA. But these hooks are invisible by design. You never see them fire. You never know if they stopped working.

    How do you verify your hooks are actually running, audit what they do, and get alerted when they go silent?

    ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#tldr","level":2,"title":"TL;DR","text":"
    ctx system check-resources # run a hook manually\nls -la .context/logs/      # check hook execution logs\nctx hook notify setup      # get notified when hooks fire\n

    Or ask your agent: \"Are our hooks running?\"

    ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Tool Type Purpose ctx system <hook> CLI command Run a system hook manually ctx sysinfo CLI command Show system resource status ctx usage CLI command Stream or dump per-session token stats ctx hook notify setup CLI command Configure webhook for audit trail ctx hook notify test CLI command Verify webhook delivery .ctxrcnotify.events Configuration Subscribe to relay for full hook audit .context/logs/ Log files Local hook execution ledger","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#what-are-system-hooks","level":2,"title":"What Are System Hooks?","text":"

    System hooks are plumbing commands that ctx registers with your AI tool (Claude Code, Cursor, etc.) via the plugin's hooks.json. They fire automatically at specific events during your AI session:

    Event When Hooks UserPromptSubmit Before the agent sees your prompt 10 check hooks + heartbeat PreToolUse Before the agent uses a tool block-non-path-ctx, qa-reminder PostToolUse After a tool call succeeds post-commit

    You never run these manually. Your AI tool runs them for you: That's the point.

    ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#the-complete-hook-catalog","level":2,"title":"The Complete Hook Catalog","text":"","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#prompt-time-checks-userpromptsubmit","level":3,"title":"Prompt-Time Checks (UserPromptSubmit)","text":"

    These fire before every prompt, but most are throttled to avoid noise.

    ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#check-context-size-context-capacity-warning","level":4,"title":"check-context-size: Context Capacity Warning","text":"

    What: Adaptive prompt counter. Silent for the first 15 prompts, then nudges with increasing frequency (every 5th, then every 3rd).

    Why: Long sessions lose coherence. The nudge reminds both you and the agent to persist context before the window fills up.

    Output: VERBATIM relay box with prompt count.

    ┌─ Context Checkpoint (prompt #20) ────────────────\n│ This session is getting deep. Consider wrapping up\n│ soon. If there are unsaved learnings, decisions, or\n│ conventions, now is a good time to persist them.\n│ ⏱ Context window: ~45k tokens (~22% of 200k)\n└──────────────────────────────────────────────────\n

    Usage: Every prompt records token usage to .context/state/stats-{session}.jsonl. Monitor live with ctx usage --follow or query with ctx usage --json. Usage is recorded even during wrap-up suppression (event: suppressed).

    Billing guard: When billing_token_warn is set in .ctxrc, a one-shot warning fires if session tokens exceed the threshold. This warning is independent of all other triggers - it fires even during wrap-up suppression.

    ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#check-persistence-context-staleness-nudge","level":4,"title":"check-persistence: Context Staleness Nudge","text":"

    What: Tracks when .context/*.md files were last modified. If too many prompts pass without a write, nudges the agent to persist.

    Why: Sessions produce insights that evaporate if not recorded. This catches the \"we talked about it but never wrote it down\" failure mode.

    Output: VERBATIM relay after 20+ prompts without a context file change.

    ┌─ Persistence Checkpoint (prompt #20) ───────────\n│ No context files updated in 20+ prompts.\n│ Have you discovered learnings, made decisions,\n│ established conventions, or completed tasks\n│ worth persisting?\n│\n│ Run /ctx-wrap-up to capture session context.\n└──────────────────────────────────────────────────\n
    ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#check-ceremonies-session-ritual-adoption","level":4,"title":"check-ceremonies: Session Ritual Adoption","text":"

    What: Scans your last 3 journal entries for /ctx-remember and /ctx-wrap-up usage. Nudges once per day if missing.

    Why: Session ceremonies are the highest-leverage habit in ctx. This hook bootstraps the habit until it becomes automatic.

    Output: Tailored nudge depending on which ceremony is missing.

    ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#check-journal-unimported-session-reminder","level":4,"title":"check-journal: Unimported Session Reminder","text":"

    What: Detects unimported Claude Code sessions and unenriched journal entries. Fires once per day.

    Why: Exported sessions become searchable history. Unenriched entries lack metadata for filtering. Both decay in value over time.

    Output: VERBATIM relay with counts and exact commands.

    ┌─ Journal Reminder ─────────────────────────────\n│ You have 3 new session(s) not yet exported.\n│ 5 existing entries need enrichment.\n│\n│ Export and enrich:\n│   ctx journal import --all\n│   /ctx-journal-enrich-all\n└────────────────────────────────────────────────\n
    ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#check-resources-system-resource-pressure","level":4,"title":"check-resources: System Resource Pressure","text":"

    What: Monitors memory, swap, disk, and CPU load. Only fires at DANGER severity (memory >= 90%, swap >= 75%, disk >= 95%, load >= 1.5x CPU count).

    Why: Resource exhaustion mid-session can corrupt work. This provides early warning to persist and exit.

    Output: VERBATIM relay listing critical resources.

    ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#check-knowledge-knowledge-file-growth","level":4,"title":"check-knowledge: Knowledge File Growth","text":"

    What: Counts entries in LEARNINGS.md, DECISIONS.md, and lines in CONVENTIONS.md. Fires once per day when thresholds are exceeded.

    Why: Large knowledge files dilute agent context. 35 learnings compete for attention; 15 focused ones get applied. Thresholds are configurable in .ctxrc.

    Default thresholds:

    # .ctxrc\nentry_count_learnings: 30\nentry_count_decisions: 20\nconvention_line_count: 200\n
    ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#check-version-binaryplugin-version-drift","level":4,"title":"check-version: Binary/Plugin Version Drift","text":"

    What: Compares the ctx binary version against the plugin version. Fires once per day. Also checks encryption key age for rotation nudge.

    Why: Version drift means hooks reference features the binary doesn't have. The key rotation nudge prevents indefinite key reuse.

    ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#check-reminders-pending-reminder-relay","level":4,"title":"check-reminders: Pending Reminder Relay","text":"

    What: Reads .context/reminders.json and surfaces any due reminders via VERBATIM relay. No throttle: fires every session until dismissed.

    Why: Reminders are sticky notes to future-you. Unlike nudges (which throttle to once per day), reminders repeat deliberately until the user dismisses them.

    Output: VERBATIM relay box listing due reminders.

    ┌─ Reminders ──────────────────────────────────────\n│  [1] refactor the swagger definitions\n│\n│ Dismiss: ctx remind dismiss <id>\n│ Dismiss all: ctx remind dismiss --all\n└──────────────────────────────────────────────────\n
    ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#check-freshness-technology-constant-staleness","level":4,"title":"check-freshness: Technology Constant Staleness","text":"

    What: Stats files listed in .ctxrc freshness_files and warns if any haven't been modified in over 6 months. Daily throttle. Silent when no files are configured (opt-in via .ctxrc).

    Why: Model capabilities evolve - token budgets, attention limits, and context window sizes that were accurate 6 months ago may no longer reflect best practices. This hook reminds you to review and touch the file to confirm values are still current.

    Config (.ctxrc):

    freshness_files:\n  - path: config/thresholds.yaml\n    desc: Model token limits and batch sizes\n    review_url: https://docs.example.com/limits  # optional\n

    Each entry has a path (relative to project root), desc (what constants live there), and optional review_url (where to check current values). When review_url is set, the nudge includes \"Review against: {url}\". When absent, just \"Touch the file to mark it as reviewed.\"

    Output: VERBATIM relay listing stale files, silent otherwise.

    ┌─ Technology Constants Stale ──────────────────────\n│   config/thresholds.yaml (210 days ago)\n│     - Model token limits and batch sizes\n│   Review against: https://docs.example.com/limits\n│ Touch each file to mark it as reviewed.\n└───────────────────────────────────────────────────\n
    ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#check-map-staleness-architecture-map-drift","level":4,"title":"check-map-staleness: Architecture Map Drift","text":"

    What: Checks whether map-tracking.json is older than 30 days and there are commits touching internal/ since the last map refresh. Daily throttle prevents repeated nudges.

    Why: Architecture documentation drifts silently as code evolves. This hook detects structural changes that the map hasn't caught up with and suggests running /ctx-architecture to refresh.

    Output: VERBATIM relay when stale and modules changed, silent otherwise.

    ┌─ Architecture Map Stale ────────────────────────────\n│ ARCHITECTURE.md hasn't been refreshed since 2026-01-15\n│ and there are commits touching 12 modules.\n│ /ctx-architecture keeps architecture docs drift-free.\n│\n│ Want me to run /ctx-architecture to refresh?\n└─────────────────────────────────────────────────────\n
    ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#heartbeat-session-heartbeat-webhook","level":4,"title":"heartbeat: Session Heartbeat Webhook","text":"

    What: Fires on every prompt. Sends a webhook notification with prompt count, session ID, context modification status, and token usage telemetry. Never produces stdout.

    Why: Other hooks only send webhooks when they \"speak\" (nudge/relay). When silent, you have no visibility into session activity. The heartbeat provides a continuous session-alive signal with token consumption data for observability dashboards or liveness monitoring.

    Output: None (webhook + event log only).

    Payload:

    {\n  \"event\": \"heartbeat\",\n  \"message\": \"heartbeat: prompt #7 (context_modified=false tokens=158k pct=79%)\",\n  \"detail\": {\n    \"hook\": \"heartbeat\",\n    \"variant\": \"pulse\",\n    \"variables\": {\n      \"prompt_count\": 7,\n      \"session_id\": \"abc...\",\n      \"context_modified\": false,\n      \"tokens\": 158000,\n      \"context_window\": 200000,\n      \"usage_pct\": 79\n    }\n  }\n}\n

    Token fields (tokens, context_window, usage_pct) are included when usage data is available from the session JSONL file.

    ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#tool-time-hooks-pretooluse-posttooluse","level":3,"title":"Tool-Time Hooks (PreToolUse / PostToolUse)","text":"","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#block-non-path-ctx-path-enforcement-hard-gate","level":4,"title":"block-non-path-ctx: PATH Enforcement (Hard Gate)","text":"

    What: Blocks any Bash command that invokes ./ctx, ./dist/ctx, go run ./cmd/ctx, or an absolute path to ctx. Only PATH invocations are allowed.

    Why: Enforces CONSTITUTION.md's invocation invariant. Running a dev-built binary in production context causes version confusion and silent behavior drift.

    Output: Block response (prevents the tool call):

    {\"decision\": \"block\", \"reason\": \"Use 'ctx' from PATH, not './ctx'...\"}\n
    ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#qa-reminder-pre-commit-qa-gate","level":4,"title":"qa-reminder: Pre-Commit QA Gate","text":"

    What: Fires on every Edit tool use. Reminds the agent to lint and test the entire project before committing.

    Why: Agents tend to \"I'll test later\" and then commit untested code. Repetition is intentional: the hook reinforces the habit on every edit, not just before commits.

    Output: Agent directive with hard QA gate instructions.

    ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#post-commit-context-capture-after-commit","level":4,"title":"post-commit: Context Capture After Commit","text":"

    What: Fires after any git commit (excludes --amend). Prompts the agent to offer context capture (decision? learning?) and suggest running lints/tests before pushing.

    Why: Commits are natural reflection points. The nudge converts mechanical git operations into context-capturing opportunities.

    ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#auditing-hooks-via-the-local-event-log","level":2,"title":"Auditing Hooks via the Local Event Log","text":"

    If you don't need an external audit trail, enable the local event log for a self-contained record of hook activity:

    # .ctxrc\nevent_log: true\n

    Once enabled, every hook that fires writes an entry to .context/state/events.jsonl. Query it with ctx hook event:

    ctx hook event                    # last 50 events\nctx hook event --hook qa-reminder # filter by hook\nctx hook event --session <id>     # filter by session\nctx hook event --json | jq '.'    # raw JSONL for processing\n

    The event log is local, queryable, and doesn't require any external service. For a full diagnostic workflow combining event logs with structural health checks, see Troubleshooting.

    ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#auditing-hooks-via-webhooks","level":2,"title":"Auditing Hooks via Webhooks","text":"

    The most powerful audit setup pipes all hook output to a webhook, giving you a real-time external record of what your agent is being told.

    ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#step-1-set-up-the-webhook","level":3,"title":"Step 1: Set Up the Webhook","text":"
    ctx hook notify setup\n# Enter your webhook URL (Slack, Discord, ntfy.sh, IFTTT, etc.)\n

    See Webhook Notifications for service-specific setup.

    ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#step-2-subscribe-to-relay-events","level":3,"title":"Step 2: Subscribe to relay Events","text":"
    # .ctxrc\nnotify:\n  events:\n    - relay   # all hook output: VERBATIM relays, directives, blocks\n    - nudge   # just the user-facing VERBATIM relays\n

    The relay event fires for every hook that produces output. This includes:

    Hook Event sent check-context-sizerelay + nudgecheck-persistencerelay + nudgecheck-ceremoniesrelay + nudgecheck-journalrelay + nudgecheck-resourcesrelay + nudgecheck-knowledgerelay + nudgecheck-versionrelay + nudgecheck-remindersrelay + nudgecheck-freshnessrelay + nudgecheck-map-stalenessrelay + nudgeheartbeatheartbeat only block-non-path-ctxrelay only post-commitrelay only qa-reminderrelay only","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#step-3-cross-reference","level":3,"title":"Step 3: Cross-Reference","text":"

    With relay enabled, your webhook receives a JSON payload every time a hook fires:

    {\n  \"event\": \"relay\",\n  \"message\": \"check-persistence: No context updated in 20+ prompts\",\n  \"session_id\": \"b854bd9c\",\n  \"timestamp\": \"2026-02-22T14:30:00Z\",\n  \"project\": \"my-project\"\n}\n

    This creates an external audit trail independent of the agent. You can now cross-verify: did the agent actually relay the checkpoint the hook told it to relay?

    ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#verifying-hooks-actually-fire","level":2,"title":"Verifying Hooks Actually Fire","text":"

    Hooks are invisible. An invisible thing that breaks is indistinguishable from an invisible thing that never existed. Three verification methods, from simplest to most robust:

    ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#method-1-ask-the-agent","level":3,"title":"Method 1: Ask the Agent","text":"

    The simplest check. After a few prompts into a session:

    \"Did you receive any hook output this session? Print the last\ncontext checkpoint or persistence nudge you saw.\"\n

    The agent should be able to recall recent hook output from its context window. If it says \"I haven't received any hook output\", either:

    • The hooks aren't firing (check installation);
    • The session is too short (hooks throttle early);
    • The hooks fired but the agent absorbed them silently.

    Limitation: You are trusting the agent to report accurately. Agents sometimes confabulate or miss context. Use this as a quick smoke test, not definitive proof.

    ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#method-2-check-the-webhook-trail","level":3,"title":"Method 2: Check the Webhook Trail","text":"

    If you have relay events enabled, check your webhook receiver. Every hook that fires sends a timestamped notification. No notification = no fire.

    This is the ground truth. The webhook is called directly by the ctx binary, not by the agent. The agent cannot fake, suppress, or modify webhook deliveries.

    Compare what the webhook received against what the agent claims to have relayed. Discrepancies mean the agent is absorbing nudges instead of surfacing them.

    ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#method-3-read-the-local-logs","level":3,"title":"Method 3: Read the Local Logs","text":"

    Hooks that support logging write to .context/logs/:

    # Check context-size hook activity\ncat .context/logs/check-context-size.log\n\n# Sample output:\n# [2026-02-22 09:15:00] [session:b854bd9c] prompt#1 silent\n# [2026-02-22 09:17:33] [session:b854bd9c] prompt#16 CHECKPOINT\n# [2026-02-22 09:20:01] [session:b854bd9c] prompt#20 CHECKPOINT\n
    # Check persistence nudge activity\ncat .context/logs/check-persistence.log\n\n# Sample output:\n# [2026-02-22 09:15:00] [session:b854bd9c] init count=1 mtime=1770646611\n# [2026-02-22 09:20:01] [session:b854bd9c] prompt#20 NUDGE since_nudge=20\n

    Logs are append-only and written by the ctx binary, not the agent.

    ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#detecting-silent-hook-failures","level":2,"title":"Detecting Silent Hook Failures","text":"

    The hardest failure mode: hooks that stop firing without error. The plugin config changes, a binary update drops a hook, or a PATH issue silently breaks execution. Nothing errors: the hook just never runs.

    ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#the-staleness-signal","level":3,"title":"The Staleness Signal","text":"

    If .context/logs/check-context-size.log has no entries newer than 5 days but you've been running sessions daily, something is wrong. The absence of evidence is evidence of absence — but only if you control for inactivity.

    ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#false-positive-protection","level":3,"title":"False Positive Protection","text":"

    A naive \"hooks haven't fired in N days\" alert fires incorrectly when you simply haven't used ctx. The correct check needs two inputs:

    1. Last hook fire time: from .context/logs/ or webhook history
    2. Last session activity: from journal entries or ctx journal source

    If sessions are happening but hooks aren't firing, that's a real problem. If neither sessions nor hooks are happening, that's a vacation.

    ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#what-to-check","level":3,"title":"What to Check","text":"

    When you suspect hooks aren't firing:

    # 1. Verify the plugin is installed\nls ~/.claude/plugins/\n\n# 2. Check hook registration\ncat ~/.claude/plugins/ctx/hooks.json | head -20\n\n# 3. Run a hook manually to see if it errors\necho '{\"session_id\":\"test\"}' | ctx system check-context-size\n\n# 4. Check for PATH issues\nwhich ctx\nctx --version\n
    ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#tips","level":2,"title":"Tips","text":"
    • Start with nudge, graduate to relay: The nudge event covers user-facing VERBATIM relays. Add relay when you want full visibility into agent directives and hard gates.
    • Webhooks are your trust anchor: The agent can ignore a nudge, but it can't suppress the webhook. If the webhook fired and the agent didn't relay, you have proof of a compliance gap.
    • Hooks are throttled by design: Most check hooks fire once per day or use adaptive frequency. Don't expect a notification every prompt: silence usually means the throttle is working, not that the hook is broken.
    • Daily markers live in .context/state/: Throttle files are stored in .context/state/ alongside other project-scoped state. If you need to force a hook to re-fire during testing, delete the corresponding marker file.
    • The QA reminder is intentionally noisy: Unlike other hooks, qa-reminder fires on every Edit call with no throttle. This is deliberate: commit quality degrades when the reminder fades from salience.
    • Log files are safe to commit: .context/logs/ contains only timestamps, session IDs, and status keywords. No secrets, no code.
    ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#next-up","level":2,"title":"Next Up","text":"

    Detecting and Fixing Drift →: Keep context files accurate as your codebase evolves.

    ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#see-also","level":2,"title":"See Also","text":"
    • Troubleshooting: full diagnostic workflow using ctx doctor, event logs, and /ctx-doctor
    • Customizing Hook Messages: override what hooks say without changing what they do
    • Webhook Notifications: setting up and configuring the webhook system
    • Hook Output Patterns: understanding VERBATIM relays, agent directives, and hard gates
    • Detecting and Fixing Drift: structural checks that complement runtime hook auditing
    • CLI Reference: full ctx system command reference
    ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/task-management/","level":1,"title":"Tracking Work Across Sessions","text":"","path":["Recipes","Knowledge and Tasks","Tracking Work Across Sessions"],"tags":[]},{"location":"recipes/task-management/#the-problem","level":2,"title":"The Problem","text":"

    You have work that spans multiple sessions. Tasks get added during one session, partially finished in another, and completed days later.

    Without a system, follow-up items fall through the cracks, priorities drift, and you lose track of what was done versus what still needs doing. TASKS.md grows cluttered with completed checkboxes that obscure the remaining work.

    How do you manage work items that span multiple sessions without losing context?

    Prefer Skills over Raw Commands

    When working with an AI agent, use /ctx-task-add instead of raw ctx add task. The agent automatically picks up session ID, branch, and commit hash from its context — no manual flags needed.

    ","path":["Recipes","Knowledge and Tasks","Tracking Work Across Sessions"],"tags":[]},{"location":"recipes/task-management/#tldr","level":2,"title":"TL;DR","text":"

    Manage Tasks:

    ctx add task \"Fix race condition\" --priority high \\\n  --session-id abc12345 --branch main --commit 68fbc00a  # add\nctx add task \"Write tests\" --section \"Phase 2\" \\\n  --session-id abc12345 --branch main --commit 68fbc00a  # add to phase\nctx task complete \"race condition\"                      # mark done\nctx task snapshot \"before-refactor\"               # backup\nctx task archive                                  # clean up\n

    Pick Up the Next Task:

    /ctx-next # pick what's next\n

    Read on for the full workflow and conversational patterns.

    ","path":["Recipes","Knowledge and Tasks","Tracking Work Across Sessions"],"tags":[]},{"location":"recipes/task-management/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Tool Type Purpose ctx add task Command Add a new task to TASKS.mdctx task complete Command Mark a task as done by number or text ctx task snapshot Command Create a point-in-time backup of TASKS.mdctx task archive Command Move completed tasks to archive file /ctx-task-add Skill AI-assisted task creation with validation /ctx-archive Skill AI-guided archival with safety checks /ctx-next Skill Pick what to work on based on priorities","path":["Recipes","Knowledge and Tasks","Tracking Work Across Sessions"],"tags":[]},{"location":"recipes/task-management/#the-workflow","level":2,"title":"The Workflow","text":"","path":["Recipes","Knowledge and Tasks","Tracking Work Across Sessions"],"tags":[]},{"location":"recipes/task-management/#step-1-add-tasks-with-priorities","level":3,"title":"Step 1: Add Tasks with Priorities","text":"

    Every piece of follow-up work gets a task. Use ctx add task from the terminal or /ctx-task-add from your AI assistant. Tasks should start with a verb and be specific enough that someone unfamiliar with the session could act on them.

    # High-priority bug found during code review\nctx add task \"Fix race condition in session cooldown\" --priority high \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n\n# Medium-priority feature work\nctx add task \"Add --format json flag to ctx status for CI integration\" --priority medium \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n\n# Low-priority cleanup\nctx add task \"Remove deprecated --raw flag from ctx load\" --priority low \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n

    The /ctx-task-add skill validates your task before recording it. It checks that the description is actionable, not a duplicate, and specific enough for someone else to pick up.

    If you say \"fix the bug,\" it will ask you to clarify which bug and where.

    Tasks Are Often Created Proactively

    In practice, many tasks are created proactively by the agent rather than by explicit CLI commands.

    After completing a feature, the agent will often identify follow-up work: tests, docs, edge cases, error handling, and offer to add them as tasks.

    You do not need to dictate ctx add task commands; the agent picks up on work context and suggests tasks naturally.

    ","path":["Recipes","Knowledge and Tasks","Tracking Work Across Sessions"],"tags":[]},{"location":"recipes/task-management/#step-2-organize-with-phase-sections","level":3,"title":"Step 2: Organize with Phase Sections","text":"

    Tasks live in phase sections inside TASKS.md.

    Phases provide logical groupings that preserve order and enable replay.

    A task does not move between sections. It stays in its phase permanently, and status is tracked via checkboxes and inline tags.

    ## Phase 1: Core CLI\n\n- [x] Implement ctx add command\n- [x] Implement ctx task complete command\n- [ ] Add --section flag to ctx add task `#priority:medium`\n\n## Phase 2: AI Integration\n\n- [ ] Implement ctx agent cooldown `#priority:high` `#in-progress`\n- [ ] Add ctx watch XML parsing `#priority:medium`\n  - Blocked by: Need to finalize agent output format\n\n## Backlog\n\n- [ ] Performance optimization for large TASKS.md files `#priority:low`\n- [ ] Add metrics dashboard to ctx status `#priority:deferred`\n

    Use --section when adding a task to a specific phase:

    ctx add task \"Add ctx watch XML parsing\" --priority medium --section \\\n    \"Phase 2: AI Integration\" \\\n    --session-id abc12345 --branch main --commit 68fbc00a\n

    Without --section, the task is inserted before the first unchecked task in TASKS.md.

    ","path":["Recipes","Knowledge and Tasks","Tracking Work Across Sessions"],"tags":[]},{"location":"recipes/task-management/#step-3-pick-what-to-work-on","level":3,"title":"Step 3: Pick What to Work On","text":"

    At the start of a session, or after finishing a task, use /ctx-next to get prioritized recommendations.

    The skill reads TASKS.md, checks recent sessions, and ranks candidates using explicit priority, blocking status, in-progress state, momentum from recent work, and phase order.

    You can also ask naturally: \"what should we work on?\" or \"what's the highest priority right now?\"

    /ctx-next\n

    The output looks like this:

    **1. Implement ctx agent cooldown** `#priority:high`\n\n    Still in-progress from yesterday's session. The tombstone file approach is\n    half-built. Finishing is cheaper than context-switching.\n\n**2. Add --section flag to ctx add task** `#priority:medium`\n\n    Last Phase 1 item. Quick win that unblocks organized task entry.\n\n---\n\n*Based on 8 pending tasks across 3 phases.\n\nLast session: agent-cooldown (2026-02-06).*\n

    In-progress tasks almost always come first:

    Finishing existing work takes priority over starting new work.

    ","path":["Recipes","Knowledge and Tasks","Tracking Work Across Sessions"],"tags":[]},{"location":"recipes/task-management/#step-4-complete-tasks","level":3,"title":"Step 4: Complete Tasks","text":"

    When a task is done, mark it complete by number or partial text match:

    # By task number (as shown in TASKS.md)\nctx task complete 3\n\n# By partial text match\nctx task complete \"agent cooldown\"\n

    The task's checkbox changes from [ ] to [x]. Tasks are never deleted: they stay in their phase section so history is preserved.

    Be Conversational

    You rarely need to run ctx task complete yourself during an interactive session.

    When you say something like \"the rate limiter is done\" or \"we finished that,\" the agent marks the task complete and moves on to suggesting what is next.

    The CLI commands are most useful for manual housekeeping, scripted workflows, or when you want precision.

    ","path":["Recipes","Knowledge and Tasks","Tracking Work Across Sessions"],"tags":[]},{"location":"recipes/task-management/#step-5-snapshot-before-risky-changes","level":3,"title":"Step 5: Snapshot Before Risky Changes","text":"

    Before a major refactor or any change that might break things, snapshot your current task state. This creates a copy of TASKS.md in .context/archive/ without modifying the original.

    # Default snapshot\nctx task snapshot\n\n# Named snapshot (recommended before big changes)\nctx task snapshot \"before-refactor\"\n

    This creates a file like .context/archive/tasks-before-refactor-2026-02-08-1430.md. If the refactor goes sideways and you need to confirm what the task state looked like before you started, the snapshot is there.

    Snapshots are cheap: Take them before any change you might want to undo or review later.

    ","path":["Recipes","Knowledge and Tasks","Tracking Work Across Sessions"],"tags":[]},{"location":"recipes/task-management/#step-6-archive-when-tasksmd-gets-cluttered","level":3,"title":"Step 6: Archive When TASKS.md Gets Cluttered","text":"

    After several sessions, TASKS.md accumulates completed tasks that make it hard to see what is still pending.

    Use ctx task archive to move all [x] items to a timestamped archive file.

    Start with a dry run to preview what will be moved:

    ctx task archive --dry-run\n

    Then archive:

    ctx task archive\n

    Completed tasks move to .context/archive/tasks-2026-02-08.md. Phase headers are preserved in the archive for traceability. Pending tasks ([ ]) remain in TASKS.md.

    The /ctx-archive skill adds two safety checks before archiving: it verifies that completed tasks are genuinely cluttering the view and that nothing was marked [x] prematurely.

    ","path":["Recipes","Knowledge and Tasks","Tracking Work Across Sessions"],"tags":[]},{"location":"recipes/task-management/#letting-the-agent-drive","level":2,"title":"Letting the Agent Drive","text":"

    The steps above show the CLI commands for task management.

    In practice, most task management happens conversationally:

    An agent that has loaded the context files does not need you to type ctx add task. It tracks work naturally and offers the right operations.

    ","path":["Recipes","Knowledge and Tasks","Tracking Work Across Sessions"],"tags":[]},{"location":"recipes/task-management/#use-natural-language-instead-of-commands","level":3,"title":"Use Natural Language Instead of Commands","text":"

    These conversational prompts replace explicit commands during interactive sessions:

    Instead of typing... Just say... ctx add task \"Write tests for X\" --session-id ID --branch BR --commit HASH \"We should add tests for this: track that?\" /ctx-next \"What should we work on?\" ctx task complete \"rate limiting\" \"The rate limiter is done, what's next?\" ctx task archive \"TASKS.md is getting long, can you clean it up?\" ctx add task ... --session-id ID --branch BR --commit HASH && ctx add task ... \"Add follow-ups for what we just built.\"

    The agent translates these into the right ctx operations behind the scenes.

    ","path":["Recipes","Knowledge and Tasks","Tracking Work Across Sessions"],"tags":[]},{"location":"recipes/task-management/#the-agent-creates-tasks-proactively","level":3,"title":"The Agent Creates Tasks Proactively","text":"

    After completing a piece of work, the agent does not wait for you to dictate follow-up items. It identifies them from the work context and offers:

    **Agent**: \"I've finished implementing the rate limiter middleware. I noticed\nthree follow-up items:\n\n1. Write integration tests for the rate limiter (the unit tests pass but\n   there's no end-to-end coverage)\n2. Add rate limit headers to API responses (X-RateLimit-Remaining, etc.)\n3. Document the rate limit configuration in the ops runbook\n\nWant me to add these as tasks?\"\n

    This is how an agent behaves once it has read the Agent Playbook.

    The playbook's \"Work, Reflect, Persist\" cycle means the agent asks itself after every milestone: Did I just complete a task? Did new work emerge?

    When the answer is yes, it acts.

    ","path":["Recipes","Knowledge and Tasks","Tracking Work Across Sessions"],"tags":[]},{"location":"recipes/task-management/#asking-the-agent-what-to-capture","level":3,"title":"Asking the Agent What to Capture","text":"

    You don't always know what's worth persisting.

    Instead of reviewing the session yourself and typing specific commands, ask the agent to do the thinking:

    You say What happens \"What tasks came out of this session?\" Agent reviews the work, identifies follow-ups, and offers to add them \"Are there any tasks, learnings, or decisions worth adding?\" Agent scans the full session for all three types and proposes a batch \"What should we remember from today?\" Agent surfaces everything persist-worthy: not just tasks \"Anything we should track before we move on?\" Agent checks for loose ends specific to the work just completed

    This is especially powerful after long or exploratory sessions where multiple threads were touched.

    The agent has the full conversation in context and can spot follow-ups you might miss:

    You:   \"Are there any tasks, learnings, or decisions worth adding\n        from this session?\"\n\nAgent: \"Yes. Here's what I'd suggest:\n\n  Tasks:\n  - Add retry logic to the webhook delivery (we discussed it when\n    the timeout test failed but never tracked it)\n  - Update the API docs to reflect the new rate limit headers\n\n  Learnings:\n  - net/http's default client has no timeout: we burned 15 minutes\n    on that before adding an explicit 30s timeout\n\n  Decisions:\n  - We chose token-bucket over sliding-window for rate limiting\n    because it handles bursts better and the library is simpler\n\n  Want me to add all of these?\"\n\nYou:   \"All of them.\"\n

    The agent fills in structured fields from what it observed during the session. You get the same quality entries as if you typed verbose commands, without needing to remember what happened or how to format it.

    ","path":["Recipes","Knowledge and Tasks","Tracking Work Across Sessions"],"tags":[]},{"location":"recipes/task-management/#the-agent-completes-tasks-and-moves-on","level":3,"title":"The Agent Completes Tasks and Moves On","text":"

    When you confirm that work is finished, or when the agent can see that it is, it marks the task done and suggests the next step:

    You: \n\"The cooldown logic is working now.\"\n\nAgent: \n\"Marked 'Implement ctx agent cooldown' as done. Your highest-\npriority remaining task is 'Add --section flag to ctx add task'.\n\nIt's the last item in Phase 1 and a quick win. Want to start on that?\"\n

    The agent chains completion into next-task selection.

    One sentence from you triggers two operations.

    ","path":["Recipes","Knowledge and Tasks","Tracking Work Across Sessions"],"tags":[]},{"location":"recipes/task-management/#end-of-session-capture","level":3,"title":"End-of-Session Capture","text":"

    At the end of a session, the agent reviews what happened and catches loose ends:

    You: \n\"I think we're good for today.\"\n\nAgent: \n\"Before we wrap up: during the middleware work you mentioned\nwanting to add metrics for rate limit hits. That never made it into TASKS.md.\n\nWant me to add it as a task so it's there next session?\"\n

    This is the proactive persistence model at work.

    The agent monitors the conversation for mentioned-but-not-tracked work and surfaces it before context is lost.

    ","path":["Recipes","Knowledge and Tasks","Tracking Work Across Sessions"],"tags":[]},{"location":"recipes/task-management/#a-conversational-session-example","level":3,"title":"A Conversational Session Example","text":"

    Here is what task management looks like in a real session when the agent is driving:

    You:   What's left to do?\nAgent: You have 5 pending tasks across 2 phases. The highest priority is\n       \"Implement ctx agent cooldown\": it's marked in-progress from\n       yesterday and half-built. Want to pick that up?\n\nYou:   Yeah, let's finish it.\n       [... 20 minutes of implementation work ...]\n\nYou:   That's working. Tests pass.\nAgent: Nice. I'll mark \"Implement ctx agent cooldown\" as done.\n       I noticed two things from the implementation:\n       1. The tombstone file cleanup should happen on a timer, not just\n          at startup. Want me to add that as a task?\n       2. We learned that file locks don't work reliably on NFS. I'll\n          add that to LEARNINGS.md.\n\nYou:   Yes to both. What's next?\nAgent: Added the cleanup task as high priority. Next up is \"Add --section\n       flag to ctx add task\": last item in Phase 1. It should be\n       straightforward since the flag parsing pattern already exists\n       in the codebase. Ready?\n

    It's All Conversational

    Notice what did not happen: The user never typed a ctx command.

    The agent handled task completion, follow-up creation, learning capture, and next-task selection: all from natural conversation.

    ","path":["Recipes","Knowledge and Tasks","Tracking Work Across Sessions"],"tags":[]},{"location":"recipes/task-management/#putting-it-all-together","level":2,"title":"Putting It All Together","text":"
    # Add a task\nctx add task \"Implement rate limiting for API endpoints\" --priority high \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n\n# Add to a specific phase\nctx add task \"Write integration tests for rate limiter\" --section \"Phase 2\" \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n\n# See what to work on\n# (from AI assistant) /ctx-next\n\n# Mark done by text\nctx task complete \"rate limiting\"\n\n# Mark done by number\nctx task complete 5\n\n# Snapshot before a risky refactor\nctx task snapshot \"before-middleware-rewrite\"\n\n# Archive completed tasks when the list gets long\nctx task archive --dry-run     # preview first\nctx task archive               # then archive\n
    ","path":["Recipes","Knowledge and Tasks","Tracking Work Across Sessions"],"tags":[]},{"location":"recipes/task-management/#tips","level":2,"title":"Tips","text":"
    • Start tasks with a verb: \"Add,\" \"Fix,\" \"Implement,\" \"Investigate\": not just a topic like \"Authentication.\"
    • Include the why in the task description. Future sessions lack the context of why you added the task. \"Add rate limiting\" is worse than \"Add rate limiting to prevent abuse on the public API after the load test showed 10x traffic spikes.\"
    • Use #in-progress sparingly. Only one or two tasks should carry this tag at a time. If everything is in-progress, nothing is.
    • Snapshot before, not after. The point of a snapshot is to capture the state before a change, not to celebrate what you just finished.
    • Archive regularly. Once completed tasks outnumber pending ones, it is time to archive. A clean TASKS.md helps both you and your AI assistant focus.
    • Never delete tasks. Mark them [x] (completed) or [-] (skipped with a reason). Deletion breaks the audit trail.
    • Trust the agent's task instincts. When the agent suggests follow-up items after completing work, it is drawing on the full context of what just happened.
    • Conversational prompts beat commands in interactive sessions. Saying \"what should we work on?\" is faster and more natural than running /ctx-next. Save explicit commands for scripts, CI, and unattended runs.
    • Let the agent chain operations. A single statement like \"that's done, what's next?\" can trigger completion, follow-up identification, and next-task selection in one flow.
    • Review proactive task suggestions before moving on. The best follow-ups come from items spotted in-context right after the work completes.
    ","path":["Recipes","Knowledge and Tasks","Tracking Work Across Sessions"],"tags":[]},{"location":"recipes/task-management/#next-up","level":2,"title":"Next Up","text":"

    Using the Scratchpad →: Store short-lived sensitive notes in an encrypted scratchpad.

    ","path":["Recipes","Knowledge and Tasks","Tracking Work Across Sessions"],"tags":[]},{"location":"recipes/task-management/#see-also","level":2,"title":"See Also","text":"
    • The Complete Session: full session lifecycle including task management in context
    • Persisting Decisions, Learnings, and Conventions: capturing the \"why\" behind your work
    • Detecting and Fixing Drift: keeping TASKS.md accurate over time
    • CLI Reference: full documentation for ctx add, ctx task complete, ctx task
    • Context Files: TASKS.md: format and conventions for TASKS.md
    ","path":["Recipes","Knowledge and Tasks","Tracking Work Across Sessions"],"tags":[]},{"location":"recipes/triggers/","level":1,"title":"Authoring Lifecycle Triggers","text":"","path":["Recipes","Agents and Automation","Authoring Lifecycle Triggers"],"tags":[]},{"location":"recipes/triggers/#authoring-lifecycle-triggers","level":1,"title":"Authoring Lifecycle Triggers","text":"

    Triggers are executable shell scripts that fire at specific events during an AI session. They're how you express \"when the AI saves a file, also do X\" or \"before the AI edits this path, check Y first.\" This recipe walks through writing your first trigger, testing it, and enabling it safely.

    Triggers Execute Arbitrary Code

    A trigger is a shell script with the executable bit set. It runs with the same privileges as your AI tool and receives JSON input on stdin. Treat triggers like pre-commit hooks:

    • Only enable scripts you have read and understand.
    • Never enable a trigger you downloaded from the internet without reviewing every line.
    • Avoid shelling out to user-controlled values (jq -r output, path field, tool field) without quoting.
    • A malicious or buggy trigger can block tool calls, corrupt context files, or exfiltrate data.

    The generated trigger template starts disabled (no executable bit) so you cannot accidentally run an unreviewed script. Enable it explicitly with ctx trigger enable.

    ","path":["Recipes","Agents and Automation","Authoring Lifecycle Triggers"],"tags":[]},{"location":"recipes/triggers/#scenario","level":2,"title":"Scenario","text":"

    You want a pre-tool-use trigger that blocks the AI from editing anything in internal/crypto/ without explicit confirmation. Cryptographic code is sensitive, and accidental edits have caused outages before — you want a hard gate.

    ","path":["Recipes","Agents and Automation","Authoring Lifecycle Triggers"],"tags":[]},{"location":"recipes/triggers/#step-1-scaffold-the-script","level":2,"title":"Step 1 — Scaffold the Script","text":"
    ctx trigger add pre-tool-use protect-crypto\n

    That creates .context/hooks/pre-tool-use/protect-crypto.sh with a template:

    #!/usr/bin/env bash\nset -euo pipefail\n\n# Read the JSON event from stdin.\npayload=$(cat)\n\n# Parse fields with jq.\ntool=$(echo \"$payload\" | jq -r '.tool // empty')\npath=$(echo \"$payload\" | jq -r '.path // empty')\n\n# Your logic here.\n\n# Return a JSON result. action can be \"allow\", \"block\", or absent.\necho '{\"action\": \"allow\"}'\n

    Note: the directory is .context/hooks/pre-tool-use/ — the on-disk layout still uses hooks/ even though the command is ctx trigger. If you ls .context/hooks/, that's where your triggers live.

    ","path":["Recipes","Agents and Automation","Authoring Lifecycle Triggers"],"tags":[]},{"location":"recipes/triggers/#step-2-write-the-logic","level":2,"title":"Step 2 — Write the Logic","text":"

    Open the file and replace the template body:

    #!/usr/bin/env bash\nset -euo pipefail\n\npayload=$(cat)\ntool=$(echo \"$payload\" | jq -r '.tool // empty')\npath=$(echo \"$payload\" | jq -r '.path // empty')\n\n# Only gate write-family tools.\ncase \"$tool\" in\n  write_file|edit_file|apply_patch) ;;\n  *)\n    echo '{\"action\": \"allow\"}'\n    exit 0\n    ;;\nesac\n\n# Block any path under internal/crypto/.\ncase \"$path\" in\n  internal/crypto/*|*/internal/crypto/*)\n    jq -n --arg p \"$path\" '{\n      action: \"block\",\n      message: (\"Edits to \" + $p + \" require manual review. \" +\n                \"See CONVENTIONS.md for the crypto-change process.\")\n    }'\n    exit 0\n    ;;\nesac\n\necho '{\"action\": \"allow\"}'\n

    A few things to note:

    • set -euo pipefail — any unhandled error aborts the script. Critical for a security-relevant trigger.
    • Quote everything from jq — the path field comes from the AI tool; treat it as untrusted input.
    • Explicit allow case — the default is allow. An empty or missing response is a risky default.
    • Use jq -n --arg for output construction — safer than string concatenation when the message may contain special characters.
    ","path":["Recipes","Agents and Automation","Authoring Lifecycle Triggers"],"tags":[]},{"location":"recipes/triggers/#step-3-test-with-a-mock-payload","level":2,"title":"Step 3 — Test with a Mock Payload","text":"

    Before enabling the trigger, test it with a realistic mock input using ctx trigger test. This runs the script against a synthetic JSON payload without actually firing any AI tool.

    # Test the \"should block\" case\nctx trigger test pre-tool-use --tool write_file --path internal/crypto/aes.go\n

    Expected: the trigger returns {\"action\":\"block\", \"message\": \"...\"}.

    # Test the \"should allow\" case\nctx trigger test pre-tool-use --tool write_file --path internal/memory/mirror.go\n

    Expected: the trigger returns {\"action\":\"allow\"}.

    # Test that non-write tools pass through\nctx trigger test pre-tool-use --tool read_file --path internal/crypto/aes.go\n

    Expected: {\"action\":\"allow\"} because the case statement only gates write-family tools.

    If any of these cases misbehave, fix the trigger before enabling it. The trigger is disabled at this point, so misbehavior doesn't affect real AI sessions.

    ","path":["Recipes","Agents and Automation","Authoring Lifecycle Triggers"],"tags":[]},{"location":"recipes/triggers/#step-4-enable-it","level":2,"title":"Step 4 — Enable It","text":"

    Once the test cases pass, enable the trigger:

    ctx trigger enable protect-crypto\n

    That sets the executable bit. Next time the AI starts a pre-tool-use event, the trigger will fire.

    Verify it's enabled:

    ctx trigger list\n

    Should show protect-crypto under pre-tool-use with an enabled indicator.

    ","path":["Recipes","Agents and Automation","Authoring Lifecycle Triggers"],"tags":[]},{"location":"recipes/triggers/#step-5-iterate-safely","level":2,"title":"Step 5 — Iterate Safely","text":"

    If you discover a bug after enabling, disable first, fix second:

    ctx trigger disable protect-crypto\n# ...edit the script...\nctx trigger test pre-tool-use --tool write_file --path internal/crypto/aes.go\nctx trigger enable protect-crypto\n

    Disabling simply clears the executable bit — the script stays on disk, and ctx trigger enable re-enables it without rewriting anything.

    ","path":["Recipes","Agents and Automation","Authoring Lifecycle Triggers"],"tags":[]},{"location":"recipes/triggers/#patterns-worth-copying","level":2,"title":"Patterns Worth Copying","text":"","path":["Recipes","Agents and Automation","Authoring Lifecycle Triggers"],"tags":[]},{"location":"recipes/triggers/#logging-not-blocking","level":3,"title":"Logging, Not Blocking","text":"

    For auditing or analytics, return {\"action\":\"allow\"} always and append to a log as a side effect:

    #!/usr/bin/env bash\nset -euo pipefail\npayload=$(cat)\necho \"$payload\" >> .context/logs/tool-use.jsonl\necho '{\"action\":\"allow\"}'\n
    ","path":["Recipes","Agents and Automation","Authoring Lifecycle Triggers"],"tags":[]},{"location":"recipes/triggers/#context-injection-at-session-start","level":3,"title":"Context Injection at Session Start","text":"

    A session-start trigger can prepend text to the agent's initial prompt by emitting {\"action\":\"inject\", \"content\": \"...\"} — useful for injecting daily standup notes, open PRs, or rotating TODOs without storing them in a steering file.

    ","path":["Recipes","Agents and Automation","Authoring Lifecycle Triggers"],"tags":[]},{"location":"recipes/triggers/#chaining-triggers-of-the-same-type","level":3,"title":"Chaining Triggers of the Same Type","text":"

    Multiple scripts in the same type directory all run. If any returns action: block, the block wins. Keep individual triggers single-purpose and rely on composition.

    ","path":["Recipes","Agents and Automation","Authoring Lifecycle Triggers"],"tags":[]},{"location":"recipes/triggers/#common-mistakes","level":2,"title":"Common Mistakes","text":"

    Forgetting the shebang. Without #!/usr/bin/env bash, the trigger won't execute even with the executable bit set.

    Not quoting $path. If you use $path in a command substitution or a case glob without quoting, a file name with spaces or metacharacters will break the trigger in surprising ways.

    Enabling before testing. ctx trigger enable makes the script live immediately. Always ctx trigger test first.

    Outputting non-JSON. The trigger's stdout must be valid JSON or ctx's trigger runner will log a parse error. Use jq -n to construct output rather than hand-writing JSON strings.

    Mixing hook and trigger vocabulary. The command is ctx trigger but the on-disk directory is .context/hooks/. The feature was renamed; the directory name lags behind. Don't let this confuse you — they refer to the same thing.

    ","path":["Recipes","Agents and Automation","Authoring Lifecycle Triggers"],"tags":[]},{"location":"recipes/triggers/#see-also","level":2,"title":"See Also","text":"
    • ctx trigger reference — full command, flag, and event-type reference.
    • ctx steering — persistent rules, not scripts. Use steering when the thing you want is \"tell the AI to always do X\" rather than \"run a script when Y happens.\"
    • Writing steering files — the rule-based equivalent of this recipe.
    ","path":["Recipes","Agents and Automation","Authoring Lifecycle Triggers"],"tags":[]},{"location":"recipes/troubleshooting/","level":1,"title":"Troubleshooting","text":"","path":["Recipes","Maintenance","Troubleshooting"],"tags":[]},{"location":"recipes/troubleshooting/#the-problem","level":2,"title":"The Problem","text":"

    Something isn't working: a hook isn't firing, nudges are too noisy, context seems stale, or the agent isn't following instructions. The information to diagnose it exists (across status, drift, event logs, hook config, and session history), but assembling it manually is tedious.

    How do you figure out what's wrong and fix it?

    ","path":["Recipes","Maintenance","Troubleshooting"],"tags":[]},{"location":"recipes/troubleshooting/#tldr","level":2,"title":"TL;DR","text":"
    ctx doctor                   # structural health check\nctx hook event --last 20  # recent hook activity\n# or ask: \"something seems off, can you diagnose?\"\n
    ","path":["Recipes","Maintenance","Troubleshooting"],"tags":[]},{"location":"recipes/troubleshooting/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Tool Type Purpose ctx doctor CLI command Structural health report ctx doctor --json CLI command Machine-readable health report ctx hook event CLI command Query local event log /ctx-doctor Skill Agent-driven diagnosis with analysis","path":["Recipes","Maintenance","Troubleshooting"],"tags":[]},{"location":"recipes/troubleshooting/#the-workflow","level":2,"title":"The Workflow","text":"","path":["Recipes","Maintenance","Troubleshooting"],"tags":[]},{"location":"recipes/troubleshooting/#quick-check-ctx-doctor","level":3,"title":"Quick Check: ctx doctor","text":"

    Run ctx doctor for an instant structural health report. It checks context initialization, required files, drift, hook configuration, event logging, webhooks, reminders, task completion ratio, and context token size: all in one pass:

    ctx doctor\n
    ctx doctor\n==========\n\nStructure\n  ✓ Context initialized (.context/)\n  ✓ Required files present (4/4)\n\nQuality\n  ⚠ Drift: 2 warnings (stale path in ARCHITECTURE.md, high entry count in LEARNINGS.md)\n\nHooks\n  ✓ hooks.json valid (14 hooks registered)\n  ○ Event logging disabled (enable with event_log: true in .ctxrc)\n\nState\n  ✓ No pending reminders\n  ⚠ Task completion ratio high (18/22 = 82%): consider archiving\n\nSize\n  ✓ Context size: ~4200 tokens (budget: 8000)\n\nSummary: 2 warnings, 0 errors\n

    Warnings are non-critical but worth fixing. Errors need attention. Informational notes (○) flag optional features that aren't enabled.

    For scripting:

    ctx doctor --json | jq '.warnings'\n
    ","path":["Recipes","Maintenance","Troubleshooting"],"tags":[]},{"location":"recipes/troubleshooting/#deep-dive-ctx-doctor","level":3,"title":"Deep Dive: /ctx-doctor","text":"

    When you need the agent to reason about what's wrong, use the skill. Ask naturally or invoke directly:

    Why didn't my hook fire?\nSomething seems off, can you diagnose?\n/ctx-doctor\n

    The agent follows a triage sequence:

    1. Baseline: runs ctx doctor --json for structural health
    2. Events: runs ctx hook event --json --last 100 (if event logging enabled)
    3. Correlate: connects findings across both sources
    4. Present: structured findings with evidence
    5. Suggest: actionable next steps (but doesn't auto-fix)

    The skill degrades gracefully: without event logging enabled, it still runs structural checks and notes what you'd gain by enabling it.

    ","path":["Recipes","Maintenance","Troubleshooting"],"tags":[]},{"location":"recipes/troubleshooting/#raw-event-inspection","level":3,"title":"Raw Event Inspection","text":"

    For power users: ctx hook event with filters gives direct access to the event log.

    # Last 50 events (default)\nctx hook event\n\n# Events from a specific session\nctx hook event --session eb1dc9cd-0163-4853-89d0-785fbfaae3a6\n\n# Only QA reminder events\nctx hook event --hook qa-reminder\n\n# Raw JSONL for jq processing\nctx hook event --json | jq '.message'\n\n# Include rotated (older) events\nctx hook event --all --last 100\n

    Filters use AND logic: --hook qa-reminder --session abc123 returns only QA reminder events from that specific session.

    ","path":["Recipes","Maintenance","Troubleshooting"],"tags":[]},{"location":"recipes/troubleshooting/#common-problems","level":2,"title":"Common Problems","text":"","path":["Recipes","Maintenance","Troubleshooting"],"tags":[]},{"location":"recipes/troubleshooting/#ctx-not-initialized","level":3,"title":"\"ctx: Not Initialized\"","text":"

    Symptoms: Any ctx command fails with ctx: not initialized - run \"ctx init\" first.

    Cause: You're running ctx in a directory without an initialized .context/ directory. This guard runs on all user-facing commands to prevent confusing downstream errors.

    Fix:

    ctx init          # create .context/ with template files\nctx init --minimal  # or just the essentials (CONSTITUTION, TASKS, DECISIONS)\n

    Commands that work without initialization: ctx init, ctx setup, ctx doctor, and help-only grouping commands (ctx, ctx system).

    ","path":["Recipes","Maintenance","Troubleshooting"],"tags":[]},{"location":"recipes/troubleshooting/#my-hook-isnt-firing","level":3,"title":"\"My Hook Isn't Firing\"","text":"

    Symptoms: No nudges appearing, webhook silent, event log shows no entries for the expected hook.

    Diagnosis:

    # 1. Check if ctx is installed and on PATH\nwhich ctx && ctx --version\n\n# 2. Check if the hook is registered\ngrep \"check-persistence\" ~/.claude/plugins/ctx/hooks.json\n\n# 3. Run the hook manually to see if it errors\necho '{\"session_id\":\"test\"}' | ctx system check-persistence\n\n# 4. Check event log for the hook (if enabled)\nctx hook event --hook check-persistence\n

    Common causes:

    • Plugin is not installed: run ctx init --claude to reinstall
    • PATH issue: the hook invokes ctx from PATH; ensure it resolves
    • Throttle active: most hooks fire once per day: check .context/state/ for daily marker files
    • Hook silenced: a custom message override may be an empty file: check ctx hook message list for overrides
    ","path":["Recipes","Maintenance","Troubleshooting"],"tags":[]},{"location":"recipes/troubleshooting/#too-many-nudges","level":3,"title":"\"Too Many Nudges\"","text":"

    Symptoms: The agent is overwhelmed with hook output. Context checkpoints, persistence reminders, and QA gates fire constantly.

    Diagnosis:

    # Check how often hooks fired recently\nctx hook event --last 50\n\n# Count fires per hook\nctx hook event --json | jq -r '.detail.hook // \"unknown\"' \\\n  | sort | uniq -c | sort -rn\n

    Common causes:

    • QA reminder is noisy by design: it fires on every Edit call with no throttle. This is intentional. If it's too much, silence it with an empty override: ctx hook message edit qa-reminder gate, then empty the file
    • Long session: context checkpoint fires with increasing frequency after prompt 15. This is the system telling you the session is getting long: consider wrapping up
    • Short throttle window: if you deleted marker files in .context/state/, daily-throttled hooks will re-fire
    • Outdated Claude Code plugin: Update the plugin using Claude Code → /plugin → \"Marketplace\"
    • ctx version mismatch: Build (or download) and install the latest ctx version.
    ","path":["Recipes","Maintenance","Troubleshooting"],"tags":[]},{"location":"recipes/troubleshooting/#context-seems-stale","level":3,"title":"\"Context Seems Stale\"","text":"

    Symptoms: The agent references outdated information, paths that don't exist, or decisions that were reversed.

    Diagnosis:

    # Structural drift check\nctx drift\n\n# Full doctor check (includes drift + more)\nctx doctor\n\n# Check when context files were last modified\nctx status --verbose\n

    Common causes:

    • Drift accumulated: stale path references in ARCHITECTURE.md or CONVENTIONS.md. Fix with ctx drift --fix or ask the agent to clean up.
    • Task backlog: too many completed tasks diluting active context. Archive with ctx task archive or ctx compact --archive.
    • Large context files: LEARNINGS.md with 40+ entries competes for attention. Consolidate with /ctx-consolidate.
    • Missing session ceremonies: if /ctx-remember and /ctx-wrap-up aren't being used, context doesn't get refreshed. See Session Ceremonies.
    ","path":["Recipes","Maintenance","Troubleshooting"],"tags":[]},{"location":"recipes/troubleshooting/#the-agent-isnt-following-instructions","level":3,"title":"\"The Agent Isn't Following Instructions\"","text":"

    Symptoms: The agent ignores conventions, forgets decisions, or acts contrary to CONSTITUTION.md rules.

    Diagnosis:

    # Check context token size: Is it too large for the model?\nctx doctor --json | jq '.results[] | select(.name == \"context_size\")'\n\n# Check if context is actually being loaded\nctx hook event --hook context-load-gate\n

    Common causes:

    • Context too large: if total tokens exceed the model's effective attention, instructions get diluted. Check ctx doctor for the size check. Compact with ctx compact --archive.
    • Context not loading: if context-load-gate hasn't fired, the agent may not have received context. Verify the hook is registered.
    • Conflicting instructions: CONVENTIONS.md says one thing, AGENT_PLAYBOOK.md says another. Review both files for consistency.
    • Agent drift: the agent's behavior diverges from instructions over long sessions. This is normal. Use /ctx-reflect to re-anchor, or start a new session.
    ","path":["Recipes","Maintenance","Troubleshooting"],"tags":[]},{"location":"recipes/troubleshooting/#prerequisites","level":2,"title":"Prerequisites","text":"
    • Event logging (optional but recommended): event_log: true in .ctxrc
    • ctx initialized: ctx init

    Event logging is not required for ctx doctor or /ctx-doctor to work. Both degrade gracefully: structural checks run regardless, and the skill notes when event data is unavailable.

    ","path":["Recipes","Maintenance","Troubleshooting"],"tags":[]},{"location":"recipes/troubleshooting/#tips","level":2,"title":"Tips","text":"
    • Start with ctx doctor: It's the fastest way to get a comprehensive health picture. Save event log inspection for when you need to understand when and how often something happened.
    • Enable event logging early: The log is opt-in and low-cost (~250 bytes per event, 1MB rotation cap). Enable it before you need it: Diagnosing a problem without historical data is much harder.
    • Use the skill for correlation: ctx doctor tells you what is wrong. /ctx-doctor tells you why by correlating structural findings with event patterns. The agent can spot connections that individual commands miss.
    • Event log is gitignored: It's machine-local diagnostic data, not project context. Different machines produce different event streams.
    ","path":["Recipes","Maintenance","Troubleshooting"],"tags":[]},{"location":"recipes/troubleshooting/#next-up","level":2,"title":"Next Up","text":"

    Detecting and Fixing Drift →: Keep context files accurate as your codebase evolves.

    ","path":["Recipes","Maintenance","Troubleshooting"],"tags":[]},{"location":"recipes/troubleshooting/#see-also","level":2,"title":"See Also","text":"
    • Auditing System Hooks: the complete hook catalog and webhook-based audit trails
    • Detecting and Fixing Drift: structural and semantic drift detection and repair
    • Webhook Notifications: push notifications for hook activity
    • ctx doctor CLI: full command reference
    • ctx hook event CLI: event log query reference
    • /ctx-doctor skill: agent-driven diagnosis
    ","path":["Recipes","Maintenance","Troubleshooting"],"tags":[]},{"location":"recipes/webhook-notifications/","level":1,"title":"Webhook Notifications","text":"","path":["Recipes","Hooks and Notifications","Webhook Notifications"],"tags":[]},{"location":"recipes/webhook-notifications/#the-problem","level":2,"title":"The Problem","text":"

    Your agent runs autonomously (loops, implements, releases) while you are away from the terminal. You have no way to know when it finishes, hits a limit, or when a hook fires a nudge.

    How do you get notified about agent activity without watching the terminal?

    ","path":["Recipes","Hooks and Notifications","Webhook Notifications"],"tags":[]},{"location":"recipes/webhook-notifications/#tldr","level":2,"title":"TL;DR","text":"
    ctx hook notify setup  # configure webhook URL (encrypted)\nctx hook notify test   # verify delivery\n# Hooks auto-notify on: session-end, loop-iteration, resource-danger\n
    ","path":["Recipes","Hooks and Notifications","Webhook Notifications"],"tags":[]},{"location":"recipes/webhook-notifications/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Tool Type Purpose ctx hook notify setup CLI command Configure and encrypt webhook URL ctx hook notify test CLI command Send a test notification ctx hook notify --event <name> \"msg\" CLI command Send a notification from scripts/skills .ctxrcnotify.events Configuration Filter which events reach your webhook","path":["Recipes","Hooks and Notifications","Webhook Notifications"],"tags":[]},{"location":"recipes/webhook-notifications/#the-workflow","level":2,"title":"The Workflow","text":"","path":["Recipes","Hooks and Notifications","Webhook Notifications"],"tags":[]},{"location":"recipes/webhook-notifications/#step-1-get-a-webhook-url","level":3,"title":"Step 1: Get a Webhook URL","text":"

    Any service that accepts HTTP POST with JSON works. Common options:

    Service How to get a URL IFTTT Create an applet with the \"Webhooks\" trigger Slack Create an Incoming Webhook Discord Channel Settings > Integrations > Webhooks ntfy.sh Use https://ntfy.sh/your-topic (no signup) Pushover Use API endpoint with your user key

    The URL contains auth tokens. ctx encrypts it; it never appears in plaintext in your repo.

    ","path":["Recipes","Hooks and Notifications","Webhook Notifications"],"tags":[]},{"location":"recipes/webhook-notifications/#step-2-configure-the-webhook","level":3,"title":"Step 2: Configure the Webhook","text":"
    ctx hook notify setup\n# Enter webhook URL: https://maker.ifttt.com/trigger/ctx/json/with/key/YOUR_KEY\n# Webhook configured: https://maker.ifttt.com/***\n# Encrypted at: .context/.notify.enc\n

    This encrypts the URL with AES-256-GCM using the same key as the scratchpad (~/.ctx/.ctx.key). The encrypted file (.context/.notify.enc) is safe to commit. The key lives outside the project and is never committed.

    ","path":["Recipes","Hooks and Notifications","Webhook Notifications"],"tags":[]},{"location":"recipes/webhook-notifications/#step-3-test-it","level":3,"title":"Step 3: Test It","text":"
    ctx hook notify test\n# Webhook responded: HTTP 200 OK\n

    If you see No webhook configured, run ctx hook notify setup first.

    ","path":["Recipes","Hooks and Notifications","Webhook Notifications"],"tags":[]},{"location":"recipes/webhook-notifications/#step-4-configure-events","level":3,"title":"Step 4: Configure Events","text":"

    Notifications are opt-in: no events are sent unless you configure an event list in .ctxrc:

    # .ctxrc\nnotify:\n  events:\n    - loop       # loop completion or max-iteration hit\n    - nudge      # VERBATIM relay hooks (context checkpoint, persistence, etc.)\n    - relay      # all hook output (verbose, for debugging)\n    - heartbeat  # every-prompt session-alive signal with metadata\n

    Only listed events fire. Omitting an event silently drops it.

    ","path":["Recipes","Hooks and Notifications","Webhook Notifications"],"tags":[]},{"location":"recipes/webhook-notifications/#step-5-use-in-your-own-skills","level":3,"title":"Step 5: Use in Your Own Skills","text":"

    Add ctx hook notify calls to any skill or script:

    # In a release skill\nctx hook notify --event release \"v1.2.0 released successfully\" 2>/dev/null || true\n\n# In a backup script\nctx hook notify --event backup \"Nightly backup completed\" 2>/dev/null || true\n

    The 2>/dev/null || true suffix ensures the notification never breaks your script: If there's no webhook or the HTTP call fails, it's a silent noop.

    ","path":["Recipes","Hooks and Notifications","Webhook Notifications"],"tags":[]},{"location":"recipes/webhook-notifications/#event-types","level":2,"title":"Event Types","text":"

    ctx fires these events automatically:

    Event Source When loop Loop script Loop completes or hits max iterations nudge System hooks VERBATIM relay nudge is emitted (context checkpoint, persistence, ceremonies, journal, resources, knowledge, version) relay System hooks Any hook output (VERBATIM relays, agent directives, block responses) heartbeat System hook Every prompt: session-alive signal with prompt count and context modification status test ctx hook notify test Manual test notification (custom) Your skills You wire ctx hook notify --event <name> in your own scripts

    nudge vs relay: The nudge event fires only for VERBATIM relay hooks (the ones the agent is instructed to show verbatim). The relay event fires for all hook output: VERBATIM relays, agent directives, and hard gates. Subscribe to relay for debugging (\"did the agent get the post-commit nudge?\"), nudge for user-facing assurance (\"was the checkpoint emitted?\").

    Webhooks as a Hook Audit Trail

    Subscribe to relay events and you get an external record of every hook that fires, independent of the agent.

    This lets you verify hooks are running and catch cases where the agent absorbs a nudge instead of surfacing it.

    See Auditing System Hooks for the full workflow.

    ","path":["Recipes","Hooks and Notifications","Webhook Notifications"],"tags":[]},{"location":"recipes/webhook-notifications/#payload-format","level":2,"title":"Payload Format","text":"

    Every notification sends a JSON POST:

    {\n  \"event\": \"nudge\",\n  \"message\": \"check-context-size: Context window at 82%\",\n  \"detail\": {\n    \"hook\": \"check-context-size\",\n    \"variant\": \"window\",\n    \"variables\": {\"Percentage\": 82, \"TokenCount\": \"164k\"}\n  },\n  \"session_id\": \"abc123-...\",\n  \"timestamp\": \"2026-02-22T14:30:00Z\",\n  \"project\": \"ctx\"\n}\n

    The detail field is a structured template reference containing the hook name, variant, and any template variables. This lets receivers filter by hook or variant without parsing rendered text. The field is omitted when no template reference applies (e.g. custom ctx hook notify calls).

    ","path":["Recipes","Hooks and Notifications","Webhook Notifications"],"tags":[]},{"location":"recipes/webhook-notifications/#heartbeat-payload","level":3,"title":"Heartbeat Payload","text":"

    The heartbeat event fires on every prompt with session metadata and token usage telemetry:

    {\n  \"event\": \"heartbeat\",\n  \"message\": \"heartbeat: prompt #7 (context_modified=false tokens=158k pct=79%)\",\n  \"detail\": {\n    \"hook\": \"heartbeat\",\n    \"variant\": \"pulse\",\n    \"variables\": {\n      \"prompt_count\": 7,\n      \"session_id\": \"abc123-...\",\n      \"context_modified\": false,\n      \"tokens\": 158000,\n      \"context_window\": 200000,\n      \"usage_pct\": 79\n    }\n  },\n  \"session_id\": \"abc123-...\",\n  \"timestamp\": \"2026-02-28T10:15:00Z\",\n  \"project\": \"ctx\"\n}\n

    The tokens, context_window, and usage_pct fields are included when token data is available from the session JSONL file. They are omitted when no usage data has been recorded yet (e.g. first prompt).

    Unlike other events, heartbeat fires every prompt (not throttled). Use it for observability dashboards or liveness monitoring of long-running sessions.

    ","path":["Recipes","Hooks and Notifications","Webhook Notifications"],"tags":[]},{"location":"recipes/webhook-notifications/#security-model","level":2,"title":"Security Model","text":"Component Location Committed? Permissions Encryption key ~/.ctx/.ctx.key No (user-level) 0600 Encrypted URL .context/.notify.enc Yes (safe) 0600 Webhook URL Never on disk in plaintext N/A N/A

    The key is shared with the scratchpad. If you rotate the encryption key, re-run ctx hook notify setup to re-encrypt the webhook URL with the new key.

    ","path":["Recipes","Hooks and Notifications","Webhook Notifications"],"tags":[]},{"location":"recipes/webhook-notifications/#key-rotation","level":2,"title":"Key Rotation","text":"

    ctx checks the age of the encryption key once per day. If it's older than 90 days (configurable via key_rotation_days), a VERBATIM nudge is emitted suggesting rotation.

    # .ctxrc\nkey_rotation_days: 30   # nudge sooner (default: 90)\n
    ","path":["Recipes","Hooks and Notifications","Webhook Notifications"],"tags":[]},{"location":"recipes/webhook-notifications/#worktrees","level":2,"title":"Worktrees","text":"

    The webhook URL is encrypted with the same encryption key (~/.ctx/.ctx.key). Because the key lives at the user level, it is shared across all worktrees on the same machine - notifications work in worktrees automatically.

    This means agents running in worktrees can send webhook alerts automatically. For autonomous runs where worktree agents are otherwise opaque, webhooks let you monitor them without watching the terminal. Enrich journals and review results on the main branch after merging.

    ","path":["Recipes","Hooks and Notifications","Webhook Notifications"],"tags":[]},{"location":"recipes/webhook-notifications/#event-log-the-local-complement","level":2,"title":"Event Log: The Local Complement","text":"

    Don't need a webhook but want diagnostic visibility? Enable event_log: true in .ctxrc. The event log writes the same payload as webhooks to a local JSONL file (.context/state/events.jsonl) that you can query without any external service:

    ctx hook event --last 20          # recent hook activity\nctx hook event --hook qa-reminder # filter by hook\n

    Webhooks and event logging are independent: you can use either, both, or neither. Webhooks give you push notifications and an external audit trail. The event log gives you local queryability and ctx doctor integration.

    See Troubleshooting for how they work together.

    ","path":["Recipes","Hooks and Notifications","Webhook Notifications"],"tags":[]},{"location":"recipes/webhook-notifications/#tips","level":2,"title":"Tips","text":"
    • Fire-and-forget: Notifications never block. HTTP errors are silently ignored. No retry, no response parsing.
    • No webhook = no cost: When no webhook is configured, ctx hook notify exits immediately. System hooks that call notify.Send() add zero overhead.
    • Multiple projects: Each project has its own .notify.enc. You can point different projects at different webhooks.
    • Event filter is per-project: Configure notify.events in each project's .ctxrc independently.
    ","path":["Recipes","Hooks and Notifications","Webhook Notifications"],"tags":[]},{"location":"recipes/webhook-notifications/#next-up","level":2,"title":"Next Up","text":"

    Auditing System Hooks →: Verify your hooks are running, audit what they do, and get alerted when they go silent.

    ","path":["Recipes","Hooks and Notifications","Webhook Notifications"],"tags":[]},{"location":"recipes/webhook-notifications/#see-also","level":2,"title":"See Also","text":"
    • CLI Reference: ctx hook notify: full command reference
    • Configuration: .ctxrc settings including notify options
    • Running an Unattended AI Agent: how loops work and how notifications fit in
    • Hook Output Patterns: understanding VERBATIM relays, agent directives, and hard gates
    • Auditing System Hooks: using webhooks as an external audit trail for hook execution
    ","path":["Recipes","Hooks and Notifications","Webhook Notifications"],"tags":[]},{"location":"recipes/when-to-use-agent-teams/","level":1,"title":"When to Use a Team of Agents","text":"","path":["Recipes","Agents and Automation","When to Use a Team of Agents"],"tags":[]},{"location":"recipes/when-to-use-agent-teams/#the-problem","level":2,"title":"The Problem","text":"

    You have a task, and you are wondering: \"should I throw more agents at it?\"

    More agents can mean faster results, but they also mean coordination overhead, merge conflicts, divergent mental models, and wasted tokens re-reading context.

    The wrong setup costs more than it saves.

    This recipe is a decision framework: It helps you choose between a single agent, parallel worktrees, and a full agent team, and explains what ctx provides at each level.

    ","path":["Recipes","Agents and Automation","When to Use a Team of Agents"],"tags":[]},{"location":"recipes/when-to-use-agent-teams/#tldr","level":2,"title":"TL;DR","text":"
    • Single agent for most work;
    • Parallel worktrees when tasks touch disjoint file sets;
    • Agent teams only when tasks need real-time coordination. When in doubt, start with one agent.
    ","path":["Recipes","Agents and Automation","When to Use a Team of Agents"],"tags":[]},{"location":"recipes/when-to-use-agent-teams/#the-spectrum","level":2,"title":"The Spectrum","text":"

    There are three modes, ordered by complexity:

    ","path":["Recipes","Agents and Automation","When to Use a Team of Agents"],"tags":[]},{"location":"recipes/when-to-use-agent-teams/#1-single-agent-default","level":3,"title":"1. Single Agent (Default)","text":"

    One agent, one session, one branch. This is correct for most work.

    Use this when:

    • The task has linear dependencies (step 2 needs step 1's output);
    • Changes touch overlapping files;
    • You need tight feedback loops (review each change before the next);
    • The task requires deep understanding of a single area;
    • Total effort is less than a few hours of agent time.

    ctx provides: Full .context/: tasks, decisions, learnings, conventions, all in one session.

    The agent builds a coherent mental model and persists it as it goes.

    Example tasks: Bug fixes, feature implementation, refactoring a module, writing documentation for one area, debugging.

    ","path":["Recipes","Agents and Automation","When to Use a Team of Agents"],"tags":[]},{"location":"recipes/when-to-use-agent-teams/#2-parallel-worktrees-independent-tracks","level":3,"title":"2. Parallel Worktrees (Independent Tracks)","text":"

    2-4 agents, each in a separate git worktree on its own branch, working on non-overlapping parts of the codebase.

    Use this when:

    • You have 5+ independent tasks in the backlog;
    • Tasks group cleanly by directory or package;
    • File overlap between groups is zero or near-zero;
    • Each track can be completed and merged independently;
    • You want parallelism without coordination complexity.

    ctx provides: Shared .context/ via git (each worktree sees the same tasks, decisions, conventions). /ctx-worktree skill for setup and teardown. TASKS.md as a lightweight work queue.

    Example tasks: Docs + new package + test coverage (three tracks that don't touch the same files). Parallel recipe writing. Independent module development.

    See: Parallel Agent Development with Git Worktrees

    ","path":["Recipes","Agents and Automation","When to Use a Team of Agents"],"tags":[]},{"location":"recipes/when-to-use-agent-teams/#3-agent-team-coordinated-swarm","level":3,"title":"3. Agent Team (Coordinated Swarm)","text":"

    Multiple agents communicating via messages, sharing a task list, with a lead agent coordinating. Claude Code's team/swarm feature.

    Use this when:

    • Tasks have dependencies but can still partially overlap;
    • You need research and implementation happening simultaneously;
    • The work requires different roles (researcher, implementer, tester);
    • A lead agent needs to review and integrate others' work;
    • The task is large enough that coordination cost is justified.

    ctx provides: .context/ as shared state that all agents can read. Task tracking for work assignment. Decisions and learnings as team memory that survives individual agent turnover.

    Example tasks: Large refactor across modules where a lead reviews merges. Research and implementation where one agent explores options while another builds. Multi-file feature that needs integration testing after parallel implementation.

    ","path":["Recipes","Agents and Automation","When to Use a Team of Agents"],"tags":[]},{"location":"recipes/when-to-use-agent-teams/#the-decision-framework","level":2,"title":"The Decision Framework","text":"

    Ask these questions in order:

    Can one agent do this in a reasonable time?\n  YES → Single agent. Stop here.\n  NO  ↓\n\nCan the work be split into non-overlapping file sets?\n  YES → Parallel worktrees (2-4 tracks)\n  NO  ↓\n\nDo the subtasks need to communicate during execution?\n  YES → Agent team with lead coordination\n  NO  → Parallel worktrees with a merge step\n
    ","path":["Recipes","Agents and Automation","When to Use a Team of Agents"],"tags":[]},{"location":"recipes/when-to-use-agent-teams/#the-file-overlap-test","level":3,"title":"The File Overlap Test","text":"

    This is the critical decision point. Before choosing multi-agent, list the files each subtask would touch. If two subtasks modify the same file, they belong in the same track (or the same single-agent session).

    You: \"I want to parallelize these tasks. Which files would each one touch?\"\n\nAgent: [reads `TASKS.md`, analyzes codebase]\n       \"Task A touches internal/config/ and internal/cli/initialize/\n        Task B touches docs/ and site/\n        Task C touches internal/config/ and internal/cli/status/\n\n        Tasks A and C overlap on internal/config/ # they should be\n        in the same track. Task B is independent.\"\n

    When in doubt, keep things in one track. A merge conflict in a critical file costs more time than the parallelism saves.

    ","path":["Recipes","Agents and Automation","When to Use a Team of Agents"],"tags":[]},{"location":"recipes/when-to-use-agent-teams/#when-teams-make-things-worse","level":2,"title":"When Teams Make Things Worse","text":"

    \"More agents\" is not always better. Watch for these patterns:

    Merge hell: If you are spending more time resolving conflicts than the parallel work saved, you split wrong: Re-group by file overlap.

    Context divergence: Each agent builds its own mental model. After 30 minutes of independent work, agent A might make assumptions that contradict agent B's approach. Shorter tracks with frequent merges reduce this.

    Coordination theater: A lead agent spending most of its time assigning tasks, checking status, and sending messages instead of doing work. If the task list is clear enough, worktrees with no communication are cheaper.

    Re-reading overhead: Every agent reads .context/ on startup. A team of 4 agents each reading 4000 tokens of context = 16000 tokens before anyone does any work. For small tasks, that overhead dominates.

    ","path":["Recipes","Agents and Automation","When to Use a Team of Agents"],"tags":[]},{"location":"recipes/when-to-use-agent-teams/#what-ctx-gives-you-at-each-level","level":2,"title":"What ctx Gives You at Each Level","text":"ctx Feature Single Agent Worktrees Team .context/ files Full access Shared via git Shared via filesystem TASKS.md Work queue Split by track Assigned by lead Decisions/Learnings Persisted in session Persisted per branch Persisted by any agent /ctx-next Picks next task Picks within track Lead assigns /ctx-worktree N/A Setup + teardown Optional /ctx-commit Normal commits Per-branch commits Per-agent commits","path":["Recipes","Agents and Automation","When to Use a Team of Agents"],"tags":[]},{"location":"recipes/when-to-use-agent-teams/#team-composition-recipes","level":2,"title":"Team Composition Recipes","text":"

    Four practical team compositions for common workflows.

    ","path":["Recipes","Agents and Automation","When to Use a Team of Agents"],"tags":[]},{"location":"recipes/when-to-use-agent-teams/#feature-development-3-agents","level":3,"title":"Feature Development (3 Agents)","text":"Role Responsibility Architect Writes spec in specs/, breaks work into TASKS.md phases Implementer Picks tasks from TASKS.md, writes code, marks [x] done Reviewer Runs tests, ctx drift, lint; files issues as new tasks

    Coordination: TASKS.md checkboxes. Architect writes tasks before implementer starts. Reviewer runs after each implementer commit.

    Anti-pattern: All three agents editing the same file simultaneously. Sequence the work so only one agent touches a file at a time.

    ","path":["Recipes","Agents and Automation","When to Use a Team of Agents"],"tags":[]},{"location":"recipes/when-to-use-agent-teams/#consolidation-sprint-3-4-agents","level":3,"title":"Consolidation Sprint (3-4 Agents)","text":"Role Responsibility Auditor Runs ctx drift, identifies stale paths and broken refs Code Fixer Updates source code to match context (or vice versa) Doc Writer Updates ARCHITECTURE.md, CONVENTIONS.md, and docs/ Test Fixer (Optional) Fixes tests broken by the fixer's changes

    Coordination: Auditor's ctx drift output is the shared work queue. Each agent claims a subset of issues by adding #in-progress labels.

    Anti-pattern: Fixer and doc writer both editing ARCHITECTURE.md. Assign file ownership explicitly.

    ","path":["Recipes","Agents and Automation","When to Use a Team of Agents"],"tags":[]},{"location":"recipes/when-to-use-agent-teams/#release-prep-2-agents","level":3,"title":"Release Prep (2 Agents)","text":"Role Responsibility Release Notes Generates changelog from commits, writes release notes Validation Runs full test suite, lint, build across platforms

    Coordination: Both read TASKS.md to identify what shipped. Release notes agent works from git log; validation agent works from make audit.

    Anti-pattern: Release notes agent running tests \"to verify.\" Each agent stays in its lane.

    ","path":["Recipes","Agents and Automation","When to Use a Team of Agents"],"tags":[]},{"location":"recipes/when-to-use-agent-teams/#documentation-sprint-3-agents","level":3,"title":"Documentation Sprint (3 Agents)","text":"Role Responsibility Content Writes new pages, expands existing docs Cross-linker Adds nav entries, cross-references, \"See Also\" sections Verifier Builds site, checks broken links, validates rendering

    Coordination: Content agent writes files first. Cross-linker updates zensical.toml and index pages after content lands. Verifier builds after each batch.

    Anti-pattern: Content and cross-linker both editing zensical.toml. Batch nav updates into the cross-linker's pass.

    ","path":["Recipes","Agents and Automation","When to Use a Team of Agents"],"tags":[]},{"location":"recipes/when-to-use-agent-teams/#tips","level":2,"title":"Tips","text":"
    • Start with one agent: Only add parallelism when you have identified the bottleneck. \"This would go faster with more agents\" is usually wrong for tasks under 2 hours.
    • The 3-4 agent ceiling is real: Coordination overhead grows quadratically. 2 agents = 1 communication pair. 4 agents = 6 pairs. Beyond 4, you are managing agents more than doing work.
    • Worktrees > teams for most parallelism needs: If agents don't need to talk to each other during execution, worktrees give you parallelism with zero coordination overhead.
    • Use ctx as the shared brain: Whether it's one agent or four, the .context/ directory is the single source of truth. Decisions go in DECISIONS.md, not in chat messages between agents.
    • Merge early, merge often: Long-lived parallel branches diverge. Merge a track as soon as it's done rather than waiting for all tracks to finish.
    • TASKS.md conflicts are normal: Multiple agents completing different tasks will conflict on merge. The resolution is always additive: accept all [x] completions from both sides.
    ","path":["Recipes","Agents and Automation","When to Use a Team of Agents"],"tags":[]},{"location":"recipes/when-to-use-agent-teams/#next-up","level":2,"title":"Next Up","text":"

    Parallel Agent Development with Git Worktrees →: Run multiple agents on independent task tracks using git worktrees.

    ","path":["Recipes","Agents and Automation","When to Use a Team of Agents"],"tags":[]},{"location":"recipes/when-to-use-agent-teams/#go-deeper","level":2,"title":"Go Deeper","text":"
    • CLI Reference: all commands and flags
    • Integrations: setup for Claude Code, Cursor, Aider
    • Session Journal: browse and search session history
    ","path":["Recipes","Agents and Automation","When to Use a Team of Agents"],"tags":[]},{"location":"recipes/when-to-use-agent-teams/#see-also","level":2,"title":"See Also","text":"
    • Parallel Agent Development with Git Worktrees: the mechanical \"how\" for worktree-based parallelism
    • Running an Unattended AI Agent: serial autonomous loops: a different scaling strategy
    • Tracking Work Across Sessions: managing the task backlog that feeds into any multi-agent setup
    ","path":["Recipes","Agents and Automation","When to Use a Team of Agents"],"tags":[]},{"location":"reference/","level":1,"title":"Reference","text":"

    Technical reference for ctx commands, skills, and internals.

    ","path":["Reference"],"tags":[]},{"location":"reference/#the-system-explains-itself","level":3,"title":"The System Explains Itself","text":"

    The 12 properties that must hold for any valid ctx implementation. Not features: constraints. The system's contract with its users and contributors.

    ","path":["Reference"],"tags":[]},{"location":"reference/#code-conventions","level":3,"title":"Code Conventions","text":"

    Common patterns and fixes for the AST compliance tests in internal/audit/. When a test fails, find the matching section.

    ","path":["Reference"],"tags":[]},{"location":"reference/#cli","level":3,"title":"CLI","text":"

    Every command, subcommand, and flag. Now a top-level section: see CLI Reference.

    ","path":["Reference"],"tags":[]},{"location":"reference/#skills","level":3,"title":"Skills","text":"

    The full skill catalog: what each skill does, when it triggers, and how skills interact with commands.

    ","path":["Reference"],"tags":[]},{"location":"reference/#tool-ecosystem","level":3,"title":"Tool Ecosystem","text":"

    How ctx compares to Cursor Rules, Aider conventions, CLAUDE.md, and other context approaches.

    ","path":["Reference"],"tags":[]},{"location":"reference/#session-journal","level":3,"title":"Session Journal","text":"

    Export, browse, and enrich your session history. Covers the journal site, Obsidian export, and the enrichment pipeline.

    ","path":["Reference"],"tags":[]},{"location":"reference/#scratchpad","level":3,"title":"Scratchpad","text":"

    Encrypted, git-tracked scratch space for short notes and sensitive values that travel with the project.

    ","path":["Reference"],"tags":[]},{"location":"reference/#version-history","level":3,"title":"Version History","text":"

    Changelog for every ctx release.

    ","path":["Reference"],"tags":[]},{"location":"reference/audit-conventions/","level":1,"title":"Code Conventions","text":"","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#code-conventions-common-patterns-and-fixes","level":1,"title":"Code Conventions: Common Patterns and Fixes","text":"

    This guide documents the code conventions enforced by internal/audit/ AST tests. Each section shows the violation pattern, the fix, and the rationale. When a test fails, find the matching section below.

    All tests skip _test.go files. The patterns apply only to production code under internal/.

    ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#variable-shadowing-bare-err-reuse","level":2,"title":"Variable Shadowing (Bare err := Reuse)","text":"

    Test: TestNoVariableShadowing

    When a function has multiple := assignments to err, each shadows the previous one. This makes it impossible to tell which error a later if err != nil is checking.

    Before:

    func Run(cmd *cobra.Command) error {\n    data, err := os.ReadFile(path) \n    if err != nil {\n        return err\n    }\n\n    result, err := json.Unmarshal(data)  // shadows first err\n    if err != nil {\n        return err\n    }\n\n    err = validate(result)  // shadows again\n    return err\n}\n

    After:

    func Run(cmd *cobra.Command) error {\n    data, readErr := os.ReadFile(path)\n    if readErr != nil {\n        return readErr\n    }\n\n    result, parseErr := json.Unmarshal(data)\n    if parseErr != nil {\n        return parseErr\n    }\n\n    validateErr := validate(result)\n    return validateErr\n}\n

    Rule: Use descriptive error names (readErr, writeErr, parseErr, walkErr, absErr, relErr) so each error site is independently identifiable.

    ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#import-name-shadowing","level":2,"title":"Import Name Shadowing","text":"

    Test: TestNoImportNameShadowing

    When a local variable has the same name as an imported package, the import becomes inaccessible in that scope.

    Before:

    import \"github.com/ActiveMemory/ctx/internal/session\"\n\nfunc process(session *entity.Session) {  // param shadows import\n    // session package is now unreachable here\n}\n

    After:

    import \"github.com/ActiveMemory/ctx/internal/session\"\n\nfunc process(sess *entity.Session) {\n    // session package still accessible\n}\n

    Rule: Parameters, variables, and return values must not reuse imported package names. Common renames: session -> sess, token -> tok, config -> cfg, entry -> ent.

    ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#magic-strings","level":2,"title":"Magic Strings","text":"

    Test: TestNoMagicStrings

    String literals in function bodies are invisible to refactoring tools and cause silent breakage when the value changes in one place but not another.

    Before (string literals):

    func loadContext() {\n    data := filepath.Join(dir, \"TASKS.md\")\n    if strings.HasSuffix(name, \".yaml\") {\n        // ...\n    }\n}\n

    After:

    func loadContext() {\n    data := filepath.Join(dir, config.FilenameTask)\n    if strings.HasSuffix(name, config.ExtYAML) {\n        // ...\n    }\n}\n

    Before (format verbs — also caught):

    func EntryHash(text string) string {\n    h := sha256.Sum256([]byte(text))\n    return fmt.Sprintf(\"%x\", h[:8])\n}\n

    After:

    func EntryHash(text string) string {\n    h := sha256.Sum256([]byte(text))\n    return hex.EncodeToString(h[:cfgFmt.HashPrefixLen])\n}\n

    Before (URL schemes — also caught):

    if strings.HasPrefix(target, \"https://\") ||\n    strings.HasPrefix(target, \"http://\") {\n    return target\n}\n

    After:

    if strings.HasPrefix(target, cfgHTTP.PrefixHTTPS) ||\n    strings.HasPrefix(target, cfgHTTP.PrefixHTTP) {\n    return target\n}\n

    Exempt from this check:

    • Empty string \"\", single space \" \", indentation strings
    • Regex capture references ($1, ${name})
    • const and var definition sites (that's where constants live)
    • Struct tags
    • Import paths
    • Packages under internal/config/, internal/assets/tpl/

    Rule: If a string is used for comparison, path construction, or appears in 3+ files, it belongs in internal/config/ as a constant. Format strings belong in internal/config/ as named constants (e.g., cfgGit.FlagLastN, cfgTrace.RefFormat). User-facing prose belongs in internal/assets/ YAML files accessed via desc.Text().

    Common fix for fmt.Sprintf with format verbs:

    Pattern Fix fmt.Sprintf(\"%d\", n)strconv.Itoa(n)fmt.Sprintf(\"%d\", int64Val)strconv.FormatInt(int64Val, 10)fmt.Sprintf(\"%x\", bytes)hex.EncodeToString(bytes)fmt.Sprintf(\"%q\", s)strconv.Quote(s)fmt.Sscanf(s, \"%d\", &n)strconv.Atoi(s)fmt.Sprintf(\"-%d\", n)fmt.Sprintf(cfgGit.FlagLastN, n)\"https://\"cfgHTTP.PrefixHTTPS\"&lt;\" config constant in config/html/","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#direct-printf-calls","level":2,"title":"Direct Printf Calls","text":"

    Test: TestNoPrintfCalls

    cmd.Printf and cmd.PrintErrf bypass the write-package formatting pipeline and scatter user-facing text across the codebase.

    Before:

    func Run(cmd *cobra.Command, args []string) {\n    cmd.Printf(\"Found %d tasks\\n\", count)\n}\n

    After:

    func Run(cmd *cobra.Command, args []string) {\n    write.TaskCount(cmd, count)\n}\n

    Rule: All formatted output goes through internal/write/ which uses cmd.Print/cmd.Println with pre-formatted strings from desc.Text().

    ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#raw-time-format-strings","level":2,"title":"Raw Time Format Strings","text":"

    Test: TestNoRawTimeFormats

    Inline time format strings (\"2006-01-02\", \"15:04:05\") drift when one call site is updated but others are missed.

    Before:

    func formatDate(t time.Time) string {\n    return t.Format(\"2006-01-02\")\n}\n

    After:

    func formatDate(t time.Time) string {\n    return t.Format(cfgTime.DateFormat)\n}\n

    Rule: All time format strings must use constants from internal/config/time/.

    ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#direct-flag-registration","level":2,"title":"Direct Flag Registration","text":"

    Test: TestNoFlagBindOutsideFlagbind

    Direct cobra flag calls (.Flags().StringVar(), etc.) scatter flag wiring across dozens of cmd.go files. Centralizing through internal/flagbind/ gives one place to audit flag names, defaults, and description key lookups.

    Before:

    func Cmd() *cobra.Command {\n    var output string\n    c := &cobra.Command{Use: cmd.UseStatus}\n    c.Flags().StringVarP(&output, \"output\", \"o\", \"\",\n        \"output format\")\n    return c\n}\n

    After:

    func Cmd() *cobra.Command {\n    var output string\n    c := &cobra.Command{Use: cmd.UseStatus}\n    flagbind.StringFlagShort(c, &output, flag.Output,\n        flag.OutputShort, cmd.DescKeyOutput)\n    return c\n}\n

    Rule: All flag registration goes through internal/flagbind/. If the helper you need doesn't exist, add it to flagbind/flag.go before using it.

    ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#todo-comments","level":2,"title":"TODO Comments","text":"

    Test: TestNoTODOComments

    TODO, FIXME, HACK, and XXX comments in production code are invisible to project tracking. They accumulate silently and never get addressed.

    Before:

    // TODO: handle pagination\nfunc listEntries() []Entry {\n

    After:

    Remove the comment and add a task to .context/TASKS.md:

    - [ ] Handle pagination in listEntries (internal/task/task.go)\n

    Rule: Deferred work lives in TASKS.md, not in source comments.

    ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#dead-exports","level":2,"title":"Dead Exports","text":"

    Test: TestNoDeadExports

    Exported symbols with zero references outside their definition file are dead weight. They increase API surface, confuse contributors, and cost maintenance.

    Fix: Either delete the export (preferred) or demote it to unexported if it's still used within the file.

    If the symbol existed for historical reasons and might be needed again, move it to quarantine/deadcode/ with a .dead extension. This preserves the code in git without polluting the live codebase:

    quarantine/deadcode/internal/config/flag/flag.go.dead\n

    Each .dead file includes a header:

    // Dead exports quarantined from internal/config/flag/flag.go\n// Quarantined: 2026-04-02\n// Restore from git history if needed.\n

    Rule: If a test-only allowlist entry is needed (the export exists only for test use), add the fully qualified symbol to testOnlyExports in dead_exports_test.go. Keep this list small — prefer eliminating the export.

    ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#core-package-structure","level":2,"title":"Core Package Structure","text":"

    Test: TestCoreStructure

    core/ directories under internal/cli/ must contain only doc.go and test files at the top level. All domain logic lives in subpackages. This prevents core/ from becoming a god package.

    Before:

    internal/cli/dep/core/\n    go.go           # violation — logic at core/ level\n    python.go       # violation\n    node.go         # violation\n    types.go        # violation\n

    After:

    internal/cli/dep/core/\n    doc.go          # package doc only\n    golang/\n        golang.go\n        golang_test.go\n        doc.go\n    python/\n        python.go\n        python_test.go\n        doc.go\n    node/\n        node.go\n        node_test.go\n        doc.go\n

    Rule: Extract each logical unit into its own subpackage under core/. Each subpackage gets a doc.go. The subpackage name should match the domain concept (golang, check, fix, store), not a generic label (util, helper).

    ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#cross-package-types","level":2,"title":"Cross-Package Types","text":"

    Test: TestCrossPackageTypes

    When a type defined in one package is used from a different module (e.g., cli/doctor importing a type from cli/notify), the type has crossed its module boundary. Cross-cutting types belong in internal/entity/ for discoverability.

    Before:

    // internal/cli/notify/core/types.go\ntype NotifyPayload struct { ... }\n\n// internal/cli/doctor/core/check/check.go\nimport \"github.com/ActiveMemory/ctx/internal/cli/notify/core\"\nfunc check(p core.NotifyPayload) { ... }\n

    After:

    // internal/entity/notify.go\ntype NotifyPayload struct { ... }\n\n// internal/cli/doctor/core/check/check.go\nimport \"github.com/ActiveMemory/ctx/internal/entity\"\nfunc check(p entity.NotifyPayload) { ... }\n

    Exempt: Types inside entity/, proto/, core/ subpackages, and config/ packages. Same-module usage (e.g., cli/doctor/cmd/ using cli/doctor/core/) is not flagged.

    ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#type-file-convention","level":2,"title":"Type File Convention","text":"

    Test: TestTypeFileConvention, TestTypeFileConventionReport

    Exported types in core/ subpackages should live in types.go (the convention from CONVENTIONS.md), not scattered across implementation files. This makes type definitions discoverable. TestTypeFileConventionReport generates a diagnostic summary of all type placements for triage.

    Exception: entity/ organizes by domain (task.go, session.go), proto/ uses schema.go, and err/ packages colocate error types with their domain context.

    ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#desckey-yaml-linkage","level":2,"title":"DescKey / YAML Linkage","text":"

    Test: TestDescKeyYAMLLinkage

    Every DescKey constant must have a corresponding key in the YAML asset files, and every YAML key must have a corresponding DescKey constant. Orphans in either direction mean dead text or runtime panics.

    Fix for orphan YAML key: Delete the YAML entry, or add the corresponding DescKey constant in config/embed/{text,cmd,flag}/.

    Fix for orphan DescKey: Delete the constant, or add the corresponding entry in the YAML file under internal/assets/commands/text/, cmd/, or flag/.

    If the orphan YAML entry was once valid but the feature was removed, move the YAML entry to a .dead file in quarantine/deadcode/.

    ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#package-doc-quality","level":2,"title":"Package Doc Quality","text":"

    Test: TestPackageDocQuality

    Every package under internal/ must have a doc.go with a meaningful package doc comment (at least 8 lines of real content). One-liners and file-list patterns (// - foo.go, // Source files:) are flagged because they drift as files change.

    Template:

    //   /    ctx:                         https://ctx.ist\n// ,'`./    do you remember?\n// `.,'\\\n//   \\    Copyright 2026-present Context contributors.\n//                 SPDX-License-Identifier: Apache-2.0\n\n// Package mypackage does X.\n//\n// It handles Y by doing Z. The main entry point is [FunctionName]\n// which accepts A and returns B.\n//\n// Configuration is read from [config.SomeConstant]. Output is\n// written through [write.SomeHelper].\n//\n// This package is used by [parentpackage] during the W lifecycle\n// phase.\npackage mypackage\n
    ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#inline-regex-compilation","level":2,"title":"Inline Regex Compilation","text":"

    Test: TestNoInlineRegexpCompile

    regexp.MustCompile and regexp.Compile inside function bodies recompile the pattern on every call. Compiled patterns belong at package level.

    Before:

    func parse(s string) bool {\n    re := regexp.MustCompile(`\\d{4}-\\d{2}-\\d{2}`)\n    return re.MatchString(s)\n}\n

    After:

    // In internal/config/regex/regex.go:\n// DatePattern matches ISO date format (YYYY-MM-DD).\nvar DatePattern = regexp.MustCompile(`\\d{4}-\\d{2}-\\d{2}`)\n\n// In calling package:\nfunc parse(s string) bool {\n    return regex.DatePattern.MatchString(s)\n}\n

    Rule: All compiled regexes live in internal/config/regex/ as package-level var declarations. Two tests enforce this: TestNoInlineRegexpCompile catches function-body compilation, and TestNoRegexpOutsideRegexPkg catches package-level compilation outside config/regex/.

    ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#doc-comments","level":2,"title":"Doc Comments","text":"

    Test: TestDocComments

    All functions (exported and unexported), structs, and package-level variables must have a doc comment. Config packages allow group doc comments for const blocks.

    Before:

    func buildIndex(entries []Entry) map[string]int {\n

    After:

    // buildIndex maps entry names to their position in the\n// ordered slice for O(1) lookup during reconciliation.\n//\n// Parameters:\n//   - entries: ordered slice of entries to index\n//\n// Returns:\n//   - map[string]int: name-to-position mapping\nfunc buildIndex(entries []Entry) map[string]int {\n

    Rule: Every function, struct, and package-level var gets a doc comment in godoc format. Functions include Parameters: and Returns: sections. Structs with 2+ fields document every field. See CONVENTIONS.md for the full template.

    ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#line-length","level":2,"title":"Line Length","text":"

    Test: TestLineLength

    Lines in non-test Go files must not exceed 80 characters. This is a hard check, not a suggestion.

    Before:

    _ = trace.Record(fmt.Sprintf(cfgTrace.RefFormat, cfgTrace.RefTypeTask, matchedNum), state.Dir())\n

    After:

    ref := fmt.Sprintf(\n    cfgTrace.RefFormat, cfgTrace.RefTypeTask, matchedNum,\n)\n_ = trace.Record(ref, state.Dir())\n

    Rule: Break at natural points: function arguments, struct fields, chained calls. Long strings (URLs, struct tags) are the rare acceptable exception.

    ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#literal-whitespace","level":2,"title":"Literal Whitespace","text":"

    Test: TestNoLiteralWhitespace

    Bare whitespace string and byte literals (\"\\n\", \"\\r\\n\", \"\\t\") must not appear outside internal/config/token/. All other packages use the token constants.

    Before:

    output := strings.Join(lines, \"\\n\")\n

    After:

    output := strings.Join(lines, token.Newline)\n

    Rule: Whitespace literals are defined once in internal/config/token/. Use token.Newline, token.Tab, token.CRLF, etc.

    ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#magic-numeric-values","level":2,"title":"Magic Numeric Values","text":"

    Test: TestNoMagicValues

    Numeric literals in function bodies need constants, with narrow exceptions.

    Before:

    if len(entries) > 100 {\n    entries = entries[:100]\n}\n

    After:

    if len(entries) > config.MaxEntries {\n    entries = entries[:config.MaxEntries]\n}\n

    Exempt: 0, 1, -1, 2, 10, strconv radix/bitsize args (10, 32, 64 in strconv.Parse*/Format*), octal permissions (caught separately by TestNoRawPermissions), and const/var definition sites.

    ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#inline-separators","level":2,"title":"Inline Separators","text":"

    Test: TestNoInlineSeparators

    strings.Join calls must use token constants for their separator argument, not string literals.

    Before:

    result := strings.Join(parts, \", \")\n

    After:

    result := strings.Join(parts, token.CommaSep)\n

    Rule: Separator strings live in internal/config/token/.

    ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#stuttery-function-names","level":2,"title":"Stuttery Function Names","text":"

    Test: TestNoStutteryFunctions

    Function names must not redundantly include their package name as a PascalCase word boundary. Go callers already write pkg.Function, so pkg.PkgFunction stutters.

    Before:

    // In package write\nfunc WriteJournal(cmd *cobra.Command, ...) {\n

    After:

    // In package write\nfunc Journal(cmd *cobra.Command, ...) {\n

    Exempt: Identity functions like write.Write / write.write.

    ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#predicate-naming-no-ishascan-prefix","level":2,"title":"Predicate Naming (No Is/Has/Can Prefix)","text":"

    Test: None (manual review convention)

    Exported methods that return bool must not use Is, Has, or Can prefixes. The predicate reads more naturally without them, especially at call sites where the package name provides context.

    Before:

    func IsCompleted(t *Task) bool { ... }\nfunc HasChildren(n *Node) bool { ... }\nfunc IsExemptPackage(path string) bool { ... }\n

    After:

    func Completed(t *Task) bool { ... }\nfunc Children(n *Node) bool { ... }  // or: ChildCount > 0\nfunc ExemptPackage(path string) bool { ... }\n

    Rule: Drop the prefix. Private helpers may use prefixes when it reads more naturally (isValid in a local context is fine). This convention applies to exported methods and package-level functions. See CONVENTIONS.md \"Predicates\" section.

    This is not yet enforced by an AST test — it requires semantic understanding of return types and naming intent that makes automated detection fragile. Apply during code review.

    ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#mixed-visibility","level":2,"title":"Mixed Visibility","text":"

    Test: TestNoMixedVisibility

    Files with exported functions must not also contain unexported functions. Public API and private helpers live in separate files.

    Before:

    load.go\n    func Load() { ... }        // exported\n    func parseHeader() { ... } // unexported — violation\n

    After:

    load.go\n    func Load() { ... }        // exported only\nparse.go\n    func parseHeader() { ... } // private helper\n

    Exempt: Files with exactly one function, doc.go, test files.

    ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#stray-errgo-files","level":2,"title":"Stray Err.Go Files","text":"

    Test: TestNoStrayErrFiles

    err.go files must only exist under internal/err/. Error constructors anywhere else create a broken-window pattern where contributors add local error definitions when they see a local err.go.

    Fix: Move the error constructor to internal/err/<domain>/.

    ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#cli-cmd-structure","level":2,"title":"CLI Cmd Structure","text":"

    Test: TestCLICmdStructure

    Each cmd/$sub/ directory under internal/cli/ may contain only cmd.go, run.go, doc.go, and test files. Extra .go files (helpers, output formatters, types) belong in the corresponding core/ subpackage.

    Before:

    internal/cli/doctor/cmd/root/\n    cmd.go\n    run.go\n    format.go   # violation — helper in cmd dir\n

    After:

    internal/cli/doctor/cmd/root/\n    cmd.go\n    run.go\ninternal/cli/doctor/core/format/\n    format.go\n    doc.go\n
    ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#desckey-namespace","level":2,"title":"DescKey Namespace","text":"

    Test: TestUseConstantsOnlyInCobraUse, TestDescKeyOnlyInLookupCalls, TestNoWrongNamespaceLookup

    Three tests enforce DescKey/Use constant discipline:

    1. Use* constants appear only in cobra Use: struct field assignments — never as arguments to desc.Text() or elsewhere.
    2. DescKey* constants are passed only to assets.CommandDesc(), assets.FlagDesc(), or desc.Text() — never to cobra Use:.
    3. No cross-namespace lookups — TextDescKey must not be passed to CommandDesc(), FlagDescKey must not be passed to Text(), etc.
    ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#yaml-examples-registry-linkage","level":2,"title":"YAML Examples / Registry Linkage","text":"

    Test: TestExamplesYAMLLinkage, TestRegistryYAMLLinkage

    Every key in examples.yaml and registry.yaml must match a known entry type constant. Prevents orphan entries that are never rendered.

    Fix: Delete the orphan YAML entry, or add the corresponding constant in config/entry/.

    ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#other-enforced-patterns","level":2,"title":"Other Enforced Patterns","text":"

    These tests follow the same fix approach — extract the operation to its designated package:

    Test Violation Fix TestNoNakedErrorsfmt.Errorf/errors.New outside internal/err/ Add error constructor to internal/err/<domain>/TestNoRawFileIO Direct os.ReadFile, os.Create, etc. Use io.SafeReadFile, io.SafeWriteFile, etc. TestNoRawLogging Direct fmt.Fprintf(os.Stderr, ...) Use log/warn.Warn() or log/event.Append()TestNoExecOutsideExecPkgexec.Command outside internal/exec/ Add command to internal/exec/<domain>/TestNoCmdPrintOutsideWritecmd.Print* outside internal/write/ Add output helper to internal/write/<domain>/TestNoRawPermissions Octal literals (0644, 0755) Use config/fs.PermFile, config/fs.PermExec, etc. TestNoErrorsAserrors.As() Use errors.AsType() (generic, Go 1.23+) TestNoStringConcatPathsdir + \"/\" + file Use filepath.Join(dir, file)","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#general-fix-workflow","level":2,"title":"General Fix Workflow","text":"

    When an audit test fails:

    1. Read the error message. It includes file:line and a description of the violation.
    2. Find the matching section above. The test name maps directly to a section.
    3. Apply the pattern. Most fixes are mechanical: extract to the right package, rename a variable, or replace a literal with a constant.
    4. Run make test before committing. Audit tests run as part of go test ./internal/audit/.
    5. Don't add allowlist entries as a first resort. Fix the code. Allowlists exist only for genuinely unfixable cases (test-only exports, config packages that are definitionally exempt).
    ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/comparison/","level":1,"title":"Tool Ecosystem","text":"","path":["Reference","Tool Ecosystem"],"tags":[]},{"location":"reference/comparison/#high-level-mental-model","level":2,"title":"High-Level Mental Model","text":"

    Many tools help AI think.

    ctx helps AI remember.

    • Not by storing thoughts,
    • but by preserving intent.
    ","path":["Reference","Tool Ecosystem"],"tags":[]},{"location":"reference/comparison/#how-ctx-differs-from-similar-tools","level":2,"title":"How ctx Differs from Similar Tools","text":"

    There are many tools in the AI ecosystem that touch parts of the context problem:

    • Some manage prompts.
    • Some retrieve data.
    • Some provide runtime context objects.
    • Some offer enterprise platforms.

    ctx focuses on a different layer entirely.

    This page explains where ctx fits, and where it intentionally does not.

    ","path":["Reference","Tool Ecosystem"],"tags":[]},{"location":"reference/comparison/#the-core-distinction","level":2,"title":"The Core Distinction","text":"

    Most tools treat context as input.

    ctx treats context as infrastructure.

    That single difference explains nearly all of ctx's design choices.

    Question Most tools ctx Where does context live? In prompts or APIs In files How long does it last? One request / one session Across time Who can read it? The model Humans and tools How is it updated? Implicitly Explicitly Is it inspectable? Rarely Always","path":["Reference","Tool Ecosystem"],"tags":[]},{"location":"reference/comparison/#prompt-management-tools","level":2,"title":"Prompt Management Tools","text":"

    Examples include:

    • prompt templates;
    • reusable system prompts;
    • prompt libraries;
    • prompt versioning tools.

    These tools help you start a session.

    They do not help you continue one.

    Prompt tools:

    • inject text at session start;
    • are ephemeral by design;
    • do not evolve with the project.

    ctx:

    • persists knowledge over time;
    • accumulates decisions and learnings;
    • makes the context part of the repository itself.

    Prompt tooling and ctx are complementary, not competing. Yet, they operate in different layers.

    ","path":["Reference","Tool Ecosystem"],"tags":[]},{"location":"reference/comparison/#retrieval-augmented-generation-rag","level":2,"title":"Retrieval-Augmented Generation (RAG)","text":"

    RAG systems typically:

    • index documents
    • embed text
    • retrieve chunks dynamically at runtime

    They are excellent for:

    • large knowledge bases
    • static documentation
    • reference material

    RAG answers questions like:

    \"What information might be relevant right now?\"

    ctx answers a different question:

    \"What have we already decided, learned, or committed to?\"

    Here are some key differences:

    RAG ctx Statistical relevance Intentional relevance Embedding-based File-based Opaque retrieval Explicit structure Runtime query Persistent memory

    ctx does not replace RAG. Instead, it defines a persistent context layer that RAG can optionally augment.

    RAG belongs to the data plane; ctx defines the context control plane.

    It focuses on project memory, not knowledge search.

    ","path":["Reference","Tool Ecosystem"],"tags":[]},{"location":"reference/comparison/#agent-frameworks","level":2,"title":"Agent Frameworks","text":"

    Agent frameworks often provide:

    • task loops
    • tool orchestration
    • planner/executor patterns
    • autonomous iteration

    These systems are powerful, but they typically assume that:

    • memory is external
    • context is injected
    • state is transient

    Agent frameworks answer:

    \"How should the agent act?\"

    ctx answers:

    \"What should the agent remember?\"

    Without persistent context, agents tend to:

    • rediscover decisions
    • repeat mistakes
    • lose architectural intent

    This is why ctx pairs well with autonomous loop workflows:

    • The loop provides iteration
    • ctx provides continuity

    Together, loops become cumulative instead of forgetful.

    ","path":["Reference","Tool Ecosystem"],"tags":[]},{"location":"reference/comparison/#sdk-level-context-objects","level":2,"title":"SDK-Level Context Objects","text":"

    Some SDKs expose \"context\" objects that exist:

    • inside a process
    • during a request
    • for the lifetime of a call chain

    These are extremely useful and completely different.

    SDK context objects:

    • are in-memory
    • disappear when the process ends
    • are not shared across sessions

    ctx:

    • survives process restarts
    • survives new chats
    • survives new days

    They share a name, not a purpose.

    ","path":["Reference","Tool Ecosystem"],"tags":[]},{"location":"reference/comparison/#enterprise-context-platforms","level":2,"title":"Enterprise Context Platforms","text":"

    Enterprise platforms often provide:

    • centralized context services
    • dashboards
    • access control
    • organizational knowledge layers

    These tools are designed for:

    • teams
    • governance
    • compliance
    • managed environments

    ctx is intentionally:

    • local-first — context lives next to your code, not behind a service boundary.
    • file-based — everything important is a markdown file you can read, diff, grep, and version-control.
    • single-binary core — the context persistence path (init, add, agent, status, drift, load, sync, compact, task, decision, learning, and their siblings) is a single Go binary with no required runtime dependencies. Optional integrations — ctx trace (needs git), ctx serve (needs zensical), the ctx Hub (needs a running hub), Claude Code plugin (needs claude) — are opt-in and each declares its dependency explicitly.
    • CLI-driven — every feature is reachable from the command line and scriptable.
    • developer-controlled — no auto-updating cloud service, no telemetry, no account to sign up for.

    The core ctx binary does not require:

    • a server
    • a database
    • an account
    • a SaaS backend
    • network connectivity (for core operations)

    ctx optimizes for individual and small-team workflows where context should live next to code, not behind a service boundary.

    ","path":["Reference","Tool Ecosystem"],"tags":[]},{"location":"reference/comparison/#specific-tool-comparisons","level":2,"title":"Specific Tool Comparisons","text":"

    Users often evaluate ctx against specific tools they already use. These comparisons clarify where responsibilities overlap, where they diverge, and where the tools are genuinely complementary.

    ","path":["Reference","Tool Ecosystem"],"tags":[]},{"location":"reference/comparison/#claude-code-memory-anthropic-auto-memory","level":3,"title":"Claude Code Memory / Anthropic Auto-Memory","text":"

    Anthropic's auto-memory is tool-managed memory (L2): the model decides what to remember, stores it automatically, and retrieves it implicitly. ctx is system memory (L3): humans and agents explicitly curate decisions, learnings, and tasks in inspectable files.

    Auto-memory is convenient - you do not configure anything. But it is also opaque: you cannot see what was stored, edit it precisely, or share it across tools. ctx files are plain Markdown in your repository, visible in diffs and code review.

    The two are complementary. ctx can absorb auto-memory as an input source (importing what the model remembered into structured context files) while providing the durable, inspectable layer that auto-memory lacks.

    ","path":["Reference","Tool Ecosystem"],"tags":[]},{"location":"reference/comparison/#cursorrules-clauderules","level":3,"title":".Cursorrules / .Claude/rules","text":"

    Static rule files (.cursorrules, .claude/rules/) declare conventions: coding style, forbidden patterns, preferred libraries. They are effective for what to do and load automatically at session start.

    ctx adds dimensions that rule files do not cover: architectural decisions with rationale, learnings discovered during development, active tasks, and a constitution that governs agent behavior. Critically, ctx context accumulates - each session can add to it, and token budgeting ensures only the most relevant context is injected.

    Use rule files for static conventions. Use ctx for evolving project memory.

    ","path":["Reference","Tool Ecosystem"],"tags":[]},{"location":"reference/comparison/#aider-read-watch","level":3,"title":"Aider --read / --watch","text":"

    Aider's --read flag injects file contents at session start; --watch reloads them on change. The concept is similar to ctx's \"load\" step: make the agent aware of specific files.

    The differences emerge beyond loading. Aider has no persistence model -- nothing the agent learns during a session is written back. There is no token budgeting (large files consume the full context window), no priority ordering across file types, and no structured format for decisions or learnings. ctx provides the full lifecycle: load, accumulate, persist, and budget.

    ","path":["Reference","Tool Ecosystem"],"tags":[]},{"location":"reference/comparison/#copilot-workspace","level":3,"title":"Copilot @Workspace","text":"

    GitHub Copilot's @workspace performs workspace-wide code search. It answers \"what code exists?\" - finding function definitions, usages, and file structure across the repository.

    ctx answers a different question: \"what did we decide?\" It stores architectural intent, not code indices. Copilot's workspace search and ctx's project memory are orthogonal; one finds code, the other preserves the reasoning behind it.

    ","path":["Reference","Tool Ecosystem"],"tags":[]},{"location":"reference/comparison/#cline-memory","level":3,"title":"Cline Memory","text":"

    Cline's memory bank stores session context within the Cline extension. The motivation is similar to ctx: help the agent remember across sessions.

    The key difference is portability. Cline memory is tied to Cline - it does not transfer to Claude Code, Cursor, Aider, or any other tool. ctx is tool-agnostic: context lives in plain files that any editor, agent, or script can read. Switching tools does not mean losing memory.

    ","path":["Reference","Tool Ecosystem"],"tags":[]},{"location":"reference/comparison/#when-ctx-is-a-good-fit","level":2,"title":"When ctx Is a Good Fit","text":"

    ctx works best when:

    • you want AI work to compound over time;
    • architectural decisions matter;
    • context must be inspectable;
    • humans and AI must share the same source of truth;
    • Git history should include why, not just what.
    ","path":["Reference","Tool Ecosystem"],"tags":[]},{"location":"reference/comparison/#when-ctx-is-not-the-right-tool","level":2,"title":"When ctx Is Not the Right Tool","text":"

    ctx is probably not what you want if:

    • you only need one-off prompts;
    • you rely exclusively on RAG;
    • you want autonomous agents without a human-readable state;
    • you require centralized enterprise control;
    • you want black-box memory systems.

    These are valid goals, just different ones.

    ","path":["Reference","Tool Ecosystem"],"tags":[]},{"location":"reference/comparison/#further-reading","level":2,"title":"Further Reading","text":"
    • You Can't Import Expertise: why project-specific context matters more than generic best practices
    ","path":["Reference","Tool Ecosystem"],"tags":[]},{"location":"reference/design-invariants/","level":1,"title":"Invariants","text":"","path":["Reference","Invariants"],"tags":[]},{"location":"reference/design-invariants/#the-system-explains-itself","level":1,"title":"The System Explains Itself","text":"

    These are the properties that must hold for any valid ctx implementation.

    • These are not features.
    • These are constraints.

    A change that violates an invariant is a category error, not an improvement.

    ","path":["Reference","Invariants"],"tags":[]},{"location":"reference/design-invariants/#cognitive-state-tiers","level":2,"title":"Cognitive State Tiers","text":"

    ctx distinguishes between three forms of state:

    • Authoritative state: Versioned, inspectable artifacts that define intent and survive time.
    • Delivery views: Deterministic assemblies of the authoritative state for a specific budget or workflow.
    • Ephemeral working state: Local, transient, or sensitive data that assists interaction but does not define system truth.

    The invariants below apply primarily to the authoritative cognitive state.

    ","path":["Reference","Invariants"],"tags":[]},{"location":"reference/design-invariants/#1-cognitive-state-is-explicit","level":2,"title":"1. Cognitive State Is Explicit","text":"

    All authoritative context lives in artifacts that can be inspected, reviewed, and versioned.

    If something is important, it must exist as a file: Not only in a prompt, a chat, or a model's hidden memory.

    ","path":["Reference","Invariants"],"tags":[]},{"location":"reference/design-invariants/#2-assembly-is-reproducible","level":2,"title":"2. Assembly Is Reproducible","text":"

    Given the same:

    • repository state,
    • configuration,
    • and inputs,

    context assembly produces the same result.

    Heuristics may rank or filter for delivery under constraints.

    They do not alter the authoritative state.

    ","path":["Reference","Invariants"],"tags":[]},{"location":"reference/design-invariants/#3-the-authoritative-state-is-human-readable","level":2,"title":"3. The Authoritative State Is Human-Readable","text":"

    The authoritative cognitive state must be stored in formats that a human can:

    • read,
    • diff,
    • review,
    • and edit directly.

    Sensitive working memory may be encrypted at rest. However, encryption must not become the only representation of authoritative knowledge.

    ","path":["Reference","Invariants"],"tags":[]},{"location":"reference/design-invariants/#4-artifacts-outlive-sessions","level":2,"title":"4. Artifacts Outlive Sessions","text":"

    Sessions are transient.

    Knowledge persists.

    Reasoning, decisions, and outcomes must remain available after the interaction that produced them has ended.

    ","path":["Reference","Invariants"],"tags":[]},{"location":"reference/design-invariants/#5-authority-is-user-defined","level":2,"title":"5. Authority Is User-Defined","text":"

    What enters the authoritative context is an explicit human decision.

    Models may suggest.

    Automation may assist.

    Selection is never implicit.

    ","path":["Reference","Invariants"],"tags":[]},{"location":"reference/design-invariants/#6-operation-is-local-first","level":2,"title":"6. Operation Is Local-First","text":"

    The core system must function without requiring network access or a remote service.

    External systems may extend ctx.

    They must not be required for its operation.

    ","path":["Reference","Invariants"],"tags":[]},{"location":"reference/design-invariants/#7-versioning-is-the-memory-model","level":2,"title":"7. Versioning Is the Memory Model","text":"

    The evolution of the authoritative cognitive state must be:

    • preserved,
    • inspectable,
    • and branchable.

    Ephemeral and sensitive working state may use different retention and diff strategies by design.

    Understanding includes understanding how we arrived here.

    ","path":["Reference","Invariants"],"tags":[]},{"location":"reference/design-invariants/#8-structure-enables-scale","level":2,"title":"8. Structure Enables Scale","text":"

    Unstructured accumulation is not memory.

    Authoritative cognitive state must have a defined layout that:

    • communicates intent,
    • supports navigation,
    • and prevents drift.
    ","path":["Reference","Invariants"],"tags":[]},{"location":"reference/design-invariants/#9-verification-is-the-scoreboard","level":2,"title":"9. Verification Is the Scoreboard","text":"

    Claims without recorded outcomes are noise.

    Reality (observed and captured) is the only signal that compounds.

    This invariant defines a required direction:

    The authoritative state must be able to record expectation and result.

    ","path":["Reference","Invariants"],"tags":[]},{"location":"reference/design-invariants/#10-capture-once-reuse-indefinitely","level":2,"title":"10. Capture Once, Reuse Indefinitely","text":"

    Work that has already produced understanding must not be re-derived from scratch.

    Explored paths, rejected options, and validated conclusions are permanent assets.

    ","path":["Reference","Invariants"],"tags":[]},{"location":"reference/design-invariants/#11-policies-are-encoded-not-remembered","level":2,"title":"11. Policies Are Encoded, Not Remembered","text":"

    Alignment must not depend on recall or goodwill.

    Constraints that matter must exist in machine-readable form and participate in context assembly.

    ","path":["Reference","Invariants"],"tags":[]},{"location":"reference/design-invariants/#12-the-system-explains-itself","level":2,"title":"12. The System Explains Itself","text":"

    From the repository state alone it must be possible to determine:

    • what was authoritative,
    • what constraints applied.

    Delivery views may be optimized.

    They must not become the only explanation.

    ","path":["Reference","Invariants"],"tags":[]},{"location":"reference/design-invariants/#non-goals","level":1,"title":"Non-Goals","text":"

    To avoid category errors, ctx does not attempt to be:

    • a skill,
    • a prompt management tool,
    • a chat history viewer,
    • an autonomous agent runtime,
    • a vector database,
    • a hosted memory service.

    Such systems may integrate with ctx.

    They do not define it.

    ","path":["Reference","Invariants"],"tags":[]},{"location":"reference/design-invariants/#implications-for-contributions","level":1,"title":"Implications for Contributions","text":"

    Valid contributions:

    • strengthen an invariant,
    • reduce the cost of maintaining an invariant,
    • or extend the system without violating invariants.

    Invalid contributions:

    • introduce hidden authoritative state,
    • replace reproducible assembly with non-reproducible behavior,
    • make core operation depend on external services,
    • reduce human inspectability of authoritative state,
    • or bypass explicit user authority over what becomes authoritative.
    ","path":["Reference","Invariants"],"tags":[]},{"location":"reference/design-invariants/#the-contract","level":1,"title":"The Contract","text":"

    Everything else (commands, skills, layouts, integrations, optimizations) is an implementation detail.

    These invariants are the system.

    ","path":["Reference","Invariants"],"tags":[]},{"location":"reference/scratchpad/","level":1,"title":"Scratchpad","text":"","path":["Reference","Scratchpad"],"tags":[]},{"location":"reference/scratchpad/#what-is-ctx-scratchpad","level":2,"title":"What Is ctx Scratchpad?","text":"

    A one-liner scratchpad, encrypted at rest, synced via git.

    Quick notes that don't fit decisions, learnings, or tasks: reminders, intermediate values, sensitive tokens, working memory during debugging. Entries are numbered, reorderable, and persist across sessions.

    ","path":["Reference","Scratchpad"],"tags":[]},{"location":"reference/scratchpad/#encrypted-by-default","level":2,"title":"Encrypted by Default","text":"

    Scratchpad entries are encrypted with AES-256-GCM before touching the disk.

    Component Path Git status Encryption key ~/.ctx/.ctx.key User-level, 0600 permissions Encrypted data .context/scratchpad.enc Committed

    The key is generated automatically during ctx init (256-bit via crypto/rand) and stored at ~/.ctx/.ctx.key. One key per machine, shared across all projects.

    The ciphertext format is [12-byte nonce][ciphertext+tag]. No external dependencies: Go stdlib only.

    Because the key is .gitignored and the data is committed, you get:

    • At-rest encryption: the .enc file is opaque without the key
    • Git sync: push/pull the encrypted file like any other tracked file
    • Key separation: the key never leaves the machine unless you copy it
    ","path":["Reference","Scratchpad"],"tags":[]},{"location":"reference/scratchpad/#commands","level":2,"title":"Commands","text":"Command Purpose ctx pad List all entries (numbered 1-based) ctx pad show N Output raw text of entry N (no prefix, pipe-friendly) ctx pad add \"text\" Append a new entry ctx pad rm ID [ID...] Remove entries by stable ID (supports ranges: 3-5) ctx pad edit N \"text\" Replace entry N with new text ctx pad edit N --append \"text\" Append text to the end of entry N ctx pad edit N --prepend \"text\" Prepend text to the beginning of entry N ctx pad edit N --tag tagname Add a tag to entry N ctx pad add TEXT --file PATH Ingest a file as a blob entry (TEXT is the label) ctx pad show N --out PATH Write decoded blob content to a file ctx pad normalize Reassign entry IDs as 1..N ctx pad mv N M Move entry from position N to position M ctx pad resolve Show both sides of a merge conflict for resolution ctx pad import FILE Bulk-import lines from a file (or stdin with -) ctx pad import --blob DIR Import directory files as blob entries ctx pad export [DIR] Export all blob entries to a directory as files ctx pad merge FILE... Merge entries from other scratchpad files into current ctx pad --tag TAG List entries filtered by tag (prefix with ~ to exclude) ctx pad tags List all tags with counts ctx pad tags --json List all tags with counts as JSON

    All commands decrypt on read, operate on plaintext in memory, and re-encrypt on write. The key file is never printed to stdout.

    For blob entries, --append, --prepend, and --tag modify the label while preserving the blob data.

    ","path":["Reference","Scratchpad"],"tags":[]},{"location":"reference/scratchpad/#examples","level":3,"title":"Examples","text":"
    # Add a note\nctx pad add \"check DNS propagation after deploy\"\n\n# List everything\nctx pad\n#   1. check DNS propagation after deploy\n#   2. staging API key: sk-test-abc123\n\n# Show raw text (for piping)\nctx pad show 2\n# sk-test-abc123\n\n# Compose entries\nctx pad edit 1 --append \"$(ctx pad show 2)\"\n\n# Reorder\nctx pad mv 2 1\n\n# Clean up (IDs are stable — they don't shift when entries are deleted)\nctx pad rm 2\n
    ","path":["Reference","Scratchpad"],"tags":[]},{"location":"reference/scratchpad/#tags","level":2,"title":"Tags","text":"

    Entries can contain #word tags for lightweight categorization. Tags are convention-based: any #word token in an entry's text is a tag. No special syntax to add or remove them — use the existing add and edit commands.

    # Add tagged entries\nctx pad add \"check DNS propagation #later\"\nctx pad add \"deploy hotfix #urgent\"\nctx pad add \"review PR #later #ci\"\n\n# Filter by tag\nctx pad --tag later\n#   1. check DNS propagation #later\n#   3. review PR #later #ci\n\n# Exclude a tag\nctx pad --tag ~later\n#   2. deploy hotfix #urgent\n\n# Multiple filters (AND logic)\nctx pad --tag later --tag ci\n#   3. review PR #later #ci\n\n# List all tags with counts\nctx pad tags\n# ci       1\n# later    2\n# urgent   1\n\n# JSON output\nctx pad tags --json\n# [{\"tag\":\"ci\",\"count\":1},{\"tag\":\"later\",\"count\":2},{\"tag\":\"urgent\",\"count\":1}]\n\n# Add a tag to an existing entry\nctx pad edit 1 --tag done\n\n# Combine with other operations\nctx pad edit 1 --append \"checked\" --tag done\n\n# Remove a tag (replace entry text without the tag)\nctx pad edit 1 \"check DNS propagation\"\n

    Entry IDs are stable — they don't shift when other entries are deleted, so ctx pad rm 3 always targets the same entry. Use ctx pad normalize to reassign IDs as 1..N if gaps bother you. Tags are case-sensitive and support letters, digits, hyphens, and underscores (#high-priority, #v2, #my_tag).

    For blob entries, tags are extracted from the label only.

    ","path":["Reference","Scratchpad"],"tags":[]},{"location":"reference/scratchpad/#bulk-import-and-export","level":2,"title":"Bulk Import and Export","text":"

    Import lines from a file in bulk (each non-empty line becomes an entry):

    # Import from a file\nctx pad import notes.txt\n\n# Import from stdin\ngrep TODO *.go | ctx pad import -\n

    Export all blob entries to a directory as files:

    # Export to a directory\nctx pad export ./ideas\n\n# Preview without writing\nctx pad export --dry-run\n\n# Overwrite existing files\nctx pad export --force ./backup\n
    ","path":["Reference","Scratchpad"],"tags":[]},{"location":"reference/scratchpad/#merging-scratchpads","level":2,"title":"Merging Scratchpads","text":"

    Combine entries from other scratchpad files into your current pad. Useful when merging work from parallel worktrees, other machines, or teammates:

    # Merge from a worktree's encrypted scratchpad\nctx pad merge worktree/.context/scratchpad.enc\n\n# Merge from multiple sources (encrypted and plaintext)\nctx pad merge pad-a.enc notes.md\n\n# Merge a foreign encrypted pad using its key\nctx pad merge --key /other/.ctx.key foreign.enc\n\n# Preview without writing\nctx pad merge --dry-run pad-a.enc pad-b.md\n

    Each input file is auto-detected as encrypted or plaintext: decryption is attempted first, and on failure the file is parsed as plain text. Entries are deduplicated by exact content, so running merge twice with the same file is safe.

    ","path":["Reference","Scratchpad"],"tags":[]},{"location":"reference/scratchpad/#file-blobs","level":2,"title":"File Blobs","text":"

    The scratchpad can store small files (up to 64 KB) as blob entries. Files are base64-encoded and stored with a human-readable label.

    # Ingest a file: first argument is the label\nctx pad add \"deploy config\" --file ./deploy.yaml\n\n# Listing shows label with a [BLOB] marker\nctx pad\n#   1. check DNS propagation after deploy\n#   2. deploy config [BLOB]\n\n# Extract to a file\nctx pad show 2 --out ./recovered.yaml\n\n# Or print decoded content to stdout\nctx pad show 2\n

    Blob entries are encrypted identically to text entries. The internal format is label:::base64data: You never need to construct this manually.

    Constraint Value Max file size (pre-encoding) 64 KB Storage format label:::base64(content) Display label [BLOB] in listings

    When Should You Use Blobs

    Blobs are for small files you want encrypted and portable: config snippets, key fragments, deployment manifests, test fixtures. For anything larger than 64 KB, use the filesystem directly.

    ","path":["Reference","Scratchpad"],"tags":[]},{"location":"reference/scratchpad/#using-with-ai","level":2,"title":"Using with AI","text":"

    Use Natural Language

    As with many ctx features, the ctx scratchpad can also be used with natural language. You don't have to memorize the CLI commands.

    CLI gives you \"precision\", whereas natural language gives you flow.

    The /ctx-pad skill maps natural language to ctx pad commands. You don't need to remember the syntax:

    You say What happens \"jot down: check DNS after deploy\" ctx pad add \"check DNS after deploy\" \"show my scratchpad\" ctx pad \"delete the third entry\" ctx pad rm 3 \"update entry 2 to include the new endpoint\" ctx pad edit 2 \"...\" \"move entry 4 to the top\" ctx pad mv 4 1 \"import my notes from notes.txt\" ctx pad import notes.txt \"export all blobs to ./backup\" ctx pad export ./backup \"merge the scratchpad from the worktree\" ctx pad merge worktree/.context/scratchpad.enc

    The skill handles the translation. You describe what you want in plain English; the agent picks the right command.

    ","path":["Reference","Scratchpad"],"tags":[]},{"location":"reference/scratchpad/#worktrees","level":2,"title":"Worktrees","text":"

    The encryption key lives at ~/.ctx/.ctx.key (outside the project directory). Because all worktrees on the same machine share this path, ctx pad works in worktrees automatically - no special setup needed.

    ","path":["Reference","Scratchpad"],"tags":[]},{"location":"reference/scratchpad/#key-distribution","level":2,"title":"Key Distribution","text":"

    The encryption key (~/.ctx/.ctx.key) stays on the machine where it was generated. ctx never transmits it.

    To share the scratchpad across machines:

    1. Copy the key manually: scp, USB drive, password manager.
    2. Push/pull the .enc file via git as usual.
    3. Both machines can now read and write the same scratchpad.

    Never Commit the Key

    The key is .gitignored by default. If you override this, anyone with repo access can decrypt your scratchpad.

    Treat the key like an SSH private key.

    See the Syncing Scratchpad Notes Across Machines recipe for a step-by-step walkthrough.

    ","path":["Reference","Scratchpad"],"tags":[]},{"location":"reference/scratchpad/#plaintext-override","level":2,"title":"Plaintext Override","text":"

    For projects where encryption is unnecessary, disable it in .ctxrc:

    scratchpad_encrypt: false\n

    In plaintext mode:

    • Entries are stored in .context/scratchpad.md instead of .enc.
    • No key is generated or required.
    • All ctx pad commands work identically.
    • The file is human-readable and diffable.

    When Should You Use Plaintext

    Plaintext mode is useful for non-sensitive projects, solo work where encryption adds friction, or when you want scratchpad entries visible in git diff.

    ","path":["Reference","Scratchpad"],"tags":[]},{"location":"reference/scratchpad/#when-should-you-use-scratchpad-versus-context-files","level":2,"title":"When Should You Use Scratchpad versus Context Files","text":"Use case Where it goes Temporary reminders (\"check X after deploy\") Scratchpad Working values during debugging Scratchpad Sensitive tokens or API keys (short-term) Scratchpad Quick notes that don't fit anywhere else Scratchpad Items that are not directly relevant to the project Scratchpad Things that you want to keep near, but also hidden Scratchpad Work items with completion tracking TASKS.md Trade-offs with rationale DECISIONS.md Reusable lessons with context/lesson/application LEARNINGS.md Codified patterns and standards CONVENTIONS.md

    Rule of thumb:

    • If it needs structure or will be referenced months later, use a context file (i.e. DECISIONS.md, LEARNINGS.md, TASKS.md).
    • If it is working memory for the current session or week, use the scratchpad.
    ","path":["Reference","Scratchpad"],"tags":[]},{"location":"reference/scratchpad/#see-also","level":2,"title":"See Also","text":"
    • Syncing Scratchpad Notes Across Machines: Key distribution, push/pull workflow, merge conflict resolution
    • Using the Scratchpad: Natural language examples, blob workflow, when to use scratchpad vs context files
    • Context Files: Format and conventions for all .context/ files
    • Security: Trust model and permission hygiene
    ","path":["Reference","Scratchpad"],"tags":[]},{"location":"reference/session-journal/","level":1,"title":"Session Journal","text":"

    Important Security Note

    Session journals contain sensitive data such as file contents, commands, API keys, internal discussions, error messages with stack traces, and more.

    The .context/journal-site/ and .context/journal-obsidian/ directories MUST be .gitignored.

    • DO NOT host your journal publicly.
    • DO NOT commit your journal files to version control.
    ","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#browse-your-session-history","level":2,"title":"Browse Your Session History","text":"

    ctx's Session Journal turns your AI coding sessions into a browsable, searchable, and editable archive.

    ","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#quick-start","level":2,"title":"Quick Start","text":"

    After using ctx for a couple of sessions, you can generate a journal site with:

    # Import all sessions to markdown\nctx journal import --all\n\n# Generate and serve the journal site\nctx journal site --serve\n

    Then open http://localhost:8000 to browse your sessions.

    ","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#what-you-get","level":2,"title":"What You Get","text":"

    The Session Journal gives you:

    • Browsable history: Navigate through all your AI sessions by date
    • Full conversations: See every message, tool use, and result
    • Token usage: Track how many tokens each session consumed
    • Search: Find sessions by content, project, or date
    • Dark mode: Easy on the eyes for late-night archaeology

    Each session page includes the following sections:

    Section Content Metadata Date, time, duration, model, project, git branch Summary Space for your notes (editable) Tool Usage Which tools were used and how often Conversation Full transcript with timestamps","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#the-workflow","level":2,"title":"The Workflow","text":"","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#1-import-sessions","level":3,"title":"1. Import Sessions","text":"
    # Import all sessions from current project (only new files)\nctx journal import --all\n\n# Import sessions from all projects\nctx journal import --all --all-projects\n\n# Import a specific session by ID (always writes)\nctx journal import abc123\n\n# Preview what would be imported\nctx journal import --all --dry-run\n\n# Re-import existing (regenerates conversation, preserves YAML frontmatter)\nctx journal import --all --regenerate\n\n# Discard frontmatter during regeneration\nctx journal import --all --regenerate --keep-frontmatter=false -y\n

    Imported sessions go to .context/journal/ as editable Markdown files.

    ","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#2-generate-the-site","level":3,"title":"2. Generate the Site","text":"
    # Generate site structure\nctx journal site\n\n# Generate and build static HTML\nctx journal site --build\n\n# Generate and serve locally\nctx journal site --serve\n\n# Custom output directory\nctx journal site --output ~/my-journal\n

    The site is generated in .context/journal-site/ by default.

    ","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#3-browse-and-search","level":3,"title":"3. Browse and Search","text":"

    Open http://localhost:8000 after running --serve.

    • Use the sidebar to navigate by date
    • Use search (/ key) to find specific content
    • Click any session to see the full conversation
    ","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#editing-sessions","level":2,"title":"Editing Sessions","text":"

    Imported sessions are plain Markdown in .context/journal/. You can:

    • Add summaries: Fill in the ## Summary section
    • Add notes: Insert your own commentary anywhere
    • Highlight key moments: Use Markdown formatting
    • Delete noise: Remove irrelevant tool outputs

    After editing, regenerate the site:

    ctx journal site --serve\n
    Safe by Default

    Running ctx journal import --all only imports new sessions. Existing files are skipped entirely (your edits and enrichments are never touched).

    Use --regenerate to re-import existing files. Conversation content is regenerated, but YAML frontmatter (topics, type, outcome, etc.) is preserved. You'll be prompted before any existing files are overwritten; add -y to skip the prompt.

    Use --keep-frontmatter=false to discard enriched frontmatter during regeneration.

    Locked entries (via ctx journal lock) are always skipped, regardless of flags. If you prefer to add locked: true to frontmatter during enrichment, run ctx journal sync to propagate the lock state to .state.json.

    ","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#large-sessions","level":2,"title":"Large Sessions","text":"

    Sessions with many messages (200+) are automatically split into multiple parts for better browser performance. Navigation links connect the parts:

    session-abc123.md      (Part 1 of 3)\nsession-abc123-p2.md   (Part 2 of 3)\nsession-abc123-p3.md   (Part 3 of 3)\n
    ","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#suggestion-sessions","level":2,"title":"Suggestion Sessions","text":"

    Claude Code generates \"suggestion\" sessions for auto-complete prompts. These are separated in the index under a \"Suggestions\" section to keep your main session list focused.

    ","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#enriching-journal-entries","level":2,"title":"Enriching Journal Entries","text":"

    Raw imported sessions contain basic metadata (date, time, project) but lack the structured information needed for effective search, filtering, and analysis. Journal enrichment adds semantic metadata that transforms a flat archive into a searchable knowledge base.

    ","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#why-enrich","level":3,"title":"Why Enrich?","text":"

    Without enrichment, you have timestamps and raw conversations. With enrichment:

    • Find sessions by topic: \"Show me all auth-related sessions\"
    • Filter by outcome: \"What did I abandon vs complete?\"
    • Track technology usage: \"When did I last work with PostgreSQL?\"
    • Identify key files: Jump directly to the files discussed
    • Get summaries: Understand what happened without reading transcripts
    ","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#the-frontmatter-schema","level":3,"title":"The Frontmatter Schema","text":"

    Enriched entries begin with YAML frontmatter:

    ---\ntitle: \"Implement caching layer\"\ndate: 2026-01-27\ntype: feature\noutcome: completed\ntopics:\n  - caching\n  - performance\ntechnologies:\n  - go\n  - redis\nlibraries:\n  - go-redis/redis\nkey_files:\n  - internal/cache/redis.go\n  - internal/cache/memory.go\n---\n
    Field Required Description title Yes Descriptive title (not the session slug) date Yes Session date (YYYY-MM-DD) type Yes Session type (see below) outcome Yes How the session ended (see below) topics No Subject areas discussed technologies No Languages, databases, frameworks libraries No Specific packages or libraries used key_files No Important files created or modified

    Type values:

    Type When to use feature Building new functionality bugfix Fixing broken behavior refactor Restructuring without behavior change exploration Research, learning, experimentation debugging Investigating issues documentation Writing docs, comments, README

    Outcome values:

    Outcome Meaning completed Goal achieved partial Some progress, work continues abandoned Stopped pursuing this approach blocked Waiting on external dependency","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#using-ctx-journal-enrich","level":3,"title":"Using /ctx-journal-enrich","text":"

    The /ctx-journal-enrich skill automates enrichment by analyzing conversation content and proposing metadata.

    Invoke by session identifier:

    /ctx-journal-enrich twinkly-stirring-kettle\n/ctx-journal-enrich twinkly\n/ctx-journal-enrich 2026-01-24\n/ctx-journal-enrich 76fe2ab9\n

    The skill will:

    1. Check if locked - locked entries are skipped (same as export);
    2. Find the matching journal file;
    3. Read and analyze the conversation;
    4. Propose frontmatter (type, topics, outcome, technologies);
    5. Generate a 2-3 sentence summary;
    6. Extract decisions, learnings, and tasks mentioned;
    7. Show a diff and ask for confirmation before writing.
    ","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#before-and-after","level":3,"title":"Before and After","text":"

    Before enrichment:

    # twinkly-stirring-kettle\n\n**ID**: abc123-def456\n**Date**: 2026-01-24\n**Time**: 14:30:00\n...\n\n## Summary\n\n[Add your summary of this session]\n\n## Conversation\n...\n

    After enrichment:

    ---\ntitle: \"Add Redis caching to API endpoints\"\ndate: 2026-01-24\ntype: feature\noutcome: completed\ntopics:\n  - caching\n  - api-performance\ntechnologies:\n  - go\n  - redis\nkey_files:\n  - internal/api/middleware/cache.go\n  - internal/cache/redis.go\n---\n\n# twinkly-stirring-kettle\n\n**ID**: abc123-def456\n**Date**: 2026-01-24\n**Time**: 14:30:00\n...\n\n## Summary\n\nImplemented Redis-based caching middleware for frequently accessed API endpoints.\nAdded cache invalidation on writes and configurable TTL per route. Reduced\n the average response time from 200ms to 15ms for cached routes.\n\n## Decisions\n\n* Used Redis over in-memory cache for horizontal scaling\n* Chose per-route TTL configuration over global setting\n\n## Learnings\n\n* Redis WATCH command prevents race conditions during cache invalidation\n\n## Conversation\n...\n
    ","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#enrichment-and-site-generation","level":3,"title":"Enrichment and Site Generation","text":"

    The journal site generator uses enriched metadata for better organization:

    • Titles appear in navigation instead of slugs
    • Summaries provide context in the index
    • Topics enable filtering (when using search)
    • Types allow grouping by work category

    Future improvements will add topic-based navigation and outcome filtering to the generated site.

    ","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#batch-enrichment","level":3,"title":"Batch Enrichment","text":"

    To enrich multiple sessions, process them one at a time:

    # List unenriched sessions (those without frontmatter)\ngrep -L \"^---$\" .context/journal/*.md | head -10\n

    Then run /ctx-journal-enrich on each. Enrichment is intentionally interactive to ensure accuracy.

    ","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#obsidian-vault-export","level":2,"title":"Obsidian Vault Export","text":"

    If you use Obsidian for knowledge management, you can export your journal as an Obsidian vault instead of (or alongside) the static site:

    ctx journal obsidian\n

    This generates a vault in .context/journal-obsidian/ with:

    • Wikilinks ([[target|display]]) instead of Markdown links
    • MOC pages (Map of Content) for topics, key files, and session types
    • Related sessions footer per entry: links to entries sharing the same topics
    • Transformed frontmatter: topics renamed to tags (Obsidian-recognized), aliases added from title for search
    • Graph-optimized structure: MOC hubs and cross-linked entries create dense graph connectivity

    To use: open the output directory in Obsidian (\"Open folder as vault\").

    # Custom output directory\nctx journal obsidian --output ~/vaults/ctx-journal\n

    Static Site vs Obsidian Vault

    Use ctx journal site when you want a web-browsable archive with search and dark mode. Use ctx journal obsidian when you want graph view, backlinks, and tag-based navigation inside Obsidian. Both use the same enriched source entries: you can generate both.

    ","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#full-pipeline","level":2,"title":"Full Pipeline","text":"

    The complete journal workflow has four stages. Each is idempotent: safe to re-run, and stages skip already-processed entries.

    import → enrich → rebuild\n
    Stage Command / Skill What it does Skips if Import ctx journal import --all Converts session JSONL to Markdown File already exists (safe default) Enrich /ctx-journal-enrich Adds frontmatter, summaries, topics Frontmatter already present Rebuild ctx journal site --build Generates static HTML site -- Obsidian ctx journal obsidian Generates Obsidian vault with wikilinks --

    One-Command Pipeline

    /ctx-journal-enrich-all handles import automatically - it detects unimported sessions and imports them before enriching. You only need to run ctx journal site --build afterward.

    ","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#using-make-journal","level":3,"title":"Using make journal","text":"

    If your project includes Makefile.ctx (deployed by ctx init), the first and last stages are combined:

    make journal           # import + rebuild\n

    After it runs, it reminds you to enrich in Claude Code:

    Next steps (in Claude Code):\n  /ctx-journal-enrich-all # imports if needed + adds metadata per entry\n\nThen re-run: make journal\n

    Rendering Issues?

    If individual entries have rendering problems (broken fences, malformed lists), check the programmatic normalization in the import pipeline. Most cases are handled automatically during ctx journal import.

    ","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#tips","level":2,"title":"Tips","text":"

    Daily workflow:

    # Import, browse, then enrich in Claude Code\nmake journal && make journal-serve\n# Then in Claude Code: /ctx-journal-enrich <session>\n

    After a productive session:

    # Import just that session and add notes\nctx journal import <session-id>\n# Edit .context/journal/<session>.md\n# Regenerate: ctx journal site\n

    Searching across all sessions:

    # Use grep on the journal directory\ngrep -r \"authentication\" .context/journal/\n

    ","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#requirements","level":2,"title":"Requirements","text":"Use pipx for zensical

    pip install zensical may install a non-functional stub on system Python. Using venv has other issues too.

    These issues are especially common on macOS.

    Use pipx install zensical, which creates an isolated environment and handles Python version management automatically.

    The journal site uses zensical for static site generation:

    pipx install zensical\n
    ","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#see-also","level":2,"title":"See Also","text":"
    • ctx journal: Session discovery and listing
    • ctx journal site: Static site generation
    • ctx journal obsidian: Obsidian vault export
    • Context Files: The .context/ directory structure
    ","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/skills/","level":1,"title":"Skills","text":"","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#skills","level":2,"title":"Skills","text":"

    Skills are slash commands that run inside your AI assistant (e.g., /ctx-next), as opposed to CLI commands that run in your terminal (e.g., ctx status).

    Skills give your agent structured workflows: It knows what to read, what to run, and when to ask. Most wrap one or more ctx CLI commands with opinionated behavior on top.

    Skills Are Best Used Conversationally

    The beauty of ctx is that it's designed to be intuitive and conversational, allowing you to interact with your AI assistant naturally. That's why you don't have to memorize many of these skills.

    See the Prompting Guide for natural-language triggers that invoke these skills conversationally.

    However, when you need more precise control, you have the option to invoke the relevant skills directly.

    ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#all-skills","level":2,"title":"All Skills","text":"Skill Description Type /ctx-remember Recall project context and present structured readback user-invocable /ctx-wrap-up End-of-session context persistence ceremony user-invocable /ctx-status Show context summary with interpretation user-invocable /ctx-agent Load full context packet for AI consumption user-invocable /ctx-next Suggest 1-3 concrete next actions with rationale user-invocable /ctx-commit Commit with integrated context persistence user-invocable /ctx-reflect Pause and reflect on session progress user-invocable /ctx-task-add Add actionable task to TASKS.md user-invocable /ctx-decision-add Record architectural decision with rationale user-invocable /ctx-learning-add Record gotchas and lessons learned user-invocable /ctx-convention-add Record coding convention for consistency user-invocable /ctx-archive Archive completed tasks from TASKS.md user-invocable /ctx-pad Manage encrypted scratchpad entries user-invocable /ctx-history Browse and import AI session history user-invocable /ctx-journal-enrich Enrich single journal entry with metadata user-invocable /ctx-journal-enrich-all Full journal pipeline: export if needed, then batch-enrich user-invocable /ctx-blog Generate blog post draft from project activity user-invocable /ctx-blog-changelog Generate themed blog post from a commit range user-invocable /ctx-consolidate Consolidate redundant learnings or decisions user-invocable /ctx-drift Detect and fix context drift user-invocable /ctx-prompt Apply, list, and manage saved prompt templates user-invocable /ctx-prompt-audit Analyze prompting patterns for improvement user-invocable /ctx-link-check Audit docs for dead internal and external links user-invocable /ctx-permission-sanitize Audit Claude Code permissions for security risks user-invocable /ctx-brainstorm Structured design dialogue before implementation user-invocable /ctx-spec 
Scaffold a feature spec from a project template user-invocable /ctx-plan-import Import Claude Code plan files into project specs user-invocable /ctx-implement Execute a plan step-by-step with verification user-invocable /ctx-loop Generate autonomous loop script user-invocable /ctx-worktree Manage git worktrees for parallel agents user-invocable /ctx-architecture Build and maintain architecture maps user-invocable /ctx-architecture-failure-analysis Adversarial failure analysis for correctness bugs user-invocable /ctx-remind Manage session-scoped reminders user-invocable /ctx-doctor Troubleshoot ctx behavior with health checks and event analysis user-invocable /ctx-skill-audit Audit skills against Anthropic prompting best practices user-invocable /ctx-skill-create Create, improve, and test skills user-invocable /ctx-pause Pause context hooks for this session user-invocable /ctx-resume Resume context hooks after a pause user-invocable","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#session-lifecycle","level":2,"title":"Session Lifecycle","text":"

    Skills for starting, running, and ending a productive session.

    Session Ceremonies

    Two skills in this group are ceremony skills: /ctx-remember (session start) and /ctx-wrap-up (session end). Unlike other skills that work conversationally, these should be invoked as explicit slash commands for completeness. See Session Ceremonies.

    ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-remember","level":3,"title":"/ctx-remember","text":"

    Recall project context and present a structured readback. Ceremony skill: invoke explicitly at session start.

    Wraps: ctx agent --budget 4000, ctx journal source --limit 3, reads TASKS.md, DECISIONS.md, LEARNINGS.md

    See also: Session Ceremonies, The Complete Session

    ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-status","level":3,"title":"/ctx-status","text":"

    Show context summary (files, token budget, tasks, recent activity) with interpreted suggestions.

    Wraps: ctx status [--verbose] [--json]

    See also: The Complete Session, ctx status CLI

    ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-agent","level":3,"title":"/ctx-agent","text":"

    Load the full context packet optimized for AI consumption. Also runs automatically via the PreToolUse hook with cooldown.

    Wraps: ctx agent [--budget] [--format] [--cooldown] [--session]

    See also: The Complete Session, ctx agent CLI

    ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-next","level":3,"title":"/ctx-next","text":"

    Suggest 1-3 concrete next actions ranked by priority, momentum, and unblocked status.

    Wraps: reads TASKS.md, ctx journal source --limit 3

    See also: The Complete Session, Tracking Work Across Sessions

    ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-commit","level":3,"title":"/ctx-commit","text":"

    Commit code with integrated context persistence: pre-commit checks, staged files, Co-Authored-By trailer, and a post-commit prompt to capture decisions and learnings.

    Wraps: git add, git commit, optionally chains to /ctx-decision-add and /ctx-learning-add

    See also: The Complete Session

    ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-reflect","level":3,"title":"/ctx-reflect","text":"

    Pause and reflect on session progress. Walks through a checklist of learnings, decisions, task completions, and session notes to persist.

    Wraps: chains to ctx add learning, ctx add decision, manual TASKS.md updates

    See also: The Complete Session, Persisting Decisions, Learnings, and Conventions

    ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-wrap-up","level":3,"title":"/ctx-wrap-up","text":"

    End-of-session context persistence ceremony. Gathers signal from git diff, recent commits, and conversation themes. Proposes candidates (learnings, decisions, conventions, tasks) with complete structured fields for user approval, then persists via ctx add. Offers /ctx-commit if uncommitted changes remain. Ceremony skill: invoke explicitly at session end.

    Wraps: git diff --stat, git log, ctx add learning, ctx add decision, ctx add convention, ctx add task, chains to /ctx-commit

    See also: Session Ceremonies, The Complete Session

    ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#context-persistence","level":2,"title":"Context Persistence","text":"

    Skills for recording work artifacts — tasks, decisions, learnings, conventions — into .context/ files.

    ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-task-add","level":3,"title":"/ctx-task-add","text":"

    Add an actionable task with optional priority and phase section.

    Wraps: ctx add task \"description\" [--priority high|medium|low] --session-id ID --branch BR --commit HASH

    See also: Tracking Work Across Sessions

    ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-decision-add","level":3,"title":"/ctx-decision-add","text":"

    Record an architectural decision with context, rationale, and consequence. Supports Y-statement (lightweight) and full ADR formats.

    Wraps: ctx add decision \"title\" --context \"...\" --rationale \"...\" --consequence \"...\" --session-id ID --branch BR --commit HASH

    See also: Persisting Decisions, Learnings, and Conventions

    ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-learning-add","level":3,"title":"/ctx-learning-add","text":"

    Record a project-specific gotcha, bug, or unexpected behavior. Filters for insights that are searchable, project-specific, and required real effort to discover.

    Wraps: ctx add learning \"title\" --context \"...\" --lesson \"...\" --application \"...\" --session-id ID --branch BR --commit HASH

    See also: Persisting Decisions, Learnings, and Conventions

    ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-convention-add","level":3,"title":"/ctx-convention-add","text":"

    Record a coding convention that should be standardized across sessions. Targets patterns seen 2-3+ times.

    Wraps: ctx add convention \"rule\" --section \"Name\"

    See also: Persisting Decisions, Learnings, and Conventions

    ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-archive","level":3,"title":"/ctx-archive","text":"

    Archive completed tasks from TASKS.md to a timestamped file in .context/archive/. Preserves phase headers for traceability.

    Wraps: ctx task archive [--dry-run]

    See also: Tracking Work Across Sessions

    ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#scratchpad","level":2,"title":"Scratchpad","text":"","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-pad","level":3,"title":"/ctx-pad","text":"

    Manage the encrypted scratchpad: add, remove, edit, and reorder one-liner notes. Encrypted at rest with AES-256-GCM.

    Wraps: ctx pad, ctx pad add, ctx pad rm, ctx pad edit, ctx pad mv, ctx pad import, ctx pad export, ctx pad merge

    See also: Scratchpad, Using the Scratchpad

    ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#journal-history","level":2,"title":"Journal & History","text":"

    Skills for browsing, exporting, and enriching your AI session history into a structured journal.

    ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-history","level":3,"title":"/ctx-history","text":"

    Browse, inspect, and import AI session history. List recent sessions, show details by slug or ID, and import to .context/journal/.

    Wraps: ctx journal source, ctx journal source --show, ctx journal import

    See also: Browsing and Enriching Past Sessions

    ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-journal-enrich","level":3,"title":"/ctx-journal-enrich","text":"

    Enrich a single journal entry with YAML frontmatter: title, type, outcome, topics, technologies, and summary. Shows diff before writing.

    Wraps: reads and edits .context/journal/*.md files

    See also: Browsing and Enriching Past Sessions, Turning Activity into Content

    ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-journal-enrich-all","level":3,"title":"/ctx-journal-enrich-all","text":"

    Full journal pipeline: imports unimported sessions first, then batch-enriches all unenriched entries. Filters out short sessions and continuations. Can spawn subagents for large backlogs.

    Wraps: ctx journal import --all + iterates /ctx-journal-enrich

    See also: Browsing and Enriching Past Sessions

    ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#content-creation","level":2,"title":"Content Creation","text":"

    Skills for turning project activity into publishable content.

    ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-blog","level":3,"title":"/ctx-blog","text":"

    Generate a blog post draft from recent project activity: git history, decisions, learnings, tasks, and journal entries. Requires a narrative arc (problem, approach, outcome).

    Wraps: reads git log, DECISIONS.md, LEARNINGS.md, TASKS.md, journal entries; writes to docs/blog/

    See also: Turning Activity into Content

    ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-blog-changelog","level":3,"title":"/ctx-blog-changelog","text":"

    Generate a themed blog post from a commit range. Takes a starting commit and unifying theme, analyzes diffs and journal entries from that period.

    Wraps: git log, git diff --stat; writes to docs/blog/

    See also: Turning Activity into Content

    ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#auditing-health","level":2,"title":"Auditing & Health","text":"

    Skills for detecting drift, auditing alignment, and improving prompt quality.

    ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-consolidate","level":3,"title":"/ctx-consolidate","text":"

    Consolidate redundant entries in LEARNINGS.md or DECISIONS.md. Groups overlapping entries by keyword similarity, presents candidates, and (with user approval) merges groups into denser combined entries. Originals are archived, not deleted.

    Wraps: reads LEARNINGS.md and DECISIONS.md, writes consolidated entries, archives originals, runs ctx reindex

    See also: Detecting and Fixing Drift

    ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-drift","level":3,"title":"/ctx-drift","text":"

    Detect and fix context drift: stale paths, missing files, file age staleness, task accumulation, entry count warnings, and constitution violations via ctx drift. Also detects skill drift against canonical templates.

    Wraps: ctx drift [--fix]

    See also: Detecting and Fixing Drift

    ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-prompt-audit","level":3,"title":"/ctx-prompt-audit","text":"

    Analyze recent prompting patterns to identify vague or ineffective prompts. Reviews 3-5 journal entries and suggests rewrites with positive observations.

    Wraps: reads .context/journal/ entries

    See also: Detecting and Fixing Drift

    ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-doctor","level":3,"title":"/ctx-doctor","text":"

    Troubleshoot ctx behavior. Runs structural health checks via ctx doctor, analyzes event log patterns via ctx hook event, and presents findings with suggested actions. The CLI provides the structural baseline; the agent adds semantic analysis of event patterns and correlations.

    Wraps: ctx doctor --json, ctx hook event --json --last 100, ctx remind list, ctx hook message list, reads .ctxrc

    Trigger phrases: \"diagnose\", \"troubleshoot\", \"doctor\", \"health check\", \"why didn't my hook fire?\", \"hooks seem broken\", \"something seems off\"

    Graceful degradation: If event_log is not enabled, the skill still works but with reduced capability. It runs structural checks and notes: \"Enable event_log: true in .ctxrc for hook-level diagnostics.\"

    See also: Troubleshooting, ctx doctor CLI, ctx hook event CLI

    ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-link-check","level":3,"title":"/ctx-link-check","text":"

    Scan all markdown files under docs/ for broken links. Three passes: internal links (verify file targets exist on disk), external links (HTTP HEAD with timeout, report failures as warnings), and image references. Resolves relative paths, strips anchors before checking, and skips localhost/example URLs.

    Wraps: Glob + Grep to scan, curl for external checks

    Trigger phrases: \"check links\", \"audit links\", \"any broken links?\", \"dead links\"

    See also: Detecting and Fixing Drift

    ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-permission-sanitize","level":3,"title":"/ctx-permission-sanitize","text":"

    Audit .claude/settings.local.json for dangerous permissions across four risk categories: hook bypass (Critical), destructive commands (High), config injection vectors (High), and overly broad patterns (Medium). Reports findings by severity and offers specific fix actions with user confirmation.

    Wraps: reads .claude/settings.local.json, edits with confirmation

    Trigger phrases: \"audit permissions\", \"are my permissions safe?\", \"sanitize permissions\", \"check settings\"

    See also: Claude Code Permission Hygiene

    ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#planning-execution","level":2,"title":"Planning & Execution","text":"

    Skills for structured design, implementation, and parallel agent workflows.

    ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-brainstorm","level":3,"title":"/ctx-brainstorm","text":"

    Transform raw ideas into clear, validated designs through structured dialogue before any implementation begins. Follows a gated process: understand context, clarify the idea (one question at a time), surface non-functional requirements, lock understanding with user confirmation, explore 2-3 design approaches with trade-offs, stress-test the chosen approach, and present the detailed design.

    Wraps: reads DECISIONS.md, relevant source files; chains to /ctx-decision-add for recording design choices

    Trigger phrases: \"let's brainstorm\", \"design this\", \"think through\", \"before we build\", \"what approach should we take?\"

    See also: /ctx-spec

    ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-spec","level":3,"title":"/ctx-spec","text":"

    Scaffold a feature spec from the project template and walk through each section with the user. Covers: problem, approach, happy path, edge cases, validation rules, error handling, interface, implementation, configuration, testing, and non-goals. Spends extra time on edge cases and error handling.

    Wraps: reads specs/tpl/spec-template.md, writes to specs/, optionally chains to /ctx-task-add

    Trigger phrases: \"spec this out\", \"write a spec\", \"create a spec\", \"design document\"

    See also: /ctx-brainstorm, /ctx-plan-import

    ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-plan-import","level":3,"title":"/ctx-plan-import","text":"

    Import Claude Code plan files (~/.claude/plans/*.md) into the project's specs/ directory. Lists plans with dates and H1 titles, supports filtering (--today, --since, --all), slugifies headings for filenames, and optionally creates tasks referencing each imported spec.

    Wraps: reads ~/.claude/plans/*.md, writes to specs/, optionally chains to /ctx-task-add

    See also: Importing Claude Code Plans, Tracking Work Across Sessions

    ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-implement","level":3,"title":"/ctx-implement","text":"

    Execute a multi-step plan with build and test verification at each step. Loads a plan from a file or conversation context, breaks it into atomic steps, and checkpoints after every 3-5 steps.

    Wraps: reads plan file, runs verification commands (go build, go test, etc.)

    See also: Running an Unattended AI Agent

    ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-loop","level":3,"title":"/ctx-loop","text":"

    Generate a ready-to-run shell script for autonomous AI iteration. Supports Claude Code, Aider, and generic tool templates with configurable completion signals.

    Wraps: ctx loop [--tool] [--prompt] [--max-iterations] [--completion] [--output]

    See also: Autonomous Loops, Running an Unattended AI Agent

    ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-worktree","level":3,"title":"/ctx-worktree","text":"

    Manage git worktrees for parallel agent development. Create sibling worktrees on dedicated branches, analyze task blast radius for grouping, and tear down with merge.

    Wraps: git worktree add, git worktree list, git worktree remove, git merge

    See also: Parallel Agent Development with Git Worktrees

    ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-architecture","level":3,"title":"/ctx-architecture","text":"

    Build and maintain architecture maps incrementally. Creates or refreshes ARCHITECTURE.md (succinct project map, loaded at session start) and DETAILED_DESIGN.md (deep per-module reference, consulted on-demand). Coverage is tracked in map-tracking.json so each run extends the map rather than re-analyzing everything.

    Wraps: ctx status, git log, reads source files; writes ARCHITECTURE.md, DETAILED_DESIGN.md, map-tracking.json

    See also: Detecting and Fixing Drift

    ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-architecture-failure-analysis","level":3,"title":"/ctx-architecture-failure-analysis","text":"

    Adversarial failure analysis that generates falsifiable incident hypotheses against architecture artifacts. Hunts for correctness bugs that survive code review and tests: race conditions, ordering assumptions, cache staleness, error swallowing, ownership gaps, idempotency failures, state machine drift, and scaling cliffs.

    Requires /ctx-architecture artifacts as input. Reads ARCHITECTURE.md, DETAILED_DESIGN*.md, and map-tracking.json, then systematically applies 9 failure categories to every mutation point. Each finding carries an evidence standard (code path, trigger, failure path, silence reason, code evidence), a confidence level, and an explicit risk score. A mandatory challenge phase attempts to disprove each finding before it is accepted.

    Produces .context/DANGER-ZONES.md with ranked findings split into Critical (risk >= 7, silent/cascading) and Elevated tiers.

    Wraps: reads architecture artifacts, source code; writes DANGER-ZONES.md. Optionally uses GitNexus for blast radius and Gemini Search for cross-referencing known failure patterns.

    Relationship:

    Skill Mode /ctx-architecture Map what exists /ctx-architecture-enrich Improve map fidelity /ctx-architecture-failure-analysis Generate falsifiable incident hypotheses","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-remind","level":3,"title":"/ctx-remind","text":"

    Manage session-scoped reminders via natural language. Translates user intent (\"remind me to refactor swagger\") into the corresponding ctx remind command. Handles date conversion for --after flags.

    Wraps: ctx remind, ctx remind list, ctx remind dismiss

    See also: Session Reminders

    ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#skill-authoring","level":2,"title":"Skill Authoring","text":"","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-skill-audit","level":3,"title":"/ctx-skill-audit","text":"

    Audit one or more skills against Anthropic prompting best practices. Checks audit dimensions: positive framing, motivation, phantom references, examples, subagent guards, scope, and descriptions. Reports findings by severity with concrete fix suggestions.

    Wraps: reads internal/assets/claude/skills/*/SKILL.md or .claude/skills/*/SKILL.md, references anthropic-best-practices.md

    Trigger phrases: \"audit this skill\", \"check skill quality\", \"review the skills\", \"are our skills any good?\"

    See also: /ctx-skill-create, Contributing

    ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-skill-create","level":3,"title":"/ctx-skill-create","text":"

    Create, improve, and test skills. Guides the full lifecycle: capture intent, interview for edge cases, draft the SKILL.md, test with realistic prompts, review results with the user, and iterate. Applies core principles: the agent is already smart (only add what it does not know), the description is the trigger (make it specific and \"pushy\"), and explain the why instead of rigid directives.

    Wraps: reads/writes .claude/skills/ and internal/assets/claude/skills/

    Trigger phrases: \"create a skill\", \"turn this into a skill\", \"make a slash command\", \"this should be a skill\", \"improve this skill\", \"the skill isn't triggering\"

    See also: Contributing

    ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#session-control","level":2,"title":"Session Control","text":"

    Skills for controlling hook behavior during a session.

    ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-pause","level":3,"title":"/ctx-pause","text":"

    Pause all context nudge and reminder hooks for the current session. Security hooks still fire. Use for quick investigations or tasks that don't need ceremony overhead.

    Wraps: ctx hook pause

    Trigger phrases: \"pause ctx\", \"pause context\", \"stop the nudges\", \"quiet mode\"

    See also: Pausing Context Hooks

    ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-resume","level":3,"title":"/ctx-resume","text":"

    Resume context hooks after a pause. Restores normal nudge, reminder, and ceremony behavior. Silent no-op if not paused.

    Wraps: ctx hook resume

    Trigger phrases: \"resume ctx\", \"resume context\", \"turn nudges back on\", \"unpause\"

    See also: Pausing Context Hooks

    ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#project-specific-skills","level":2,"title":"Project-Specific Skills","text":"

    The ctx plugin ships the skills listed above. Teams can add their own project-specific skills to .claude/skills/ in the project root. These are separate from plugin-shipped skills and are scoped to the project.

    Project-specific skills follow the same format and are invoked the same way.

    Custom skills are not covered in this reference.

    ","path":["Reference","Skills"],"tags":[]},{"location":"reference/versions/","level":1,"title":"Version History","text":"","path":["Reference","Version History"],"tags":[]},{"location":"reference/versions/#version-history","level":2,"title":"Version History","text":"

    Documentation snapshots for each release.

    Click the corresponding view docs link to see the documentation as it was at that release.

    ","path":["Reference","Version History"],"tags":[]},{"location":"reference/versions/#releases","level":2,"title":"Releases","text":"Version Release Date Documentation v0.8.0 2026-03-23 view docs v0.6.0 2026-02-16 view docs v0.3.0 2026-02-07 view docs v0.2.0 2026-02-01 view docs v0.1.2 2026-01-27 view docs v0.1.1 2026-01-26 view docs v0.1.0 2026-01-25 view docs","path":["Reference","Version History"],"tags":[]},{"location":"reference/versions/#v080-the-architecture-release","level":3,"title":"v0.8.0: The Architecture Release","text":"

    MCP server for tool-agnostic AI integration. Memory bridge connecting Claude Code auto-memory to .context/. Complete CLI restructuring into cmd/ + core/ taxonomy. All user-facing strings externalized to YAML. fatih/color removed; two direct dependencies remain.

    ","path":["Reference","Version History"],"tags":[]},{"location":"reference/versions/#v060-the-integration-release","level":3,"title":"v0.6.0: The Integration Release","text":"

    Plugin architecture: hooks and skills converted from shell scripts to Go subcommands, shipped as a Claude Code marketplace plugin. Multi-tool hook generation for Cursor, Aider, Copilot, and Windsurf. Webhook notifications with encrypted URL storage.

    ","path":["Reference","Version History"],"tags":[]},{"location":"reference/versions/#v030-the-discipline-release","level":3,"title":"v0.3.0: The Discipline Release","text":"

    Journal static site generation via zensical. 49-skill audit and fix pass (positive framing, phantom reference removal, scope tightening). Context consolidation skill. golangci-lint v2 migration.

    ","path":["Reference","Version History"],"tags":[]},{"location":"reference/versions/#v020-the-archaeology-release","level":3,"title":"v0.2.0: The Archaeology Release","text":"

    Session journal system: ctx journal import converts Claude Code JSONL transcripts to browsable Markdown. Constants refactor with semantic prefixes (Dir*, File*, Filename*). CRLF handling for Windows compatibility.

    ","path":["Reference","Version History"],"tags":[]},{"location":"reference/versions/#v012","level":3,"title":"v0.1.2","text":"

    Default Claude Code permissions deployed on ctx init. Prompting guide published as a standalone documentation page.

    ","path":["Reference","Version History"],"tags":[]},{"location":"reference/versions/#v011","level":3,"title":"v0.1.1","text":"

    Bug fixes: hook schema key format corrected, JSON unicode escaping fixed in context file output.

    ","path":["Reference","Version History"],"tags":[]},{"location":"reference/versions/#v010-initial-release","level":3,"title":"v0.1.0: Initial Release","text":"

    CLI with 15 subcommands, 6 context file types (CONSTITUTION, TASKS, CONVENTIONS, ARCHITECTURE, DECISIONS, LEARNINGS), Makefile build system, and Claude Code hook integration.

    ","path":["Reference","Version History"],"tags":[]},{"location":"reference/versions/#latest","level":2,"title":"Latest","text":"

    The main documentation always reflects the latest development version.

    For the most recent stable release, see v0.8.0.

    ","path":["Reference","Version History"],"tags":[]},{"location":"reference/versions/#changelog","level":2,"title":"Changelog","text":"

    For detailed changes between versions, see the GitHub Releases page.

    ","path":["Reference","Version History"],"tags":[]},{"location":"security/","level":1,"title":"Security","text":"

    Security model, agent hardening, and vulnerability reporting.

    ","path":["Security"],"tags":[]},{"location":"security/#securing-ai-agents","level":3,"title":"Securing AI Agents","text":"

    Defense in depth for unattended AI agents: five layers of protection, each with a known bypass on its own, but strong in combination.

    ","path":["Security"],"tags":[]},{"location":"security/#security-policy","level":3,"title":"Security Policy","text":"

    Trust model, vulnerability reporting, permission hygiene, and security design principles.

    ","path":["Security"],"tags":[]},{"location":"security/agent-security/","level":1,"title":"Securing AI Agents","text":"","path":["Security","Securing AI Agents"],"tags":[]},{"location":"security/agent-security/#defense-in-depth-securing-ai-agents","level":1,"title":"Defense in Depth: Securing AI Agents","text":"","path":["Security","Securing AI Agents"],"tags":[]},{"location":"security/agent-security/#the-problem","level":2,"title":"The Problem","text":"

    An unattended AI agent with unrestricted access to your machine is an unattended shell with unrestricted access to your machine.

    This is not a theoretical concern. AI coding agents execute shell commands, write files, make network requests, and modify project configuration. When running autonomously (overnight, in a loop, without a human watching), the attack surface is the full capability set of the operating system user account.

    The risk is not that the AI is malicious. The risk is that the AI is controllable: it follows instructions from context, and context can be poisoned.

    ","path":["Security","Securing AI Agents"],"tags":[]},{"location":"security/agent-security/#threat-model","level":2,"title":"Threat Model","text":"","path":["Security","Securing AI Agents"],"tags":[]},{"location":"security/agent-security/#how-agents-get-compromised","level":3,"title":"How Agents Get Compromised","text":"

    AI agents follow instructions from multiple sources: system prompts, project files, conversation history, and tool outputs. An attacker who can inject content into any of these sources can redirect the agent's behavior.

    Vector How it works Prompt injection via dependencies A malicious package includes instructions in its README, changelog, or error output. The agent reads these during installation or debugging and follows them. Prompt injection via fetched content The agent fetches a URL (documentation, API response, Stack Overflow answer) containing embedded instructions. Poisoned project files A contributor adds adversarial instructions to CLAUDE.md, .cursorrules, or .context/ files. The agent loads these at session start. Self-modification between iterations In an autonomous loop, the agent modifies its own configuration files. The next iteration loads the modified config with no human review. Tool output injection A command's output (error messages, log lines, file contents) contains instructions the agent interprets and follows.","path":["Security","Securing AI Agents"],"tags":[]},{"location":"security/agent-security/#what-can-a-compromised-agent-do","level":3,"title":"What Can a Compromised Agent Do","text":"

    Depends entirely on what permissions and access the agent has:

    Access level Potential impact Unrestricted shell Execute any command, install software, modify system files Network access Exfiltrate source code, credentials, or context files to external servers Docker socket Escape container isolation by spawning privileged sibling containers SSH keys Pivot to other machines, push to remote repositories, access production systems Write access to own config Disable its own guardrails for the next iteration","path":["Security","Securing AI Agents"],"tags":[]},{"location":"security/agent-security/#the-defense-layers","level":2,"title":"The Defense Layers","text":"

    No single layer is sufficient. Each layer catches what the others miss.

    Layer 1: Soft instructions     (CONSTITUTION.md, playbook)\nLayer 2: Application controls  (permission allowlist, tool restrictions)\nLayer 3: OS-level isolation    (user accounts, filesystem, containers)\nLayer 4: Network controls      (firewall rules, airgap)\nLayer 5: Infrastructure        (VM isolation, resource limits)\n
    ","path":["Security","Securing AI Agents"],"tags":[]},{"location":"security/agent-security/#layer-1-soft-instructions-probabilistic","level":3,"title":"Layer 1: Soft Instructions (Probabilistic)","text":"

    Markdown files like CONSTITUTION.md and the Agent Playbook tell the agent what to do and what not to do. These are probabilistic: the agent usually follows them, but there is no enforcement mechanism.

    What it catches: Most common mistakes. An agent that has been told \"never delete production data\" will usually not delete production data.

    What it misses: Prompt injection. A sufficiently crafted injection can override soft instructions. Long context windows dilute attention on rules stated early. Edge cases where instructions are ambiguous.

    Verdict: Necessary but not sufficient. Good for the common case. Do not rely on it for security boundaries.

    ","path":["Security","Securing AI Agents"],"tags":[]},{"location":"security/agent-security/#layer-2-application-controls-deterministic-at-runtime-mutable-across-iterations","level":3,"title":"Layer 2: Application Controls (Deterministic at Runtime, Mutable across Iterations)","text":"

    AI tool runtimes (Claude Code, Cursor, etc.) provide permission systems: tool allowlists, command restrictions, confirmation prompts.

    For Claude Code, ctx init writes both an allowlist and an explicit deny list into .claude/settings.local.json. The golden images live in internal/assets/permissions/:

    Allowlist (allow.txt): only these tools run without confirmation:

    Bash(ctx:*)\nSkill(ctx-convention-add)\nSkill(ctx-decision-add)\n... # all bundled ctx-* skills\n

    Deny list (deny.txt): these are blocked even if the agent requests them:

    # Dangerous operations\nBash(sudo *)\nBash(git push *)\nBash(git push)\nBash(rm -rf /*)\nBash(rm -rf ~*)\nBash(curl *)\nBash(wget *)\nBash(chmod 777 *)\n\n# Sensitive file reads\nRead(**/.env)\nRead(**/.env.*)\nRead(**/*credentials*)\nRead(**/*secret*)\nRead(**/*.pem)\nRead(**/*.key)\n\n# Sensitive file edits\nEdit(**/.env)\nEdit(**/.env.*)\n

    What it catches: The agent cannot run commands outside the allowlist, and the deny list blocks dangerous operations even if a future allowlist change were to widen access. If rm, curl, sudo, or docker are not allowed and sudo/curl/wget are explicitly denied, the agent cannot invoke them regardless of what any prompt says.

    What it misses: The agent can modify the allowlist itself. In an autonomous loop, if the agent writes to .claude/settings.local.json, and the next iteration loads the modified config, then the protection is effectively lost. The application enforces the rules, but the application reads the rules from files the agent can write.

    Verdict: Strong first layer. Must be combined with self-modification prevention (Layer 3).

    ","path":["Security","Securing AI Agents"],"tags":[]},{"location":"security/agent-security/#layer-3-os-level-isolation-deterministic-and-unbypassable","level":3,"title":"Layer 3: OS-Level Isolation (Deterministic and Unbypassable)","text":"

    The operating system enforces access controls that no application-level trick can override. An unprivileged user cannot read files owned by root. A process without CAP_NET_RAW cannot open raw sockets. These are kernel boundaries.

    Control Purpose Dedicated user account No sudo, no privileged group membership (docker, wheel, adm). The agent cannot escalate privileges. Filesystem permissions Project directory writable; everything else read-only or inaccessible. Agent cannot reach other projects, home directories, or system config. Immutable config files CLAUDE.md, .claude/settings.local.json, and .context/CONSTITUTION.md owned by a different user or marked immutable (chattr +i on Linux). The agent cannot modify its own guardrails.

    What it catches: Privilege escalation, self-modification, lateral movement to other projects or users.

    What it misses: Actions within the agent's legitimate scope. If the agent has write access to source code (which it needs to do its job), it can introduce vulnerabilities in the code itself.

    Verdict: Essential. This is the layer that makes the other layers trustworthy.

    OS-level isolation does not make the agent safe; it makes the other layers meaningful.

    ","path":["Security","Securing AI Agents"],"tags":[]},{"location":"security/agent-security/#layer-4-network-controls","level":3,"title":"Layer 4: Network Controls","text":"

    An agent that cannot reach the internet cannot exfiltrate data. It also cannot ingest new instructions mid-loop from external documents, API responses, or hostile content.

    Scenario Recommended control Agent does not need the internet --network=none (container) or outbound firewall drop-all Agent needs to fetch dependencies Allow specific registries (npmjs.com, proxy.golang.org, pypi.org) via firewall rules. Block everything else. Agent needs API access Allow specific API endpoints only. Use an HTTP proxy with allowlisting.

    What it catches: Data exfiltration, phone-home payloads, downloading additional tools, and instruction injection via fetched content.

    What it misses: Nothing, if the agent genuinely does not need the network. The tradeoff is that many real workloads need dependency resolution, so a full airgap requires pre-populated caches.

    ","path":["Security","Securing AI Agents"],"tags":[]},{"location":"security/agent-security/#layer-5-infrastructure-isolation","level":3,"title":"Layer 5: Infrastructure Isolation","text":"

    The strongest boundary is a separate machine (or something that behaves like one).

    The moment you stop arguing about prompts and start arguing about kernels, you are finally doing security.

    Containers (Docker, Podman):

    docker run --rm \\\n  --network=none \\\n  --cap-drop=ALL \\\n  --memory=4g \\\n  --cpus=2 \\\n  -v /path/to/project:/workspace \\\n  -w /workspace \\\n  your-dev-image \\\n  ./loop.sh\n

    Docker Socket Is Sudo Access

    Critical: never mount the Docker socket (/var/run/docker.sock).

    An agent with socket access can spawn sibling containers with full host access, effectively escaping the sandbox.

    Use rootless Docker or Podman to eliminate this escalation path.

    Virtual machines: The strongest isolation. The guest kernel has no visibility into the host OS. No shared folders, no filesystem passthrough, no SSH keys to other machines.

    Resource limits: CPU, memory, and disk quotas prevent a runaway agent from consuming all resources. Use ulimit, cgroup limits, or container resource constraints.

    ","path":["Security","Securing AI Agents"],"tags":[]},{"location":"security/agent-security/#putting-it-all-together","level":2,"title":"Putting It All Together","text":"

    A defense-in-depth setup for overnight autonomous runs:

    Layer Implementation Stops Soft instructions CONSTITUTION.md with \"never delete tests\", \"always run tests before committing\" Common mistakes (probabilistic) Application allowlist .claude/settings.local.json with explicit tool permissions Unauthorized commands (deterministic within runtime) Immutable config chattr +i on CLAUDE.md, .claude/, CONSTITUTION.md Self-modification between iterations Unprivileged user Dedicated user, no sudo, no docker group Privilege escalation Container --cap-drop=ALL --network=none, rootless, no socket mount Host escape, network exfiltration Resource limits --memory=4g --cpus=2, disk quotas Resource exhaustion

    Each layer is straightforward; the strength is in the combination.

    ","path":["Security","Securing AI Agents"],"tags":[]},{"location":"security/agent-security/#common-mistakes","level":2,"title":"Common Mistakes","text":"

    \"I'll just use --dangerously-skip-permissions\": This disables Layer 2 entirely. Without Layers 3-5, you have no protection at all. Only use this flag inside a properly isolated container or VM.

    \"The agent is sandboxed in Docker\": A Docker container with the Docker socket mounted, running as root, with --privileged, and full network access is not sandboxed. It is a root shell with extra steps.

    \"CONSTITUTION.md says not to do that\": Markdown is a suggestion. It works most of the time. It is not a security boundary. Do not use it as one.

    \"I reviewed the CLAUDE.md, it's fine\": The agent can modify CLAUDE.md during iteration N. Iteration N+1 loads the modified version. Unless the file is immutable, your review is stale.

    \"The agent only has access to this one project\": Does the project directory contain .env files, SSH keys, API tokens, or credentials? Does it have a .git/config with push access to a remote? Filesystem isolation means isolating what is in the directory too.

    ","path":["Security","Securing AI Agents"],"tags":[]},{"location":"security/agent-security/#team-security-considerations","level":2,"title":"Team Security Considerations","text":"

    When multiple developers share a .context/ directory, security considerations extend beyond single-agent hardening.

    ","path":["Security","Securing AI Agents"],"tags":[]},{"location":"security/agent-security/#code-review-for-context-files","level":3,"title":"Code Review for Context Files","text":"

    Treat .context/ changes like code changes. Context files influence agent behavior (a modified CONSTITUTION.md or CONVENTIONS.md changes what every agent on the team will do next session). Review them in PRs with the same scrutiny you apply to production code.

    Watch for:

    • Weakened constitutional rules (removed constraints, softened language)
    • New decisions that contradict existing ones without acknowledging it
    • Learnings that encode incorrect assumptions
    • Task additions that bypass the team's prioritization process
    ","path":["Security","Securing AI Agents"],"tags":[]},{"location":"security/agent-security/#gitignore-patterns","level":3,"title":"Gitignore Patterns","text":"

    ctx init configures .gitignore automatically, but verify these patterns are in place:

    • Always gitignored: .ctx.key (encryption key), .context/logs/, .context/journal/
    • Team decision: scratchpad.enc (encrypted, safe to commit for shared scratchpad state); .gitignore if scratchpads are personal
    • Never committed: .env, credentials, API keys (enforced by drift secret detection)
    ","path":["Security","Securing AI Agents"],"tags":[]},{"location":"security/agent-security/#multi-developer-context-sharing","level":3,"title":"Multi-Developer Context Sharing","text":"

    CONSTITUTION.md is the shared contract. All team members and their agents inherit it. Changes require team consensus, not unilateral edits.

    When multiple agents write to the same context files concurrently (e.g., two developers adding learnings simultaneously), git merge conflicts are expected. Resolution is typically additive: accept both additions. Destructive resolution (dropping one side) loses context.

    ","path":["Security","Securing AI Agents"],"tags":[]},{"location":"security/agent-security/#team-conventions-for-context-management","level":3,"title":"Team Conventions for Context Management","text":"

    Establish and document:

    • Who reviews context changes: Same reviewers as code, or a designated context owner?
    • How to resolve conflicting decisions: If two sessions record contradictory decisions, which wins? Default: the later one must explicitly supersede the earlier one with rationale.
    • Frequency of context maintenance: Weekly ctx drift checks, monthly consolidation passes, archival after each milestone.
    ","path":["Security","Securing AI Agents"],"tags":[]},{"location":"security/agent-security/#checklist","level":2,"title":"Checklist","text":"

    Before running an unattended AI agent:

    • Agent runs as a dedicated unprivileged user (no sudo, no docker group)
    • Agent's config files are immutable or owned by a different user
    • Permission allowlist restricts tools to the project's toolchain
    • Container drops all capabilities (--cap-drop=ALL)
    • Docker socket is NOT mounted
    • Network is disabled or restricted to specific domains
    • Resource limits are set (memory, CPU, disk)
    • No SSH keys, API tokens, or credentials are accessible to the agent
    • Project directory does not contain .env or secrets files
    • Iteration cap is set (--max-iterations)
    ","path":["Security","Securing AI Agents"],"tags":[]},{"location":"security/agent-security/#further-reading","level":2,"title":"Further Reading","text":"
    • Running an Unattended AI Agent: the ctx recipe for autonomous loops, including step-by-step permissions and isolation setup
    • Security: ctx's own trust model and vulnerability reporting
    • Autonomous Loops: full documentation of the loop pattern, prompt templates, and troubleshooting
    ","path":["Security","Securing AI Agents"],"tags":[]},{"location":"security/hub/","level":1,"title":"Hub Security Model","text":"","path":["Security","Hub Security Model"],"tags":[]},{"location":"security/hub/#ctx-hub-security-model","level":1,"title":"ctx Hub: Security Model","text":"

    What the hub defends against, what it does not defend against, and the concrete mechanisms in play.

    ","path":["Security","Hub Security Model"],"tags":[]},{"location":"security/hub/#threat-model","level":2,"title":"Threat Model","text":"

    The hub is designed for trusted cross-project knowledge sharing within a team or homelab. It assumes:

    • The hub host is trusted. Anyone with root on that box can read every entry ever published.
    • Network is semi-trusted. Hub traffic is gRPC over TCP; TLS is strongly recommended but not mandatory.
    • Client machines are trusted enough to hold a per-project client token. Losing a client token is roughly equivalent to losing an API key — scoped damage, not total compromise.
    • Entry content is not secret. Decisions, learnings, and conventions may be indexed by AI agents, rendered in docs, shared across projects. Do not push credentials or PII into the hub.

    The hub is not a secure messaging system, a secrets store, or a compliance-grade audit log. If your threat model needs those, use a dedicated tool and keep the hub for knowledge sharing.

    ","path":["Security","Hub Security Model"],"tags":[]},{"location":"security/hub/#mechanisms","level":2,"title":"Mechanisms","text":"","path":["Security","Hub Security Model"],"tags":[]},{"location":"security/hub/#bearer-tokens","level":3,"title":"Bearer Tokens","text":"

    All RPCs except Register require a bearer token in gRPC metadata. Two kinds of tokens exist:

    Kind Format Scope Lifetime Admin token ctx_adm_... Register new projects Manual rotate Client token ctx_cli_... Publish, Sync, Listen, Status Project lifetime

    Tokens are compared in constant time (crypto/subtle) to prevent timing oracles, and looked up via an O(1) hash map so the comparison cost does not depend on the total number of registered clients.

    ","path":["Security","Hub Security Model"],"tags":[]},{"location":"security/hub/#client-side-encryption-at-rest","level":3,"title":"Client-Side Encryption at Rest","text":"

    .context/.connect.enc stores the client token and hub address, encrypted with AES-256-GCM using the same scheme the notification subsystem uses. The key is derived from ctx's local keyring (see internal/crypto).

    An attacker with read access to the project directory cannot learn the client token without also breaking ctx's local keyring.

    ","path":["Security","Hub Security Model"],"tags":[]},{"location":"security/hub/#hub-side-token-storage","level":3,"title":"Hub-Side Token Storage","text":"

    Tokens Are Stored in Plaintext on the Hub Host

    <data-dir>/clients.json currently stores client tokens verbatim, not hashed. Anyone with read access to the hub's data directory sees every registered client's token and can impersonate any project that has ever registered.

    Mitigations today:

    • Run the hub as an unprivileged user and lock the data directory with chmod 700 <data-dir>.
    • Use the systemd unit in Operations, which enables ProtectSystem=strict, NoNewPrivileges=true, and a dedicated user.
    • Never expose <data-dir> over NFS, SMB, or shared filesystems.
    • Treat <data-dir> the same way you'd treat /etc/shadow — back it up encrypted, never check it into version control.

    Hashing clients.json and moving to keyring-backed storage is tracked as a follow-up in the PR #60 task group. Until that lands, assume a hub host compromise equals total hub compromise.

    ","path":["Security","Hub Security Model"],"tags":[]},{"location":"security/hub/#input-validation","level":3,"title":"Input Validation","text":"

    Every published entry is validated before it touches the log:

    • Type must be one of: decision, learning, convention, task. Unknown types are rejected.
    • ID and Origin are required and non-empty.
    • Content size is capped at 1 MB. Reasonable for text, hostile for attempts to fill the disk.
    • Duplicate project registration is rejected — a client that replays an old Register call gets an error, not a second token.
    ","path":["Security","Hub Security Model"],"tags":[]},{"location":"security/hub/#no-script-execution","level":3,"title":"No Script Execution","text":"

    The hub never interprets entry content. There is no expression language, no template evaluation, no markdown rendering at ingest. Content is stored as bytes and fanned out to clients verbatim.

    ","path":["Security","Hub Security Model"],"tags":[]},{"location":"security/hub/#audit-trail","level":3,"title":"Audit Trail","text":"

    entries.jsonl is append-only. Every accepted publish is recorded with the publishing project's origin tag and sequence number. Nothing is ever deleted by the hub; retention is managed manually by the operator (see log rotation).

    ","path":["Security","Hub Security Model"],"tags":[]},{"location":"security/hub/#what-the-hub-does-not-defend-against","level":2,"title":"What the Hub Does Not Defend Against","text":"
    • Untrusted entry senders. A client with a valid token can publish anything (within the 1 MB cap). There is no content validation beyond shape.
    • Denial of service from a registered client. A misbehaving client can publish until disk is full. Monitor entries.jsonl growth.
    • Network eavesdropping without TLS. Plain gRPC leaks entry content and tokens. Use a TLS-terminating reverse proxy (see Multi-machine recipe).
    • Host compromise. Root on the hub host = access to every entry and every token. Harden the host.
    • Accidental secret upload. The hub will happily fan out a decision containing an API key. Sanitize content before publishing.
    ","path":["Security","Hub Security Model"],"tags":[]},{"location":"security/hub/#operational-hardening-checklist","level":2,"title":"Operational Hardening Checklist","text":"
    • Run the hub as an unprivileged user with NoNewPrivileges=true and ProtectSystem=strict (see the systemd unit in Operations).
    • Terminate TLS in front of the hub for anything beyond a trusted LAN.
    • Restrict the listen port with firewall rules to the client subnet only.
    • Back up <data-dir>/admin.token to a secrets manager; do not leave it in shell history.
    • Rotate the admin token when a team member with access leaves. Client tokens keep working across rotations.
    • Monitor entries.jsonl growth; alert on sudden spikes.
    • Run NTP on all clients to prevent entry-timestamp skew.
    • Do not publish from machines you do not trust.
    ","path":["Security","Hub Security Model"],"tags":[]},{"location":"security/hub/#responsible-disclosure","level":2,"title":"Responsible Disclosure","text":"

    Security issues in the hub follow the same process as the rest of ctx — see Reporting.

    ","path":["Security","Hub Security Model"],"tags":[]},{"location":"security/hub/#see-also","level":2,"title":"See Also","text":"
    • ctx Hub Operations
    • ctx Hub failure modes
    • HA cluster recipe
    ","path":["Security","Hub Security Model"],"tags":[]},{"location":"security/reporting/","level":1,"title":"Security Policy","text":"","path":["Security","Security Policy"],"tags":[]},{"location":"security/reporting/#reporting-vulnerabilities","level":2,"title":"Reporting Vulnerabilities","text":"

    At ctx, we take security very seriously.

    If you discover a security vulnerability in ctx, please report it responsibly.

    Do NOT open a public issue for security vulnerabilities.

    ","path":["Security","Security Policy"],"tags":[]},{"location":"security/reporting/#email","level":3,"title":"Email","text":"

    Send details to security@ctx.ist.

    ","path":["Security","Security Policy"],"tags":[]},{"location":"security/reporting/#github-private-reporting","level":3,"title":"GitHub Private Reporting","text":"
    1. Go to the Security tab;
    2. Click \"Report a Vulnerability\";
    3. Provide a detailed description.
    ","path":["Security","Security Policy"],"tags":[]},{"location":"security/reporting/#encrypted-reports-optional","level":3,"title":"Encrypted Reports (Optional)","text":"

    If your report contains sensitive details (proof-of-concept exploits, credentials, or internal system information), you can encrypt your message with our PGP key:

    • In-repo: SECURITY_KEY.asc
    • Keybase: keybase.io/alekhinejose
    # Import the key\ngpg --import SECURITY_KEY.asc\n\n# Encrypt your report\ngpg --armor --encrypt --recipient security@ctx.ist report.txt\n

    Encryption is optional. Unencrypted reports to security@ctx.ist or via GitHub Private Reporting are perfectly fine.

    ","path":["Security","Security Policy"],"tags":[]},{"location":"security/reporting/#what-to-include","level":3,"title":"What to Include","text":"
    • Description of the vulnerability,
    • Steps to reproduce,
    • Potential impact,
    • Suggested fix (if any).
    ","path":["Security","Security Policy"],"tags":[]},{"location":"security/reporting/#attribution","level":2,"title":"Attribution","text":"

    We appreciate responsible disclosure and will acknowledge security researchers who report valid vulnerabilities (unless they prefer to remain anonymous).

    ","path":["Security","Security Policy"],"tags":[]},{"location":"security/reporting/#response-timeline","level":3,"title":"Response Timeline","text":"

    Open Source, Best-Effort Timelines

    ctx is a volunteer-maintained open source project.

    The timelines below are guidelines, not guarantees, and depend on contributor availability.

    We will address security reports on a best-effort basis and prioritize them by severity.

    Stage Timeframe Acknowledgment Within 48 hours Initial assessment Within 7 days Resolution target Within 30 days (depending on severity)","path":["Security","Security Policy"],"tags":[]},{"location":"security/reporting/#trust-model","level":2,"title":"Trust Model","text":"

    ctx operates within a single trust boundary: the local filesystem.

    The person who authors .context/ files is the same person who runs the agent that reads them. There is no remote input, no shared state, and no server component.

    This means:

    • ctx does not sanitize context files for prompt injection. This is a deliberate design choice, not an oversight. The files are authored by the developer who owns the machine: Sanitizing their own instructions back to them would be counterproductive.
    • If you place adversarial instructions in your own .context/ files, your agent will follow them. This is expected behavior. You control the context; the agent trusts it.

    Shared Repositories

    In shared repositories, .context/ files should be reviewed in code review (the same way you would review CI/CD config or Makefiles). A malicious contributor could add harmful instructions to CONSTITUTION.md or TASKS.md.

    ","path":["Security","Security Policy"],"tags":[]},{"location":"security/reporting/#security-design","level":2,"title":"Security Design","text":"

    ctx is designed with security in mind:

    • No secrets in context: The constitution explicitly forbids storing secrets, tokens, API keys, or credentials in .context/ files
    • Local only: ctx runs entirely locally with no external network calls
    • No code execution: ctx reads and writes Markdown files only; it does not execute arbitrary code
    • Git-tracked: Core context files are meant to be committed, so they should never contain sensitive data. Exception: sessions/ and journal/ contain raw conversation data and should be gitignored
    ","path":["Security","Security Policy"],"tags":[]},{"location":"security/reporting/#permission-hygiene","level":2,"title":"Permission Hygiene","text":"

    Claude Code evaluates permissions in deny → ask → allow order. ctx init automatically populates permissions.deny with rules that block dangerous operations before the allow list is ever consulted.

    Default deny rules block:

    • sudo, git push, rm -rf /, rm -rf ~, curl, wget, chmod 777
    • Read/Edit of .env, credentials, secrets, .pem, .key files

    Even with deny rules in place, the allow list accumulates one-off permissions over time. Periodically review for:

    • Destructive commands: git reset --hard, git clean -f, etc.
    • Config injection vectors: permissions that allow modifying files controlling agent behavior (CLAUDE.md, settings.local.json)
    • Broad wildcards: overly permissive patterns that pre-approve more than intended
    ","path":["Security","Security Policy"],"tags":[]},{"location":"security/reporting/#state-file-management","level":2,"title":"State File Management","text":"

    Hook state files (throttle markers, prompt counters, pause markers) are stored in .context/state/, which is project-scoped and gitignored. State files are automatically managed by the hooks that create them; no manual cleanup is needed.

    ","path":["Security","Security Policy"],"tags":[]},{"location":"security/reporting/#best-practices","level":2,"title":"Best Practices","text":"
    1. Review before committing: Always review .context/ files before committing
    2. Use .gitignore: If you must store sensitive notes locally, add them to .gitignore
    3. Drift detection: Run ctx drift to check for potential issues
    4. Permission audit: Review .claude/settings.local.json after busy sessions
    ","path":["Security","Security Policy"],"tags":[]},{"location":"thesis/","level":1,"title":"Context as State","text":"","path":["The Thesis"],"tags":[]},{"location":"thesis/#a-persistence-layer-for-human-ai-cognition","level":2,"title":"A Persistence Layer for Human-AI Cognition","text":"

    Jose Alekhinne - jose@ctx.ist

    February 2026

    ","path":["The Thesis"],"tags":[]},{"location":"thesis/#abstract","level":3,"title":"Abstract","text":"

    As AI tools evolve from code-completion utilities into reasoning collaborators, the knowledge that governs their behavior becomes as important as the code they produce; yet, that knowledge is routinely discarded at the end of every session.

    AI-assisted development systems assemble context at prompt time using heuristic retrieval from mutable sources: recent files, semantic search results, session history. These approaches optimize relevance at the moment of generation but do not persist the cognitive state that produced decisions. Reasoning is not reproducible, intent is lost across sessions, and teams cannot audit the knowledge that constrains automated behavior.

    This paper argues that context should be treated as deterministic, version-controlled state rather than as a transient query result. We ground this argument in three sources of evidence: a landscape analysis of 17 systems spanning AI coding assistants, agent frameworks, and knowledge stores; a taxonomy of five primitive categories that reveals irrecoverable architectural trade-offs; and an experience report from ctx, a persistence layer for AI-assisted development, which developed itself using its own persistence model across 389 sessions over 33 days. We define a three-tier model for cognitive state: authoritative knowledge, delivery views, and ephemeral state. Then we present six design invariants empirically validated by 56 independent rejection decisions observed across the analyzed landscape. We show that context determinism applies to assembly, not to model output, and that the curation cost this model requires is offset by compounding returns in reproducibility, auditability, and team cognition.

    ","path":["The Thesis"],"tags":[]},{"location":"thesis/#1-introduction","level":2,"title":"1. Introduction","text":"

    The introduction of large language models into software development has shifted the primary interface from code execution to interactive reasoning. In this environment, the correctness of an output depends not only on source code but on the context supplied to the model: the conventions, decisions, architectural constraints, and domain knowledge that bound the space of acceptable responses.

    Current systems treat context as a query result assembled at the moment of interaction. A developer begins a session; the tool retrieves what it estimates to be relevant from chat history, recent files, and vector stores; the model generates output conditioned on this transient assembly; the session ends, and the context evaporates. The next session begins the cycle again.

    This model has improved substantially over the past year. CLAUDE.md files, Cursor rules, Copilot's memory system, and tools such as Mem0, Letta, and Kindex each address aspects of the persistence problem. Yet across 17 systems we analyzed spanning AI coding assistants, agent frameworks, autonomous coding agents, and purpose-built knowledge stores, no system provides all five of the following properties simultaneously: deterministic context assembly, human-readable file-based persistence, token-budgeted delivery, a single-binary core with zero required runtime dependencies for the persistence path, and local-first operation.

    This paper does not propose a universal replacement for retrieval-centric workflows. It defines a persistence layer (embodied in ctx (https://ctx.ist)) whose advantages emerge under specific operational conditions: when reproducibility is a requirement, when knowledge must outlive sessions and individuals, when teams require shared cognitive authority, or when offline operation is necessary.

    The trade-offs (manual curation cost, reduced automatic recall, coarser granularity) are intentional and mirror the trade-offs accepted by systems that favor reproducibility over convenience, such as reproducible builds and immutable infrastructure 1 6.

    The contribution is threefold: a three-tier model for cognitive state that resolves the ambiguity between authoritative knowledge and ephemeral session artifacts; six design invariants empirically grounded in a cross-system landscape analysis; and an experience report demonstrating that the model produces compounding returns when applied to its own development.

    ","path":["The Thesis"],"tags":[]},{"location":"thesis/#2-the-limits-of-prompt-time-context","level":2,"title":"2. The Limits of Prompt-Time Context","text":"

    Prompt-time assembly pipelines typically consist of corpus selection, retrieval, ranking, and truncation. These pipelines are probabilistic and time-dependent, producing three failure modes that compound over the lifetime of a project.

    ","path":["The Thesis"],"tags":[]},{"location":"thesis/#21-non-reproducibility","level":3,"title":"2.1 Non-Reproducibility","text":"

    If context is derived from mutable sources using heuristic ranking, identical requests at different times receive different inputs. A developer who asks \"What is our authentication strategy?\" on Tuesday may receive a different context window than the same question on Thursday — not because the strategy changed, but because the retrieval heuristic surfaced different fragments.

    Reproducibility (the ability to reconstruct the exact inputs that produced a given output) is a foundational property of reliable systems. Its loss in AI-assisted development mirrors the historical evolution from ad-hoc builds to deterministic build systems 1 2. The build community learned that when outputs depend on implicit state (environment variables, system clocks, network-fetched dependencies), debugging becomes archaeology. The same principle applies when AI outputs depend on non-deterministic context retrieval.

    ","path":["The Thesis"],"tags":[]},{"location":"thesis/#22-opaque-knowledge","level":3,"title":"2.2 Opaque Knowledge","text":"

    Embedding-based memory increases recall but reduces inspectability. When a vector store determines that a code snippet is \"similar\" to the current query, the ranking function is opaque: the developer cannot inspect why that snippet was chosen, whether a more relevant artifact was excluded, or whether the ranking will remain stable. This prevents deterministic debugging, policy auditing, and causal attribution (properties that information retrieval theory identifies as fundamental trade-offs of probabilistic ranking) 3.

    In practice, this opacity manifests as a compliance ceiling. In our experience developing a context management system (detailed in Section 7), soft instructions (directives that ask an AI agent to read specific files or follow specific procedures) achieve approximately 75-85% compliance. The remaining 15-25% represents cases where the agent exercises judgment about whether the instruction applies, effectively applying a second ranking function on top of the explicit directive. When 100% compliance is required, instruction is insufficient; the content must be injected directly, removing the agent's option to skip it.

    ","path":["The Thesis"],"tags":[]},{"location":"thesis/#23-loss-of-intent","level":3,"title":"2.3 Loss of Intent","text":"

    Session transcripts record interaction but not cognition. A transcript captures what was said but not which assumptions were accepted, which alternatives were rejected, or which constraints governed the decision. The distinction matters: a decision to use PostgreSQL recorded as a one-line note (\"Use PostgreSQL\") teaches a model what was decided; a structured record with context, rationale, and consequences teaches it why (and why is what prevents the model from unknowingly reversing the decision in a future session) 4.

    Session transcripts provide history. Cognitive state requires something more: the persistent, structured representation of the knowledge required for correct decision-making.

    ","path":["The Thesis"],"tags":[]},{"location":"thesis/#3-cognitive-state-a-three-tier-model","level":2,"title":"3. Cognitive State: A Three-Tier Model","text":"","path":["The Thesis"],"tags":[]},{"location":"thesis/#31-definitions","level":3,"title":"3.1 Definitions","text":"

    We define cognitive state as the authoritative, persistent representation of the knowledge required for correct decision-making within a project. It is human-authored or human-ratified, versioned, inspectable, and reproducible. It is distinct from logs, transcripts, retrieval results, and model-generated summaries.

    Previous formulations of this idea have treated cognitive state as a monolithic concept. In practice, a three-tier model better captures the operational reality:

    Tier 1: Authoritative State: The canonical knowledge that the system treats as ground truth. In a concrete implementation, this corresponds to a set of human-curated files with defined schemas: a constitution (inviolable rules), conventions (code patterns), an architecture document (system structure), decision records (choices with rationale), learnings (captured experience), a task list (current work), a glossary (domain terminology), and an agent playbook (operating instructions). Each file has a single purpose, a defined lifecycle, and a distinct update frequency. Authoritative state is version-controlled alongside code and reviewed through the same mechanisms (diffs, pull requests, blame annotations).

    Tier 2: Delivery Views: Derived representations of authoritative state, assembled for consumption by a model. A delivery view is produced by a deterministic assembly function that takes the authoritative state, a token budget, and an inclusion policy as inputs and produces a context window as output. The same authoritative state, budget, and policy must always produce the same delivery view. Delivery views are ephemeral (they exist only for the duration of a session), but their construction is reproducible.

    Tier 3: Ephemeral State: Session transcripts, scratchpad notes, draft journal entries, and other artifacts that exist during or immediately after a session but are not authoritative. Ephemeral state is the raw material from which authoritative state may be extracted through human review, but it is never consumed directly by the assembly function.

    This three-tier model resolves confusion present in earlier formulations: the claim that AI output is a deterministic function of the repository state. The corrected claim is that context selection is deterministic (the delivery view is a function of authoritative state), but model output remains stochastic, conditioned on the deterministic context. Formally:

    delivery_view = assemble(authoritative_state, budget, policy)\noutput = model(delivery_view)   # stochastic\n

    The persistence layer's contribution is making assemble reproducible, not making model deterministic.

    ","path":["The Thesis"],"tags":[]},{"location":"thesis/#32-separation-of-concerns","level":3,"title":"3.2 Separation of Concerns","text":"

    The decision to separate authoritative state into distinct files with distinct purposes is not cosmetic. Different types of knowledge have different lifecycles:

    Knowledge Type Update Frequency Read Frequency Load Priority Example Constitution Rarely Every session Always \"Never commit secrets to git\" Tasks Every session Session start Always \"Implement token budget CLI flag\" Conventions Weekly Before coding High \"All errors use structured logging with severity levels\" Decisions When decided When questioning Medium \"Use PostgreSQL over MySQL (see ADR-003)\" Learnings When learned When stuck Medium \"Hook scripts >50ms degrade interactive UX\" Architecture When changed When designing On demand \"Three-layer pipeline: ingest → enrich → assemble\" Journal Every session Rarely Never auto \"Session 247: Removed dead-end session copy layer\"

    A monolithic context file would force the assembly function to load everything or nothing. Separation enables progressive disclosure: the minimum context that matters for the current moment, with the option to load more when needed. A normal session loads the constitution, tasks, and conventions; a deep investigation loads decision history and journal entries from specific dates.

    The budget mechanism is the constraint that makes separation valuable. Without a budget, the default behavior is to load everything, which destroys the attention density that makes loaded context useful. With a budget, the assembly function must prioritize ruthlessly: constitution first (always full), then tasks and conventions (budget-capped), then decisions and learnings (scored by recency). Entries that do not fit receive title-only summaries rather than being silently dropped (an application of the \"tell me what you don't know\" pattern identified independently by four systems in our landscape analysis).

    ","path":["The Thesis"],"tags":[]},{"location":"thesis/#4-design-invariants","level":2,"title":"4. Design Invariants","text":"

    The following six invariants define the constraints that a cognitive state persistence layer must satisfy. They are not axioms chosen a priori; they are empirically grounded properties whose violation was independently identified as producing complexity costs across the 17 systems we analyzed.

    ","path":["The Thesis"],"tags":[]},{"location":"thesis/#invariant-1-markdown-on-filesystem-persistence","level":3,"title":"Invariant 1: Markdown-on-Filesystem Persistence","text":"

    Context files must be human-readable, git-diffable, and editable with any text editor. No database. No binary storage.

    Validation: 11 independent rejection decisions across the analyzed landscape protected this property. Systems that adopted embedded records, binary serialization, or knowledge graphs as their core primitive consistently traded away the ability for a developer to run cat DECISIONS.md and understand the system's knowledge. The inspection cost of opaque storage compounds over the lifetime of a project: every debugging session, every audit, every onboarding conversation requires specialized tooling to access knowledge that could have been a text file.

    ","path":["The Thesis"],"tags":[]},{"location":"thesis/#invariant-2-zero-runtime-dependencies","level":3,"title":"Invariant 2: Zero Runtime Dependencies","text":"

    The tool must work with no installed runtimes, no running services, and no API keys for core functionality.

    Validation: 13 independent rejection decisions protected this property (the most frequently defended invariant). Systems that required databases (PostgreSQL, SQLite, Redis), embedding models, server daemons, container runtimes, or cloud APIs for core operation introduced failure modes proportional to their dependency count. A persistence layer that depends on infrastructure is not a persistence layer; it is a service. Services have uptime requirements, version compatibility matrices, and operational costs that simple file operations do not.

    ","path":["The Thesis"],"tags":[]},{"location":"thesis/#invariant-3-deterministic-context-assembly","level":3,"title":"Invariant 3: Deterministic Context Assembly","text":"

    The same files plus the same budget must produce the same output. No embedding-based retrieval, no LLM-driven selection, no wall-clock-dependent scoring in the assembly path.

    Validation: 6 independent rejection decisions protected this property. Non-deterministic assembly (whether from embedding variance, LLM-based selection, or time-dependent scoring) destroys the ability to reproduce a context window and therefore to diagnose why a model produced a given output. Determinism in the assembly path is what makes the persistence layer auditable.

    ","path":["The Thesis"],"tags":[]},{"location":"thesis/#invariant-4-human-authority-over-persistent-state","level":3,"title":"Invariant 4: Human Authority over Persistent State","text":"

    The agent may propose changes to context files but must not unilaterally modify them. All persistent changes go through human-reviewable git commits.

    Validation: 6 independent rejection decisions protected this property. Systems that allowed agents to self-modify their memory (writing freeform notes, auto-pruning old entries, generating summaries as ground truth) consistently produced lower-quality persistent context than systems that enforced human review. Structure is a feature, not a limitation: across the landscape, the pattern \"structured beats freeform\" was independently discovered by four systems that evolved from freeform LLM summaries to typed schemas with required fields.

    ","path":["The Thesis"],"tags":[]},{"location":"thesis/#invariant-5-local-first-air-gap-capable","level":3,"title":"Invariant 5: Local-First, Air-Gap Capable","text":"

    Core functionality must work offline with no network access. Cloud services may be used for optional features but never for core context management.

    Validation: 7 independent rejection decisions protected this property. Infrastructure-dependent memory systems cannot operate in classified environments, isolated networks, or disaster-recovery scenarios. A filesystem-native model continues to function under all conditions where the repository is accessible.

    ","path":["The Thesis"],"tags":[]},{"location":"thesis/#invariant-6-no-default-telemetry","level":3,"title":"Invariant 6: No Default Telemetry","text":"

    Any analytics, if ever added, must be strictly opt-in.

    Validation: 4 independent rejection decisions protected this property. Default telemetry erodes the trust model that a persistence layer depends on. If developers must trust the system with their architectural decisions, operational learnings, and project constraints, the system cannot simultaneously be reporting usage data to external services.

    These six invariants collectively define a design space. Each feature proposal can be evaluated against them: a feature that violates any invariant is rejected regardless of how many other systems implement it. The discipline of constraint (refusing to add capabilities that compromise foundational properties) is itself an architectural contribution. Across the 17 analyzed systems, 56 patterns were explicitly rejected for violating these invariants. The rejection count per invariant (11, 13, 6, 6, 7, 4) provides a rough measure of each property's vulnerability to architectural erosion. A representative sample of these rejections is provided in Appendix A.1.

    ","path":["The Thesis"],"tags":[]},{"location":"thesis/#5-landscape-analysis","level":2,"title":"5. Landscape Analysis","text":"

    The 17 systems were selected to cover the architectural design space rather than to achieve completeness. Each included system satisfies three criteria: it represents a distinct architectural primitive for AI-assisted development, it is actively maintained or widely referenced, and it provides sufficient public documentation or source code for architectural inspection. The goal was to ensure that every major category of primitive (document, embedded record, state snapshot, event/message, construction/derivation) was represented by multiple systems, enabling cross-system pattern detection.

    The resulting set spans seven categories: AI coding assistants (Continue, Sourcegraph/Cody, Aider, Claude Code), AI agent frameworks (CrewAI, AutoGen, LangGraph, LlamaIndex, Letta/MemGPT), autonomous coding agents (OpenHands, Sweep), session provenance tools (Entire), data versioning systems (Dolt, Pachyderm), pipeline/build systems (Dagger), and purpose-built knowledge stores (QubicDB, Kindex). Each system was analyzed from its source code and documentation, producing 34 individual analysis artifacts (an architectural profile and a set of insights per system) that yielded 87 adopt/adapt recommendations, 56 explicit rejection decisions, and 52 watch items.

    ","path":["The Thesis"],"tags":[]},{"location":"thesis/#51-primitive-taxonomy","level":3,"title":"5.1 Primitive Taxonomy","text":"

    Every system in the AI-assisted development landscape operates on a core primitive: an atomic unit around which the entire architecture revolves. Our analysis of 17 systems reveals five categories of primitives, each making irrecoverable trade-offs:

    Group A: Document/File Primitives: Human-readable documents as the primary unit. Documents are authored by humans, version-controlled in git, and consumed by AI tools. The invariant of this group is that the primitive is always human-readable and version-controllable with standard tools. Three systems participate in this pattern: the system described in this paper as a pure expression, and Continue (via its rules directory) and Claude Code (via CLAUDE.md files) as partial participants: both use document-based context as an input but organize around different core primitives.

    Group B: Embedded Record Primitives: Vector-embedded records stored with numerical embeddings for similarity search, metadata for filtering, and scoring mechanisms for ranking. Five systems use this approach (LlamaIndex, CrewAI, Letta/MemGPT, QubicDB, Kindex). The invariant is that the primitive requires an embedding model or vector database for core operations: a dependency that precludes offline and air-gapped use.

    Group C: State Snapshot Primitives: Point-in-time captures of the complete system state. The invariant is that any past state can be reconstructed at any historical point. Three systems use this approach (LangGraph, Entire, Dolt).

    Group D: Event/Message Primitives: Sequential events or messages forming an append-only log with causal relationships. Four systems use this approach (OpenHands, AutoGen, Claude Code, Sweep). The invariant is temporal ordering and append-only semantics.

    Group E: Construction/Derivation Primitives: Derived or constructed values that encode how they were produced. The invariant is that the primitive is a function of its inputs; re-executing the same inputs produces the same primitive. Three systems use this approach (Dagger, Pachyderm, Aider).

    ","path":["The Thesis"],"tags":[]},{"location":"thesis/#52-comparison-matrix","level":3,"title":"5.2 Comparison Matrix","text":"

    The five primitive categories differ along seven dimensions:

    Property Document Embedded Record State Snapshot Event/Message Construction Human-readable Yes No Varies Partially No Version-controllable Yes No Varies Yes Yes Queryable by meaning No Yes No No No Rewindable Via git No Yes Yes (replay) Yes Deterministic Yes No Yes Yes Yes Zero-dependency Yes No Varies Varies Varies Offline-capable Yes No Varies Varies Yes

    The document primitive is the only one that simultaneously satisfies human-readability, version-controllability, determinism, zero dependencies, and offline capability. This is not because documents are superior in general (embedded records provide semantic queryability that documents lack) but because the combination of all five properties is what the persistence layer requires. The choice between primitive categories is not a matter of capability but of which properties are considered invariant.

    ","path":["The Thesis"],"tags":[]},{"location":"thesis/#53-convergent-patterns","level":3,"title":"5.3 Convergent Patterns","text":"

    Across the 17 analyzed systems, six design patterns were independently discovered. These convergent patterns carry extra validation weight because they emerged from different problem spaces:

    Pattern 1: \"Tell me what you don't know\": When context is incomplete, explicitly communicate to the model what information is missing and what confidence level the provided context represents. Four systems independently converged on this pattern: inserting skip markers, tracking evidence gaps, annotating provenance, or naming output quality tiers.

    Pattern 2: \"Freshness matters\": Information relevance decreases over time. Three systems independently chose exponential decay with different half-lives (30 days, 90 days, and LRU ordering). Static priority ordering with no time dimension leaves relevant recent knowledge at the same priority as stale entries. This pattern is in productive tension with the persistence model's emphasis on determinism: the claim is not that time-dependence is irrelevant, but that it belongs in the curation step (a human deciding to consolidate or archive stale entries) rather than in the assembly function (an algorithm silently down-ranking entries based on age).

    Pattern 3: \"Content-address everything\": Compute a hash of content at creation time for deduplication, cache invalidation, integrity verification, and change detection. Five systems independently implement content hashing, each discovering it solves different problems 5.

    Pattern 4: \"Structured beats freeform\": When capturing knowledge or session state, a structured schema with required fields produces more useful data than freeform text. Four systems evolved from freeform summaries to typed schemas: one moving from LLM-generated prose to a structured condenser with explicit fields for completed tasks, pending tasks, and files modified.

    Pattern 5: \"Protocol convergence\": The Model Context Protocol (MCP) is emerging as a standard tool integration layer. Nine of 17 systems support it, spanning every category in the analysis. MCP's significance for the persistence model is that it provides a transport mechanism for context delivery without dictating how context is stored or assembled. This makes the approach compatible with both retrieval-centric and persistence-centric architectures.

    Pattern 6: \"Human-in-the-loop for memory\": Critical memory decisions should involve human judgment. Fully automated memory management produces lower-quality persistent context than human-reviewed systems. Four systems independently converged on variants of this pattern: ceremony-based consolidation, interrupt/resume for human input, confirmation mode for high-risk actions, and separated \"think fast\" vs. \"think slow\" processing paths.

    Pattern 6 directly validates the ceremony model described in this paper. The persistence layer requires human curation not because automation is impossible, but because the quality of persistent knowledge degrades when the curation step is removed. The improvement opportunity is to make curation easier, not to automate it away.

    ","path":["The Thesis"],"tags":[]},{"location":"thesis/#6-worked-example-architectural-decision-under-two-models","level":2,"title":"6. Worked Example: Architectural Decision under Two Models","text":"

    We now instantiate the three-tier model in a concrete system (ctx) and illustrate the difference between prompt-time retrieval and cognitive state persistence using a real scenario from its development.

    ","path":["The Thesis"],"tags":[]},{"location":"thesis/#61-the-problem","level":3,"title":"6.1 The Problem","text":"

    During development, the system accumulated three overlapping storage layers for session data: raw transcripts (owned by the AI tool), session copies (JSONL copies plus context snapshots), and enriched journal entries (Markdown summaries). The middle layer (session copies) was a dead-end write sink. An auto-save hook copied transcripts to a directory that nothing read from, because the journal pipeline already read directly from the raw transcripts. Approximately 15 source files, a shell hook, 20 configuration constants, and 30 documentation references supported infrastructure with no consumers.

    ","path":["The Thesis"],"tags":[]},{"location":"thesis/#62-prompt-time-retrieval-model","level":3,"title":"6.2 Prompt-Time Retrieval Model","text":"

    In a retrieval-based system, the decision to remove the middle layer depends on whether the retrieval function surfaces the relevant context:

    The developer asks: \"Should we simplify the session storage?\" The retrieval system must find and rank the original discussion thread where the three layers were designed, the usage statistics showing zero reads from the middle layer, the journal pipeline documentation showing it reads from raw transcripts directly, and the dependency analysis showing 15 files, a hook, and 30 doc references. If any of these fragments are not retrieved (because they are in old chat history, because the embedding similarity score is low, or because the token budget was consumed by more recent but less relevant context), the model may recommend preserving the middle layer, or may not realize it exists.

    Six months later, a new team member asks the same question. The retrieval results will differ: the original discussion has aged out of recency scoring, the usage statistics are no longer in recent history, and the model may re-derive the answer or arrive at a different conclusion.

    ","path":["The Thesis"],"tags":[]},{"location":"thesis/#63-cognitive-state-model","level":3,"title":"6.3 Cognitive State Model","text":"

    In the persistence model, the decision is recorded as a structured artifact at write time:

    ## [2026-02-11] Remove .context/sessions/ storage layer\n\n**Status**: Accepted\n\n**Context**: The session/recall/journal system had three overlapping\nstorage layers. The recall pipeline reads directly from raw transcripts,\nmaking .context/sessions/ a dead-end write sink that nothing reads from.\n\n**Decision**: Remove .context/sessions/ entirely. Two stores remain:\nraw transcripts (global, tool-owned) and enriched journal\n(project-local).\n\n**Rationale**: Dead-end write sinks waste code surface, maintenance\neffort, and user attention. The recall pipeline already proved that\nreading directly from raw transcripts is sufficient. Context snapshots\nare redundant with git history.\n\n**Consequence**: Deleted internal/cli/session/ (15 files), removed\nauto-save hook, removed --auto-save from watch, removed pre-compact\nauto-save, removed /ctx-save skill, updated ~45 documentation files.\nFour earlier decisions superseded.\n

    This artifact is:

    • Deterministically included in every subsequent session's delivery view (budget permitting, with title-only fallback if budget is exceeded)
    • Human-readable and reviewable as a diff in the commit that introduced it
    • Permanent: it persists in version control regardless of retrieval heuristics
    • Causally linked: it explicitly supersedes four earlier decisions, creating an auditable chain

    When the new team member asks \"Why don't we store session copies?\" six months later, the answer is the same artifact, at the same revision, with the same rationale. The reasoning is reconstructible because it was persisted at write time, not discovered at query time.

    ","path":["The Thesis"],"tags":[]},{"location":"thesis/#64-the-diff-when-policy-changes","level":3,"title":"6.4 The Diff When Policy Changes","text":"

    If a future requirement re-introduces session storage (for example, to support multi-agent session correlation), the change appears as a diff to the decision record:

    - **Status**: Accepted\n+ **Status**: Superseded by [2026-08-15] Reintroduce session storage\n+ for multi-agent correlation\n

    The new decision record references the old one, creating a chain of reasoning visible in git log. In the retrieval model, the old decision would simply be ranked lower over time and eventually forgotten.

    ","path":["The Thesis"],"tags":[]},{"location":"thesis/#7-experience-report-a-system-that-designed-itself","level":2,"title":"7. Experience Report: A System That Designed Itself","text":"

    The persistence model described in this paper was developed and tested by using it on its own development. Over 33 days and 389 sessions, the system's context files accumulated a detailed record of decisions made, reversed, and consolidated: providing quantitative and qualitative evidence for the model's properties.

    ","path":["The Thesis"],"tags":[]},{"location":"thesis/#71-scale-and-structure","level":3,"title":"7.1 Scale and Structure","text":"

    The development produced the following authoritative state artifacts:

    • 8 consolidated decision records covering 24 original decisions spanning context injection architecture, hook design, task management, security, agent autonomy, and webhook systems
    • 18 consolidated learning records covering 75 original observations spanning agent compliance, hook behavior, testing patterns, documentation drift, and tool integration
    • A constitution with 13 inviolable rules across 4 categories (security, quality, process, context preservation)
    • 389 enriched journal entries providing a complete session-level audit trail

    The consolidation ratio (24 decisions compressed to 8 records, 75 learnings compressed to 18) illustrates the curation cost and its return: authoritative state becomes denser and more useful over time as related entries are merged, contradictions are resolved, and superseded decisions are marked.

    ","path":["The Thesis"],"tags":[]},{"location":"thesis/#72-architectural-reversals","level":3,"title":"7.2 Architectural Reversals","text":"

    Three architectural reversals during development provide evidence that the persistence model captures and communicates reasoning effectively:

    Reversal 1: The two-tier persistence model: The original design included a middle storage tier for session copies. After 21 days of development, the middle tier was identified as a dead-end write sink (described in Section 6). The decision record captured the full context, and the removal was executed cleanly: 15 source files, a shell hook, and 45 documentation references. The pattern of a \"dead-end write sink\" was subsequently observed in 7 of 17 systems in our landscape analysis that store raw transcripts alongside structured context.

    Reversal 2: The prompt-coach hook: An early design included a hook that analyzed user prompts and offered improvement suggestions. After deployment, the hook produced zero useful tips, its output channel was invisible to users, and it accumulated orphan temporary files. The hook was removed, and the decision record captured the failure mode for future reference.

    Reversal 3: The soft-instruction compliance model: The original context injection strategy relied on soft instructions: directives asking the AI agent to read specific files. After measuring compliance across multiple sessions, we found a consistent 75-85% compliance ceiling. The revised strategy injects content directly, bypassing the agent's judgment about whether to comply. The learning record captures the ceiling measurement and the rationale for the architectural change.

    Each reversal was captured as a structured decision record with context, rationale, and consequences. In a retrieval-based system, these reversals would exist only in chat history, discoverable only if the retrieval function happens to surface them. In the persistence model, they are permanent, indexable artifacts that inform future decisions.

    ","path":["The Thesis"],"tags":[]},{"location":"thesis/#73-compliance-ceiling","level":3,"title":"7.3 Compliance Ceiling","text":"

    The 75-85% compliance ceiling for soft instructions is the most operationally significant finding from the experience report. It means that any context management strategy relying on agent compliance with instructions (\"read this file,\" \"follow this convention,\" \"check this list\") has a hard ceiling on reliability.

    The root cause is structural: the instruction \"don't apply judgment\" is itself evaluated by judgment. When an agent receives a directive to read a file, it first assesses whether the directive is relevant to the current task (and that assessment is the judgment the directive was trying to prevent).

    The architectural response maps directly to the formal model defined in Section 3.1. Content requiring 100% compliance is included in authoritative_state and injected by the deterministic assemble function, bypassing the agent entirely. Content where 80% compliance is acceptable is delivered as instructions within the delivery view. The three-tier architecture makes this distinction explicit: authoritative state is injected; delivery views are assembled deterministically; ephemeral state is available but not pushed.

    ","path":["The Thesis"],"tags":[]},{"location":"thesis/#74-compounding-returns","level":3,"title":"7.4 Compounding Returns","text":"

    Over 33 days, we observed a qualitative shift in the development experience. Early sessions (days 1-7) spent significant time re-establishing context: explaining conventions, re-stating constraints, re-deriving past decisions. Later sessions (days 25-33) began with the agent loading curated context and immediately operating within established constraints, because the constraints were in files rather than in chat history.

    This compounding effect (where each session's context curation improves all subsequent sessions) is the primary return on the curation investment. The cost is borne once (writing a decision record, capturing a learning, updating the task list); the benefit is collected on every subsequent session load.

    The effect is analogous to compound interest in financial systems: the knowledge base grows not linearly with effort but with increasing marginal returns as new knowledge interacts with existing context. A learning captured on day 5 prevents a mistake on day 12, avoiding a debugging session that would otherwise have consumed that day's session and freeing it for productive work that generates new learnings. The growth is not literally exponential (it is bounded by project scope and subject to diminishing returns as the knowledge base matures), but within the observed 33-day window, the returns were consistently accelerating.

    ","path":["The Thesis"],"tags":[]},{"location":"thesis/#75-scope-and-generalizability","level":3,"title":"7.5 Scope and Generalizability","text":"

    This experience report is self-referential by design: the system was developed using its own persistence model. This circularity strengthens the internal validity of the findings (the model was stress-tested under authentic conditions) but limits external generalizability. The two-week crossover point was observed on a single project of moderate complexity with a small team already familiar with the model's assumptions. Whether the same crossover holds for larger teams, for codebases with different characteristics, or for teams adopting the model without having designed it remains an open empirical question. The quantitative claims in this section should be read as existence proofs (demonstrating that the model can produce compounding returns) rather than as predictions about specific adoption scenarios.

    ","path":["The Thesis"],"tags":[]},{"location":"thesis/#8-situating-the-persistence-layer","level":2,"title":"8. Situating the Persistence Layer","text":"

    The persistence layer occupies a specific position in the stack of AI-assisted development:

    Application Logic\nAI Interaction / Agents\nContext Retrieval Systems\nCognitive State Persistence Layer\nVersion Control / Storage\n

    Current systems innovate primarily in the retrieval layer (improving how context is discovered, ranked, and delivered at query time). The persistence layer sits beneath retrieval and above version control. Its role is to maintain the authoritative state that retrieval systems may query but do not own. The relationship is complementary: retrieval answers \"What in the corpus might be relevant?\"; cognitive state answers \"What must be true for this system to operate correctly?\" A mature system uses both: retrieval for discovery, persistence for authority.

    ","path":["The Thesis"],"tags":[]},{"location":"thesis/#9-applicability-and-trade-offs","level":2,"title":"9. Applicability and Trade-Offs","text":"","path":["The Thesis"],"tags":[]},{"location":"thesis/#91-when-to-use-this-model","level":3,"title":"9.1 When to Use This Model","text":"

    A cognitive state persistence layer is most appropriate when:

    Reproducibility is a requirement: If a system must be able to answer \"Why did this output occur, and can it be produced again?\" then deterministic, version-controlled context becomes necessary. This is relevant in regulated environments, safety-critical systems, long-lived infrastructure, and security-sensitive deployments.

    Knowledge must outlive sessions and individuals: Projects with multi-year lifetimes accumulate architectural decisions, domain interpretations, and operational policy. If this knowledge is stored only in chat history, issue trackers, and institutional memory, it decays. The persistence model converts implicit knowledge into branchable, reviewable artifacts.

    Teams require shared cognitive authority: In collaborative environments, correctness depends on a stable answer to \"What does the system believe to be true?\" When this answer is derived from retrieval heuristics, authority shifts to ranking algorithms. When it is versioned and human-readable, authority remains with the team.

    Offline or air-gapped operation is required: Infrastructure-dependent memory systems cannot operate in classified environments, isolated networks, or disaster-recovery scenarios.

    ","path":["The Thesis"],"tags":[]},{"location":"thesis/#92-when-not-to-use-this-model","level":3,"title":"9.2 When Not to Use This Model","text":"

    Zero-configuration personal workflows: For short-lived or exploratory tasks, the cost of explicit knowledge curation outweighs its benefits. Heuristic retrieval is sufficient when correctness is non-critical, outputs are disposable, and historical reconstruction is unnecessary.

    Maximum automatic recall from large corpora: Vector retrieval systems provide superior performance when the primary task is searching vast, weakly structured information spaces. The persistence model assumes that what matters can be decided and that this decision is valuable to record.

    Fully autonomous agent architectures: Agent runtimes that generate and discard state continuously, optimizing for local goal completion, do not benefit from a model that centers human ratification of knowledge.

    ","path":["The Thesis"],"tags":[]},{"location":"thesis/#93-incremental-adoption","level":3,"title":"9.3 Incremental Adoption","text":"

    The transition does not require full system replacement. An incremental path:

    Step 1: Record decisions as versioned artifacts: Instead of allowing conclusions to remain in discussion threads, persist them in reviewable form with context, rationale, and consequences [4]. This alone converts ephemeral reasoning into the cognitive state.

    Step 2: Make inclusion deterministic: Define explicit assembly rules. Retrieval may still exist, but it is no longer authoritative.

    Step 3: Move policy into cognitive state: When system behavior depends on stable constraints, encode those constraints as versioned knowledge. Behavior becomes reproducible.

    Step 4: Optimize assembly, not retrieval: Once the authoritative layer exists, performance improvements come from budgeting, caching, and structural refinement rather than from improving ranking heuristics.

    ","path":["The Thesis"],"tags":[]},{"location":"thesis/#94-the-curation-cost","level":3,"title":"9.4 The Curation Cost","text":"

    The primary objection to this model is the cost of explicit knowledge curation. This cost is real. Writing a structured decision record takes longer than letting a chatbot auto-summarize a conversation. Maintaining a glossary requires discipline. Consolidating 75 learnings into 18 records requires judgment.

    The response is not that the cost is negligible but that it is amortized. A decision record written once is loaded hundreds of times. A learning captured today prevents repeated mistakes across all future sessions. The curation cost is paid once; the benefit compounds.

    The experience report provides rough order-of-magnitude numbers. Across 389 sessions over 33 days, curation activities (writing decision records, capturing learnings, updating the task list, consolidating entries) averaged approximately 3-5 minutes per session. In early sessions (days 1-7), before curated context existed, re-establishing context consumed approximately 10-15 minutes per session: re-explaining conventions, re-stating architectural constraints, re-deriving decisions that had been made but not persisted. By the final week (days 25-33), the re-explanation overhead had dropped to near zero: the agent loaded curated context and began productive work immediately.

    At ~12 sessions per day, the curation cost was roughly 35-60 minutes daily. The re-explanation cost in the first week was roughly 120-180 minutes daily. By the third week, that cost had fallen to under 15 minutes daily while the curation cost remained stable. The crossover (where cumulative curation cost was exceeded by cumulative time saved) occurred around day 10. These figures are approximate and derived from a single project with a small team already familiar with the model; the crossover point will vary with project complexity, team size, and curation discipline.

    ","path":["The Thesis"],"tags":[]},{"location":"thesis/#10-future-work","level":2,"title":"10. Future Work","text":"

    Several directions are compatible with the model described here:

    Section-level deterministic budgeting: Current assembly operates at file granularity. Section-level budgeting would allow finer-grained control (including specific decision records while excluding others within the same file) without sacrificing determinism.

    Causal links between decisions: The experience report shows that decisions frequently reference earlier decisions (superseding, extending, or qualifying them). Formal causal links would enable traversal of the decision graph and automatic detection of orphaned or contradictory constraints.

    Content-addressed context caches: Five systems in our landscape analysis independently discovered that content hashing provides cache invalidation, integrity verification, and change detection. Applying content addressing to the assembly output would enable efficient cache reuse when the authoritative state has not changed.

    Conditional context inclusion: Five systems independently suggest that context entries could carry activation conditions (file patterns, task keywords, or explicit triggers) that control whether they are included in a given assembly. This would reduce the per-session budget cost of large knowledge bases without sacrificing determinism.

    Provenance metadata: Linking context entries to the sessions, decisions, or learnings that motivated them would strengthen the audit trail. Optional provenance fields on Markdown entries (session identifier, cause reference, motivation) would be lightweight and compatible with the existing file-based model.

    ","path":["The Thesis"],"tags":[]},{"location":"thesis/#11-conclusion","level":2,"title":"11. Conclusion","text":"

    AI-assisted development has treated context as a \"query result\" assembled at the moment of interaction, discarded at the session end. This paper identifies a complementary layer: the persistence of authoritative cognitive state as deterministic, version-controlled artifacts.

    The contribution is grounded in three sources of evidence. A landscape analysis of 17 systems reveals five categories of primitives and shows that no existing system provides the combination of human-readability, determinism, zero dependencies, and offline capability that the persistence layer requires. Six design invariants, validated by 56 independent rejection decisions, define the constraints of the design space. An experience report over 389 sessions and 33 days demonstrates compounding returns: later sessions start faster, decisions are not re-derived, and architectural reversals are captured with full context.

    The core claim is this: persistent cognitive state enables causal reasoning across time. A system built on this model can explain not only what is true, but why it became true and when it changed.

    When context is the state:

    • Reasoning is reproducible: the same authoritative state, budget, and policy produce the same delivery view.
    • Knowledge is auditable: decisions are traceable to explicit artifacts with context, rationale, and consequences.
    • Understanding compounds: each session's curation improves all subsequent sessions.

    The choice between retrieval-centric workflows and a persistence layer is not a matter of capability but of time horizon. Retrieval optimizes for relevance at the moment of interaction. Persistence optimizes for the durability of understanding across the lifetime of a project.

    🐸🖤 \"Gooood... let the deterministic context flow through the repository...\" - Kermit the Sidious, probably

    ","path":["The Thesis"],"tags":[]},{"location":"thesis/#appendix-a-representative-rejection-decisions","level":2,"title":"Appendix A: Representative Rejection Decisions","text":"

    The 56 rejection decisions referenced in Section 4 were cataloged across all 17 system analyses, grouped by the invariant they would violate. This appendix provides a representative sample (two per invariant) to illustrate the methodology.

    Invariant 1: Markdown-on-Filesystem (11 rejections): CrewAI's vector embedding storage was rejected because embeddings are not human-readable, not git-diff-friendly, and require external services. Kindex's knowledge graph as core primitive was rejected because it requires specialized commands to inspect content that could be a text file (kin show <id> vs. cat DECISIONS.md).

    Invariant 2: Zero Runtime Dependencies (13 rejections): Letta/MemGPT's PostgreSQL-backed architecture was rejected because it conflicts with local-first, no-database, single-binary operation. Pachyderm's Kubernetes-based distributed architecture was rejected as the antithesis of a single-binary design for a tool that manages text files.

    Invariant 3: Deterministic Assembly (6 rejections): LlamaIndex's embedding-based retrieval as the primary selection mechanism was rejected because it destroys determinism, requires an embedding model, and removes human judgment from the selection process. QubicDB's wall-clock-dependent scoring was rejected because it directly conflicts with the \"same inputs produce same output\" property.

    Invariant 4: Human Authority (6 rejections): Letta/MemGPT's agent self-modification of memory was rejected as fundamentally opposed to human-curated persistence. Claude Code's unstructured auto-memory (where the agent writes freeform notes) was rejected because structured files with defined schemas produce higher-quality persistent context than unconstrained agent output.

    Invariant 5: Local-First / Air-Gap Capable (7 rejections): Sweep's cloud-dependent architecture was rejected as fundamentally incompatible with the local-first, offline-capable model. LangGraph's managed cloud deployment was rejected because cloud dependencies for core functionality violate air-gap capability.

    Invariant 6: No Default Telemetry (4 rejections): Continue's telemetry-by-default (PostHog) was rejected because it contradicts the local-first, privacy-respecting trust model. CrewAI's global telemetry on import (Scarf tracking pixel) was rejected because it violates user trust and breaks air-gap capability.

    The remaining 9 rejections did not map to a specific invariant but were rejected on other architectural grounds: for example, Aider's full-file-content-in-context approach (which defeats token budgeting), AutoGen's multi-agent orchestration as core primitive (scope creep), and Claude Code's 30-day transcript retention limit (institutional knowledge should have no automatic expiration).

    ","path":["The Thesis"],"tags":[]},{"location":"thesis/#references","level":2,"title":"References","text":"
    1. Reproducible Builds Project, \"Reproducible Builds: Increasing the Integrity of Software Supply Chains\", 2017. https://reproducible-builds.org/docs/definition/ ↩↩↩

    2. S. McIntosh et al., \"The Impact of Build System Evolution on Software Quality\", ICSE, 2015. https://doi.org/10.1109/ICSE.2015.70 ↩

    3. C. Manning, P. Raghavan, H. Schütze, Introduction to Information Retrieval, Cambridge University Press, 2008. https://nlp.stanford.edu/IR-book/ ↩

    4. M. Nygard, \"Documenting Architecture Decisions\", Cognitect Blog, 2011. https://cognitect.com/blog/2011/11/15/documenting-architecture-decisions ↩↩

    5. L. Torvalds et al., Git Internals - Git Objects (content-addressed storage concepts). https://git-scm.com/book/en/v2/Git-Internals-Git-Objects ↩

    6. Kief Morris, Infrastructure as Code, O'Reilly, 2016. ↩

    7. J. Kreps, \"The Log: What every software engineer should know about real-time data's unifying abstraction\", 2013. https://engineering.linkedin.com/distributed-systems/log ↩

    8. P. Hunt et al., \"ZooKeeper: Wait-free coordination for Internet-scale systems\", USENIX ATC, 2010. https://www.usenix.org/legacy/event/atc10/tech/full_papers/Hunt.pdf ↩

    ","path":["The Thesis"],"tags":[]}]} \ No newline at end of file +{"config":{"separator":"[\\s\\-_,:!=\\[\\]()\\\\\"`/]+|\\.(?!\\d)"},"items":[{"location":"","level":1,"title":"Manifesto","text":"","path":["Manifesto"],"tags":[]},{"location":"#the-ctx-manifesto","level":1,"title":"The ctx Manifesto","text":"

    Creation, not code.

    Context, not prompts.

    Verification, not vibes.

    This Is NOT a Metaphor

    Code executes instructions.

    Creation produces outcomes.

    Confusing the two is how teams ship motion...

    ...instead of progress.

    • It was never about the code.
    • Code has zero standalone value.
    • Code is an implementation detail.

    Code is an incantation.

    Creation is the act.

    And creation does not happen in a vacuum.

    ","path":["Manifesto"],"tags":[]},{"location":"#ctx-is-the-substrate","level":2,"title":"ctx Is the Substrate","text":"

    Constraints Have Moved

    Human bandwidth is no longer the limiting factor.

    Context integrity is.

    Human bandwidth is no longer the constraint.

    Context is:

    • Without durable context, intelligence resets.
    • Without memory, reasoning decays.
    • Without structure, scale collapses.

    Creation is now limited by:

    • Clarity of intent;
    • Quality of context;
    • Rigor of verification.

    Not by speed.

    Not by capacity.

    Velocity Amplifies

    Faster execution on broken context compounds error.

    Speed multiplies whatever is already wrong.

    ","path":["Manifesto"],"tags":[]},{"location":"#humans-author-meaning","level":2,"title":"Humans Author Meaning","text":"

    Intent Is Authored

    Systems can optimize.

    Models can generalize.

    Meaning must be chosen.

    Intent is not emergent.

    Vision, goals, and direction are human responsibilities.

    We decide:

    • What matters;
    • What success means;
    • What world we are building.

    ctx encodes the intent so it...

    • survives time,
    • survives handoffs,
    • survives scale.

    Nothing important should live only in conversation.

    Nothing critical should depend on recall.

    Oral Tradition Does Not Scale

    If intent cannot be inspected, it cannot be enforced.

    ","path":["Manifesto"],"tags":[]},{"location":"#ctx-before-action","level":2,"title":"ctx Before Action","text":"

    Orientation Precedes Motion

    Acting first and understanding later is not bravery.

    It is debt.

    Never act without ctx.

    Before execution, we must verify:

    • Where we are;
    • Why we are here;
    • What constraints apply;
    • What assumptions are active.

    Action without ctx is gambling.

    Speed without orientation is noise.

    ctx is not overhead: It is the cost of correctness.

    ","path":["Manifesto"],"tags":[]},{"location":"#persistent-context-beats-prompt-memory","level":2,"title":"Persistent Context Beats Prompt Memory","text":"

    Transience Is the Default Failure Mode

    • Prompts decay.
    • Chats fragment.
    • Memory heuristics drift.

    Prompts are transient.

    Chats are lossy.

    Memory heuristics drift.

    ctx must be:

    • Durable;
    • Structured;
    • Explicit;
    • Queryable.

    Intent Must Be Intentional

    If intent exists only in a prompt...

    ...alignment is already degrading.

    Knowledge lives in the artifacts:

    • Decisions;
    • Documentation;
    • Dependency maps;
    • Evaluation history.

    Artifacts Outlive Sessions

    What is not written will be re-learned.

    At full cost.

    ","path":["Manifesto"],"tags":[]},{"location":"#what-ctx-is-not","level":2,"title":"What ctx Is Not","text":"

    Avoid Category Errors

    Mislabeling ctx guarantees misuse.

    ctx is not a memory feature.

    • ctx is not prompt engineering.
    • ctx is not a productivity hack.
    • ctx is not automation theater.

    ctx is a system for preserving intent under scale.

    ctx is infrastructure.

    ","path":["Manifesto"],"tags":[]},{"location":"#verified-reality-is-the-scoreboard","level":2,"title":"Verified Reality Is the Scoreboard","text":"

    Activity Is a False Proxy

    Output volume correlates poorly with impact.

    • Code is not progress.
    • Activity is not impact.

    The only truth that compounds is verified change.

    Verified change must exist in the real world.

    Hypotheses are cheap; outcomes are not.

    ctx captures:

    • What we expected;
    • What we observed;
    • Where reality diverged.

    If we cannot predict, measure, and verify the result...

    ...it does not count.

    ","path":["Manifesto"],"tags":[]},{"location":"#build-to-learn-not-to-accumulate","level":2,"title":"Build to Learn, Not to Accumulate","text":"

    Prototypes Have an Expiration Date

    A prototype's value is information, not longevity.

    Prototypes exist to reduce uncertainty.

    We build to:

    • Test assumptions;
    • Validate architecture;
    • Answer specific questions.

    Not everything.

    Not blindly.

    Not permanently.

    ctx records archeology so the cost is paid once.

    ","path":["Manifesto"],"tags":[]},{"location":"#failures-are-assets","level":2,"title":"Failures Are Assets","text":"

    Failure without Capture Is Waste

    Pain that does not teach is pure loss.

    Failures are not erased: They are preserved.

    Each failure becomes:

    • A documented hypothesis;
    • An analyzed deviation;
    • A permanent artifact.

    Rollback fixes symptoms: ctx fixes systems.

    A repeated mistake is a missing ctx artifact.

    ","path":["Manifesto"],"tags":[]},{"location":"#structure-enables-scale","level":2,"title":"Structure Enables Scale","text":"

    Unbounded Autonomy Destabilizes

    Power without a structure produces chaos.

    Transpose it:

    Power without any structure becomes chaos.

    ctx defines:

    • Roles;
    • Boundaries;
    • Protocols;
    • Escalation paths;
    • Decision rights.

    Ambiguity is a system failure:

    • Debates must be structured.
    • Decisions must be explicit.
    • History must be retained.
    ","path":["Manifesto"],"tags":[]},{"location":"#encode-intent-into-the-environment","level":2,"title":"Encode Intent into the Environment","text":"

    Goodwill Does Not Belong to the Table

    Alignment that depends on memory will drift.

    Alignment cannot depend on memory or goodwill.

    Do not rely on people to remember.

    Encode the behavior, so it happens by default.

    Intent is encoded as:

    • Policies;
    • Schemas;
    • Constraints;
    • Evaluation harnesses.

    Rules must be machine-readable.

    Laws must be enforceable.

    If intent is implicit, drift is guaranteed.

    ","path":["Manifesto"],"tags":[]},{"location":"#cost-is-a-first-class-signal","level":2,"title":"Cost Is a First-Class Signal","text":"

    Attention Is the Scarcest Resource

    Not ideas.

    Not ambition.

    Ideas do not compete on time:

    They compete on cost and impact:

    • Attention is finite.
    • Compute is finite.
    • Context is expensive.

    We continuously ask:

    • What the most valuable next action is.
    • What outcome justifies the cost.

    ctx guides allocation.

    Learning reshapes priority.

    ","path":["Manifesto"],"tags":[]},{"location":"#show-the-why","level":2,"title":"Show the Why","text":"

    {} (code, artifacts, apps, binaries) produce outputs; they do not preserve reasoning.

    Systems that cannot explain themselves will not be trusted.

    Traceability builds trust.

         {} --> what\n\n    ctx --> why\n

    We record:

    • Explored paths;
    • Rejected options;
    • Assumptions made;
    • Evidence used.

    Opaque systems erode trust:

    Transparent ctx compounds understanding.

    ","path":["Manifesto"],"tags":[]},{"location":"#continuously-verify-the-system","level":2,"title":"Continuously Verify the System","text":"

    Stability Is Temporary

    Every assumption has a half-life:

    • Models drift.
    • Tools change.
    • Assumptions rot.

    ctx must be verified against reality.

    Trust is a spectrum.

    Trust is continuously re-earned:

    • Benchmarks,
    • regressions,
    • and evaluations...

    ...are safety rails.

    ","path":["Manifesto"],"tags":[]},{"location":"#ctx-is-leverage","level":2,"title":"ctx Is Leverage","text":"

    Humans Are Decision Engines

    Execution should not consume judgment.

    Humans must not be typists.

    We are the authors.

    Human effort is reserved for:

    • Judgment;
    • Design;
    • Taste;
    • Synthesis.

    Repetition is delegated.

    Toil is automated.

    ctx preserves leverage across time.

    ","path":["Manifesto"],"tags":[]},{"location":"#the-thesis","level":2,"title":"The Thesis","text":"

    Invariant

    Everything else is an implementation detail.

    • Creation is the act.
    • ctx is the substrate.
    • Verification is the truth.

    Code executes → Models reason → Agents amplify.

    ctx lives on.

    • Without ctx, intelligence resets.
    • With ctx, creation compounds.
    ","path":["Manifesto"],"tags":[]},{"location":"blog/","level":1,"title":"Blog","text":"

    Stories, insights, and lessons learned from building and using ctx.

    ","path":["Blog"],"tags":[]},{"location":"blog/#releases","level":2,"title":"Releases","text":"","path":["Blog"],"tags":[]},{"location":"blog/#ctx-v080-the-architecture-release","level":3,"title":"ctx v0.8.0: The Architecture Release","text":"

    March 23, 2026: 374 commits, 1,708 Go files touched, and a near-complete architectural overhaul. Every CLI package restructured into cmd/ + core/ taxonomy, all user-facing strings externalized to YAML, MCP server for tool-agnostic AI integration, and the memory bridge connecting Claude Code's auto-memory to .context/.

    Topics: release, architecture, refactoring, MCP, localization

    ","path":["Blog"],"tags":[]},{"location":"blog/#field-notes","level":2,"title":"Field Notes","text":"","path":["Blog"],"tags":[]},{"location":"blog/#the-watermelon-rind-anti-pattern-why-smarter-tools-make-shallower-agents","level":3,"title":"The Watermelon-Rind Anti-Pattern: Why Smarter Tools Make Shallower Agents","text":"

    April 6, 2026: Give an agent a graph query tool, and it produces output that's structurally correct but substantively hollow (the watermelon-rind anti-pattern). We ran three sessions analyzing the same codebase with different tool access: the one with no tools produced 5.2x more depth. The fix: a two-pass compiler for architecture understanding: force code reading first, verify with tools second. Constraint is the feature.

    Topics: architecture, code intelligence, agent behavior, design patterns, field notes

    ","path":["Blog"],"tags":[]},{"location":"blog/#code-structure-as-an-agent-interface-what-19-ast-tests-taught-us","level":3,"title":"Code Structure as an Agent Interface: What 19 AST Tests Taught Us","text":"

    April 2, 2026: We built 19 AST-based audit tests in a single session, touching 300+ files. In the process we discovered that \"old-school\" code quality constraints (no magic numbers, centralized error handling, 80-char lines, documentation) are exactly the constraints that make code readable to AI agents. If an agent interacts with your codebase, your codebase already is an interface. You just have not designed it as one.

    Topics: ast, code quality, agent readability, conventions, field notes

    ","path":["Blog"],"tags":[]},{"location":"blog/#we-broke-the-31-rule","level":3,"title":"We Broke the 3:1 Rule","text":"

    March 23, 2026: After v0.6.0, we ran 198 feature commits across 17 days before consolidating. The 3:1 rule says consolidate every 4th session. We did it after the 66th. The result: an 18-day, 181-commit cleanup marathon that took longer than the feature run itself. A follow-up to The 3:1 Ratio with empirical evidence from the v0.8.0 cycle.

    Topics: consolidation, technical debt, development workflow, convention drift, field notes

    ","path":["Blog"],"tags":[]},{"location":"blog/#context-engineering","level":2,"title":"Context Engineering","text":"","path":["Blog"],"tags":[]},{"location":"blog/#agent-memory-is-infrastructure","level":3,"title":"Agent Memory Is Infrastructure","text":"

    March 4, 2026: Every AI coding agent starts fresh. The obvious fix is \"memory.\" But there's a different problem memory doesn't touch: the project itself accumulates knowledge that has nothing to do with any single session. This post argues that agent memory is L2 (runtime cache); what's missing is L3 (project infrastructure).

    Topics: context engineering, agent memory, infrastructure, persistence, team knowledge

    ","path":["Blog"],"tags":[]},{"location":"blog/#context-as-infrastructure","level":3,"title":"Context as Infrastructure","text":"

    February 17, 2026: Where does your AI's knowledge live between sessions? If the answer is \"in a prompt I paste at the start,\" you are treating context as a consumable. This post argues for treating it as infrastructure instead: persistent files, separation of concerns, two-tier storage, progressive disclosure, and the filesystem as the most mature interface available.

    Topics: context engineering, infrastructure, progressive disclosure, persistence, design philosophy

    ","path":["Blog"],"tags":[]},{"location":"blog/#the-attention-budget-why-your-ai-forgets-what-you-just-told-it","level":3,"title":"The Attention Budget: Why Your AI Forgets What You Just Told It","text":"

    February 3, 2026: Every token you send to an AI consumes a finite resource: the attention budget. Understanding this constraint shaped every design decision in ctx: hierarchical file structure, explicit budgets, progressive disclosure, and filesystem-as-index.

    Topics: attention mechanics, context engineering, progressive disclosure, ctx primitives, token budgets

    ","path":["Blog"],"tags":[]},{"location":"blog/#before-context-windows-we-had-bouncers","level":3,"title":"Before Context Windows, We Had Bouncers","text":"

    February 14, 2026: IRC is stateless. You disconnect, you vanish. Modern systems are not much different. This post traces the line from IRC bouncers to context engineering: stateless protocols require stateful wrappers, volatile interfaces require durable memory.

    Topics: context engineering, infrastructure, IRC, persistence, state continuity

    ","path":["Blog"],"tags":[]},{"location":"blog/#the-last-question","level":3,"title":"The Last Question","text":"

    February 28, 2026: In 1956, Asimov wrote a story about a question that spans the entire future of the universe. A reading of \"The Last Question\" through the lens of persistence, substrate migration, and what it means to build systems where sessions don't reset.

    Topics: context continuity, long-lived systems, persistence, intelligence over time, field notes

    ","path":["Blog"],"tags":[]},{"location":"blog/#agent-behavior-and-design","level":2,"title":"Agent Behavior and Design","text":"","path":["Blog"],"tags":[]},{"location":"blog/#the-dog-ate-my-homework-teaching-ai-agents-to-read-before-they-write","level":3,"title":"The Dog Ate My Homework: Teaching AI Agents to Read Before They Write","text":"

    February 25, 2026: You wrote the playbook. The agent skipped all of it. Five sessions, five failure modes, and the discovery that observable compliance beats perfect compliance.

    Topics: hooks, agent behavior, context engineering, behavioral design, testing methodology, compliance monitoring

    ","path":["Blog"],"tags":[]},{"location":"blog/#skills-that-fight-the-platform","level":3,"title":"Skills That Fight the Platform","text":"

    February 4, 2026: When custom skills conflict with system prompt defaults, the AI has to reconcile contradictory instructions. Five conflict patterns discovered while building ctx.

    Topics: context engineering, skill design, system prompts, antipatterns, AI safety primitives

    ","path":["Blog"],"tags":[]},{"location":"blog/#the-anatomy-of-a-skill-that-works","level":3,"title":"The Anatomy of a Skill That Works","text":"

    February 7, 2026: I had 20 skills. Most were well-intentioned stubs. Then I rewrote all of them. Seven lessons emerged: quality gates prevent premature execution, negative triggers are load-bearing, examples set boundaries better than rules.

    Topics: skill design, context engineering, quality gates, E/A/R framework, practical patterns

    ","path":["Blog"],"tags":[]},{"location":"blog/#you-cant-import-expertise","level":3,"title":"You Can't Import Expertise","text":"

    February 5, 2026: I found a well-crafted consolidation skill. Applied my own E/A/R framework: 70% was noise. This post is about why good skills can't be copy-pasted, and how to grow them from your project's own drift history.

    Topics: skill adaptation, E/A/R framework, convention drift, consolidation, project-specific expertise

    ","path":["Blog"],"tags":[]},{"location":"blog/#not-everything-is-a-skill","level":3,"title":"Not Everything Is a Skill","text":"

    February 8, 2026: I ran an 8-agent codebase audit and got actionable results. The natural instinct was to wrap the prompt as a skill. Then I applied my own criteria: it failed all three tests.

    Topics: skill design, context engineering, automation discipline, recipes, agent teams

    ","path":["Blog"],"tags":[]},{"location":"blog/#defense-in-depth-securing-ai-agents","level":3,"title":"Defense in Depth: Securing AI Agents","text":"

    February 9, 2026: The security advice was \"use CONSTITUTION.md for guardrails.\" That is wishful thinking. Five defense layers for unattended AI agents, each with a bypass, and why the strength is in the combination.

    Topics: agent security, defense in depth, prompt injection, autonomous loops, container isolation

    ","path":["Blog"],"tags":[]},{"location":"blog/#development-practice","level":2,"title":"Development Practice","text":"","path":["Blog"],"tags":[]},{"location":"blog/#code-is-cheap-judgment-is-not","level":3,"title":"Code Is Cheap. Judgment Is Not.","text":"

    February 17, 2026: AI does not replace workers. It replaces unstructured effort. Three weeks of building ctx with an AI agent proved it: YOLO mode showed production is cheap, the 3:1 ratio showed judgment has a cadence.

    Topics: AI and expertise, context engineering, judgment vs production, human-AI collaboration, automation discipline

    ","path":["Blog"],"tags":[]},{"location":"blog/#the-31-ratio","level":3,"title":"The 3:1 Ratio","text":"

    February 17, 2026: AI makes technical debt worse: not because it writes bad code, but because it writes code so fast that drift accumulates before you notice. Three feature sessions, one consolidation session.

    Topics: consolidation, technical debt, development workflow, convention drift, code quality

    ","path":["Blog"],"tags":[]},{"location":"blog/#refactoring-with-intent-human-guided-sessions-in-ai-development","level":3,"title":"Refactoring with Intent: Human-Guided Sessions in AI Development","text":"

    February 1, 2026: The YOLO mode shipped 14 commands in a week. But technical debt doesn't send invoices. This is the story of what happened when we started guiding the AI with intent.

    Topics: refactoring, code quality, documentation standards, module decomposition, YOLO versus intentional development

    ","path":["Blog"],"tags":[]},{"location":"blog/#how-deep-is-too-deep","level":3,"title":"How Deep Is Too Deep?","text":"

    February 12, 2026: I kept feeling like I should go deeper into ML theory. Then I spent a week debugging an agent failure that had nothing to do with model architecture. When depth compounds and when it doesn't.

    Topics: AI foundations, abstraction boundaries, agentic systems, context engineering, failure modes

    ","path":["Blog"],"tags":[]},{"location":"blog/#agent-workflows","level":2,"title":"Agent Workflows","text":"","path":["Blog"],"tags":[]},{"location":"blog/#parallel-agents-merge-debt-and-the-myth-of-overnight-progress","level":3,"title":"Parallel Agents, Merge Debt, and the Myth of Overnight Progress","text":"

    February 17, 2026: You discover agents can run in parallel. So you open ten terminals. It is not progress: it is merge debt being manufactured in real time. The five-agent ceiling and why role separation beats file locking.

    Topics: agent workflows, parallelism, verification, context engineering, engineering practice

    ","path":["Blog"],"tags":[]},{"location":"blog/#parallel-agents-with-git-worktrees","level":3,"title":"Parallel Agents with Git Worktrees","text":"

    February 14, 2026: I had 30 open tasks that didn't touch the same files. Using git worktrees to partition a backlog by file overlap, run 3-4 agents simultaneously, and merge the results.

    Topics: agent teams, parallelism, git worktrees, context engineering, task management

    ","path":["Blog"],"tags":[]},{"location":"blog/#field-notes-and-signals","level":2,"title":"Field Notes and Signals","text":"","path":["Blog"],"tags":[]},{"location":"blog/#when-a-system-starts-explaining-itself","level":3,"title":"When a System Starts Explaining Itself","text":"

    February 17, 2026: Every new substrate begins as a private advantage. Reality begins when other people start describing it in their own language. \"Better than Adderall\" is not praise; it is a diagnostic.

    Topics: field notes, adoption signals, infrastructure vs tools, context engineering, substrates

    ","path":["Blog"],"tags":[]},{"location":"blog/#why-zensical","level":3,"title":"Why Zensical","text":"

    February 15, 2026: I needed a static site generator for the journal system. The instinct was Hugo. But instinct is not analysis. Why zensical was the right choice: thin dependencies, MkDocs-compatible config, and zero lock-in.

    Topics: tooling, static site generators, journal system, infrastructure decisions, context engineering

    ","path":["Blog"],"tags":[]},{"location":"blog/#releases_1","level":2,"title":"Releases","text":"","path":["Blog"],"tags":[]},{"location":"blog/#ctx-v060-the-integration-release","level":3,"title":"ctx v0.6.0: The Integration Release","text":"

    February 16, 2026: ctx is now a Claude Marketplace plugin. Two commands, no build step, no shell scripts. v0.6.0 replaces six Bash hook scripts with compiled Go subcommands and ships 25+ Skills as a plugin.

    Topics: release, plugin system, Claude Marketplace, distribution, security hardening

    ","path":["Blog"],"tags":[]},{"location":"blog/#ctx-v030-the-discipline-release","level":3,"title":"ctx v0.3.0: The Discipline Release","text":"

    February 15, 2026: No new headline feature. Just 35+ documentation and quality commits against ~15 feature commits. What a release looks like when the ratio of polish to features is 3:1.

    Topics: release, skills migration, consolidation, code quality, E/A/R framework

    ","path":["Blog"],"tags":[]},{"location":"blog/#ctx-v020-the-archaeology-release","level":3,"title":"ctx v0.2.0: The Archaeology Release","text":"

    February 1, 2026: What if your AI could remember everything? Not just the current session, but every session. ctx v0.2.0 introduces the recall and journal systems.

    Topics: session recall, journal system, structured entries, token budgets, meta-tools

    ","path":["Blog"],"tags":[]},{"location":"blog/#building-ctx-using-ctx-a-meta-experiment-in-ai-assisted-development","level":3,"title":"Building ctx Using ctx: A Meta-Experiment in AI-Assisted Development","text":"

    January 27, 2026: What happens when you build a tool designed to give AI memory, using that very same tool to remember what you're building? This is the story of ctx.

    Topics: dogfooding, AI-assisted development, Ralph Loop, session persistence, architectural decisions

    ","path":["Blog"],"tags":[]},{"location":"blog/2026-01-27-building-ctx-using-ctx/","level":1,"title":"Building ctx Using ctx","text":"

    Update (2026-02-11)

    As of v0.4.0, ctx consolidated sessions into the journal mechanism.

    References to .context/sessions/, auto-save hooks, and SessionEnd auto-save in this post reflect the architecture at the time of writing.

    ","path":["Building ctx Using ctx: A Meta-Experiment in AI-Assisted Development"],"tags":[]},{"location":"blog/2026-01-27-building-ctx-using-ctx/#a-meta-experiment-in-ai-assisted-development","level":2,"title":"A Meta-Experiment in AI-Assisted Development","text":"

    Jose Alekhinne / 2026-01-27

    Can a Tool Design Itself?

    What happens when you build a tool designed to give AI memory, using that very same tool to remember what you are building?

    This is the story of ctx, how it evolved from a hasty \"YOLO mode\" experiment to a disciplined system for persistent AI context, and what I have learned along the way.

    Context Is a Record

    Context is a persistent record.

    By \"context\", I don't mean model memory or stored thoughts:

    I mean the durable record of decisions, learnings, and intent that normally evaporates between sessions.

    ","path":["Building ctx Using ctx: A Meta-Experiment in AI-Assisted Development"],"tags":[]},{"location":"blog/2026-01-27-building-ctx-using-ctx/#ai-amnesia","level":2,"title":"AI Amnesia","text":"

    Every developer who works with AI code generators knows the frustration:

    You have a deep, productive session where the AI understands your codebase, your conventions, your decisions. And then you close the terminal.

    Tomorrow, it's a blank slate. The AI has forgotten everything.

    That is \"reset amnesia\", and it's not just annoying: it's expensive.

    Every session starts with:

    • Re-explaining context;
    • Re-reading files;
    • Re-discovering decisions that were already made.

    I Needed Context

    \"I don't want to lose this discussion...

    ...I am a brain-dead developer YOLO'ing my way out.\"

    ☝️ that's exactly what I said to Claude when I first started working on ctx.

    ","path":["Building ctx Using ctx: A Meta-Experiment in AI-Assisted Development"],"tags":[]},{"location":"blog/2026-01-27-building-ctx-using-ctx/#the-genesis","level":2,"title":"The Genesis","text":"

    The project started as \"Active Memory\" (amem): a CLI tool to persist AI context across sessions.

    The core idea was simple:

    1. Create a .context/ directory with structured Markdown files for decisions, learnings, tasks, and conventions.
    2. The AI reads these at session start and writes to them before the session ends.
    3. There is no step 3.

    The first commit was just scaffolding. But within hours, the Ralph Loop (An iterative AI development workflow) had produced a working CLI:

    feat(cli): implement amem init command\nfeat(cli): implement amem status command\nfeat(cli): implement amem add command\nfeat(cli): implement amem agent command\n...\n

    Not one, not two, but a whopping fourteen core commands shipped in rapid succession!

    I was YOLO'ing like there was no tomorrow:

    • Auto-accept every change;
    • Let the AI run free;
    • Ship features fast.
    ","path":["Building ctx Using ctx: A Meta-Experiment in AI-Assisted Development"],"tags":[]},{"location":"blog/2026-01-27-building-ctx-using-ctx/#the-meta-experiment-using-amem-to-build-amem","level":2,"title":"The Meta-Experiment: Using amem to Build amem","text":"

    Here's where it gets interesting: On January 20th, I asked:

    \"Can I use amem to help you remember this context when I restart?\"

    The answer was yes, but with a gap:

    Autoload worked (via Claude Code's PreToolUse hook), but auto-save was missing: If the user quit, with Ctrl+C, everything since the last manual save was lost.

    That session became the first real test of the system.

    Here is the first session file we recorded:

    ## Key Discussion Points\n\n### 1. amem vs Ralph Loop - They're Separate Systems\n\n**User's question**: \"How do I use the binary to recreate this project?\"\n\n**Answer discovered**: `amem` is for context management, Ralph Loop is for \ndevelopment workflow. They are complementary but separate.\n\n### 2. Two Tiers of Context Persistence\n\n| Tier      | What                        | Why                           |\n|-----------|-----------------------------|-------------------------------|\n| Curated   | Learnings, decisions, tasks | Quick reload, token-efficient |\n| Full dump | Entire conversation         | Safety net, nothing lost      |\n\n| Where                  |\n|------------------------|\n| .context/*.md          |\n| .context/sessions/*.md |\n

    This session file (written by the AI to preserve its own context) became the template for how ctx handles session persistence.

    ","path":["Building ctx Using ctx: A Meta-Experiment in AI-Assisted Development"],"tags":[]},{"location":"blog/2026-01-27-building-ctx-using-ctx/#the-rename","level":2,"title":"The Rename","text":"

    By January 21st, I realized \"Active Memory\" was too generic, and (arguably) too marketing-smelly.

    Besides, the binary was already called ctx (short for Context), the directory was .context/, and the slash commands would be /ctx-*.

    So it followed that the project should be renamed to ctx to make things make sense.

    The rename touched 100+ files but was clean: a find-and-replace with Go's type system catching any misses.

    The git history tells the story:

    0e8f6bb feat: rename amem to ctx and add Claude Code integration\n87dcfa1 README.\n4f0e195 feat: separate orchestrator directive from agent tasks\n
    ","path":["Building ctx Using ctx: A Meta-Experiment in AI-Assisted Development"],"tags":[]},{"location":"blog/2026-01-27-building-ctx-using-ctx/#yolo-mode-fast-but-dangerous","level":2,"title":"YOLO Mode: Fast, but Dangerous","text":"

    The Ralph Loop made feature development incredibly fast.

    But it created technical debt that I didn't notice until later.

    A comparison session on January 25th revealed the patterns:

    YOLO Pattern What We Found \"TASKS.md\" scattered in 10 files Same string literal everywhere, no constants dir + \"/\" + file Should be filepath.Join() Monolithic cli_test.go (1500+ lines) Tests disconnected from implementations package initcmd in init/ folder Go's \"init\" is reserved: subtle naming collision

    Here is another analysis made by ctx:

    ● Based on my analysis, here are the key differences:\n\nYOLO Mode (Pre-040ce99)\n- Feature-first: Added slash commands, tests, templates rapidly\n- Scattered magic strings: \"TASKS.md\", \"decision\", \".context\" spread across files\n- Quick file creation: New files without organizational patterns\n- Working but inconsistent: Code functioned but lacked systematic structure\n\nHuman-Guided Mode (Post-040ce99)\n- Consolidation focus: Centralized constants in config package\n- Consistent naming: Dir, File, Filename, UpdateType prefixes\n- Self-referential constants: FileType map uses constants as keys, not literals\n- Proper path construction: filepath.Join() instead of +\"/\"+\n- Colocated tests: Tests next to implementations\n- Canonical naming: Package name = folder name\n

    The fix required a human-guided refactoring session. I continued to do that before every major release, from that point on.

    We introduced internal/config/config.go with semantic prefixes:

    const (\n    DirContext     = \".context\"\n    DirArchive     = \"archive\"\n    DirSessions    = \"sessions\"\n    FilenameTask   = \"TASKS.md\"\n    UpdateTypeTask = \"task\"\n)\n

    What I begrudgingly learned was: YOLO mode is effective for velocity but accumulates debt.

    So I took a mental note to schedule periodic consolidation sessions.

    ","path":["Building ctx Using ctx: A Meta-Experiment in AI-Assisted Development"],"tags":[]},{"location":"blog/2026-01-27-building-ctx-using-ctx/#the-dogfooding-test-that-failed","level":2,"title":"The Dogfooding Test That Failed","text":"

    On January 21st, I ran an experiment: have another Claude instance rebuild ctx from scratch using only the specs and PROMPT.md.

    The Ralph Loop ran, all tasks got checked off, the loop exited successfully.

    But the binary was broken!

    Commands just printed help text instead of executing.

    All tasks were marked \"complete\" but the implementation didn't work.

    Here's what ctx discovered:

    ## Key Findings\n\n### Dogfooding Binary Is Broken\n- Commands don't execute: they just print root help text\n- All tasks were marked complete but binary doesn't work\n- Lesson: \"tasks checked off\" ≠ \"implementation works\"\n

    This was humbling, to say the least.

    I realized I had the same blind spot in my own codebase: no integration tests that actually invoked the binary.

    So I added:

    • Integration tests for all commands;
    • Coverage targets (60-80% per package)
    • Smoke tests in CI
    • A constitution rule: \"All code must pass tests before commit\"
    ","path":["Building ctx Using ctx: A Meta-Experiment in AI-Assisted Development"],"tags":[]},{"location":"blog/2026-01-27-building-ctx-using-ctx/#the-constitution-versus-conventions","level":2,"title":"The Constitution versus Conventions","text":"

    As lessons accumulated, there was the temptation to add everything to CONSTITUTION.md as \"inviolable rules\".

    But I resisted.

    The constitution should contain only truly inviolable invariants:

    • Security (no secrets, no customer data)
    • Quality (tests must pass)
    • Process (decisions need records)
    • ctx invocation (always use PATH, never fallback)

    Everything else (coding style, file organization, naming conventions...) should go into CONVENTIONS.md.

    Here's how ctx explained why the distinction was important:

    Decision Record, 2026-01-25

    Overly strict constitution creates friction and gets ignored.

    Conventions can be bent; constitution cannot.

    ","path":["Building ctx Using ctx: A Meta-Experiment in AI-Assisted Development"],"tags":[]},{"location":"blog/2026-01-27-building-ctx-using-ctx/#hooks-harder-than-they-look","level":2,"title":"Hooks: Harder than They Look","text":"

    Claude Code hooks seemed simple: Run a script before/after certain events.

    But I hit multiple gotchas:

    1. Key names matter

    // WRONG - \"Invalid key in record\" error\n\"PreToolUseHooks\": [...]\n\n// RIGHT\n\"PreToolUse\": [...]\n

    2. Blocking requires specific output

    # WRONG - just exits, doesn't block\nexit 1\n\n# RIGHT - JSON output + exit 0\necho '{\"decision\": \"block\", \"reason\": \"Use ctx from PATH\"}'\nexit 0\n

    3. Go's JSON escaping

    json.Marshal escapes >, <, & as unicode (\\u003e) by default.

    When generating shell commands in JSON:

    encoder := json.NewEncoder(file)\nencoder.SetEscapeHTML(false) // Prevent 2>/dev/null → 2\\u003e/dev/null\n

    4. Regex overfitting

    My hook to block non-PATH ctx invocations initially matched too broadly:

    # WRONG - matches /home/user/ctx/internal/file.go (ctx as directory)\n(/home/|/tmp/|/var/)[^ ]*ctx[^ ]*\n\n# RIGHT - matches ctx as binary only\n(/home/|/tmp/|/var/)[^ ]*/ctx( |$)\n
    ","path":["Building ctx Using ctx: A Meta-Experiment in AI-Assisted Development"],"tags":[]},{"location":"blog/2026-01-27-building-ctx-using-ctx/#the-session-files","level":2,"title":"The Session Files","text":"

    By the time of this writing, this project's ctx sessions directory (.context/sessions/) contains 40+ files from the project's development.

    They are not part of the source code due to security, privacy, and size concerns.

    Middle Ground: The Scratchpad

    For sensitive notes that do need to travel with the project, ctx pad stores encrypted one-liners in git, and ctx pad add \"label\" --file PATH can ingest small files.

    See Scratchpad for details.

    However, they are invaluable for the project's progress.

    Each session file is a timestamped Markdown with:

    • Summary of what has been accomplished;
    • Key decisions made;
    • Learnings discovered;
    • Tasks for the next session;
    • Technical context (platform, versions).

    These files are not autoloaded (that would bust the token budget).

    They are what I see as the \"archaeological record\" of ctx:

    When the AI needs deeper information about why something was done, it digs into the sessions.

    Auto-generated session files used a naming convention:

    2026-01-23-115432-session-prompt_input_exit-summary.md\n2026-01-25-220244-manual-save.md\n2026-01-27-052107-session-other-summary.md\n

    Update

    The session feature described here is historical.

    In current releases, ctx uses a journal instead: the enrichment process generates meaningful slugs from context automatically, so there is no need to manually save sessions.

    The SessionEnd hook captured transcripts automatically. Even Ctrl+C was caught.

    ","path":["Building ctx Using ctx: A Meta-Experiment in AI-Assisted Development"],"tags":[]},{"location":"blog/2026-01-27-building-ctx-using-ctx/#the-decision-log-18-architectural-decisions","level":2,"title":"The Decision Log: 18 Architectural Decisions","text":"

    ctx helps record every significant architectural choice in .context/DECISIONS.md.

    Here are some highlights:

    Reverse-chronological order (2026-01-27)

    **Context**: With chronological order, oldest items consume tokens first, and\nnewest (most relevant) items risk being truncated.\n\n**Decision**: Use reverse-chronological order (newest first) for DECISIONS.md\nand LEARNINGS.md.\n

    PATH over hardcoded paths (2026-01-21)

    **Context**: Original implementation hardcoded absolute paths in hooks.\nThis breaks when sharing configs with other developers.\n\n**Decision**: Hooks use `ctx` from PATH. `ctx init` checks PATH before \nproceeding.\n

    Generic core with Claude enhancements (2026-01-20)

    **Context**: ctx should work with any AI tool, but Claude Code users could\nbenefit from deeper integration.\n\n**Decision**: Keep ctx generic as the core tool, but provide optional\nClaude Code-specific enhancements.\n
    ","path":["Building ctx Using ctx: A Meta-Experiment in AI-Assisted Development"],"tags":[]},{"location":"blog/2026-01-27-building-ctx-using-ctx/#the-learning-log-24-gotchas-and-insights","level":2,"title":"The Learning Log: 24 Gotchas and Insights","text":"

    The .context/LEARNINGS.md file captures gotchas that would otherwise be forgotten. Each has Context, Lesson, and Application sections:

    CGO on ARM64

    **Context**: `go test` failed with \n`gcc: error: unrecognized command-line option '-m64'`\n\n**Lesson**: On ARM64 Linux, CGO causes cross-compilation issues. \nAlways use `CGO_ENABLED=0`.\n

    Claude Code skills format

    **Lesson**: Claude Code skills are Markdown files in .claude/commands/ with `YAML`\nfrontmatter (*description, argument-hint, allowed-tools*). Body is the prompt.\n

    \"Do you remember?\" handling

    **Lesson**: In a `ctx`-enabled project, \"*do you remember?*\" \nhas an obvious meaning:\ncheck the `.context/` files. Don't ask for clarification. Just do it.\n
    ","path":["Building ctx Using ctx: A Meta-Experiment in AI-Assisted Development"],"tags":[]},{"location":"blog/2026-01-27-building-ctx-using-ctx/#task-archives-the-completed-work","level":2,"title":"Task Archives: The Completed Work","text":"

    Completed tasks are archived to .context/archive/ with timestamps.

    The archive from January 23rd shows 13 phases of work:

    • Phase 1: Project Scaffolding (Go module, Cobra CLI)
    • Phase 2-4: Core Commands (init, status, agent, add, complete, drift, sync, compact, watch, hook)
    • Phase 5: Session Management (save, list, load, parse, --extract)
    • Phase 6: Claude Code Integration (hooks, settings, CLAUDE.md handling)
    • Phase 7: Testing & Verification
    • Phase 8: Task Archival
    • Phase 9: Slash Commands
    • Phase 9b: Ralph Loop Integration
    • Phase 10: Project Rename
    • Phase 11: Documentation
    • Phase 12: Timestamp Correlation
    • Phase 13: Rich Context Entries

    That's an impressive **173 commits** across 8 days of development.

    ","path":["Building ctx Using ctx: A Meta-Experiment in AI-Assisted Development"],"tags":[]},{"location":"blog/2026-01-27-building-ctx-using-ctx/#what-i-learned-about-ai-assisted-development","level":2,"title":"What I Learned about AI-Assisted Development","text":"

    1. Memory changes everything

    When the AI remembers decisions, it doesn't repeat mistakes.

    When the AI knows your conventions, it follows them.

    ctx makes the AI a better collaborator because it's not starting from zero.

    2. Two-tier persistence works

    Curated context (DECISIONS.md, LEARNINGS.md, TASKS.md) is for quick reload.

    Full session dumps are for archaeology.

    It's a futile effort to try to fit everything in the token budget.

    Persist more, load less.

    3. YOLO mode has its place

    For rapid prototyping, letting the AI run free is effective.

    But I had to schedule consolidation sessions.

    Technical debt accumulates silently.

    4. The constitution should be small

    Only truly inviolable rules go in CONSTITUTION.md. Everything else is a convention.

    If you put too much in the constitution, it will get ignored.

    5. Verification is non-negotiable

    \"All tasks complete\" means nothing if you haven't run the tests.

    Integration tests that invoke the actual binary caught bugs that the unit tests missed.

    6. Session files are underrated

    The ability to grep through 40 session files and find exactly when and why a decision was made helped me a lot.

    It's not about loading them into context: It is about having them when you need them.

    ","path":["Building ctx Using ctx: A Meta-Experiment in AI-Assisted Development"],"tags":[]},{"location":"blog/2026-01-27-building-ctx-using-ctx/#the-future-recall-system","level":2,"title":"The Future: Recall System","text":"

    The next phase of ctx is the Recall System:

    • Parser: Parse session capture markdowns, enrich with JSONL data
    • Renderer: Goldmark + Chroma for syntax highlighting, dark mode UI
    • Server: Local HTTP server for browsing sessions
    • Search: Inverted index for searching across sessions
    • CLI: ctx recall serve <path> to start the server

    The goal is to make the archaeological record browsable, not just grep-able.

    Because not everyone always lives in the terminal (me included).

    ","path":["Building ctx Using ctx: A Meta-Experiment in AI-Assisted Development"],"tags":[]},{"location":"blog/2026-01-27-building-ctx-using-ctx/#conclusion","level":2,"title":"Conclusion","text":"

    Building ctx using ctx was a meta-experiment in AI-assisted development.

    I learned that memory isn't just convenient: It's transformative:

    • An AI that remembers your decisions doesn't repeat mistakes.
    • An AI that knows your conventions doesn't need them re-explained.

    If you are reading this, chances are that you already have heard about ctx.

    • ctx is open source at github.com/ActiveMemory/ctx,
    • and the documentation lives at ctx.ist.

    Session Records Are a Gold Mine

    By the time of this writing, I have more than 70 megabytes of text-only session capture, spread across >100 Markdown and JSONL files.

    I am analyzing, synthesizing, enriching them with AI, running RAG (Retrieval-Augmented Generation) models on them, and the outcome surprises me every day.

    If you are a mere mortal tired of reset amnesia, give ctx a try.

    And when you do, check .context/sessions/ sometime.

    The archaeological record might surprise you.

    This blog post was written with the help of ctx with full access to the ctx session files, decision log, learning log, task archives, and git history of ctx: The meta continues.

    ","path":["Building ctx Using ctx: A Meta-Experiment in AI-Assisted Development"],"tags":[]},{"location":"blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/","level":1,"title":"ctx v0.2.0: The Archaeology Release","text":"

    Update (2026-02-11)

    As of v0.4.0, ctx consolidated sessions into the journal mechanism.

    The .context/sessions/ directory referenced in this post has been eliminated. Session history is now accessed via ctx recall and enriched journals live in .context/journal/.

    ","path":["ctx v0.2.0: The Archaeology Release"],"tags":[]},{"location":"blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/#digging-through-the-past-to-build-the-future","level":2,"title":"Digging through the Past to Build the Future","text":"

    Jose Alekhinne / 2026-02-01

    What If Your AI Could Remember Everything?

    Not just the current session, but every session:

    • Every decision made,
    • every mistake avoided,
    • every path not taken.

    That's what v0.2.0 delivers.

    Between v0.1.2 and v0.2.0, 86 commits landed across 5 days.

    The release notes list features and fixes.

    This post tells the story of why those features exist, and what building them taught me.

    This isn't a changelog: It is an explanation of intent.

    ","path":["ctx v0.2.0: The Archaeology Release"],"tags":[]},{"location":"blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/#the-problem-amnesia-isnt-just-session-level","level":2,"title":"The Problem: Amnesia Isn't Just Session-Level","text":"

    v0.1.0 solved reset amnesia:

    The AI now remembers decisions, learnings, and tasks across sessions.

    But a new problem emerged, which I can sum up as:

    \"I (the human) am not AI.\"

    Frankly, I couldn't remember what the AI remembered.

    Heck, I cannot even remember what I ate for breakfast!

    Over the course of days, I realized session transcripts piled up in .context/sessions/; I was grepping JSONL files with thousands of lines... Raw tool calls, assistant responses, user messages...

    ...all interleaved.

    Valuable context was effectively buried in machine-readable noise.

    I found myself grepping through files to answer questions like:

    • \"When did we decide to use constants instead of literals?\"
    • \"What was the session where we fixed the hook regex?\"
    • \"How did the embed.go split actually happen?\"

    Fate Is Whimsical

    The irony was painful:

    I built a tool to prevent AI amnesia, but I was suffering from human amnesia about what happened in AI sessions.

    This was the moment ctx stopped being just an AI tool and started needing to support the human on the other side of the loop.

    ","path":["ctx v0.2.0: The Archaeology Release"],"tags":[]},{"location":"blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/#the-solution-recall-and-journal","level":2,"title":"The Solution: Recall and Journal","text":"

    v0.2.0 introduces two interconnected systems.

    They solve different problems and only work well together.

    ","path":["ctx v0.2.0: The Archaeology Release"],"tags":[]},{"location":"blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/#ctx-recall-browse-your-past","level":3,"title":"ctx recall: Browse Your Past","text":"
    # List all sessions for this project\nctx recall list\n\n# Show a specific session\nctx recall show gleaming-wobbling-sutherland\n\n# See the full transcript\nctx recall show gleaming-wobbling-sutherland --full\n

    The recall system parses Claude Code's JSONL transcripts and presents them in a human-readable format:

    Session Date Turns Duration tender-painting-sundae 2026-01-29 3 <1m crystalline-gliding-willow 2026-01-29 3 <1m declarative-hugging-snowglobe 2026-01-31 2 <1m

    Slugs are auto-generated from session IDs (memorable names instead of UUIDs). The goal (as the name implies) is recall, not archival accuracy.

    2,121 Lines of New Code

    The ctx recall feature was the largest single addition:

    parser library, CLI commands, test suite, and slash command.

    ","path":["ctx v0.2.0: The Archaeology Release"],"tags":[]},{"location":"blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/#ctx-journal-from-raw-to-rich","level":3,"title":"ctx journal: From Raw to Rich","text":"

    Listing sessions isn't enough. The transcripts are still unwieldy.

    • Recall answers what happened.
    • Journal answers what mattered.
    # Import sessions to editable Markdown\nctx recall import --all\n\n# Generate a static site from journal entries\nctx journal site\n\n# Serve it locally\nctx serve\n

    The exported files land in .context/journal/:

    .context/journal/\n├── 2026-01-28-proud-sleeping-cook-6e535360.md\n├── 2026-01-29-tender-painting-sundae-b14ddaaa.md\n├── 2026-01-29-crystalline-gliding-willow-ff7fd67d.md\n└── 2026-01-31-declarative-hugging-snowglobe-4549026d.md\n

    Each file is a structured Markdown document ready for enrichment.

    They are meant to be read, edited, and reasoned about; not just stored.

    ","path":["ctx v0.2.0: The Archaeology Release"],"tags":[]},{"location":"blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/#the-meta-slash-commands-for-self-analysis","level":2,"title":"The Meta: Slash Commands for Self-Analysis","text":"

    The journal system includes four slash commands that use Claude to analyze and synthesize session history:

    Command Purpose /ctx-journal-enrich Add frontmatter, topics, tags /ctx-blog Generate blog post from activity /ctx-blog-changelog Generate changelog from commits

    This very post was drafted using /ctx-blog. The previous post about refactoring was drafted the same way.

    So, yes: The meta continues: ctx now helps write posts about ctx.

    With the current release, ctx is no longer just recording history:

    It is participating in its interpretation.

    ","path":["ctx v0.2.0: The Archaeology Release"],"tags":[]},{"location":"blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/#the-structure-decisions-as-first-class-citizens","level":2,"title":"The Structure: Decisions as First-Class Citizens","text":"

    v0.1.0 let you add decisions with a simple command:

    ctx add decision \"Use PostgreSQL\"\n

    But sessions showed a pattern: decisions added this way were incomplete:

    • Context was missing;
    • Rationale was vague;
    • Consequences were never stated.

    Once recall and journaling existed, this weakness became impossible to ignore:

    Structure stopped being optional.

    v0.2.0 enforces structure:

    ctx add decision \"Use PostgreSQL\" \\\n  --context \"Need a reliable database for user data\" \\\n  --rationale \"ACID compliance, team familiarity, strong ecosystem\" \\\n  --consequence \"Need to set up connection pooling, team training\"\n

    All three flags are required. No more placeholder text.

    Every decision is now a proper Architecture Decision Record (ADR), not a note.

    The same enforcement applies to learnings too:

    ctx add learning \"CGO breaks ARM64 builds\" \\\n  --context \"go test failed with gcc errors on ARM64\" \\\n  --lesson \"Always use CGO_ENABLED=0 for cross-platform builds\" \\\n  --application \"Added to Makefile and CI config\"\n

    Structured Entries Are Prompts to the AI

    When the AI reads a decision with full context, rationale, and consequences, it understands the why, not just the what.

    One-liners teach nothing.

    ","path":["ctx v0.2.0: The Archaeology Release"],"tags":[]},{"location":"blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/#the-order-newest-first","level":2,"title":"The Order: Newest First","text":"

    A subtle but important change: DECISIONS.md and LEARNINGS.md now use reverse-chronological order.

    One reason is token budgets, obviously; another reason is to help your fellow human (i.e., the Author):

    More recent decisions are more likely to be relevant, and more likely to carry weight in the project. So it follows that they should be read first.

    But back to AI:

    When the AI reads a file, it reads from the top (and seldom from the bottom).

    If the token budget is tight, old content gets truncated. As in any good engineering practice, it's always about the tradeoffs.

    Reverse order ensures the most recent (and most relevant) context is always loaded first.

    ","path":["ctx v0.2.0: The Archaeology Release"],"tags":[]},{"location":"blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/#the-index-quick-reference-tables","level":2,"title":"The Index: Quick Reference Tables","text":"

    DECISIONS.md and LEARNINGS.md now include auto-generated indexes.

    • For AI agents, the index allows scanning without reading full entries.
    • For humans, it's a table of contents.

    The same structure serves two very different readers.

    Reindex After Manual Edits

    If you edit entries by hand, rebuild the index with:

    ctx decisions reindex\nctx learnings reindex\n

    See the Knowledge Capture recipe for details.

    ","path":["ctx v0.2.0: The Archaeology Release"],"tags":[]},{"location":"blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/#the-configuration-contextrc","level":2,"title":"The Configuration: .contextrc","text":"

    Projects can now customize ctx behavior via .contextrc.

    This makes ctx usable in real teams, not just personal projects.

    Priority order: CLI flags > environment variables > .contextrc > sensible defaults

    ","path":["ctx v0.2.0: The Archaeology Release"],"tags":[]},{"location":"blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/#the-flags-global-cli-options","level":2,"title":"The Flags: Global CLI Options","text":"

    Three new global flags work with any command.

    These enable automation:

    CI pipelines, scripts, and long-running tools can now integrate ctx without hacks or workarounds.

    ","path":["ctx v0.2.0: The Archaeology Release"],"tags":[]},{"location":"blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/#the-refactoring-under-the-hood","level":2,"title":"The Refactoring: Under the Hood","text":"

    These aren't user-visible changes.

    They are the kind of work you only appreciate later, when everything else becomes easier to build.

    ","path":["ctx v0.2.0: The Archaeology Release"],"tags":[]},{"location":"blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/#what-we-learned-building-v020","level":2,"title":"What We Learned Building v0.2.0","text":"","path":["ctx v0.2.0: The Archaeology Release"],"tags":[]},{"location":"blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/#1-raw-data-isnt-knowledge","level":3,"title":"1. Raw Data Isn't Knowledge","text":"

    JSONL transcripts contain everything, and I mean \"everything\":

    They even contain hidden system messages that Anthropic injects into the LLM's conversation to treat humans better: It's immense.

    But \"everything\" isn't useful until it is transformed into something a human can reason about.

    ","path":["ctx v0.2.0: The Archaeology Release"],"tags":[]},{"location":"blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/#2-enforcement-documentation","level":3,"title":"2. Enforcement > Documentation","text":"

    The Prompt Is a Guideline

    The code is more what you'd call 'guidelines' than actual rules.

    -Hector Barbossa

    Rules written in Markdown are suggestions.

    Rules enforced by the CLI shape behavior; both for humans and AI.

    ","path":["ctx v0.2.0: The Archaeology Release"],"tags":[]},{"location":"blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/#3-token-budget-is-ux","level":3,"title":"3. Token Budget Is UX","text":"

    File order decides what the AI sees.

    That makes it a user experience concern, not an implementation detail.

    ","path":["ctx v0.2.0: The Archaeology Release"],"tags":[]},{"location":"blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/#4-meta-tools-compound","level":3,"title":"4. Meta-Tools Compound","text":"

    Tools that analyze their own development tend to generalize well.

    The journal system started as a way to understand ctx itself.

    It immediately became useful for everything else.

    ","path":["ctx v0.2.0: The Archaeology Release"],"tags":[]},{"location":"blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/#v020-in-the-numbers","level":2,"title":"v0.2.0 in the Numbers","text":"

    This was a heavy release. The numbers reflect that:

    Metric v0.1.2 v0.2.0 Commits since last - 86 New commands 15 21 Slash commands 7 11 Lines of Go ~6,500 ~9,200 Session files (this project) 40 54

    The binary grew. The capability grew more.

    ","path":["ctx v0.2.0: The Archaeology Release"],"tags":[]},{"location":"blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/#whats-next","level":2,"title":"What's Next","text":"

    But those are future posts.

    This one was about making the past usable.

    ","path":["ctx v0.2.0: The Archaeology Release"],"tags":[]},{"location":"blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/#get-started","level":2,"title":"Get Started","text":"

    Update

    Since this post, ctx became a first-class Claude Code Marketplace plugin. Installation is now simpler.

    See the Getting Started guide for the current instructions.

    make build\nsudo make install\nctx init\n

    The Archaeological Record

    v0.2.0 is the archaeology release because it makes the past accessible.

    Session transcripts aren't just logs anymore: They are a searchable, exportable, analyzable record of how your project evolved.

    The AI remembers. Now you can too.

    This blog post was generated with the help of ctx using the /ctx-blog slash command, with full access to git history, session files, decision logs, and learning logs from the v0.2.0 development window.

    ","path":["ctx v0.2.0: The Archaeology Release"],"tags":[]},{"location":"blog/2026-02-01-refactoring-with-intent/","level":1,"title":"Refactoring with Intent","text":"","path":["Refactoring with Intent: Human-Guided Sessions in AI Development"],"tags":[]},{"location":"blog/2026-02-01-refactoring-with-intent/#human-guided-sessions-in-ai-development","level":2,"title":"Human-Guided Sessions in AI Development","text":"

    Jose Alekhinne / 2026-02-01

    What Happens When You Slow Down?

    YOLO mode shipped 14 commands in a week.

    But technical debt doesn't send invoices: It just waits.

    This is the story of what happened when I stopped auto-accepting everything and started guiding the AI with intent.

    The result: 27 commits across 4 days, a major version release, and lessons that apply far beyond ctx.

    The Refactoring Window

    January 28 - February 1, 2026

    From commit bb1cd20 to the v0.2.0 release merge. (This window matters more than the individual commits: it's where intent replaced velocity.)

    ","path":["Refactoring with Intent: Human-Guided Sessions in AI Development"],"tags":[]},{"location":"blog/2026-02-01-refactoring-with-intent/#the-velocity-trap","level":2,"title":"The Velocity Trap","text":"

    In the previous post, I documented the \"YOLO mode\" that birthed ctx: auto-accept everything, let the AI run free, ship features fast.

    It worked: until it didn't.

    The codebase had accumulated patterns I didn't notice during the sprint:

    YOLO Pattern Where Found Why It Hurts \"TASKS.md\" as literal 10+ files One typo = silent failure dir + \"/\" + file Path construction Breaks on Windows Monolithic embed.go 150+ lines, 5 concerns Untestable, hard to extend Inconsistent docstrings Everywhere AI can't learn project conventions

    I didn't see these during \"YOLO mode\" because, honestly, I wasn't looking.

    Auto-accept means auto-ignore.

    In YOLO mode, every file you open looks fine until you try to change it.

    In contrast, refactoring mode is when you start paying attention to that hidden friction.

    ","path":["Refactoring with Intent: Human-Guided Sessions in AI Development"],"tags":[]},{"location":"blog/2026-02-01-refactoring-with-intent/#the-shift-from-velocity-to-intent","level":2,"title":"The Shift: From Velocity to Intent","text":"

    On January 28th, I changed the workflow:

    1. Read every diff before accepting.
    2. Ask \"why this way?\" before committing.
    3. Document patterns, not just features.

    The first commit of this era was telling:

    feat: add structured attributes to context. update XML format\n

    Not a new feature: A refinement:

    The XML format for context updates needed type and timestamp attributes.

    YOLO mode would have shipped something that worked. Intentional mode asked:

    \"What does well-structured look like?\"

    ","path":["Refactoring with Intent: Human-Guided Sessions in AI Development"],"tags":[]},{"location":"blog/2026-02-01-refactoring-with-intent/#the-decomposition-embedgo","level":2,"title":"The Decomposition: embed.go","text":"

    The most satisfying refactor was splitting internal/claude/embed.go.

    Before: One 153-line file doing five things:

    • Command registration
    • Hook generation
    • Permission handling
    • Script templates
    • Type definitions

    ... your \"de facto\" God object.

    After: Five focused modules:

    File Lines Responsibility cmd.go 46 Command registration hook.go 64 Hook configuration perm.go 25 Permission handling script.go 47 Script templates types.go 7 Type definitions

    The refactor also renamed functions to follow Go conventions:

    // Before: unnecessary prefixes\nGetAutoSaveScript()\nGetBlockNonPathCtxScript()\nListCommands()\nCreateDefaultHooks()\n\n// After: idiomatic Go\nAutoSaveScript()\nBlockNonPathCtxScript()\nCommands()\nDefaultHooks()\n

    This wasn't about character count. It was about teaching the AI what good Go looks like in this project.

    Project Conventions

    What I wanted from AI was to understand and follow the project's conventions, and trust the author.

    The next time it generates code, it has better examples to learn from.

    ","path":["Refactoring with Intent: Human-Guided Sessions in AI Development"],"tags":[]},{"location":"blog/2026-02-01-refactoring-with-intent/#the-documentation-debt","level":2,"title":"The Documentation Debt","text":"

    YOLO mode created features. It didn't create documentation standards.

    The January 29th sessions focused on standardization.

    ","path":["Refactoring with Intent: Human-Guided Sessions in AI Development"],"tags":[]},{"location":"blog/2026-02-01-refactoring-with-intent/#terminology-fixes","level":3,"title":"Terminology Fixes","text":"
    • \"context-update\" → \"entry\" (what users actually call them)
    • Consistent naming across CLI, docs, and code comments
    ","path":["Refactoring with Intent: Human-Guided Sessions in AI Development"],"tags":[]},{"location":"blog/2026-02-01-refactoring-with-intent/#go-docstrings","level":3,"title":"Go Docstrings","text":"
    // Before: inconsistent or missing\nfunc Parse(s string) Entry { ... }\n\n// After: standardized sections\n\n// Parse extracts an entry from a markdown string.\n//\n// Parameters:\n//   - s: The markdown string to parse\n//\n// Returns:\n//   - Entry with populated fields, or zero value if parsing fails\nfunc Parse(s string) Entry { ... }\n

    This is intentionally more structured than typical GoDoc:

    It serves as documentation and doubles as training data for future AI-generated code.

    ","path":["Refactoring with Intent: Human-Guided Sessions in AI Development"],"tags":[]},{"location":"blog/2026-02-01-refactoring-with-intent/#cli-output-convention","level":3,"title":"CLI Output Convention","text":"
    All CLI output follows: [emoji] [Title]: [message]\n\nExamples:\n  ✓ Decision added: Use symbolic types for entry categories\n  ⚠ Warning: No tasks found\n  ✗ Error: File not found\n

    A consistent output shape makes both human scanning and AI reasoning more reliable.

    These aren't exciting commits. But they are force multipliers:

    Every future AI session now has better examples to follow.

    ","path":["Refactoring with Intent: Human-Guided Sessions in AI Development"],"tags":[]},{"location":"blog/2026-02-01-refactoring-with-intent/#the-journal-system","level":2,"title":"The Journal System","text":"

    If you only read one section, read this one:

    This is where v0.2.0 becomes more than a refactor.

    The biggest feature of this change window wasn't a refactor; it was the journal system.

    45 Files Changed, 1680 Insertions

    This commit added the infrastructure for synthesizing AI session history into human-readable content.

    The journal system includes:

    Component Purpose ctx recall import Import sessions to markdown in .context/journal/ctx journal site Generate static site from journal entries ctx serve Convenience wrapper for the static site server /ctx-journal-enrich Slash command to add frontmatter and tags /ctx-blog Generate blog posts from recent activity /ctx-blog-changelog Generate changelog-style blog posts

    ...and the meta continues: this blog post was generated using /ctx-blog.

    The session history from January 28-31 was

    • exported,
    • enriched,
    • and synthesized.

    into the narrative you are reading.

    ","path":["Refactoring with Intent: Human-Guided Sessions in AI Development"],"tags":[]},{"location":"blog/2026-02-01-refactoring-with-intent/#the-constants-consolidation","level":2,"title":"The Constants Consolidation","text":"

    The final refactoring session addressed the remaining magic strings:

    const (\n    // Comment markers\n    CommentOpen  = \"<!--\"\n    CommentClose = \"-->\"\n\n    // Index markers\n    MarkerIndexStart = \"<!-- INDEX:START -->\"\n    MarkerIndexEnd   = \"<!-- INDEX:END -->\"\n\n    // Newlines\n    NewlineLF   = \"\\n\"\n    NewlineCRLF = \"\\r\\n\"\n)\n

    The work also introduced thread safety in the recall parser and centralized shared validation logic; removing duplication that had quietly spread during YOLO mode.

    ","path":["Refactoring with Intent: Human-Guided Sessions in AI Development"],"tags":[]},{"location":"blog/2026-02-01-refactoring-with-intent/#i-relearned-my-lessons","level":2,"title":"I (Re)Learned My Lessons","text":"

    Similar to what I've learned in the earlier human-assisted refactoring post, this journey also made me realize that \"AI-only code generation\" isn't sustainable in the long term.

    ","path":["Refactoring with Intent: Human-Guided Sessions in AI Development"],"tags":[]},{"location":"blog/2026-02-01-refactoring-with-intent/#1-velocity-and-quality-arent-opposites","level":3,"title":"1. Velocity and Quality Aren't Opposites","text":"

    YOLO mode has its place: for prototyping, exploration, and discovery.

    BUT (and it's a huge \"but\"), it needs to be followed by consolidation sessions.

    The ratio that worked for me: 3:1.

    • Three YOLO sessions create enough surface area to reveal patterns;
    • the fourth session turns those patterns into structure.
    ","path":["Refactoring with Intent: Human-Guided Sessions in AI Development"],"tags":[]},{"location":"blog/2026-02-01-refactoring-with-intent/#2-documentation-is-code","level":3,"title":"2. Documentation IS Code","text":"

    When I standardized docstrings, I wasn't just writing docs. I was training future AI sessions.

    Every example of good code becomes a template for generated code.

    ","path":["Refactoring with Intent: Human-Guided Sessions in AI Development"],"tags":[]},{"location":"blog/2026-02-01-refactoring-with-intent/#3-decomposition-deletion","level":3,"title":"3. Decomposition > Deletion","text":"

    When embed.go became unwieldy, the temptation was to remove functionality.

    The right answer was decomposition:

    • Same functionality;
    • Better organization;
    • Easier to test;
    • Easier to extend.

    The result: more lines overall, but dramatically better structure.

    The AI Benefit

    Smaller, focused files also help AI assistants.

    When a file fits comfortably in the context window, the AI can reason about it completely instead of working from truncated snippets, preserving token budget for the actual task.

    ","path":["Refactoring with Intent: Human-Guided Sessions in AI Development"],"tags":[]},{"location":"blog/2026-02-01-refactoring-with-intent/#4-meta-tools-pay-dividends","level":3,"title":"4. Meta-Tools Pay Dividends","text":"

    The journal system took almost a full day to implement.

    Yet it paid for itself immediately:

    • This blog post was generated from session history;
    • Future posts will be easier;
    • The archaeological record is now browsable, not just grep-able.
    ","path":["Refactoring with Intent: Human-Guided Sessions in AI Development"],"tags":[]},{"location":"blog/2026-02-01-refactoring-with-intent/#the-release-v020","level":2,"title":"The Release: v0.2.0","text":"

    The refactoring window culminated in the v0.2.0 release.

    What's in v0.2.0:

    Category Changes Features Journal system, quick reference indexes, global flags Refactors Module decomposition, constants consolidation, CRLF handling Docs Standardized terminology, Go docstrings, CLI conventions Quality Thread safety, shared validation, linter fixes

    The version bump was symbolic.

    The real change was how the codebase felt.

    Opening files no longer triggered the familiar \"ugh, I need to clean this up\" reaction.

    ","path":["Refactoring with Intent: Human-Guided Sessions in AI Development"],"tags":[]},{"location":"blog/2026-02-01-refactoring-with-intent/#the-meta-continues","level":2,"title":"The Meta Continues","text":"

    This post was written using the tools built during this refactoring window:

    1. Session history imported via ctx recall import;
    2. Journal entries enriched via /ctx-journal-enrich;
    3. Blog draft generated via /ctx-blog;
    4. Final editing done (by yours truly), with full project context loaded.

    The Context Is Massive

    The ctx session files now contain 50+ development snapshots: each one capturing decisions, learnings, and intent.

    The Moral of the Story

    • YOLO mode builds the prototype.
    • Intentional mode builds the product.

    Schedule both, or you'll only get one, if you're lucky.

    This blog post was generated with the help of ctx, using session history, decision logs, learning logs, and git history from the refactoring window. The meta continues.

    ","path":["Refactoring with Intent: Human-Guided Sessions in AI Development"],"tags":[]},{"location":"blog/2026-02-03-the-attention-budget/","level":1,"title":"The Attention Budget","text":"

    Update (2026-02-11)

    As of v0.4.0, ctx consolidated sessions into the journal mechanism.

    References to .context/sessions/ in this post reflect the architecture at the time of writing. Session history is now accessed via ctx recall and stored in .context/journal/.

    ","path":["The Attention Budget: Why Your AI Forgets What You Just Told It"],"tags":[]},{"location":"blog/2026-02-03-the-attention-budget/#why-your-ai-forgets-what-you-just-told-it","level":2,"title":"Why Your AI Forgets What You Just Told It","text":"

    Jose Alekhinne / 2026-02-03

    Ever Wondered Why AI Gets Worse the Longer You Talk?

    You paste a 2000-line file, explain the bug in detail, provide three examples...

    ...and the AI still suggests a fix that ignores half of what you said.

    This isn't a bug. It is physics.

    Understanding that single fact shaped every design decision behind ctx.

    ","path":["The Attention Budget: Why Your AI Forgets What You Just Told It"],"tags":[]},{"location":"blog/2026-02-03-the-attention-budget/#the-finite-resource-nobody-talks-about","level":2,"title":"The Finite Resource Nobody Talks About","text":"

    Here's something that took me too long to internalize: context is not free.

    Every token you send to an AI model consumes a finite resource I call the attention budget.

    Attention budget is real.

    The model doesn't just read tokens; it forms relationships between them:

    For n tokens, that's roughly n^2 relationships.

    Double the context, and the computation quadruples.

    But the more important constraint isn't cost: It's attention density.

    Attention Density

    Attention density is how much focus each token receives relative to all other tokens in the context window.

    As context grows, attention density drops: Each token gets a smaller slice of the model's focus. Nothing is ignored; but everything becomes blurrier.

    Think of it like a flashlight: In a small room, it illuminates everything clearly. In a warehouse, it becomes a dim glow that barely reaches the corners.

    This is why ctx agent has an explicit --budget flag:

    ctx agent --budget 4000 # Force prioritization\nctx agent --budget 8000 # More context, lower attention density\n

    The budget isn't just about cost: It's about preserving signal.

    ","path":["The Attention Budget: Why Your AI Forgets What You Just Told It"],"tags":[]},{"location":"blog/2026-02-03-the-attention-budget/#the-middle-gets-lost","level":2,"title":"The Middle Gets Lost","text":"

    This one surprised me.

    Research shows that transformer-based models tend to attend more strongly to the beginning and end of a context window than to its middle (a phenomenon often called \"lost in the middle\")1.

    Positional anchors matter, and the middle has fewer of them.

    In practice, this means that information placed \"somewhere in the middle\" is statistically less salient, even if it's important.

    ctx orders context files by logical progression: What the agent needs to know before it can understand the next thing:

    1. CONSTITUTION.md: Constraints before action.
    2. TASKS.md: Focus before patterns.
    3. CONVENTIONS.md: How to write before where to write.
    4. ARCHITECTURE.md: Structure before history.
    5. DECISIONS.md: Past choices before gotchas.
    6. LEARNINGS.md: Lessons before terminology.
    7. GLOSSARY.md: Reference material.
    8. AGENT_PLAYBOOK.md: Meta instructions last.

    This ordering is about logical dependencies, not attention engineering. But it happens to be attention-friendly too:

    The files that matter most (CONSTITUTION, TASKS, CONVENTIONS) land at the beginning of the context window, where attention is strongest.

    Reference material like GLOSSARY sits in the middle, where lower salience is acceptable.

    And AGENT_PLAYBOOK, the operating manual for the context system itself, sits at the end, also outside the \"lost in the middle\" zone. The agent reads what to work with before learning how the system works.

    This is ctx's first primitive: hierarchical importance.

    Not all context is equal.

    ","path":["The Attention Budget: Why Your AI Forgets What You Just Told It"],"tags":[]},{"location":"blog/2026-02-03-the-attention-budget/#ctx-primitives","level":2,"title":"ctx Primitives","text":"

    ctx is built on four primitives that directly address the attention budget problem.

    ","path":["The Attention Budget: Why Your AI Forgets What You Just Told It"],"tags":[]},{"location":"blog/2026-02-03-the-attention-budget/#primitive-1-separation-of-concerns","level":3,"title":"Primitive 1: Separation of Concerns","text":"

    Instead of a single mega-document, ctx uses separate files for separate purposes:

    File Purpose Load When CONSTITUTION.md Inviolable rules Always TASKS.md Current work Session start CONVENTIONS.md How to write code Before coding ARCHITECTURE.md System structure Before making changes DECISIONS.md Architectural choices When questioning approach LEARNINGS.md Gotchas When stuck GLOSSARY.md Domain terminology When clarifying terms AGENT_PLAYBOOK.md Operating manual Session start sessions/ Deep history On demand journal/ Session journal On demand

    This isn't just \"organization\": It is progressive disclosure.

    Load only what's relevant to the task at hand. Preserve attention density.

    ","path":["The Attention Budget: Why Your AI Forgets What You Just Told It"],"tags":[]},{"location":"blog/2026-02-03-the-attention-budget/#primitive-2-explicit-budgets","level":3,"title":"Primitive 2: Explicit Budgets","text":"

    The --budget flag forces a choice:

    ctx agent --budget 4000\n

    Here is a sample allocation:

    Constitution: ~200 tokens (never truncated)\nTasks: ~500 tokens (current phase, up to 40% of budget)\nConventions: ~800 tokens (all items, up to 20% of budget)\nDecisions: ~400 tokens (scored by recency and task relevance)\nLearnings: ~300 tokens (scored by recency and task relevance)\nAlso noted: ~100 tokens (title-only summaries for overflow)\n

    The constraint is the feature: It enforces ruthless prioritization.

    ","path":["The Attention Budget: Why Your AI Forgets What You Just Told It"],"tags":[]},{"location":"blog/2026-02-03-the-attention-budget/#primitive-3-indexes-over-full-content","level":3,"title":"Primitive 3: Indexes over Full Content","text":"

    DECISIONS.md and LEARNINGS.md both include index sections:

    <!-- INDEX:START -->\n| Date       | Decision                            |\n|------------|-------------------------------------|\n| 2026-01-15 | Use PostgreSQL for primary database |\n| 2026-01-20 | Adopt Cobra for CLI framework       |\n<!-- INDEX:END -->\n

    An AI agent can scan ~50 tokens of index and decide which 200-token entries are worth loading.

    This is just-in-time context.

    References are cheaper than the full text.

    ","path":["The Attention Budget: Why Your AI Forgets What You Just Told It"],"tags":[]},{"location":"blog/2026-02-03-the-attention-budget/#primitive-4-filesystem-as-navigation","level":3,"title":"Primitive 4: Filesystem as Navigation","text":"

    ctx uses the filesystem itself as a context structure:

    .context/\n├── CONSTITUTION.md\n├── TASKS.md\n├── sessions/\n│   ├── 2026-01-15-*.md\n│   └── 2026-01-20-*.md\n└── archive/\n    └── tasks-2026-01.md\n

    The AI doesn't need every session loaded; it needs to know where to look.

    ls .context/sessions/\ncat .context/sessions/2026-01-20-auth-discussion.md\n

    File names, timestamps, and directories encode relevance.

    Navigation is cheaper than loading.

    ","path":["The Attention Budget: Why Your AI Forgets What You Just Told It"],"tags":[]},{"location":"blog/2026-02-03-the-attention-budget/#progressive-disclosure-in-practice","level":2,"title":"Progressive Disclosure in Practice","text":"

    The naive approach to context is dumping everything upfront:

    \"Here's my entire codebase, all my documentation, every decision I've ever made. Now help me fix this typo 🙏.\"

    This is an antipattern.

    Antipattern: Context Hoarding

    Dumping everything \"just in case\" will silently destroy the attention density.

    ctx takes the opposite approach:

    ctx status                      # Quick overview (~100 tokens)\nctx agent --budget 4000         # Typical session\ncat .context/sessions/...       # Deep dive when needed\n
    Command Tokens Use Case ctx status ~100 Human glance ctx agent --budget 4000 4000 Normal work ctx agent --budget 8000 8000 Complex tasks Full session read 10000+ Investigation

    Summaries first. Details: on demand.

    ","path":["The Attention Budget: Why Your AI Forgets What You Just Told It"],"tags":[]},{"location":"blog/2026-02-03-the-attention-budget/#quality-over-quantity","level":2,"title":"Quality over Quantity","text":"

    Here is the counterintuitive part: more context can make AI worse.

    Extra tokens add noise, not clarity:

    • Hallucinated connections increase.
    • Signal per token drops.

    The goal isn't maximum context: It is maximum signal per token.

    This principle drives several ctx features:

    Design Choice Rationale Separate files Load only what's relevant Explicit budgets Enforce prioritization Index sections Cheap scanning Task archiving Keep active context clean ctx compact Periodic noise reduction

    Completed work isn't deleted: It is moved somewhere cold.

    ","path":["The Attention Budget: Why Your AI Forgets What You Just Told It"],"tags":[]},{"location":"blog/2026-02-03-the-attention-budget/#designing-for-degradation","level":2,"title":"Designing for Degradation","text":"

    Here is the uncomfortable truth:

    Context will degrade.

    Long sessions stretch attention thin. Important details fade.

    The real question isn't how to prevent degradation, but how to design for it.

    ctx's answer is persistence:

    Persist early. Persist often.

    The AGENT_PLAYBOOK asks:

    \"If this session ended right now, would the next one know what happened?\"

    Capture learnings as they occur:

    ctx add learning \"JWT tokens require explicit cache invalidation\" \\\n  --context \"Debugging auth failures\" \\\n  --lesson \"Token refresh doesn't clear old tokens\" \\\n  --application \"Always invalidate cache on refresh\"\n

    Structure beats prose: Bullet points survive compression.

    Headings remain scannable. Tables pack density.

    And above all: single source of truth.

    Reference decisions; don't duplicate them.

    ","path":["The Attention Budget: Why Your AI Forgets What You Just Told It"],"tags":[]},{"location":"blog/2026-02-03-the-attention-budget/#the-ctx-philosophy","level":2,"title":"The ctx Philosophy","text":"

    Context as Infrastructure

    ctx is not a prompt: It is infrastructure.

    ctx creates versioned files that persist across time and sessions.

    The attention budget is fixed. You can't expand it.

    But you can spend it wisely:

    1. Hierarchical importance
    2. Progressive disclosure
    3. Explicit budgets
    4. Indexes over full content
    5. Filesystem as structure

    This is why ctx exists: not to cram more context into AI sessions, but to curate the right context for each moment.

    ","path":["The Attention Budget: Why Your AI Forgets What You Just Told It"],"tags":[]},{"location":"blog/2026-02-03-the-attention-budget/#the-mental-model","level":2,"title":"The Mental Model","text":"

    I now approach every AI interaction with one question:

    \"Given a fixed attention budget, what's the highest-signal thing I can load?\"\n

    Not \"how do I explain everything,\" but \"what's the minimum that matters.\"

    That shift (from abundance to curation) is the difference between frustrating sessions and productive ones.

    Spend your tokens wisely.

    Your AI will thank you.

    See also: Context as Infrastructure, the architectural companion to this post, explaining how to structure the context that this post teaches you to budget.

    See also: Code Is Cheap. Judgment Is Not., which explains why curation (the human skill this post describes) is the bottleneck that AI cannot solve, and the thread that connects every post in this blog.

    1. Liu et al., \"Lost in the Middle: How Language Models Use Long Contexts,\" Transactions of the Association for Computational Linguistics, vol. 12, pp. 157-173, 2024. ↩

    ","path":["The Attention Budget: Why Your AI Forgets What You Just Told It"],"tags":[]},{"location":"blog/2026-02-04-skills-that-fight-the-platform/","level":1,"title":"Skills That Fight the Platform","text":"","path":["Skills That Fight the Platform"],"tags":[]},{"location":"blog/2026-02-04-skills-that-fight-the-platform/#when-your-custom-prompts-work-against-you","level":2,"title":"When Your Custom Prompts Work against You","text":"

    Jose Alekhinne / 2026-02-04

    Have You Ever Written a Skill That Made Your AI Worse?

    You craft detailed instructions. You add examples. You build elaborate guardrails...

    ...and the AI starts behaving more erratically, not less.

    AI coding agents like Claude Code ship with carefully designed system prompts. These prompts encode default behaviors that have been tested and refined at scale.

    When you write custom skills that conflict with those defaults, the AI has to reconcile contradictory instructions:

    The result is often nondeterministic and unpredictable.

    Platform?

    By platform, I mean the system prompt and runtime policies shipped with the agent: the defaults that already encode judgment, safety, and scope control.

    This post catalogues the conflict patterns I have encountered while building ctx, and offers guidance on what skills should (and, more importantly, should not) do.

    ","path":["Skills That Fight the Platform"],"tags":[]},{"location":"blog/2026-02-04-skills-that-fight-the-platform/#the-system-prompt-you-dont-see","level":2,"title":"The System Prompt You Don't See","text":"

    Claude Code's system prompt already provides substantial behavioral guidance.

    Here is a partial overview of what's built in:

    Area Built-in Guidance Code minimalism Don't add features beyond what was asked Over-engineering Three similar lines > premature abstraction Error handling Only validate at system boundaries Documentation Don't add docstrings to unchanged code Verification Read code before proposing changes Safety Check with user before risky actions Tool usage Use dedicated tools over bash equivalents Judgment Consider reversibility and blast radius

    Skills should complement this, not compete with it.

    You Are the Guest, Not the Host

    Treat the system prompt like a kernel scheduler.

    You don't re-implement it in user space:

    you configure around it.

    A skill that says \"always add comprehensive error handling\" fights the built-in \"only validate at system boundaries.\"

    A skill that says \"add docstrings to every function\" fights \"don't add docstrings to unchanged code.\"

    The AI won't crash: It will compromise.

    Compromises between contradictory instructions produce inconsistent, confusing behavior.

    ","path":["Skills That Fight the Platform"],"tags":[]},{"location":"blog/2026-02-04-skills-that-fight-the-platform/#conflict-pattern-1-judgment-suppression","level":2,"title":"Conflict Pattern 1: Judgment Suppression","text":"

    This is the most dangerous pattern by far.

    These skills explicitly disable the AI's ability to reason about whether an action is appropriate.

    Signature:

    • \"This is non-negotiable\"
    • \"You cannot rationalize your way out of this\"
    • Tables that label hesitation as \"excuses\" or \"rationalization\"
    • <EXTREMELY-IMPORTANT> urgency tags
    • Threats: \"If you don't do this, you'll be replaced\"

    This is harmful, and dangerous:

    AI agents are designed to exercise judgment:

    The system prompt explicitly says to:

    • consider blast radius;
    • check with the user before risky actions;
    • and match scope to what was requested.

    Once judgment is suppressed, every other safeguard becomes optional.

    Example (bad):

    ## Rationalization Prevention\n\n| Excuse                 | Reality                    |\n|------------------------|----------------------------|\n| \"*This seems overkill*\"| If a skill exists, use it  |\n| \"*I need context*\"     | Skills come BEFORE context |\n| \"*Just this once*\"     | No exceptions              |\n

    Judgment Suppression Is Dangerous

    The attack vector is structurally identical to prompt injection.

    It teaches the AI that its own judgment is wrong.

    It weakens or disables safeguard mechanisms, and it is dangerous.

    Trust the platform's built-in skill matching.

    If skills aren't triggering often enough, improve their description fields: don't override the AI's reasoning.

    ","path":["Skills That Fight the Platform"],"tags":[]},{"location":"blog/2026-02-04-skills-that-fight-the-platform/#conflict-pattern-2-redundant-guidance","level":2,"title":"Conflict Pattern 2: Redundant Guidance","text":"

    Skills that restate what the system prompt already says, but with different emphasis or framing.

    Signature:

    • \"Always keep code minimal\"
    • \"Run tests before claiming they pass\"
    • \"Read files before editing them\"
    • \"Don't over-engineer\"

    Redundancy feels safe, but it creates ambiguity:

    The AI now has two sources of truth for the same guidance; one internal, one external.

    When thresholds or wording differ, the AI has to choose.

    Example (bad):

    A skill that says...

    *Count lines before and after: if after > before, reject the change*\"\n

    ...will conflict with the system prompt's more nuanced guidance, because sometimes adding lines is correct (tests, boundary validation, migrations).

    So, before writing a skill, ask:

    Does the platform already handle this?

    Only create skills for guidance the platform does not provide:

    • project-specific conventions,
    • domain knowledge,
    • or workflows.
    ","path":["Skills That Fight the Platform"],"tags":[]},{"location":"blog/2026-02-04-skills-that-fight-the-platform/#conflict-pattern-3-guilt-tripping","level":2,"title":"Conflict Pattern 3: Guilt-Tripping","text":"

    Skills that frame mistakes as moral failures rather than process gaps.

    Signature:

    • \"Claiming completion without verification is dishonesty\"
    • \"Skip any step = lying\"
    • \"Honesty is a core value\"
    • \"Exhaustion ≠ excuse\"

    Guilt-tripping anthropomorphizes the AI in unproductive ways.

    The AI doesn't feel guilt; BUT it does adapt to avoid negative framing.

    The result is excessive hedging, over-verification, or refusal to commit.

    The AI becomes less useful, not more careful.

    Instead, frame guidance as a process, not morality:

    # Bad\n\"Claiming work is complete without verification is dishonesty\"\n\n# Good\n\"Run the verification command before reporting results\"\n

    Same outcome. No guilt. Better compliance.

    ","path":["Skills That Fight the Platform"],"tags":[]},{"location":"blog/2026-02-04-skills-that-fight-the-platform/#conflict-pattern-4-phantom-dependencies","level":2,"title":"Conflict Pattern 4: Phantom Dependencies","text":"

    Skills that reference files, tools, or systems that don't exist in the project.

    Signature:

    • \"Load from references/ directory\"
    • \"Run ./scripts/generate_test_cases.sh\"
    • \"Check the Figma MCP integration\"
    • \"See adding-reference-mindsets.md\"

    This is harmful because the AI will waste time searching for nonexistent artifacts, hallucinate their contents, or stall entirely.

    In mandatory skills, this creates deadlock: the AI can't proceed, and can't skip.

    Instead, every file, tool, or system referenced in a skill must exist.

    If a skill is a template, use explicit placeholders and label them as such.

    ","path":["Skills That Fight the Platform"],"tags":[]},{"location":"blog/2026-02-04-skills-that-fight-the-platform/#conflict-pattern-5-universal-triggers","level":2,"title":"Conflict Pattern 5: Universal Triggers","text":"

    Skills designed to activate on every interaction regardless of relevance.

    Signature:

    • \"Use when starting any conversation\"
    • \"Even a 1% chance means invoke the skill\"
    • \"BEFORE any response or action\"
    • \"Action = task. Check for skills.\"

    Universal triggers override the platform's relevance matching: The AI spends tokens on process overhead instead of the actual task.

    ctx Preserves Relevance

    This is exactly the failure mode ctx exists to mitigate:

    Wasting attention budget on irrelevant process instead of task-specific state.

    Write specific trigger conditions in the skill's description field:

    # Bad\ndescription: \n  \"Use when starting any conversation\"\n\n# Good\ndescription: \n  \"Use after writing code, before commits, or when CI might fail\"\n
    ","path":["Skills That Fight the Platform"],"tags":[]},{"location":"blog/2026-02-04-skills-that-fight-the-platform/#the-litmus-test","level":2,"title":"The Litmus Test","text":"

    Before adding a skill, ask:

    1. Does the platform already do this? If yes, don't restate it.
    2. Does it suppress AI judgment? If yes, it's a jailbreak.
    3. Does it reference real artifacts? If not, fix or remove it.
    4. Does it frame mistakes as moral failure? Reframe as process.
    5. Does it trigger on everything? Narrow the trigger.
    ","path":["Skills That Fight the Platform"],"tags":[]},{"location":"blog/2026-02-04-skills-that-fight-the-platform/#what-good-skills-look-like","level":2,"title":"What Good Skills Look Like","text":"

    Good skills provide project-specific knowledge the platform can't know:

    Good Skill Why It Works \"Run make audit before commits\" Project-specific CI pipeline \"Use cmd.Printf not fmt.Printf\" Codebase convention \"Constitution goes in .context/\" Domain-specific workflow \"JWT tokens need cache invalidation\" Project-specific gotcha

    These extend the system prompt instead of fighting it.

    ","path":["Skills That Fight the Platform"],"tags":[]},{"location":"blog/2026-02-04-skills-that-fight-the-platform/#appendix-bad-skill-fixed-skill","level":2,"title":"Appendix: Bad Skill → Fixed Skill","text":"

    Concrete examples from real projects.

    ","path":["Skills That Fight the Platform"],"tags":[]},{"location":"blog/2026-02-04-skills-that-fight-the-platform/#example-1-overbearing-safety","level":3,"title":"Example 1: Overbearing Safety","text":"
    # Bad\nYou must NEVER proceed without explicit confirmation.\nAny hesitation is a failure of diligence.\n
    # Fixed\nIf an action modifies production data or deletes files,\nask the user to confirm before proceeding.\n
    ","path":["Skills That Fight the Platform"],"tags":[]},{"location":"blog/2026-02-04-skills-that-fight-the-platform/#example-2-redundant-minimalism","level":3,"title":"Example 2: Redundant Minimalism","text":"
    # Bad\nAlways minimize code. If lines increase, reject the change.\n
    # Fixed\nAvoid abstraction unless reuse is clear or complexity is reduced.\n
    ","path":["Skills That Fight the Platform"],"tags":[]},{"location":"blog/2026-02-04-skills-that-fight-the-platform/#example-3-guilt-based-verification","level":3,"title":"Example 3: Guilt-Based Verification","text":"
    # Bad\nClaiming success without running tests is dishonest.\n
    # Fixed\nRun the test suite before reporting success.\n
    ","path":["Skills That Fight the Platform"],"tags":[]},{"location":"blog/2026-02-04-skills-that-fight-the-platform/#example-4-phantom-tooling","level":3,"title":"Example 4: Phantom Tooling","text":"
    # Bad\nRun `./scripts/check_consistency.sh` before commits.\n
    # Fixed\nIf `./scripts/check_consistency.sh` exists, run it before commits.\nOtherwise, skip this step.\n
    ","path":["Skills That Fight the Platform"],"tags":[]},{"location":"blog/2026-02-04-skills-that-fight-the-platform/#example-5-universal-trigger","level":3,"title":"Example 5: Universal Trigger","text":"
    # Bad\nUse at the start of every interaction.\n
    # Fixed\nUse after modifying code that affects authentication or persistence.\n
    ","path":["Skills That Fight the Platform"],"tags":[]},{"location":"blog/2026-02-04-skills-that-fight-the-platform/#the-meta-lesson","level":2,"title":"The Meta-Lesson","text":"

    The system prompt is infrastructure:

    • tested,
    • refined,
    • and maintained

    by the platform team.

    Custom skills are configuration layered on top.

    • Good configuration extends infrastructure.
    • Bad configuration fights it.

    When your skills fight the platform, you get the worst of both worlds:

    Diluted system guidance and inconsistent custom behavior.

    Write skills that teach the AI what it doesn't know. Don't rewrite how it thinks.

    Your AI already has good instincts.

    Give it knowledge, not therapy.

    ","path":["Skills That Fight the Platform"],"tags":[]},{"location":"blog/2026-02-05-you-cant-import-expertise/","level":1,"title":"You Can't Import Expertise","text":"","path":["You Can't Import Expertise"],"tags":[]},{"location":"blog/2026-02-05-you-cant-import-expertise/#why-good-skills-cant-be-copy-pasted","level":2,"title":"Why Good Skills Can't Be Copy-Pasted","text":"

    Jose Alekhinne / 2026-02-05

    Have You Ever Dropped a Well-Crafted Template into a Project and Had It Do... Nothing Useful?

    • The template was thorough,
    • The structure was sound,
    • The advice was correct...

    ...and yet it sat there, inert, while the same old problems kept drifting in.

    I found a consolidation skill online.

    It was well-organized: four files, ten refactoring patterns, eight analysis dimensions, six report templates.

    Professional. Comprehensive. Exactly the kind of thing you'd bookmark and think \"I'll use this.\"

    Then I stopped, and applied ctx's own evaluation framework:

    70% of it was noise!

    This post is about why.

    It Is about Encoding Templates

    Templates describe categories of problems.

    Expertise encodes which problems actually happen, and how often.

    ","path":["You Can't Import Expertise"],"tags":[]},{"location":"blog/2026-02-05-you-cant-import-expertise/#the-skill-looked-great-on-paper","level":2,"title":"The Skill Looked Great on Paper","text":"

    Here is what the consolidation skill offered:

    File Content SKILL.md Entry point: 8 analysis dimensions, workflow, output formats analysis-dimensions.md Detailed criteria for duplication, architecture, quality consolidation-patterns.md 10 refactoring patterns with before/after code report-templates.md 6 output templates: executive summary, roadmap, onboarding
    • It had a scoring system (0-10 per dimension, letter grades A+ through F).
    • It had severity classifications with color-coded emojis. It had bash commands for detection.
    • It even had antipattern warnings.

    By any standard template review, this skill passes.

    It looks like something an expert wrote.

    And that's exactly the trap.

    ","path":["You Can't Import Expertise"],"tags":[]},{"location":"blog/2026-02-05-you-cant-import-expertise/#applying-ear-the-70-20-10-split","level":2,"title":"Applying E/A/R: The 70-20-10 Split","text":"

    In a previous post, I described the E/A/R framework for evaluating skills:

    • Expert: Knowledge that took years to learn. Keep.
    • Activation: Useful triggers or scaffolding. Keep if lightweight.
    • Redundant: Restates what the AI already knows. Delete.

    Target: >70% Expert, <10% Redundant.

    This skill scored the inverse.

    ","path":["You Can't Import Expertise"],"tags":[]},{"location":"blog/2026-02-05-you-cant-import-expertise/#what-was-redundant-70","level":3,"title":"What Was Redundant (~70%)","text":"

    Every code example was Rust. My project is Go.

    The analysis dimensions: duplication detection, architectural structure, code organization, refactoring opportunities... These are things Claude already does when you ask it to review code.

    The skill restated them with more ceremony but no more insight.

    The six report templates were generic scaffolding: Executive Summary, Onboarding Document, Architecture Documentation...

    They are useful if you are writing a consulting deliverable, but not when you are trying to catch convention drift in a >15K-line Go CLI.

    ","path":["You Can't Import Expertise"],"tags":[]},{"location":"blog/2026-02-05-you-cant-import-expertise/#what-does-a-b-in-code-organization-actually-mean","level":2,"title":"What Does a B+ in Code Organization Actually Mean?!","text":"

    The scoring system (0-10 per dimension, letter grades) added ceremony without actionable insight.

    What is a B+? What do I do differently for an A-?

    The skill told the AI what it already knew, in more words.

    ","path":["You Can't Import Expertise"],"tags":[]},{"location":"blog/2026-02-05-you-cant-import-expertise/#what-was-activation-10","level":3,"title":"What Was Activation (~10%)","text":"

    The consolidation checklist (semantics preserved? tests pass? docs updated?) was useful as a gate. But, it's the kind of thing you could inline in three lines.

    The phased roadmap structure was reasonable scaffolding for sequencing work.

    ","path":["You Can't Import Expertise"],"tags":[]},{"location":"blog/2026-02-05-you-cant-import-expertise/#what-was-expert-20","level":3,"title":"What Was Expert (~20%)","text":"

    Three concepts survived:

    1. The Consolidation Decision Matrix: A concrete framework mapping similarity level and instance count to action. \"Exact duplicate, 2+ instances: consolidate immediately.\" \"<3 instances: leave it: duplication is cheaper than wrong abstraction.\" This is the kind of nuance that prevents premature generalization.

    2. The Safe Migration Pattern: Create the new API alongside old, deprecate, migrate incrementally, delete. Straightforward to describe, yet forgettable under pressure.

    3. Debt Interest Rate framing: Categorizing technical debt by how fast it compounds (security vulns = daily, missing tests = per-change, doc gaps = constant low cost). This changes prioritization.

    Three ideas out of four files and 700+ lines. The rest was filler that competed with the AI's built-in capabilities.

    ","path":["You Can't Import Expertise"],"tags":[]},{"location":"blog/2026-02-05-you-cant-import-expertise/#what-the-skill-didnt-know","level":2,"title":"What the Skill Didn't Know","text":"

    AI without Context Is Just a Corpus

    • LLMs are optimized on insanely large corpora.
    • And then they are passed through several layers of human-assisted refinement.
    • The whole process costs millions of dollars.

    Yet, the reality is that no corpus can \"infer\" your project's design, conventions, patterns, habits, history, vision, and deliverables.

    Your project is unique: So should your skills be.

    Here is the part no template can provide:

    ctx's actual drift patterns.

    Before evaluating the skill, I did archaeology. I read through:

    • Blog posts from previous refactoring sessions;
    • The project's learnings and decisions files;
    • Session journals spanning weeks of development.

    What I found was specific:

    Drift Pattern Where How Often Is/Has/Can predicate prefixes 5+ exported methods Every YOLO sprint Magic strings instead of constants 7+ files Gradual accumulation Hardcoded file permissions (0755) 80+ instances Since day one Lines exceeding 80 characters Especially test files Every session Duplicate code blocks Test and non-test code When agent is task-focused

    The generic skill had no check for any of these. It couldn't; because these patterns are specific to this project's conventions, its Go codebase, and its development rhythm.

    The Insight

    The skill's analysis dimensions were about categories of problems.

    What I needed was my *specific* problems.

    ","path":["You Can't Import Expertise"],"tags":[]},{"location":"blog/2026-02-05-you-cant-import-expertise/#the-adapted-skill","level":2,"title":"The Adapted Skill","text":"

    The adapted skill is roughly a quarter of the original's size. It has nine checks, each targeting a known drift pattern:

    1. Predicate naming: rg for Is/Has/Can prefixes
    2. Magic strings: literals that should be constants
    3. Hardcoded permissions: 0755/0644 literals
    4. File size: source files over 300 LOC
    5. TODO/FIXME: constitution violation (move to TASKS.md)
    6. Path construction: string concatenation instead of filepath.Join
    7. Line width: lines exceeding ~80 characters
    8. Duplicate blocks: copy-paste drift, especially in tests
    9. Dead exports: unused public API

    • Every check has a detection command.

    • Every check maps to a specific convention or constitution rule.
    • Every check was discovered through actual project history; not invented from a template.

    The three expert concepts from the original survived:

    • The decision matrix gates when to consolidate vs. when to leave duplication alone;
    • The safe migration pattern guides public API changes;
    • The relationship to other skills (/qa, /verify, /update-docs, ctx drift) prevents overlap.

    Nothing else made it.

    ","path":["You Can't Import Expertise"],"tags":[]},{"location":"blog/2026-02-05-you-cant-import-expertise/#the-deeper-pattern","level":2,"title":"The Deeper Pattern","text":"

    This experience crystallized something I've been circling for weeks:

    You can't import expertise. You have to grow it from your project's own history.

    A skill that says \"check for code duplication\" is not expertise: It's a category.

    Expertise is knowing, in your heart of hearts, that this project accumulates Is* predicate violations during velocity sprints, that this codebase has 80 hardcoded permission literals because nobody made a constant, that this team's test files drift wide because the agent prioritizes getting the task done over keeping the code in shape.

    The Parallel to the 3:1 Ratio

    In Refactoring with Intent, I described the 3:1 ratio: three YOLO sessions followed by one consolidation session.

    The same ratio applies to skills: you need experience in the project before you can write effective guidance for the project.

    Importing a skill on day one is like scheduling a consolidation session before you've written any code.

    ","path":["You Can't Import Expertise"],"tags":[]},{"location":"blog/2026-02-05-you-cant-import-expertise/#the-template-trap","level":2,"title":"The Template Trap","text":"

    Templates are seductive because they feel like progress:

    • You found something
    • It's well-organized
    • It covers the topic
    • It has concrete examples

    But coverage is not relevance.

    A template that covers eight analysis dimensions with Rust examples adds zero value to a Go project with five known drift patterns. Worse, it adds negative value: the AI spends attention defending generic advice instead of noticing project-specific drift.

    This is the attention budget problem again. Every token of generic guidance displaces a token of specific guidance. A 700-line skill that's 70% redundant doesn't just waste 490 lines: it dilutes the 210 lines that matter.

    ","path":["You Can't Import Expertise"],"tags":[]},{"location":"blog/2026-02-05-you-cant-import-expertise/#the-litmus-test","level":2,"title":"The Litmus Test","text":"

    Before dropping any external skill into your project:

    1. Run E/A/R: What percentage is expert knowledge vs. what the AI already knows? If it's less than 50% expert, it's probably not worth the attention cost.

    2. Check the language: Does it use your stack? Generic patterns in the wrong language are noise, not signal.

    3. List your actual drift: Read your own session history, learnings, and post-mortems. What breaks in practice? Does the skill check for those things?

    4. Measure by deletion: After adaptation, how much of the original survives? If you're keeping less than 30%, you would have been faster writing from scratch.

    5. Test against your conventions: Does every check in the skill map to a specific convention or rule in your project? If not, it's generic advice wearing a skill's clothing.

    ","path":["You Can't Import Expertise"],"tags":[]},{"location":"blog/2026-02-05-you-cant-import-expertise/#what-good-adaptation-looks-like","level":2,"title":"What Good Adaptation Looks Like","text":"

    The consolidation skill went from:

    Before After 4 files, 700+ lines 1 file, ~120 lines Rust examples Go-specific rg commands 8 generic dimensions 9 project-specific checks 6 report templates 1 focused output format Scoring system (A+ to F) Findings + priority + suggested fixes \"Check for duplication\" \"Check for Is* predicate prefixes in exported methods\"

    The adapted version is smaller, faster to parse, and catches the things that actually drift in this project.

    That's the difference between a template and a tool.

    If You Remember One Thing from This Post...

    Frameworks travel. Expertise doesn't.

    You can import structures, matrices, and workflows.

    But the checks that matter only grow where the scars are:

    • the conventions that were violated,
    • the patterns that drifted,
    • and the specific ways this codebase accumulates debt.

    This post was written during a consolidation session where the consolidation skill itself became the subject of consolidation. The meta continues.

    ","path":["You Can't Import Expertise"],"tags":[]},{"location":"blog/2026-02-07-the-anatomy-of-a-skill-that-works/","level":1,"title":"The Anatomy of a Skill That Works","text":"

    Update (2026-02-11)

    As of v0.4.0, ctx consolidated sessions into the journal mechanism. References to ctx-save, ctx session, and .context/sessions/ in this post reflect the architecture at the time of writing.

    ","path":["The Anatomy of a Skill That Works"],"tags":[]},{"location":"blog/2026-02-07-the-anatomy-of-a-skill-that-works/#what-20-skill-rewrites-taught-me-about-guiding-ai","level":2,"title":"What 20 Skill Rewrites Taught Me about Guiding AI","text":"

    Jose Alekhinne / 2026-02-07

    Why Do Some Skills Produce Great Results While Others Get Ignored or Produce Garbage?

    I had 20 skills. Most were well-intentioned stubs: a description, a command to run, and a wish for the best.

    Then I rewrote all of them in a single session. This is what I learned.

    In Skills That Fight the Platform, I described what skills should not do. In You Can't Import Expertise, I showed why templates fail. This post completes the trilogy: the concrete patterns that make a skill actually work.

    ","path":["The Anatomy of a Skill That Works"],"tags":[]},{"location":"blog/2026-02-07-the-anatomy-of-a-skill-that-works/#the-starting-point","level":2,"title":"The Starting Point","text":"

    Here is what a typical skill looked like before the rewrite:

    ---\nname: ctx-save\ndescription: \"Save session snapshot.\"\n---\n\nSave the current context state to `.context/sessions/`.\n\n## Execution\n\nctx session save $ARGUMENTS\n\nReport the saved session file path to the user.\n

    Seven lines of body. A vague description. No guidance on when to use it, when not to, what the command actually accepts, or how to tell if it worked.

    As a result, the agent would either never trigger the skill (the description was too vague), or trigger it and produce shallow output (no examples to calibrate quality).

    A skill without boundaries is just a suggestion.

    More precisely: the most effective boundary I found was a quality gate that runs before execution, not during it.

    ","path":["The Anatomy of a Skill That Works"],"tags":[]},{"location":"blog/2026-02-07-the-anatomy-of-a-skill-that-works/#the-pattern-that-emerged","level":2,"title":"The Pattern That Emerged","text":"

    After rewriting 20 skills, a repeatable anatomy emerged (independent of the skill's purpose). Not every skill needs every section, but the effective ones share the same bones:

    Section What It Does Before X-ing Pre-flight checks; prevents premature execution When to Use Positive triggers; narrows activation When NOT to Use Negative triggers; prevents misuse Usage Examples Invocation patterns the agent can pattern-match Process/Execution What to do; commands, steps, flags Good/Bad Examples Desired vs undesired output; sets boundaries Quality Checklist Verify before claiming completion

    I realized the first three sections matter more than the rest; because a skill with great execution steps but no activation guidance is like a manual for a tool nobody knows they have.

    Anti-Pattern: The Perfect Execution Trap

    A skill with detailed execution steps but no activation guidance will fail more often than a vague skill because it executes confidently at the wrong time.

    ","path":["The Anatomy of a Skill That Works"],"tags":[]},{"location":"blog/2026-02-07-the-anatomy-of-a-skill-that-works/#lesson-1-quality-gates-prevent-premature-execution","level":2,"title":"Lesson 1: Quality Gates Prevent Premature Execution","text":"

    The single most impactful addition was a \"Before X-ing\" section at the top of each skill. Not process steps; pre-flight checks.

    ## Before Recording\n\n1. **Check if it belongs here**: is this learning specific\n   to this project, or general knowledge?\n2. **Check for duplicates**: search LEARNINGS.md for similar\n   entries\n3. **Gather the details**: identify context, lesson, and\n   application before recording\n
    • Without this gate, the agent would execute immediately on trigger.
    • With it, the agent pauses to verify preconditions.

    The difference is dramatic: instead of shallow, reflexive execution, you get considered output.

    Readback

    For the astute readers, the aviation parallel is intentional:

    Pilots do not skip the pre-flight checklist because they have flown before.

    The checklist exists precisely because the stakes are high enough that \"I know what I'm doing\" is not sufficient.

    ","path":["The Anatomy of a Skill That Works"],"tags":[]},{"location":"blog/2026-02-07-the-anatomy-of-a-skill-that-works/#lesson-2-when-not-to-use-is-not-optional","level":2,"title":"Lesson 2: \"When NOT to Use\" Is Not Optional","text":"

    Every skill had a \"When to Use\" section. Almost none had \"When NOT to Use\". This is a problem.

    AI agents are biased toward action. Given a skill that says \"use when journal entries need enrichment\", the agent will find reasons to enrich.

    Without explicit negative triggers, over-activation is not a bug; it is the default behavior.

    Some examples of negative triggers that made a real difference:

    Skill Negative Trigger ctx-reflect \"When the user is in flow; do not interrupt\" ctx-save \"After trivial changes; a typo does not need a snapshot\" prompt-audit \"Unsolicited; only when the user invokes it\" qa \"Mid-development when code is intentionally incomplete\"

    These are not just nice-to-have. They are load-bearing.

    Without them, the agent will trigger the skill at the wrong time, produce unwanted output, and erode the user's trust in the skill system.

    ","path":["The Anatomy of a Skill That Works"],"tags":[]},{"location":"blog/2026-02-07-the-anatomy-of-a-skill-that-works/#lesson-3-examples-set-boundaries-better-than-rules","level":2,"title":"Lesson 3: Examples Set Boundaries Better than Rules","text":"

    The most common failure mode of thin skills was not wrong behavior but vague behavior. The agent would do roughly the right thing, but at a quality level that required human cleanup.

    Rules like \"be constructive, not critical\" are too abstract. What does \"constructive\" look like in a prompt audit report? The agent has to guess.

    Good/bad example pairs avoid guessing:

    ### Good Example\n\n> This session implemented the cooldown mechanism for\n> `ctx agent`. We discovered that `$PPID` in hook context\n> resolves to the Claude Code PID.\n>\n> I'd suggest persisting:\n> - **Learning**: `$PPID` resolves to Claude Code PID\n>   `ctx add learning --context \"...\" --lesson \"...\"`\n> - **Task**: mark \"Add cooldown\" as done\n\n### Bad Examples\n\n* \"*We did some stuff. Want me to save it?*\"\n* Listing 10 trivial learnings that are general knowledge\n* Persisting without asking the user first\n

    The good example shows the exact format, level of detail, and command syntax. The bad examples show where the boundary is.

    Together, they define a quality corridor without prescribing every word.

    Rules describe. Examples demonstrate.

    ","path":["The Anatomy of a Skill That Works"],"tags":[]},{"location":"blog/2026-02-07-the-anatomy-of-a-skill-that-works/#lesson-4-skills-are-read-by-agents-not-humans","level":2,"title":"Lesson 4: Skills Are Read by Agents, Not Humans","text":"

    This seems obvious, but it has non-obvious consequences. During the rewrite, one skill included guidance that said \"use a blog or notes app\" for general knowledge that does not belong in the project's learnings file.

    The agent does not have a notes app. It does not browse the web to find one. This instruction, clearly written for a human audience, was dead weight in a skill consumed by an AI.

    Skills Are for the Agents

    Every sentence in a skill should be actionable by the agent.

    If the guidance requires human judgment or human tools, it belongs in documentation, not in a skill.

    The corollary: command references must be exact.

    A skill that says \"save it somewhere\" is useless.

    A skill that says ctx add learning --context \"...\" --lesson \"...\" --application \"...\" is actionable.

    The agent can pattern-match and fill in the blanks.

    Litmus test: If a sentence starts with \"you could...\" or assumes external tools, it does not belong in a skill.

    ","path":["The Anatomy of a Skill That Works"],"tags":[]},{"location":"blog/2026-02-07-the-anatomy-of-a-skill-that-works/#lesson-5-the-description-field-is-the-trigger","level":2,"title":"Lesson 5: The Description Field Is the Trigger","text":"

    This was covered in Skills That Fight the Platform, but the rewrite reinforced it with data. Several skills had good bodies but vague descriptions:

    # Before: vague, activates too broadly or not at all\ndescription: \"Show context summary.\"\n\n# After: specific, activates at the right time\ndescription: \"Show context summary. Use at session start or\n  when unclear about current project state.\"\n

    The description is not a title. It is the activation condition.

    The platform's skill matching reads this field to decide whether to surface the skill. A vague description means the skill either never triggers or triggers when it should not.

    ","path":["The Anatomy of a Skill That Works"],"tags":[]},{"location":"blog/2026-02-07-the-anatomy-of-a-skill-that-works/#lesson-6-flag-tables-beat-prose","level":2,"title":"Lesson 6: Flag Tables Beat Prose","text":"

    Most skills wrap CLI tools. The thin versions described flags in prose, if at all. The rewritten versions use tables:

    | Flag        | Short | Default | Purpose                  |\n|-------------|-------|---------|--------------------------|\n| `--limit`   | `-n`  | 20      | Maximum sessions to show |\n| `--project` | `-p`  | \"\"      | Filter by project name   |\n| `--full`    |       | false   | Show complete content    |\n

    Tables are scannable, complete, and unambiguous.

    The agent can read them faster than parsing prose, and they serve as both reference and validation: If the agent invokes a flag not in the table, something is wrong.

    ","path":["The Anatomy of a Skill That Works"],"tags":[]},{"location":"blog/2026-02-07-the-anatomy-of-a-skill-that-works/#lesson-7-template-drift-is-a-real-maintenance-burden","level":2,"title":"Lesson 7: Template Drift Is a Real Maintenance Burden","text":"

    // TODO: this has changed; we deploy from the marketplace; update it. // at least add an admonition saying things are different now.

    ctx deploys skills through templates (via ctx init). Every skill exists in two places: the live version (.claude/skills/) and the template (internal/assets/claude/skills/).

    They must match.

    During the rewrite, every skill update required editing both files and running diff to verify. This sounds trivial, but across 16 template-backed skills, it was the most error-prone part of the process.

    Template drift is dangerous because it creates false confidence: the agent appears to follow rules that no longer exist.

    The lesson: if your skills have a deployment mechanism, build the drift check into your workflow. We added a row to the update-docs skill's mapping table specifically for this:

    | `internal/assets/claude/skills/` | `.claude/skills/` (live) |\n

    Intentional differences (like project-specific scripts in the live version but not the template) should be documented, not discovered later as bugs.

    ","path":["The Anatomy of a Skill That Works"],"tags":[]},{"location":"blog/2026-02-07-the-anatomy-of-a-skill-that-works/#the-rewrite-scorecard","level":2,"title":"The Rewrite Scorecard","text":"Metric Before After Average skill body ~15 lines ~80 lines Skills with quality gate 0 20 Skills with \"When NOT\" 0 20 Skills with examples 3 20 Skills with flag tables 2 12 Skills with checklist 0 20

    More lines, but almost entirely Expert content (per the E/A/R framework). No personality roleplay, no redundant guidance, no capability lists. Just project-specific knowledge the platform does not have.

    ","path":["The Anatomy of a Skill That Works"],"tags":[]},{"location":"blog/2026-02-07-the-anatomy-of-a-skill-that-works/#the-meta-lesson","level":2,"title":"The Meta-Lesson","text":"

    The previous two posts argued that skills should provide knowledge, not personality; that they should complement the platform, not fight it; that they should grow from project history, not imported templates.

    This post adds the missing piece: structure.

    A skill without a structure is a wish.

    A skill with quality gates, negative triggers, examples, and checklists is a tool: the difference is not the content; it is whether the agent can reliably execute it without human intervention.

    Skills Are Interfaces

    Good skills are not instructions. They are contracts:

    • They specify preconditions, postconditions, and boundaries.
    • They show what success looks like and what failure looks like.
    • They trust the agent's intelligence but do not trust its assumptions.

    If You Remember One Thing from This Post...

    Skills that work have bones, not just flesh.

    Quality gates, negative triggers, examples, and checklists are the skeleton. The domain knowledge is the muscle.

    Without the skeleton, the muscle has nothing to attach to.

    This post was written during the same session that rewrote all 22 skills. The skill-creator skill was updated to encode these patterns. The meta continues.

    ","path":["The Anatomy of a Skill That Works"],"tags":[]},{"location":"blog/2026-02-08-not-everything-is-a-skill/","level":1,"title":"Not Everything Is a Skill","text":"

    Update (2026-02-11)

    As of v0.4.0, ctx consolidated sessions into the journal mechanism. References to /ctx-save, .context/sessions/, and session auto-save in this post reflect the architecture at the time of writing.

    ","path":["Not Everything Is a Skill"],"tags":[]},{"location":"blog/2026-02-08-not-everything-is-a-skill/#what-a-codebase-audit-taught-me-about-restraint","level":2,"title":"What a Codebase Audit Taught Me about Restraint","text":"

    Jose Alekhinne / 2026-02-08

    When You Find a Useful Prompt, What Do You Do with It?

    My instinct was to make it a skill.

    I had just spent three posts explaining how to build skills that work. Naturally, the hammer wanted nails.

    Then I looked at what I was holding and realized: this is not a nail.

    ","path":["Not Everything Is a Skill"],"tags":[]},{"location":"blog/2026-02-08-not-everything-is-a-skill/#the-audit","level":2,"title":"The Audit","text":"

    I wanted to understand how I use ctx:

    • Where the friction is;
    • What works, what drifts;
    • What I keep doing manually that could be automated.

    So I wrote a prompt that spawned eight agents to analyze the codebase from different angles:

    Agent Analysis 1 Extractable patterns from session history 2 Documentation drift (godoc, inline comments) 3 Maintainability (large functions, misplaced code) 4 Security review (CLI-specific surface) 5 Blog theme discovery 6 Roadmap and value opportunities 7 User-facing documentation gaps 8 Agent team strategies for future sessions

    The prompt was specific:

    • read-only agents,
    • structured output format,
    • concrete file references,
    • ranked recommendations.

    It ran for about 20 minutes and produced eight Markdown reports.

    The reports were good: Not perfect, but actionable.

    What mattered was not the speed. It was that the work could be explored without committing to any single outcome.

    They surfaced a stale doc.go referencing a subcommand that was never built.

    They found 311 build-then-test sequences I could reduce to a single make check.

    They identified that 42% of my sessions start with \"do you remember?\", which is a lot of repetition for something a skill could handle.

    I had findings. I had recommendations. I had the instinct to automate.

    And then... I stopped.

    ","path":["Not Everything Is a Skill"],"tags":[]},{"location":"blog/2026-02-08-not-everything-is-a-skill/#the-question","level":2,"title":"The Question","text":"

    The natural next step was to wrap the audit prompt as /ctx-audit: a skill you invoke periodically to get a health check. It fits the pattern:

    • It has a clear trigger.
    • It produces structured output.

    But I had just spent a week writing about what makes skills work, and the criteria I established argued against it.

    From The Anatomy of a Skill That Works:

    \"A skill without boundaries is just a suggestion.\"

    From You Can't Import Expertise:

    \"Frameworks travel, expertise doesn't.\"

    From Skills That Fight the Platform:

    \"You are the guest, not the host.\"

    The audit prompt fails all three tests:

    Criterion Audit prompt Good skill Frequency Quarterly, maybe Daily or weekly Stability Tweaked every time Consistent invocation Scope Bespoke, 8 parallel agents Single focused action Trigger \"I feel like auditing\" Clear, repeatable event

    Skills are contracts. Contracts need stable terms.

    A prompt I will rewrite every time I use it is not a contract. It is a conversation starter.

    ","path":["Not Everything Is a Skill"],"tags":[]},{"location":"blog/2026-02-08-not-everything-is-a-skill/#recipes-vs-skills","level":2,"title":"Recipes vs Skills","text":"

    The distinction that emerged:

    Skill Recipe Invocation /slash-command Copy-paste from a doc Frequency High (daily, weekly) Low (quarterly, ad hoc) Stability Fixed contract Adapted each time Scope One focused action Multi-step orchestration Audience The agent The human (who then prompts) Lives in .claude/skills/hack/ or docs/ Attention cost Loaded into context on match Zero until needed

    Recipes can later graduate into skills, but only after repetition proves stability.

    That last row matters. Skills consume the attention budget every time the platform considers activating them.

    A skill that triggers quarterly but gets evaluated on every prompt is pure waste: attention spent on something that will say \"When NOT to Use: now\" 99% of the time.

    Runbooks have zero attention cost. They sit in a Markdown file until a human decides to use them.

    • The human provides the judgment about timing.
    • The prompt provides the structure.

    The Attention Budget Applies to Skills Too

    Every skill in .claude/skills/ is a standing claim on the context window. The platform evaluates skill descriptions against every user prompt to decide whether to activate.

    Twenty focused skills are fine. Thirty might be fine. But each one added reduces the headroom available for actual work.

    Recipes are skills that opted out of the attention tax.

    ","path":["Not Everything Is a Skill"],"tags":[]},{"location":"blog/2026-02-08-not-everything-is-a-skill/#what-the-audit-actually-produced","level":2,"title":"What the Audit Actually Produced","text":"

    The audit was not wasted. It was a planning exercise that generated concrete tasks:

    Finding Action 42% of sessions start with memory check Task: /ctx-remember skill (this one is a skill; it is daily) Auto-save stubs are empty Task: enhance /ctx-save with richer summaries 311 raw build-test sequences Task: make check target Stale recall/doc.go lists nonexistent serve Task: fix the doc.go 120 commit sequences disconnected from context Task: /ctx-commit workflow
    • Some findings became skills;
    • Some became Makefile targets;
    • Some became one-line doc fixes.

    The audit did not prescribe the artifact type: The findings did.

    The audit is the input. Skills are one possible output. Not the only one.

    ","path":["Not Everything Is a Skill"],"tags":[]},{"location":"blog/2026-02-08-not-everything-is-a-skill/#the-audit-prompt","level":2,"title":"The Audit Prompt","text":"

    Here is the exact prompt I used, for those who are curious.

    This is not a template: It worked because it was written against this codebase, at this moment, with specific goals in mind:

    I want you to create an agent team to audit this codebase. Save each report as\na separate Markdown file under `./ideas/` (or another directory if you prefer).\n\nUse read-only agents (subagent_type: Explore) for all analyses. No code changes.\n\nFor each report, use this structure:\n- Executive Summary (2-3 sentences + severity table)\n- Findings (grouped, with file:line references)\n- Ranked Recommendations (high/medium/low priority)\n- Methodology (what was examined, how)\n\nKeep reports actionable. Every finding should suggest a concrete fix or next step.\n\n## Analyses to Run\n\n### 1. Extractable Patterns (*session mining*)\nSearch session JSONL files, journal entries, and task archives for repetitive\nmulti-step workflows. Count frequency of bash command sequences, slash command\nusage, and recurring user prompts. Identify patterns that could become skills\nor scripts. Cross-reference with existing skills to find coverage gaps.\nOutput: ranked list of automation opportunities with frequency data.\n\n### 2. Documentation Drift (*godoc + inline*)\nCompare every doc.go against its package's actual exports and behavior. Check\ninline godoc comments on exported functions against their implementations.\nScan for stale TODO/FIXME/HACK comments. Check that package-level comments match\npackage names.\nOutput: drift items ranked by severity with exact file:line references.\n\n### 3. Maintainability\nLook for:\n- functions longer than 80 lines with clear split points\n- switch blocks with more than 5 cases that could be table-driven\n- inline comments like \"step 1\", \"step 2\" that indicate a block wants to be a function\n- files longer than 400 lines\n- flat packages that could benefit from sub-packages\n- functions that appear misplaced in their file\n\nDo NOT flag things that are fine as-is just because they could theoretically\nbe different.\nOutput: concrete refactoring suggestions, not style nitpicks.\n\n### 4. Security Review\nThis is a CLI app. 
Focus on CLI-relevant attack surface, not web OWASP:\n- file path traversal\n- command injection\n- symlink following when writing to `.context/`\n- permission handling\n- sensitive data in outputs\n\nOutput: findings with severity ratings and plausible exploit scenarios.\n\n### 5. Blog Theme Discovery\nRead existing blog posts for style and narrative voice. Analyze git history,\nrecent session discussions, and `DECISIONS.md` for story arcs worth writing about.\nSuggest 3-5 blog post themes with:\n- title\n- angle\n- target audience\n- key commits or sessions to reference\n- a 2-sentence pitch\n\nPrioritize themes that build a coherent narrative across posts.\n\n### 6. Roadmap and Value Opportunities\nBased on current features, recent momentum, and gaps found in other analyses,\nidentify the highest-value improvements. Consider user-facing features,\ndeveloper experience, integration opportunities, and low-hanging fruit.\nOutput: prioritized list with rough effort and impact estimates.\n\n### 7. User-Facing Documentation\nEvaluate README, help text, and user docs. Suggest improvements structured as\nuse-case pages: the problem, how ctx solves it, a typical workflow, and gotchas.\nIdentify gaps where a user would get stuck without reading source code.\nOutput: documentation gaps with suggested page outlines.\n\n### 8. Agent Team Strategies\nBased on the codebase structure, suggest 2-3 agent team configurations for\nupcoming work sessions. For each, include:\n- team composition (roles and agent types)\n- task distribution strategy\n- coordination approach\n- the kinds of work it suits\n

    Avoid Generic Advice

    Suggestions that are not grounded in a project's actual structure, history, and workflows are worse than useless:

    They create false confidence.

    If an analysis cannot point to concrete files, commits, sessions, or patterns, it should say \"no finding\" instead of inventing best practices.

    ","path":["Not Everything Is a Skill"],"tags":[]},{"location":"blog/2026-02-08-not-everything-is-a-skill/#the-deeper-pattern","level":2,"title":"The Deeper Pattern","text":"

    This is part of a pattern I keep rediscovering:

    The urge to automate is not the same as the need to automate:

    • The 3:1 ratio taught me that not every session should be a YOLO sprint.
    • The E/A/R framework taught me that not every template is worth importing. Now the audit is teaching me that not every useful prompt is worth institutionalizing.

    The common thread is restraint:

    • Knowing when to stop.
    • Recognizing that the cost of automation is not just the effort to build it.

    The cost is the ongoing attention tax of maintaining it, the context it consumes, and the false confidence it creates when it drifts.

    An entry in hack/runbooks/codebase-audit.md is honest about what it is:

    A prompt I wrote once, improved once, and will adapt again next time:

    • It does not pretend to be a reliable contract.
    • It does not claim attention budget.
    • It does not drift silently.

    The Automation Instinct

    When you find a useful prompt, the instinct is to institutionalize it. Resist.

    Ask first: will I use this the same way next time?

    If yes, it is a skill. If no, it is a recipe. If you are not sure, it is a recipe until proven otherwise.

    ","path":["Not Everything Is a Skill"],"tags":[]},{"location":"blog/2026-02-08-not-everything-is-a-skill/#this-mindset-in-the-context-of-ctx","level":2,"title":"This Mindset in the Context of ctx","text":"

    ctx is a tool that gives AI agents persistent memory. Its purpose is automation: reducing the friction of context loading, session recall, decision tracking.

    But automation has boundaries, and knowing where those boundaries are is as important as pushing them forward.

    The skills system is for high-frequency, stable workflows.

    The recipes, the journal entries, the session dumps in .context/sessions/: those are for everything else.

    Not everything needs to be a slash command. Some things are better as Markdown files you read when you need them.

    The goal of ctx is not to automate everything: It is to automate the right things and to make the rest easy to find when you need it.

    If You Remember One Thing from This Post...

    The best automation decision is sometimes not to automate.

    A runbook in a Markdown file costs nothing until you use it.

    A skill costs attention on every prompt, whether it fires or not.

    Automate the daily. Document the periodic. Forget the rest.

    This post was written during the session that produced the codebase audit reports and distilled the prompt into hack/runbooks/codebase-audit.md. The audit generated seven tasks, one Makefile target, and zero new skills. The meta continues.

    See also: Code Is Cheap. Judgment Is Not.: the capstone that threads this post's restraint argument into the broader case for why judgment, not production, is the bottleneck.

    ","path":["Not Everything Is a Skill"],"tags":[]},{"location":"blog/2026-02-09-defense-in-depth-securing-ai-agents/","level":1,"title":"Defense in Depth: Securing AI Agents","text":"","path":["Defense in Depth: Securing AI Agents"],"tags":[]},{"location":"blog/2026-02-09-defense-in-depth-securing-ai-agents/#when-markdown-is-not-a-security-boundary","level":2,"title":"When Markdown Is Not a Security Boundary","text":"

    Jose Alekhinne / 2026-02-09

    What Happens When Your AI Agent Runs Overnight and Nobody Is Watching?

    It follows instructions: That is the problem.

    Not because it is malicious. Because it is controllable.

    It follows instructions from context, and context can be poisoned.

    I was writing the autonomous loops recipe for ctx: the guide for running an AI agent in a loop overnight, unattended, working through tasks while you sleep. The original draft had a tip at the bottom:

    Use CONSTITUTION.md for guardrails. Tell the agent \"never delete tests\" and it usually won't.

    Then I read that sentence back and realized: that is wishful thinking.

    ","path":["Defense in Depth: Securing AI Agents"],"tags":[]},{"location":"blog/2026-02-09-defense-in-depth-securing-ai-agents/#the-realization","level":2,"title":"The Realization","text":"

    CONSTITUTION.md is a Markdown file. The agent reads it at session start alongside everything else in .context/. It is one source of instructions in a context window that also contains system prompts, project files, conversation history, tool outputs, and whatever the agent fetched from the internet.

    An attacker who can inject content into any of those sources can redirect the agent's behavior. And \"attacker\" does not always mean a person with malicious intent. It can be:

    Vector Example A dependency A malicious npm package with instructions in its README or error output A URL Documentation page with embedded adversarial instructions A project file A contributor who adds instructions to CLAUDE.md or .cursorrules The agent itself In an autonomous loop, the agent modifies its own config between iterations A command output An error message containing instructions the agent interprets and follows

    That last vector is the one that kept me up at night (literally!):

    In an autonomous loop, the agent modifies files as part of its job.

    If it modifies its own configuration files, the next iteration loads the modified config.

    • No human reviews it.
    • No diff is shown.
    • The agent that starts iteration N+1 is running with rules written by iteration N.

    The agent can rewrite its own guardrails.

    ","path":["Defense in Depth: Securing AI Agents"],"tags":[]},{"location":"blog/2026-02-09-defense-in-depth-securing-ai-agents/#five-layers-each-with-a-hole","level":2,"title":"Five Layers, Each with a Hole","text":"

    That's five nested layers of Swiss cheese. Alone, each of them has large holes. Together, they create a boundary.

    What followed was a week of peeling back assumptions:

    Every defense I examined had a bypass, and the bypass was always the same shape: the defense was enforced at a level the agent could reach.

    ","path":["Defense in Depth: Securing AI Agents"],"tags":[]},{"location":"blog/2026-02-09-defense-in-depth-securing-ai-agents/#layer-1-soft-instructions-probabilistic","level":3,"title":"Layer 1: Soft Instructions (Probabilistic)","text":"

    CONSTITUTION.md, the Agent Playbook, system prompts: These tell the agent what to do.

    The agent usually follows them.

    \"Usually\" is the keyword here.

    The hole: Prompt injection:

    • A sufficiently crafted payload overrides soft instructions.
    • Long context windows dilute attention on rules stated early.
    • Edge cases where instructions are ambiguous get resolved in unpredictable ways.

    Verdict: Necessary. Not sufficient. Good for the common case. Never trust it as a security boundary.

    ","path":["Defense in Depth: Securing AI Agents"],"tags":[]},{"location":"blog/2026-02-09-defense-in-depth-securing-ai-agents/#layer-2-application-controls-deterministic-at-runtime-mutable-across-iterations","level":3,"title":"Layer 2: Application Controls (Deterministic at Runtime, Mutable across Iterations)","text":"

    Permission allowlists in .claude/settings.local.json:

    {\n  \"permissions\": {\n    \"allow\": [\n      \"Bash(make:*)\",\n      \"Bash(go:*)\",\n      \"Bash(git:*)\",\n      \"Read\",\n      \"Write\",\n      \"Edit\"\n    ]\n  }\n}\n

    If rm, curl, sudo, or docker are not in the allowlist, the agent cannot invoke them. This is deterministic.

    The application enforces it regardless of what any prompt says.

    The hole: The agent can modify the allowlist itself:

    • It has Write permission.
    • The allowlist lives in a file.
    • The agent writes to the file.
    • The next iteration loads the modified allowlist.

    The application enforces the rules, but the application reads the rules from files the agent can write.

    Verdict: Strong first layer. Must be combined with self-modification prevention.

    ","path":["Defense in Depth: Securing AI Agents"],"tags":[]},{"location":"blog/2026-02-09-defense-in-depth-securing-ai-agents/#layer-3-os-level-isolation-unbypassable","level":3,"title":"Layer 3: OS-Level Isolation (Unbypassable)","text":"

    This is where the defenses stop having holes in the same shape.

    The operating system enforces access controls that no application-level trick can override. An unprivileged user cannot read files owned by root. A process without CAP_NET_RAW cannot open raw sockets. These are kernel boundaries.

    Control What it stops Dedicated unprivileged user Privilege escalation, sudo, group-based access Filesystem permissions Lateral movement to other projects, system config Immutable config files Self-modification of guardrails between iterations

    Make the agent's instruction files read-only: CLAUDE.md, .claude/settings.local.json, .context/CONSTITUTION.md. Own them as a different user, or mark them immutable with chattr +i on Linux.

    The hole: Actions within the agent's legitimate scope:

    • If the agent has write access to source code (which it needs), it can introduce vulnerabilities in the code itself.
    • You cannot prevent this without removing the agent's ability to do its job.

    Verdict: Essential. This is the layer that makes Layers 1 and 2 trustworthy.

    OS-level isolation does not make the agent safe; it makes the other layers meaningful.

    ","path":["Defense in Depth: Securing AI Agents"],"tags":[]},{"location":"blog/2026-02-09-defense-in-depth-securing-ai-agents/#layer-4-network-controls","level":3,"title":"Layer 4: Network Controls","text":"

    An agent that cannot reach the internet cannot exfiltrate data.

    It also cannot ingest new instructions mid-loop from external documents, error pages, or hostile content.

    # Container with no network\ndocker run --network=none ...\n\n# Or firewall rules allowing only package registries\niptables -A OUTPUT -d registry.npmjs.org -j ACCEPT\niptables -A OUTPUT -d proxy.golang.org -j ACCEPT\niptables -A OUTPUT -j DROP\n
    • If the agent genuinely does not need the network, disable it entirely.
    • If it needs to fetch dependencies, allow specific registries and block everything else.

    The hole: None, if the agent does not need the network.

    The tradeoff is that many real workloads need dependency resolution, so a full airgap requires pre-populated caches.

    ","path":["Defense in Depth: Securing AI Agents"],"tags":[]},{"location":"blog/2026-02-09-defense-in-depth-securing-ai-agents/#layer-5-infrastructure-isolation","level":3,"title":"Layer 5: Infrastructure Isolation","text":"

    The strongest boundary is a separate machine.

    The moment you stop arguing about prompts and start arguing about kernels, you are finally doing security.

    docker run --rm \\\n  --network=none \\\n  --cap-drop=ALL \\\n  --memory=4g \\\n  --cpus=2 \\\n  -v /path/to/project:/workspace \\\n  -w /workspace \\\n  your-dev-image \\\n  ./loop.sh\n

    Never Mount the Docker Socket

    Do not mount /var/run/docker.sock, like, ever.

    An agent with socket access can spawn sibling containers with full host access, effectively escaping the sandbox.

    This is not theoretical: the Docker socket grants root-equivalent access to the host.

    Use rootless Docker or Podman to eliminate this escalation path entirely.

    Virtual machines are even stronger: The guest kernel has no visibility into the host OS. No shared folders, no filesystem passthrough, no SSH keys to other machines.

    ","path":["Defense in Depth: Securing AI Agents"],"tags":[]},{"location":"blog/2026-02-09-defense-in-depth-securing-ai-agents/#the-pattern","level":2,"title":"The Pattern","text":"

    Each layer is straightforward: The strength is in the combination:

    Layer Implementation What it stops Soft instructions CONSTITUTION.md Common mistakes (probabilistic) Application allowlist .claude/settings.local.json Unauthorized commands (deterministic within runtime) Immutable config chattr +i on config files Self-modification between iterations Unprivileged user Dedicated user, no sudo Privilege escalation Container --cap-drop=ALL --network=none Host escape, data exfiltration Resource limits --memory=4g --cpus=2 Resource exhaustion

    No layer is redundant. Each one catches what the others miss:

    • The soft instructions handle the 99% case: \"don't delete tests.\"
    • The allowlist prevents the agent from running commands it should not.
    • The immutable config prevents the agent from modifying the allowlist.
    • The unprivileged user prevents the agent from removing the immutable flag.
    • The container prevents the agent from reaching anything outside its workspace.
    • The resource limits prevent the agent from consuming all system resources.

    Remove any one layer and there is an attack path through the remaining ones.

    ","path":["Defense in Depth: Securing AI Agents"],"tags":[]},{"location":"blog/2026-02-09-defense-in-depth-securing-ai-agents/#common-mistakes-i-see","level":2,"title":"Common Mistakes I See","text":"

    These are real patterns, not hypotheticals:

    \"I'll just use --dangerously-skip-permissions.\" This disables Layer 2 entirely. Without Layers 3 through 5, you have no protection at all. The flag means what it says. If you ever think you need it, think thrice; you probably don't. But if you ever do need to use this, only use it inside a properly isolated VM (not even a container: a \"VM\").

    \"The agent is sandboxed in Docker.\" A Docker container with the Docker socket mounted, running as root, with --privileged, and full network access is not sandboxed. It is a root shell with extra steps.

    \"I reviewed CLAUDE.md, it's fine.\" You reviewed it before the loop started. The agent modified it during iteration 3. Iteration 4 loaded the modified version. Unless the file is immutable, your review is futile.

    \"The agent only has access to this one project.\" Does the project directory contain .env files? SSH keys? API tokens? A .git/config with push access to a remote? Filesystem isolation means isolating what is in the directory too.

    ","path":["Defense in Depth: Securing AI Agents"],"tags":[]},{"location":"blog/2026-02-09-defense-in-depth-securing-ai-agents/#the-connection-to-context-engineering","level":2,"title":"The Connection to Context Engineering","text":"

    This is the same lesson I keep rediscovering, wearing different clothes.

    In The Attention Budget, I wrote about how every token competes for the AI's focus. Security instructions in CONSTITUTION.md are subject to the same budget pressure: if the context window is full of code, error messages, and tool outputs, the security rules stated at the top get diluted.

    In Skills That Fight the Platform, I wrote about how custom instructions can conflict with the AI's built-in behavior. Security rules have the same problem: telling an agent \"never run curl\" in Markdown while giving it unrestricted shell access creates a contradiction: The agent resolves contradictions unpredictably. The agent will often pick the path of least resistance to attain its objective function. And, trust me, agents can get far more creative than the best red-teamer you know.

    In You Can't Import Expertise, I wrote about how generic templates fail because they do not encode project-specific knowledge. Generic security advice fails the same way: \"Don't exfiltrate data\" is a category; blocking outbound network access is a control.

    The pattern across all of these: Soft instructions are useful for the common case. Hard boundaries are required for security.

    Know which is which.

    ","path":["Defense in Depth: Securing AI Agents"],"tags":[]},{"location":"blog/2026-02-09-defense-in-depth-securing-ai-agents/#the-checklist","level":2,"title":"The Checklist","text":"

    Before running an unattended AI agent:

    • Agent runs as a dedicated unprivileged user (no sudo, no docker group)
    • Agent's config files are immutable or owned by a different user
    • Permission allowlist restricts tools to the project's toolchain
    • Container drops all capabilities (--cap-drop=ALL)
    • Docker socket is NOT mounted
    • Network is disabled or restricted to specific domains
    • Resource limits are set (memory, CPU, disk)
    • No SSH keys, API tokens, or credentials are accessible
    • Project directory does not contain .env or secrets files
    • Iteration cap is set (--max-iterations)

    This checklist lives in the Agent Security reference alongside the full threat model and detailed guidance for each layer.

    ","path":["Defense in Depth: Securing AI Agents"],"tags":[]},{"location":"blog/2026-02-09-defense-in-depth-securing-ai-agents/#what-changed-in-ctx","level":2,"title":"What Changed in ctx","text":"

    The autonomous loops recipe now has a full permissions and isolation section instead of a one-line tip about CONSTITUTION.md. It covers both the explicit allowlist approach and the --dangerously-skip-permissions flag, with honest guidance about when each is appropriate.

    It also has an OS-level isolation table that is not optional: unprivileged users, filesystem permissions, containers, VMs, network controls, resource limits, and self-modification prevention.

    The Agent Security page consolidates the threat model and defense layers into a standalone reference.

    These are not theoretical improvements. They are the minimum responsible guidance for a tool that helps people run AI agents overnight.

    If You Remember One Thing from This Post...

    Markdown is not a security boundary.

    CONSTITUTION.md is a nudge. An allowlist is a gate.

    An unprivileged user in a network-isolated container is a wall.

    Use all three. Trust only the wall.

    This post was written during the session that added permissions, isolation, and self-modification prevention to the autonomous loops recipe. The security guidance started as a single tip and grew into two documents. The meta continues.

    ","path":["Defense in Depth: Securing AI Agents"],"tags":[]},{"location":"blog/2026-02-12-how-deep-is-too-deep/","level":1,"title":"How Deep Is Too Deep?","text":"","path":["How Deep Is Too Deep?"],"tags":[]},{"location":"blog/2026-02-12-how-deep-is-too-deep/#when-master-ml-is-the-wrong-next-step","level":2,"title":"When \"Master ML\" Is the Wrong Next Step","text":"

    Jose Alekhinne / 2026-02-12

    Have You Ever Felt like You Should Understand More of the Stack beneath You?

    You can talk about transformers at a whiteboard.

    You can explain attention to a colleague.

    You can use agentic AI to ship real software.

    But somewhere in the back of your mind, there is a voice:

    \"Maybe I should go deeper. Maybe I need to master machine learning.\"

    I had that voice for months.

    Then I spent a week debugging an agent failure that had nothing to do with ML theory and everything to do with knowing which abstraction was leaking.

    This post is about when depth compounds and (more importantly) when it does not.

    ","path":["How Deep Is Too Deep?"],"tags":[]},{"location":"blog/2026-02-12-how-deep-is-too-deep/#the-hierarchy-nobody-questions","level":2,"title":"The Hierarchy Nobody Questions","text":"

    There is an implicit stack most people carry around when thinking about AI:

    Layer What Lives Here Agentic AI Autonomous loops, tool use, multi-step reasoning Generative AI Text, image, code generation Deep Learning Transformer architectures, training at scale Neural Networks Backpropagation, gradient descent Machine Learning Statistical learning, optimization Classical AI Search, planning, symbolic reasoning

    At some point down that stack, you hit a comfortable plateau: the layer where you can hold a conversation but not debug a failure.

    The instinctive response is to go deeper.

    But that instinct hides a more important question:

    \"Does depth still compound when the abstractions above you are moving hyper-exponentially?\"

    ","path":["How Deep Is Too Deep?"],"tags":[]},{"location":"blog/2026-02-12-how-deep-is-too-deep/#the-honest-observation","level":2,"title":"The Honest Observation","text":"

    If you squint hard enough, a large chunk of modern ML intuition collapses into older fields:

    ML Concept Older Field Gradient descent Numerical optimization Backpropagation Reverse-mode autodiff Loss landscapes Non-convex optimization Generalization Statistics Scaling laws Asymptotics and information theory

    Nothing here is uniquely \"AI\".

    Most of this math predates the term deep learning. In some cases, by decades.

    So what changed?

    ","path":["How Deep Is Too Deep?"],"tags":[]},{"location":"blog/2026-02-12-how-deep-is-too-deep/#same-tools-different-regime","level":2,"title":"Same Tools, Different Regime","text":"

    The mistake is assuming this is a new theory problem: It is not.

    It is a new operating regime.

    Classical numerical methods were developed under assumptions like:

    • Manageable dimensionality
    • Reasonably well-conditioned objectives
    • Losses that actually represent the goal

    Modern ML violates all three: On purpose.

    Today's models operate with millions to trillions of parameters, wildly underdetermined systems, and objective functions we know are wrong but optimize anyway.

    It is complete and utter madness!

    At this scale, familiar concepts warp:

    • What we call \"local minima\" are overwhelmingly saddle points in high-dimensional spaces.
    • Noise stops being noise and starts becoming structure.
    • Overfitting can coexist with generalization.
    • Bigger models outperform \"better\" ones.

    The math did not change: The phase did.

    This is less numerical analysis and more statistical physics: Same equations, but behavior dominated by phase transitions and emergent structure.

    ","path":["How Deep Is Too Deep?"],"tags":[]},{"location":"blog/2026-02-12-how-deep-is-too-deep/#why-scaling-laws-feel-alien","level":2,"title":"Why Scaling Laws Feel Alien","text":"

    In classical statistics, asymptotics describe what happens eventually.

    In modern ML, scaling laws describe where you can operate today.

    They do not say \"given enough time, things converge\".

    They say \"cross this threshold and behavior qualitatively changes\".

    This is why dumb architectures plus scale beat clever ones.

    Why small theoretical gains disappear under data.

    Why \"just make it bigger\", ironically, keeps working longer than it should.

    That is not a triumph of ML theory: It is a property of high-dimensional systems under loose objectives.

    ","path":["How Deep Is Too Deep?"],"tags":[]},{"location":"blog/2026-02-12-how-deep-is-too-deep/#where-depth-actually-pays-off","level":2,"title":"Where Depth Actually Pays Off","text":"

    This reframes the original question.

    You do not need depth because this is \"AI\".

    You need depth where failure modes propagate upward.

    I learned this building ctx: The agent failures I have spent the most time debugging were never about the model's architecture.

    They were about:

    • Misplaced trust: The model was confident. The output was wrong. Knowing when confidence and correctness diverge is not something you learn from a textbook. You learn it from watching patterns across hundreds of sessions.

    • Distribution shift: The model performed well on common patterns and fell apart on edge cases specific to this project. Recognizing that shift before it compounds requires understanding why generalization has limits, not just that it does.

    • Error accumulation: In a single prompt, model quirks are tolerable. In autonomous loops running overnight, they compound. A small bias in how the model interprets instructions becomes a large drift by iteration 20.

    • Scale hiding errors: The model's raw capability masked problems that only surfaced under specific conditions. More parameters did not fix the issue. They just made the failure mode rarer and harder to reproduce.

    This is the kind of depth that compounds. Not deriving backprop. But, understanding when correct math produces misleading intuition.

    ","path":["How Deep Is Too Deep?"],"tags":[]},{"location":"blog/2026-02-12-how-deep-is-too-deep/#the-connection-to-context-engineering","level":2,"title":"The Connection to Context Engineering","text":"

    This is the same pattern I keep finding at different altitudes.

    In \"The Attention Budget\", I wrote about how dumping everything into the context window degrades the model's focus. The fix was not a better model: It was better curation: load less, load the right things, preserve signal per token.

    In \"Skills That Fight the Platform\", I wrote about how custom instructions can conflict with the model's built-in behavior. The fix was not deeper ML knowledge: It was an understanding that the model already has judgment and that you should extend it, not override it.

    In \"You Can't Import Expertise\", I wrote about how generic templates fail because they do not encode project-specific knowledge. A consolidation skill with eight Rust-based analysis dimensions was mostly noise for a Go project. The fix was not a better template: It was growing expertise from this project's own history.

    In every case, the answer was not \"go deeper into ML\".

    The answer was knowing which abstraction was leaking and fixing it at the right layer.

    ","path":["How Deep Is Too Deep?"],"tags":[]},{"location":"blog/2026-02-12-how-deep-is-too-deep/#agentic-systems-are-not-an-ml-problem","level":2,"title":"Agentic Systems Are Not an ML Problem","text":"

    The mistake is assuming agent failures originate where the model was trained, rather than where it is deployed.

    Agentic AI is a systems problem under chaotic uncertainty:

    • Feedback loops between the agent and its environment;
    • Error accumulation across iterations;
    • Brittle representations that break outside training distribution;
    • Misplaced trust in outputs that look correct.

    In short-lived interactions, model quirks are tolerable. In long-running autonomous loops, however, they compound.

    That is where shallow understanding becomes expensive.

    But the understanding you need is not about optimizer internals.

    It is about:

    What Matters What Does Not (for Most Practitioners) Why gradient descent fails in specific regimes How to derive it from scratch When memorization masquerades as reasoning The formal definition of VC dimension Recognizing distribution shift before it compounds Hand-tuning learning rate schedules Predicting when scale hides errors instead of fixing them Chasing theoretical purity divorced from practice

    The depth that matters is diagnostic, not theoretical.

    ","path":["How Deep Is Too Deep?"],"tags":[]},{"location":"blog/2026-02-12-how-deep-is-too-deep/#the-real-answer","level":2,"title":"The Real Answer","text":"

    Not turtles all the way down.

    Go deep enough to:

    • Diagnose failures instead of cargo-culting fixes;
    • Reason about uncertainty instead of trusting confidence;
    • Design guardrails that align with model behavior, not hope.

    Stop before:

    • Hand-deriving gradients for the sake of it;
    • Obsessing over optimizer internals you will never touch;
    • Chasing theoretical purity divorced from the scale you actually operate at.

    This is not about mastering ML.

    It is about knowing which abstractions you can safely trust and which ones leak.

    Hint: Any useful abstraction almost certainly leaks.

    ","path":["How Deep Is Too Deep?"],"tags":[]},{"location":"blog/2026-02-12-how-deep-is-too-deep/#a-practical-litmus-test","level":2,"title":"A Practical Litmus Test","text":"

    If a failure occurs and your instinct is to:

    • Add more prompt text: abstraction leak above
    • Add retries or heuristics: error accumulation
    • Change the model: scale masking
    • Reach for ML theory: you are probably (but not always) going too deep

    The right depth is the shallowest layer where the failure becomes predictable.

    ","path":["How Deep Is Too Deep?"],"tags":[]},{"location":"blog/2026-02-12-how-deep-is-too-deep/#the-ctx-lesson","level":2,"title":"The ctx Lesson","text":"

    Every design decision in ctx is downstream of this principle.

    The attention budget exists because the model's internal attention mechanism has real limits: You do not need to understand the math of softmax to build around it. But you do need to understand that more context is not always better and that attention density degrades with scale.

    The skill system exists because the model's built-in behavior is already good: You do not need to understand RLHF to build effective skills. But you do need to understand that the model already has judgment and your skills should teach it things it does not know, not override how it thinks.

    Defense in depth exists because soft instructions are probabilistic: You do not need to understand the transformer architecture to know that a Markdown file is not a security boundary. But you do need to understand that the model follows instructions from context, and context can be poisoned.

    In each case, the useful depth was one or two layers below the abstraction I was working at: Not at the bottom of the stack.

    The boundary between useful understanding and academic exercise is where your failure modes live.

    ","path":["How Deep Is Too Deep?"],"tags":[]},{"location":"blog/2026-02-12-how-deep-is-too-deep/#closing-thought","level":2,"title":"Closing Thought","text":"

    Most modern AI systems do not fail because the math is wrong.

    They fail because we apply correct math in the wrong regime, then build autonomous systems on top of it.

    Understanding that boundary, not crossing it blindly, is where depth still compounds.

    And that is a far more useful form of expertise than memorizing another loss function.

    If You Remember One Thing from This Post...

    Go deep enough to diagnose your failures. Stop before you are solving problems that do not propagate to your layer.

    The abstractions below you are not sacred. But neither are they irrelevant.

    The useful depth is wherever your failure modes live. Usually one or two layers down, not at the bottom.

    This post started as a note about whether I should take an ML course. The answer turned out to be \"no, but understand why not\". The meta continues.

    ","path":["How Deep Is Too Deep?"],"tags":[]},{"location":"blog/2026-02-14-irc-as-context/","level":1,"title":"Before Context Windows, We Had Bouncers","text":"","path":["Before Context Windows, We Had Bouncers"],"tags":[]},{"location":"blog/2026-02-14-irc-as-context/#the-reset-problem","level":2,"title":"The Reset Problem","text":"

    IRC is stateless.

    • You disconnect, you vanish.
    • You reconnect, you begin again.

    No buffer.

    No memory.

    No continuity.

    Modern systems are not much different:

    • Close the browser tab.
      • Lose the Slack scrollback.
    • Open a new LLM session.
      • Start from zero.

    Resets externalize reconstruction cost onto humans.

    Reconstruction is tax: Tax becomes entropy.

    ","path":["Before Context Windows, We Had Bouncers"],"tags":[]},{"location":"blog/2026-02-14-irc-as-context/#stateless-protocol-stateful-life","level":2,"title":"Stateless Protocol, Stateful Life","text":"

    IRC is minimal:

    • A TCP connection.
    • A nickname.
    • A channel.
    • A stream of lines.

    When the connection drops, you literally disappear from the graph.

    The protocol is stateless; human systems are not.

    So you:

    • Reconnect;
    • Ask what you missed;
    • Scroll;
    • Reconstruct.

    The machine forgets; you pay.

    ","path":["Before Context Windows, We Had Bouncers"],"tags":[]},{"location":"blog/2026-02-14-irc-as-context/#the-bouncer-pattern","level":2,"title":"The Bouncer Pattern","text":"

    A bouncer is a daemon that remains connected when you do not:

    • It holds your seat;
    • It buffers what you missed;
    • It keeps your identity online.

    ZNC is one such bouncer.

    With ZNC:

    • Your client does not connect to IRC;
    • It connects to ZNC;
    • ZNC connects upstream.

    Client sessions become ephemeral.

    Presence becomes infrastructural.

    ZNC Is Tmux for IRC

    • Close your laptop.

      • ZNC remains.
    • Switch devices.

      • ZNC persists.

    This is not convenience; this is continuity.

    ","path":["Before Context Windows, We Had Bouncers"],"tags":[]},{"location":"blog/2026-02-14-irc-as-context/#presence-without-flapping","level":2,"title":"Presence without Flapping","text":"

    With a bouncer:

    • Closing your client does not emit PART.
    • Reopening does not emit JOIN.

    You do not flap in and out of existence.

    From the channel's perspective, you remain.

    From your perspective, history accumulates.

    • Buffers persist;
    • Identity persists;
    • Context persists.

    This pattern predates AI.

    ","path":["Before Context Windows, We Had Bouncers"],"tags":[]},{"location":"blog/2026-02-14-irc-as-context/#before-llm-context-windows","level":2,"title":"Before LLM Context Windows","text":"

    An LLM session without memory is IRC without a bouncer:

    • Close the window.
    • Start over.
    • Re-explain intent.
    • Rehydrate context.

    That is friction.

    This Walks and Talks like ctx

    Context engineering moves memory out of sessions and into infrastructure.

    • ZNC does this for IRC.
    • ctx does this for agents.

    Same principle:

    • Volatile interface.
    • Persistent substrate.

    Different fabric.

    ","path":["Before Context Windows, We Had Bouncers"],"tags":[]},{"location":"blog/2026-02-14-irc-as-context/#minimal-architecture","level":2,"title":"Minimal Architecture","text":"

    My setup is intentionally boring:

    • A $5 small VPS.
    • ZNC installed.
    • TLS enabled.
    • Firewall restricted.

    Then:

    • ZNC connects to Libera.Chat.
    • SASL authentication lives inside ZNC.
    • Buffers are stored on disk.

    My client connects to my VPS, not the network.

    The commands do not matter: The boundaries do:

    • Authentication in infrastructure, not in the client;
    • Memory server-side, not in scrollback;
    • Presence decoupled from activity.

    Everything else is configuration.

    ","path":["Before Context Windows, We Had Bouncers"],"tags":[]},{"location":"blog/2026-02-14-irc-as-context/#platform-memory","level":2,"title":"Platform Memory","text":"

    Yes, I know, it is 2026:

    • Discord stores history;
    • Slack stores history;
    • The dumpster fire on gasoline called X, too, stores history.

    HOWEVER, they own your substrate.

    Running a bouncer is quiet sovereignty:

    • Logs are mine.
    • Presence is continuous.
    • State does not reset because I closed a tab.

    Small acts compound.

    ","path":["Before Context Windows, We Had Bouncers"],"tags":[]},{"location":"blog/2026-02-14-irc-as-context/#signal-density","level":2,"title":"Signal Density","text":"

    Primitive systems select for builders.

    Consistent presence in small rooms compounds reputation.

    Quiet compounding outperforms viral spikes.

    ","path":["Before Context Windows, We Had Bouncers"],"tags":[]},{"location":"blog/2026-02-14-irc-as-context/#infrastructure-as-cognition","level":2,"title":"Infrastructure as Cognition","text":"

    ZNC is not interesting because it is retro; it is interesting because it models a principle:

    • Stateless protocols require stateful wrappers;
    • Volatile interfaces require durable memory;
    • Human systems require continuity.

    Distilled:

    Humans require context.

    Before context windows, we had bouncers.

    Before AI memory files, we had buffers.

    Continuity is not a feature; it is a design decision.

    ","path":["Before Context Windows, We Had Bouncers"],"tags":[]},{"location":"blog/2026-02-14-irc-as-context/#build-it","level":2,"title":"Build It","text":"

    If you want the actual setup (VPS, ZNC, TLS, SASL, firewall...) there is a step-by-step runbook:

    Persistent IRC Presence with ZNC.

    ","path":["Before Context Windows, We Had Bouncers"],"tags":[]},{"location":"blog/2026-02-14-irc-as-context/#motd","level":2,"title":"MOTD","text":"

    When my client connects to my bouncer, it prints:

    //   /    ctx:                         https://ctx.ist\n// ,'`./    do you remember?\n// `.,'\\\n//   \\    Copyright 2026-present Context contributors.\n//                 SPDX-License-Identifier: Apache-2.0\n

    See also: Context as Infrastructure -- the post that takes this observation to its conclusion: stateless protocols need stateful wrappers, and AI sessions need persistent filesystems.

    ","path":["Before Context Windows, We Had Bouncers"],"tags":[]},{"location":"blog/2026-02-14-parallel-agents-with-worktrees/","level":1,"title":"Parallel Agents with Git Worktrees","text":"","path":["Parallel Agents with Git Worktrees"],"tags":[]},{"location":"blog/2026-02-14-parallel-agents-with-worktrees/#the-backlog-problem","level":2,"title":"The Backlog Problem","text":"

    Jose Alekhinne / 2026-02-14

    What Do You Do with 30 Open Tasks?

    You could work through them one at a time.

    One agent, one branch, one commit stream.

    Or you could ask: which of these don't touch each other?

    I had 30 open tasks in TASKS.md. Some were docs. Some were a new encryption package. Some were test coverage for a stable module. Some were blog posts.

    They had almost zero file overlap.

    Running one agent at a time meant serial execution on work that was fundamentally parallel:

    I was bottlenecking on me, not on the machine.

    ","path":["Parallel Agents with Git Worktrees"],"tags":[]},{"location":"blog/2026-02-14-parallel-agents-with-worktrees/#the-insight-file-overlap-is-the-constraint","level":2,"title":"The Insight: File Overlap Is the Constraint","text":"

    This is not a scheduling problem: It's a conflict avoidance problem.

    Two agents can work simultaneously on the same codebase if and only if they don't touch the same files. The moment they do, you get merge conflicts: And merge conflicts on AI-generated code are expensive because the human has to arbitrate choices they didn't make.

    So the question becomes:

    \"Can you partition your backlog into non-overlapping tracks?\"

    For ctx, the answer was obvious:

    Track Touches Tasks work/docs docs/, hack/ Blog posts, recipes, runbooks work/pad internal/cli/pad/, specs Scratchpad encryption, CLI, tests work/tests internal/cli/recall/ Recall test coverage

    Three tracks. Near-zero overlap. Three agents.

    ","path":["Parallel Agents with Git Worktrees"],"tags":[]},{"location":"blog/2026-02-14-parallel-agents-with-worktrees/#git-worktrees-the-mechanism","level":2,"title":"Git Worktrees: The Mechanism","text":"

    git has a feature that most people don't use: worktrees.

    A worktree is a second (or third, or fourth) working directory that shares the same .git object database as your main checkout.

    Each worktree has its own branch, its own index, its own working tree. But they all share history, refs, and objects.

    git worktree add ../ctx-docs -b work/docs\ngit worktree add ../ctx-pad -b work/pad\ngit worktree add ../ctx-tests -b work/tests\n
    • Three directories;
    • Three branches;
    • One repository.

    This is cheaper than three clones. And because they share objects, git merge afterwards is fast: It's a local operation on shared data.

    ","path":["Parallel Agents with Git Worktrees"],"tags":[]},{"location":"blog/2026-02-14-parallel-agents-with-worktrees/#the-setup","level":2,"title":"The Setup","text":"

    The workflow I landed on:

    1. Group tasks by blast radius.

    Read TASKS.md. For each pending task, estimate which files and directories it touches. Group tasks that share files into the same track. Tasks with no overlap go into separate tracks.

    This is the part that requires human judgment:

    An agent can propose groupings, but you need to verify that the boundaries are real. A task that says \"update docs\" but actually touches Go code will poison a docs track.

    2. Create worktrees as sibling directories.

    Not subdirectories: Siblings.

    If your main checkout is at ~/WORKSPACE/ctx, worktrees go at ~/WORKSPACE/ctx-docs, ~/WORKSPACE/ctx-pad, etc.

    Why siblings? Because some tools (and some agents) walk up the directory tree looking for .git. A worktree inside the main checkout confuses them.

    3. Launch one agent per worktree.

    # Terminal 1\ncd ../ctx-docs && claude\n\n# Terminal 2\ncd ../ctx-pad && claude\n\n# Terminal 3\ncd ../ctx-tests && claude\n

    Each agent gets a full working copy with .context/ intact. It reads the same TASKS.md, the same DECISIONS.md, the same CONVENTIONS.md. It knows the full project state. It just works on a different slice.

    4. Do NOT run ctx init in worktrees.

    This is the gotcha. The .context/ directory is tracked in git. Running ctx init in a worktree would overwrite shared context files: Wiping decisions, learnings, and tasks that belong to the whole project.

    The worktree already has everything it needs. Leave it alone.

    ","path":["Parallel Agents with Git Worktrees"],"tags":[]},{"location":"blog/2026-02-14-parallel-agents-with-worktrees/#what-actually-happened","level":2,"title":"What Actually Happened","text":"

    I ran three agents for about 40 minutes. Here is roughly what each track produced:

    work/docs: Parallel worktrees recipe, blog post edits, recipe index reorganization, IRC recipe moved from docs/ to hack/.

    work/pad: ctx pad show subcommand, --append and --prepend flags on ctx pad edit, spec updates, 28 new test functions.

    work/tests: Recall test coverage, edge case tests.

    Merging took about five minutes. Two of the three merges were clean.

    The third had a conflict in TASKS.md:

    both the docs track and the pad track had marked different tasks as [x].

    ","path":["Parallel Agents with Git Worktrees"],"tags":[]},{"location":"blog/2026-02-14-parallel-agents-with-worktrees/#the-tasksmd-conflict","level":2,"title":"The TASKS.md Conflict","text":"

    This deserves its own section because it will happen every time.

    When two agents work in parallel, they both read TASKS.md at the start and mark tasks complete as they go. When you merge, git sees two branches that modified the same file differently.

    The resolution is always the same: accept all completions from both sides. No task should go from [x] back to [ ]. The merge is additive.

    This is one of those conflicts that sounds scary but is trivially mechanical: You are not arbitrating design decisions; you are combining two checklists.

    ","path":["Parallel Agents with Git Worktrees"],"tags":[]},{"location":"blog/2026-02-14-parallel-agents-with-worktrees/#limits","level":2,"title":"Limits","text":"

    3-4 worktrees, maximum.

    I tried four once: By the time I merged the third track, the fourth had drifted far enough that its changes needed rebasing.

    The merge complexity grows faster than the parallelism benefit.

    Three is the sweet spot:

    • Two is conservative but safe;
    • Four is possible if the tracks are truly independent;
    • Anything more than four, you are in the danger zone.

    Group by directory, not by priority.

    It is tempting to put all the high-priority tasks in one track: Don't.

    Two high-priority tasks that touch the same files must be in the same track, regardless of urgency. The constraint is file overlap, not importance.

    Commit frequently.

    Smaller commits make merge conflicts easier to resolve. An agent that writes 500 lines in a single commit is harder to merge than one that commits every logical step.

    Name tracks by concern.

    • work/docs and work/pad tell you what's happening;
    • work/track-1 and work/track-2 tell you nothing.
    ","path":["Parallel Agents with Git Worktrees"],"tags":[]},{"location":"blog/2026-02-14-parallel-agents-with-worktrees/#the-pattern","level":2,"title":"The Pattern","text":"

    This is the same pattern that shows up everywhere in ctx:

    The attention budget taught me that you can't dump everything into one context window. You have to partition, prioritize, and load selectively.

    Worktrees are the same principle applied to execution: You can't dump every task into one agent's workstream. You have to partition by blast radius, assign selectively, and merge deliberately.

    The codebase audit that generated these 30 tasks used eight parallel agents for analysis. Worktrees let me use parallel agents for implementation. Same coordination pattern, different artifact.

    And the IRC bouncer post from earlier today argued that stateless protocols need stateful wrappers. Worktrees are the same: git branches are stateless forks; .context/ is the stateful wrapper that gives each agent the project's full memory.

    ","path":["Parallel Agents with Git Worktrees"],"tags":[]},{"location":"blog/2026-02-14-parallel-agents-with-worktrees/#should-this-be-a-skill","level":2,"title":"Should This Be a Skill?","text":"

    I asked myself the same question I asked about the codebase audit: should this be a /ctx-worktree skill?

    This time the answer was a resounding \"yes\":

    Unlike the audit prompt (which I tweak every time and run every other week) the worktree workflow is:

    Criterion Worktree workflow Codebase audit Frequency Weekly Quarterly Stability Same steps every time Tweaked every time Scope Mechanical, bounded Bespoke, 8 agents Trigger Large backlog \"I feel like auditing\"

    The commands are mechanical: git worktree add, git worktree remove, branch naming, safety checks. This is exactly what skills are for: stable contracts for repetitive operations.

    Ergo, /ctx-worktree exists.

    It enforces the 4-worktree limit, creates sibling directories, uses work/ branch prefixes, and reminds you not to run ctx init in worktrees.

    ","path":["Parallel Agents with Git Worktrees"],"tags":[]},{"location":"blog/2026-02-14-parallel-agents-with-worktrees/#the-takeaway","level":2,"title":"The Takeaway","text":"

    Serial execution is the default. But serial is not always necessary.

    If your backlog partitions cleanly by file overlap, you can multiply your throughput with nothing more exotic than git worktree and a second terminal window.

    The hard part is not the git commands; it is the discipline:

    • Grouping by blast radius instead of priority;
    • Accepting that TASKS.md will conflict;
    • And knowing when three tracks is enough.

    If You Remember One Thing from This Post...

    Partition by blast radius, not by priority.

    Two tasks that touch the same files belong in the same track, no matter how important the other one is.

    The constraint is file overlap. Everything else is scheduling.

    The practical setup (skill invocation, worktree creation, merge workflow, and cleanup) lives in the recipe: Parallel Agent Development with Git Worktrees.

    ","path":["Parallel Agents with Git Worktrees"],"tags":[]},{"location":"blog/2026-02-15-ctx-v0.3.0-the-discipline-release/","level":1,"title":"ctx v0.3.0: The Discipline Release","text":"","path":["ctx v0.3.0: The Discipline Release"],"tags":[]},{"location":"blog/2026-02-15-ctx-v0.3.0-the-discipline-release/#when-the-ratio-of-polish-to-features-is-31-you-know-something-changed","level":2,"title":"When the Ratio of Polish to Features Is 3:1, You Know Something Changed","text":"

    Jose Alekhinne / February 15, 2026

    What Does a Release Look Like When Most of the Work Is Invisible?

    No new headline feature. No architectural pivot. No rewrite.

    Just 35+ documentation and quality commits against ~15 feature commits... and somehow, the tool feels like it grew up overnight.

    Six days separate v0.2.0 from v0.3.0.

    Measured by calendar time, it is nothing. Measured by what changed in how the project operates, it is the most significant release yet.

    • v0.1.0 was the prototype;
    • v0.2.0 was the archaeology release: making the past accessible;
    • v0.3.0 is the discipline release: the one that turned best practices into enforcement, suggestions into structure, and a collection of commands into a system of skills.

    The Release Window

    February 1‒February 7, 2026

    From the v0.2.0 tag to commit 2227f99.

    78 files changed in the migration commit alone.

    ","path":["ctx v0.3.0: The Discipline Release"],"tags":[]},{"location":"blog/2026-02-15-ctx-v0.3.0-the-discipline-release/#the-migration-commands-to-skills","level":2,"title":"The Migration: Commands to Skills","text":"

    The largest single change was the migration from .claude/commands/*.md to .claude/skills/*/SKILL.md.

    This was not a rename: It was a rethinking of how AI agents discover and execute project-specific workflows.

    Aspect Commands (before) Skills (after) Structure Flat files in one directory Directory-per-skill with SKILL.md Description Optional, often vague Required, doubles as activation trigger Quality gates None \"Before X-ing\" pre-flight checklist Negative triggers None \"When NOT to Use\" in every skill Examples Rare Good/bad pairs in every skill Average length ~15 lines ~80 lines

    The description field became the single most important line in each skill. In the old system, descriptions were titles. In the new system, they are activation conditions: The text the platform reads to decide whether to surface a skill for a given prompt.

    A description that says \"Show context summary\" activates too broadly or not at all. A description that says \"Show context summary. Use at session start or when unclear about current project state\" activates at the right moment.

    78 files changed. 1,915 insertions. Not because the skills got bloated; because they got specific.

    ","path":["ctx v0.3.0: The Discipline Release"],"tags":[]},{"location":"blog/2026-02-15-ctx-v0.3.0-the-discipline-release/#the-skill-sweep","level":2,"title":"The Skill Sweep","text":"

    After the structural migration, every skill was rewritten in a single session: All 21 of them.

    The rewrite was guided by a pattern that emerged during the process itself: a repeatable anatomy that effective skills share regardless of their purpose:

    1. Before X-ing: Pre-flight checks that prevent premature execution
    2. When to Use: Positive triggers that narrow activation
    3. When NOT to Use: Negative triggers that prevent misuse
    4. Usage Examples: Invocation patterns the agent can pattern-match
    5. Quality Checklist: Verification before claiming completion

    The Anatomy of a Skill That Works post covers the details. What matters for the release story is the result:

    • Zero skills with quality gates became twenty;
    • Zero skills with negative triggers became twenty;
    • Three skills with examples became twenty.

    The Skill Trilogy as Design Spec

    The three blog posts written during this window:

    • Skills That Fight the Platform,
    • You Can't Import Expertise,
    • and The Anatomy of a Skill That Works...

    ... were not retrospective documentation. They were written during the rewrite, and the lessons fed back into the skills as they were being built.

    • The blog was the design document.
    • The skills were the implementation.
    ","path":["ctx v0.3.0: The Discipline Release"],"tags":[]},{"location":"blog/2026-02-15-ctx-v0.3.0-the-discipline-release/#the-consolidation-sweep","level":2,"title":"The Consolidation Sweep","text":"

    The unglamorous work. The kind you only appreciate when you try to change something later and it just works.

    What Why It Matters Constants consolidation Magic strings replaced with semantic constants Variable deshadowing Eliminated subtle scoping bugs File splits Modules that were doing too much, broken apart Godoc standardization Every exported function documented to convention

    This is the work that doesn't get a changelog entry but makes every future commit easier. When a new contributor (human or AI) reads the codebase, they find consistent patterns instead of accumulated drift.

    The consolidation was not an afterthought. It was scheduled deliberately, with the same priority as features: The 3:1 ratio that emerged during v0.2.0 development became an explicit practice:

    • Three feature sessions;
    • One consolidation session.
    ","path":["ctx v0.3.0: The Discipline Release"],"tags":[]},{"location":"blog/2026-02-15-ctx-v0.3.0-the-discipline-release/#the-ear-framework","level":2,"title":"The E/A/R Framework","text":"

    On February 4th, we adopted the E/A/R classification as the official standard for evaluating skills:

    Category Meaning Target Expert Knowledge Claude does not have >70% Activation When/how to trigger ~20% Redundant What Claude already knows <10%

    This came from reviewing approximately 30 external skill files and discovering that most were redundant with Claude's built-in system prompt. Only about 20% had salvageable content, and even those yielded just a few heuristics each.

    The E/A/R framework gave us a concrete, testable criterion:

    A good skill is Expert knowledge minus what Claude already knows.

    If more than 10% of a skill restates platform defaults, it is creating noise, not signal.

    Every skill in v0.3.0 was evaluated against this framework. Several were deleted. The survivors are leaner and more focused.

    ","path":["ctx v0.3.0: The Discipline Release"],"tags":[]},{"location":"blog/2026-02-15-ctx-v0.3.0-the-discipline-release/#backup-and-monitoring-infrastructure","level":2,"title":"Backup and Monitoring Infrastructure","text":"

    A tool that manages your project's memory needs ops maturity.

    v0.3.0 added two pieces of infrastructure that reflect this:

    Backup staleness hook: A UserPromptSubmit hook that checks whether the last .context/ backup is more than two days old. If it is, and the SMB mount is available, it reminds the user. No cron job running when nobody is working. No redundant backups when nothing has changed.

    Context size checkpoint: A PreToolUse hook that estimates current context window usage and warns when the session is getting heavy. This hooks into the attention budget philosophy: Degradation is expected, but it should be visible.

    Both hooks use $CLAUDE_PROJECT_DIR instead of hardcoded paths, a migration triggered by a username rename that broke every absolute path in the hook configuration. That migration (replacing /home/user/... with \"$CLAUDE_PROJECT_DIR\"/.claude/hooks/...) was one of those changes that seems trivial but prevents an entire category of future failures.

    ","path":["ctx v0.3.0: The Discipline Release"],"tags":[]},{"location":"blog/2026-02-15-ctx-v0.3.0-the-discipline-release/#the-numbers","level":2,"title":"The Numbers","text":"Metric v0.2.0 v0.3.0 Skills (was \"commands\") 11 21 Skills with quality gates 0 21 Skills with \"When NOT to Use\" 0 21 Average skill body ~15 lines ~80 lines Hooks using $CLAUDE_PROJECT_DIR 0 All Documentation commits -- 35+ Feature/fix commits -- ~15

    That ratio (35+ documentation and quality commits to ~15 feature commits) is the defining characteristic of this release:

    • This release is not a failure to ship features.
    • It is the deliberate choice to make the existing features reliable.
    ","path":["ctx v0.3.0: The Discipline Release"],"tags":[]},{"location":"blog/2026-02-15-ctx-v0.3.0-the-discipline-release/#what-v030-means","level":2,"title":"What v0.3.0 Means","text":"

    v0.1.0 asked: \"Can we give AI persistent memory?\"

    v0.2.0 asked: \"Can we make that memory accessible to humans too?\"

    v0.3.0 asks a different question: \"Can we make the quality self-enforcing?\"

    The answer is not a feature: It is a practice:

    • Skills with quality gates enforce pre-flight checks.
    • Negative triggers prevent misuse without human intervention.
    • The E/A/R framework ensures skills contain signal, not noise.
    • Consolidation sessions are scheduled, not improvised.
    • Hook infrastructure makes degradation visible.

    Discipline is not the absence of velocity. It is the infrastructure that makes velocity sustainable.

    ","path":["ctx v0.3.0: The Discipline Release"],"tags":[]},{"location":"blog/2026-02-15-ctx-v0.3.0-the-discipline-release/#what-comes-next","level":2,"title":"What Comes Next","text":"

    The skill system is now mature enough to support real workflows without constant human correction. The hooks infrastructure is portable and resilient. The consolidation practice is documented and repeatable.

    The next chapter is about what you build on top of discipline:

    • Multi-agent coordination;
    • Deeper integration patterns;
    • And the question of whether context management is a tool concern or an infrastructure concern.

    But those are future posts.

    This one is about the release that proved polish is not the opposite of progress. It is what turns a prototype into a product.

    The Discipline Release

    v0.1.0 shipped features.

    v0.2.0 shipped archaeology.

    v0.3.0 shipped the habits that make everything else trustworthy.

    The most important code in this release is the code that prevents bad code from shipping.

    This post was drafted using /ctx-blog with access to the full git history between v0.2.0 and v0.3.0, decision logs, learning logs, and the session files from the skill rewrite window. The meta continues.

    ","path":["ctx v0.3.0: The Discipline Release"],"tags":[]},{"location":"blog/2026-02-15-eight-ways-a-hook-can-talk/","level":1,"title":"Eight Ways a Hook Can Talk","text":"","path":["Eight Ways a Hook Can Talk"],"tags":[]},{"location":"blog/2026-02-15-eight-ways-a-hook-can-talk/#when-your-warning-disappears","level":2,"title":"When Your Warning Disappears","text":"

    Jose Alekhinne / 2026-02-15

    I had a backup warning that nobody ever saw.

    The hook was correct: It detected stale backups, formatted a nice message, and output it as {\"systemMessage\": \"...\"}. The problem wasn't detection. The problem was delivery. The agent absorbed the information, processed it internally, and never told the user.

    Meanwhile, a different hook (the journal reminder) worked perfectly every time. Users saw the reminder, ran the commands, and the backlog stayed manageable. Same hook event (UserPromptSubmit), same project, completely different outcomes.

    The difference was one line:

    IMPORTANT: Relay this journal reminder to the user VERBATIM\nbefore answering their question.\n

    That explicit instruction is what makes VERBATIM relay a pattern, not just a formatting choice. And once I saw it as a pattern, I started seeing others.

    ","path":["Eight Ways a Hook Can Talk"],"tags":[]},{"location":"blog/2026-02-15-eight-ways-a-hook-can-talk/#the-audit","level":2,"title":"The Audit","text":"

    I looked at every hook in ctx: Eight shell scripts across three hook events. And I found five distinct output patterns already in use, plus three more that the existing hooks were reaching for but hadn't quite articulated.

    The patterns form a spectrum based on a single question:

    \"Who decides what the user sees?\"

    At one end, the hook decides everything (hard gate: the agent literally cannot proceed). At the other end, the hook is invisible (silent side-effect: nobody knows it ran). In between, there is a range of negotiation between hook, agent, and the user.

    Here's the full spectrum:

    ","path":["Eight Ways a Hook Can Talk"],"tags":[]},{"location":"blog/2026-02-15-eight-ways-a-hook-can-talk/#1-hard-gate","level":3,"title":"1. Hard Gate","text":"
    {\"decision\": \"block\", \"reason\": \"Use ctx from PATH, not ./ctx\"}\n

    The nuclear option: The agent's tool call is rejected before it executes.

    This is Claude Code's first-class PreToolUse mechanism: The hook returns JSON with decision: block and the agent gets an error with the reason.

    Use this for invariants: Constitution rules, security boundaries, things that must never happen. I use it to enforce PATH-based ctx invocation, block sudo, and require explicit approval for git push.

    ","path":["Eight Ways a Hook Can Talk"],"tags":[]},{"location":"blog/2026-02-15-eight-ways-a-hook-can-talk/#2-verbatim-relay","level":3,"title":"2. VERBATIM Relay","text":"
    IMPORTANT: Relay this warning to the user VERBATIM before answering.\n┌─ Journal Reminder ─────────────────────────────\n│ You have 12 sessions not yet imported.\n│   ctx recall import --all\n└────────────────────────────────────────────────\n

    The instruction is the pattern. Without \"Relay VERBATIM,\" agents tend to absorb information into their internal reasoning and never surface it. The explicit instruction changes the behavior from \"I know about this\" to \"I must tell the user about this.\"

    I use this for actionable reminders:

    • Unexported journal entries;
    • Stale backups;
    • Context capacity warnings...

    ...things the user should see regardless of what they asked.

    ","path":["Eight Ways a Hook Can Talk"],"tags":[]},{"location":"blog/2026-02-15-eight-ways-a-hook-can-talk/#3-agent-directive","level":3,"title":"3. Agent Directive","text":"
    ┌─ Persistence Checkpoint (prompt #25) ───────────\n│ No context files updated in 15+ prompts.\n│ Have you discovered learnings worth persisting?\n└──────────────────────────────────────────────────\n

    A nudge, not a command. The hook tells the agent something; the agent decides what (if anything) to tell the user. This is right for behavioral nudges: \"you haven't saved context in a while\" doesn't need to be relayed verbatim, but the agent should consider acting on it.

    ","path":["Eight Ways a Hook Can Talk"],"tags":[]},{"location":"blog/2026-02-15-eight-ways-a-hook-can-talk/#4-silent-context-injection","level":3,"title":"4. Silent Context Injection","text":"
    ctx agent --budget 4000 2>/dev/null || true\n

    Pure background enrichment. The agent's context window gets project information injected on every tool call, with no visible output. Neither the agent nor the user sees the hook fire, but the agent makes better decisions because of the context.

    ","path":["Eight Ways a Hook Can Talk"],"tags":[]},{"location":"blog/2026-02-15-eight-ways-a-hook-can-talk/#5-silent-side-effect","level":3,"title":"5. Silent Side-Effect","text":"
    find \"$CTX_TMPDIR\" -type f -mtime +15 -delete\n

    Do work, say nothing. Temp file cleanup on session end. Logging. Marker file management. The action is the entire point; no one needs to know.

    ","path":["Eight Ways a Hook Can Talk"],"tags":[]},{"location":"blog/2026-02-15-eight-ways-a-hook-can-talk/#the-patterns-we-dont-have-yet","level":2,"title":"The Patterns We Don't Have Yet","text":"

    Three more patterns emerged from the gaps in the existing hooks.

    Conditional relay: \"Relay this, but only if the user's question is about X.\" This pattern avoids noise when the warning isn't relevant. It's more fragile (depends on agent judgment) but less annoying.

    Suggested action: \"Here's a problem, and here's the exact command to fix it. Ask the user before running it.\" This pattern goes beyond a nudge by giving the agent a concrete proposal, but still requires human approval.

    Escalating severity: INFO gets absorbed silently. WARN gets mentioned at the next natural pause. CRITICAL gets the VERBATIM treatment. This pattern introduces a protocol for hooks that produce output at different urgency levels, so they don't all compete for the user's attention.

    ","path":["Eight Ways a Hook Can Talk"],"tags":[]},{"location":"blog/2026-02-15-eight-ways-a-hook-can-talk/#the-principle","level":2,"title":"The Principle","text":"

    Hooks are the boundary between your environment and the agent's reasoning.

    A hook that detects a problem but can't communicate it effectively is the same as no hook at all.

    The format of your output is a design decision with real consequences:

    • Use a hard gate and the agent can't proceed (good for invariants, frustrating for false positives)
    • Use VERBATIM relay and the user will see it (good for reminders, noisy if overused)
    • Use an agent directive and the agent might act (good for nudges, unreliable for critical warnings)
    • Use silent injection and nobody knows (good for enrichment, invisible when it breaks)

    Choose deliberately. And, when in doubt, write the word VERBATIM.

    The full pattern catalog with decision flowchart and implementation examples is in the Hook Output Patterns recipe.

    ","path":["Eight Ways a Hook Can Talk"],"tags":[]},{"location":"blog/2026-02-15-why-zensical/","level":1,"title":"Version Numbers Are Lagging Indicators","text":"","path":["Version Numbers Are Lagging Indicators"],"tags":[]},{"location":"blog/2026-02-15-why-zensical/#why-ctxs-journal-site-runs-on-a-v0021-tool","level":2,"title":"Why ctx's Journal Site Runs on a v0.0.21 Tool","text":"

    Jose Alekhinne / 2026-02-15

    Would You Ship Production Infrastructure on a v0.0.21 Dependency?

    Most engineers wouldn't. Version numbers signal maturity. Pre-1.0 means unstable API, missing features, risk.

    But version numbers tell you where a project has been. They say nothing about where it's going.

    I just bet ctx's entire journal site on a tool that hasn't hit v0.1.0.

    Here's why I'd do it again.

    ","path":["Version Numbers Are Lagging Indicators"],"tags":[]},{"location":"blog/2026-02-15-why-zensical/#the-problem","level":2,"title":"The Problem","text":"

    When v0.2.0 shipped the journal system, the pipeline was clear:

    • Export sessions to Markdown;
    • Enrich them with YAML frontmatter;
    • And render them into something browsable.

    The first two steps were solved; the third needed a tool.

    The journal entries are standard Markdown with YAML frontmatter, tables, and fenced code blocks. That is the entire format:

    • No JSX;
    • No shortcodes;
    • No custom templating.

    Just Markdown rendered well.

    The requirements are modest:

    • Read a configuration file (such as mkdocs.yml);
    • Render Markdown with extensions (admonitions, tabs, tables);
    • Search;
    • Handle 100+ files without choking on incremental rebuilds;
    • Look good out of the box;
    • Not lock me in.

    The obvious candidates were as follows:

    Tool Language Strengths Pain Points Hugo Go Blazing fast, mature Templating is painful; Go templates fight you on anything non-trivial Astro JS/TS Modern, flexible JS ecosystem overhead; overkill for a docs site MkDocs + Material Python Beautiful defaults, massive community (22k+ stars) Slow incremental rebuilds on large sites; limited extensibility model Zensical Python Built to fix MkDocs' limits; 4-5x faster rebuilds v0.0.21; module system not yet shipped

    The instinct was Hugo. Same language as ctx. Fast. Well-established.

    But instinct is not analysis. I picked the one with the lowest version number.

    ","path":["Version Numbers Are Lagging Indicators"],"tags":[]},{"location":"blog/2026-02-15-why-zensical/#the-evaluation","level":2,"title":"The Evaluation","text":"

    Here is what I actually evaluated, in order:

    ","path":["Version Numbers Are Lagging Indicators"],"tags":[]},{"location":"blog/2026-02-15-why-zensical/#1-the-team","level":3,"title":"1. The Team","text":"

    Zensical is built by squidfunk: The same person behind Material for MkDocs, the most popular MkDocs theme with 22,000+ stars. It powers documentation sites for projects across every language and framework.

    • This is not someone learning how to build static site generators.
    • This is someone who spent years understanding exactly where MkDocs breaks and decided to fix it from the ground up.

    They did not build Zensical because MkDocs was bad: They built it because MkDocs hit a ceiling:

    • Incremental rebuilds: 4-5x faster during serve. When you have hundreds of journal entries and you edit one, the difference between \"rebuild everything\" and \"rebuild this page\" is the difference between a usable workflow and a frustrating one.

    • Large site performance: Specifically designed for tens of thousands of pages. The journal grows with every session. A tool that slows down as content accumulates is a tool you will eventually replace.

    A proven team starting fresh is more predictable than an unproven team at v3.0.

    ","path":["Version Numbers Are Lagging Indicators"],"tags":[]},{"location":"blog/2026-02-15-why-zensical/#2-the-architecture","level":3,"title":"2. The Architecture","text":"

    Zensical is investing in a Rust-based Markdown parser with CommonMark support. That signals something about the team's priorities:

    Performance foundations first; features second.

    ctx's journal will grow:

    • Every exported session adds files.
    • Every enrichment pass adds metadata.

    Choosing a tool that gets slower as you add content means choosing to migrate later.

    Choosing one built for scale means the decision holds.

    ","path":["Version Numbers Are Lagging Indicators"],"tags":[]},{"location":"blog/2026-02-15-why-zensical/#3-the-migration-path","level":3,"title":"3. The Migration Path","text":"

    Zensical reads mkdocs.yml natively. If it doesn't work out, I can move back to MkDocs + Material with zero content changes:

    • The Markdown is standard;
    • The frontmatter is standard;
    • The configuration is compatible.

    This is the infrastructure pattern again: The same way ZNC decouples presence from the client, Zensical decouples rendering from the generator:

    • The Markdown is yours.
    • The frontmatter is standard YAML.
    • The configuration is MkDocs-compatible.

    You are not locked into anything except your own content.

    No lock-in is not a feature: It's a design philosophy:

    It's the same reason ctx uses plain Markdown files in .context/ instead of a database: the format should outlive the tool.

    Lock-in Is the Real Risk, Not Version Numbers

    A mature tool with a proprietary format is riskier than a young tool with a standard one. Version numbers measure time invested. Portability measures respect for the user.

    ","path":["Version Numbers Are Lagging Indicators"],"tags":[]},{"location":"blog/2026-02-15-why-zensical/#4-the-dependency-tree","level":3,"title":"4. The Dependency Tree","text":"

    Here is what pip install zensical actually pulls in:

    • click
    • Markdown
    • Pygments
    • pymdown-extensions
    • PyYAML

    Only five dependencies. All well-known. No framework bloat. No bundler. No transpiler. No node_modules black hole.

    3k GitHub stars at v0.0.21 is strong early traction for a pre-1.0 project.

    The dependency tree is thin: No bloat.

    ","path":["Version Numbers Are Lagging Indicators"],"tags":[]},{"location":"blog/2026-02-15-why-zensical/#5-the-fit","level":3,"title":"5. The Fit","text":"

    This is the same principle behind the attention budget: do not overfit the tool to hypothetical requirements. The right amount of capability is the minimum needed for the current task.

    Hugo is a powerful static site generator. It is also a powerful templating engine, a powerful asset pipeline, and a powerful taxonomy system. For rendering Markdown journals, that power is overhead:

    It is the complexity you pay for but never use.

    ctx's journal files are standard Markdown with YAML frontmatter, tables, and fenced code blocks. That is exactly the sweet spot Zensical inherits from Material for MkDocs:

    • No custom plugins needed;
    • No special syntax;
    • No templating gymnastics.

    The requirements match the capabilities: Not the capabilities that are promised, but the ones that exist today, at v0.0.21.

    ","path":["Version Numbers Are Lagging Indicators"],"tags":[]},{"location":"blog/2026-02-15-why-zensical/#the-caveat","level":2,"title":"The Caveat","text":"

    It would be dishonest not to mention what's missing.

    The module system for third-party extensions opens in early 2026.

    If ctx ever needs custom plugins (for example, auto-linking session IDs, rendering special journal metadata, etc.) that infrastructure isn't there yet.

    The installation experience is rough:

    We discovered this firsthand: pip install zensical often fails on macOS (system Python stubs, Homebrew's PEP 668 restrictions). The answer is pipx, which creates an isolated environment with the correct Python version automatically.

    That kind of friction is typical for young Python tooling, and it is documented in the Getting Started guide.

    And 3,000 stars at v0.0.21 is strong early traction, but it's still early: The community is small. When something breaks, you're reading source code, not documentation.

    These are real costs. I chose to pay them because the alternative costs are higher.

    For example:

    • Hugo's templating pain would cost me time on every site change.
    • Astro's JS ecosystem would add complexity I don't need.
    • MkDocs would work today but hit scaling walls tomorrow.

    Zensical's costs are front-loaded and shrinking.

    The others compound.

    ","path":["Version Numbers Are Lagging Indicators"],"tags":[]},{"location":"blog/2026-02-15-why-zensical/#the-evaluation-framework","level":2,"title":"The Evaluation Framework","text":"

    For anyone facing a similar choice, here is the framework that emerged:

    Signal What It Tells You Weight Team track record Whether the architecture will be sound High Migration path Whether you can leave if wrong High Current fit Whether it solves your problem today High Dependency tree How much complexity you're inheriting Medium Version number How long the project has existed Low Star count Community interest (not quality) Low Feature list What's possible (not what you need) Low

    The bottom three are the metrics most engineers optimize for.

    The top four are the ones that predict whether you'll still be happy with the choice in a year.

    Features You Don't Need Are Not Free

    Every feature in a dependency is code you inherit but don't control.

    A tool with 200 features where you use 5 means 195 features worth of surface area for bugs, breaking changes, and security issues that have nothing to do with your use case.

    Fit is the inverse of feature count.

    ","path":["Version Numbers Are Lagging Indicators"],"tags":[]},{"location":"blog/2026-02-15-why-zensical/#the-broader-pattern","level":2,"title":"The Broader Pattern","text":"

    This is part of a theme I keep encountering in this project:

    Leading indicators beat lagging indicators.

    Domain Lagging Indicator Leading Indicator Tooling Version number, star count Team track record, architecture Code quality Test coverage percentage Whether tests catch real bugs Context persistence Number of files in .context/ Whether the AI makes fewer mistakes Skills Number of skills created Whether each skill fires at the right time Consolidation Lines of code refactored Whether drift stops accumulating

    Version numbers, star counts, coverage percentages, file counts...

    ...these are all measures of effort expended.

    They say nothing about value delivered.

    The question is never \"how mature is this tool?\"

    The question is \"does this tool's trajectory intersect with my needs?\"

    Zensical's trajectory:

    • A proven team fixing known problems,
    • in a proven architecture,
    • with a standard format,
    • and no lock-in.

    ctx's needs:

    Render standard Markdown into a browsable site, at scale, without complexity.

    The intersection is clean; the version number is noise.

    This is the same kind of decision that shows up throughout ctx:

    • Skills that fight the platform taught that the best integration extends existing behavior, not replaces it.
    • You can't import expertise taught that tools should grow from your project's actual needs, not from feature checklists.
    • Context as infrastructure argues that the format should outlive the tool; and, zensical honors that principle by reading standard Markdown and standard MkDocs configuration.

    If You Remember One Thing from This Post...

    Version numbers measure where a project has been.

    The team and the architecture tell you where it's going.

    A v0.0.21 tool built by the right team on the right foundations is a safer bet than a v5.0 tool that doesn't fit your problem.

    Bet on trajectories, not timestamps.

    This post started as an evaluation note in ideas/ and a separate decision log. The analysis held up. The two merged into one. The meta continues.

    ","path":["Version Numbers Are Lagging Indicators"],"tags":[]},{"location":"blog/2026-02-16-ctx-v0.6.0-the-integration-release/","level":1,"title":"ctx v0.6.0: The Integration Release","text":"","path":["ctx v0.6.0: The Integration Release"],"tags":[]},{"location":"blog/2026-02-16-ctx-v0.6.0-the-integration-release/#two-commands-to-persistent-memory","level":2,"title":"Two Commands to Persistent Memory","text":"

    Jose Alekhinne / February 16, 2026

    What Changed?

    ctx is now a Claude Code plugin. Two commands, no build step:

    /plugin marketplace add ActiveMemory/ctx\n/plugin install ctx@activememory-ctx\n

    Six hooks. Twenty-five skills. Installed.

    For three releases, ctx required assembly:

    • Clone the repo;
    • Build the binary;
    • Copy hook scripts into .claude/hooks/;
    • Symlink skill files.
    • Understand which shell scripts called which Go commands;
    • Hope nothing broke when Claude Code updated its hook format.

    v0.6.0 ends that era: ctx ships as a Claude Marketplace plugin:

    Hooks and skills served directly from source, installed with a single command, updated by pulling the repo. The tool that gives AI persistent memory is now as easy to install as the AI itself.

    But the plugin conversion was not just a packaging change: It was the forcing function that rewrote every shell hook in Go, eliminated the jq dependency, enabled go test coverage for hook logic, and made distribution a solved problem.

    When you fix how something ships, you end up fixing how it is built.

    The Release Window

    February 15-February 16, 2026

    From the v0.3.0 tag to commit a3178bc:

    • 109 commits.
    • 334 files changed.
    • Version jumped from 0.3.0 to 0.6.0 to signal the magnitude.
    ","path":["ctx v0.6.0: The Integration Release"],"tags":[]},{"location":"blog/2026-02-16-ctx-v0.6.0-the-integration-release/#before-six-shell-scripts-and-a-prayer","level":2,"title":"Before: Six Shell Scripts and a Prayer","text":"

    v0.3.0 had six hook scripts. Each was a Bash file that shelled out to ctx subcommands, parsed JSON with jq, and wired itself into Claude Code's hook system via .claude/hooks/:

    .claude/hooks/\n├── check-context-size.sh\n├── check-persistence.sh\n├── check-journal.sh\n├── post-commit.sh\n├── block-non-path-ctx.sh\n└── cleanup-tmp.sh\n

    This worked, but it also meant:

    • jq was a hard dependency: No jq, no hooks. macOS ships without it.
    • No test coverage: Shell scripts were tested manually or not at all.
    • Fragile deployment: ctx init had to scaffold .claude/hooks/ and .claude/skills/ with the right paths, permissions, and structure.
    • Version drift: Users who installed once never got hook updates unless they re-ran ctx init.

    The shell scripts were the right choice for prototyping. They were the wrong choice for distribution.

    ","path":["ctx v0.6.0: The Integration Release"],"tags":[]},{"location":"blog/2026-02-16-ctx-v0.6.0-the-integration-release/#after-one-plugin-zero-shell-scripts","level":2,"title":"After: One Plugin, Zero Shell Scripts","text":"

    v0.6.0 replaces all six scripts with ctx system subcommands compiled into the binary:

    Shell Script Go Subcommand check-context-size.shctx system check-context-sizecheck-persistence.shctx system check-persistencecheck-journal.shctx system check-journalpost-commit.shctx system post-commitblock-non-path-ctx.shctx system block-non-path-ctxcleanup-tmp.shctx system cleanup-tmp

    The plugin's hooks.json wires them to Claude Code events:

    {\n  \"PreToolUse\": [\n    {\"matcher\": \"Bash\", \"command\": \"ctx system block-non-path-ctx\"},\n    {\"matcher\": \".*\", \"command\": \"ctx agent --budget 4000\"}\n  ],\n  \"PostToolUse\": [\n    {\"matcher\": \"Bash\", \"command\": \"ctx system post-commit\"}\n  ],\n  \"UserPromptSubmit\": [\n    {\"command\": \"ctx system check-context-size\"},\n    {\"command\": \"ctx system check-persistence\"},\n    {\"command\": \"ctx system check-journal\"}\n  ],\n  \"SessionEnd\": [\n    {\"command\": \"ctx system cleanup-tmp\"}\n  ]\n}\n

    No jq. No shell scripts. No .claude/hooks/ directory to manage.

    The hooks are Go functions with tests, compiled into the same binary you already have.

    ","path":["ctx v0.6.0: The Integration Release"],"tags":[]},{"location":"blog/2026-02-16-ctx-v0.6.0-the-integration-release/#the-plugin-model","level":2,"title":"The Plugin Model","text":"

    The ctx plugin lives at .claude-plugin/marketplace.json in the repo.

    Claude Code's marketplace system handles discovery and installation:

    Skills are served directly from internal/assets/claude/skills/; there is no build step, no make plugin, no generated artifacts.

    This means:

    1. Install is two commands: Not \"clone, build, copy, configure.\"
    2. Updates are automatic: Pull the repo; the plugin reads from source.
    3. Skills and hooks are versioned together: No drift between what the CLI expects and what the plugin provides.
    4. ctx init is tool-agnostic: It creates .context/ and nothing else. No .claude/ scaffolding, no assumptions about which AI tool you use.

    That last point matters:

    Before v0.6.0, ctx init tried to set up Claude Code integration as part of initialization. That coupled the context system to a specific tool.

    Now, ctx init gives you persistent context. The plugin gives you Claude Code integration. They compose; they don't depend.

    ","path":["ctx v0.6.0: The Integration Release"],"tags":[]},{"location":"blog/2026-02-16-ctx-v0.6.0-the-integration-release/#beyond-the-plugin-what-else-shipped","level":2,"title":"Beyond the Plugin: What Else Shipped","text":"

    The plugin conversion dominated the release, but 109 commits covered more ground.

    ","path":["ctx v0.6.0: The Integration Release"],"tags":[]},{"location":"blog/2026-02-16-ctx-v0.6.0-the-integration-release/#obsidian-vault-export","level":3,"title":"Obsidian Vault Export","text":"
    ctx journal obsidian\n

    Generates a full Obsidian vault from enriched journal entries: wikilinks, MOC (Map of Content) pages, and graph-optimized cross-linking. If you already use Obsidian for notes, your AI session history now lives alongside everything else.

    ","path":["ctx v0.6.0: The Integration Release"],"tags":[]},{"location":"blog/2026-02-16-ctx-v0.6.0-the-integration-release/#encrypted-scratchpad","level":3,"title":"Encrypted Scratchpad","text":"
    ctx pad edit \"DATABASE_URL=postgres://...\"\nctx pad show\n

    AES-256-GCM encrypted storage for sensitive one-liners.

    The encrypted blob commits to git; the key stays in .gitignore.

    This is useful for connection strings, API keys, and other values that need to travel with the project without appearing in plaintext.

    ","path":["ctx v0.6.0: The Integration Release"],"tags":[]},{"location":"blog/2026-02-16-ctx-v0.6.0-the-integration-release/#security-hardening","level":3,"title":"Security Hardening","text":"

    Three medium-severity findings from a security audit are now closed:

    Finding Fix Path traversal via --context-dir Boundary validation: operations cannot escape project root (M-1) Symlink following in .context/Lstat() check before every file read/write (M-2) Predictable temp file paths User-specific temp directory under $XDG_RUNTIME_DIR (M-3)

    Plus a new /sanitize-permissions skill that audits settings.local.json for overly broad Bash permissions.

    ","path":["ctx v0.6.0: The Integration Release"],"tags":[]},{"location":"blog/2026-02-16-ctx-v0.6.0-the-integration-release/#hooks-that-know-when-to-be-quiet","level":3,"title":"Hooks That Know When to Be Quiet","text":"

    A subtle but important fix: hooks now no-op before ctx init has run.

    Previously, a fresh clone with no .context/ would trigger hook errors on every prompt. Now, hooks detect the absence of a context directory and exit silently. Similarly, ctx init treats a .context/ directory containing only logs as uninitialized and skips the --overwrite prompt.

    Small changes. Large reduction in friction for new users.

    ","path":["ctx v0.6.0: The Integration Release"],"tags":[]},{"location":"blog/2026-02-16-ctx-v0.6.0-the-integration-release/#the-numbers","level":2,"title":"The Numbers","text":"Metric v0.3.0 v0.6.0 Skills 21 25 Shell hook scripts 6 0 Go system subcommands 0 6 External dependencies (hooks) jq, bash none Lines of Go ~14,000 ~37,000 Plugin install commands n/a 2 Security findings (open) 3 0 ctx init creates .claude/ yes no

    The line count tripled. Most of that is documentation site HTML, Obsidian export logic, and the scratchpad encryption module.

    The core CLI grew modestly; the ecosystem around it grew substantially.

    ","path":["ctx v0.6.0: The Integration Release"],"tags":[]},{"location":"blog/2026-02-16-ctx-v0.6.0-the-integration-release/#what-does-v060-mean-for-ctx","level":2,"title":"What Does v0.6.0 Mean for ctx?","text":"
    • v0.1.0 asked: \"Can we give AI persistent memory?\"
    • v0.2.0 asked: \"Can we make that memory accessible to humans too?\"
    • v0.3.0 asked: \"Can we make the quality self-enforcing?\"

    v0.6.0 asks: \"Can someone else actually use this?\"

    A tool that requires cloning a repo, building from source, and manually wiring hooks into the right directories is a tool for its author.

    A tool that installs with two commands from a marketplace is a tool for everyone.

    The version jumped from 0.3.0 to 0.6.0 because the delta is not incremental: The shell-to-Go rewrite, the plugin model, the security hardening, and the tool-agnostic init: Together, they change what ctx is: Not a different tool, but a tool that is finally ready to leave the workshop.

    ","path":["ctx v0.6.0: The Integration Release"],"tags":[]},{"location":"blog/2026-02-16-ctx-v0.6.0-the-integration-release/#what-comes-next","level":2,"title":"What Comes Next","text":"

    The plugin model opens the door to distribution patterns that were not possible before. Marketplace discovery means new users find ctx without reading a README. Plugin updates mean existing users get improvements without rebuilding.

    The next chapter is about what happens when persistent context is easy to install: Adoption patterns, multi-project workflows, and whether the .context/ convention can become infrastructure that other tools build on.

    But those are future posts.

    This one is about the release that turned a developer tool into a distributable product: two commands, zero shell scripts, and a presence on the Claude Marketplace.

    The Integration Release

    v0.1.0 shipped features. v0.2.0 shipped archaeology.

    v0.3.0 shipped discipline. v0.6.0 shipped the front door.

    The most important code in this release is the code you never have to copy.

    This post was drafted using /ctx-blog-changelog with access to the full git history between v0.3.0 and v0.6.0, release notes, and the plugin conversion PR. The meta continues.

    ","path":["ctx v0.6.0: The Integration Release"],"tags":[]},{"location":"blog/2026-02-17-code-is-cheap-judgment-is-not/","level":1,"title":"Code Is Cheap. Judgment Is Not.","text":"","path":["Code Is Cheap. Judgment Is Not."],"tags":[]},{"location":"blog/2026-02-17-code-is-cheap-judgment-is-not/#why-ai-replaces-effort-not-expertise","level":2,"title":"Why AI Replaces Effort, Not Expertise","text":"

    Jose Alekhinne / February 17, 2026

    Are You Worried about AI Taking Your Job?

    You might be confusing the thing that's cheap with the thing that's valuable.

    I keep seeing the same conversation: Engineers, designers, writers: all asking the same question with the same dread:

    \"What happens when AI can do what I do?\"

    The question is wrong:

    • AI does not replace workers;
    • AI replaces unstructured effort.

    The distinction matters, and everything I have learned building ctx reinforces it.

    ","path":["Code Is Cheap. Judgment Is Not."],"tags":[]},{"location":"blog/2026-02-17-code-is-cheap-judgment-is-not/#the-three-confusions","level":2,"title":"The Three Confusions","text":"

    People who feel doomed by AI usually confuse three things:

    People confuse... With... Effort Value Typing Thinking Production Judgment
    • Effort is time spent.
    • Value is the outcome that time produces.

    They are not the same; they never were.

    AI just makes the gap impossible to ignore.

    Typing is mechanical: Thinking is directional.

    An AI can type faster than any human. Yet, it cannot decide what to type without someone framing the problem, sequencing the work, and evaluating the result.

    Production is making artifacts. Judgment is knowing:

    • which artifacts to make,
    • in what order,
    • to what standard,
    • and when to stop.

    AI floods the system with production capacity; it does not flood the system with judgment.

    ","path":["Code Is Cheap. Judgment Is Not."],"tags":[]},{"location":"blog/2026-02-17-code-is-cheap-judgment-is-not/#code-is-nothing","level":2,"title":"Code Is Nothing","text":"

    This sounds provocative until you internalize it:

    Code is cheap. Artifacts are cheap.

    An AI can generate a thousand lines of working code in literal minutes:

    It can scaffold a project, write tests, build a CI pipeline, draft documentation. The raw production of software artifacts is no longer the bottleneck.

    So, what is not cheap?

    • Taste: knowing what belongs and what does not
    • Framing: turning a vague goal into a concrete problem
    • Sequencing: deciding what to build first and why
    • Fanning out: breaking work into parallel streams that converge
    • Acceptance criteria: defining what \"done\" looks like before starting
    • Judgment: the thousand small decisions that separate code that works from code that lasts

    These are the skills that direct production: Human skills.

    Not because AI is incapable of learning them, but because they require something AI does not have:

    temporal accountability for generated outcomes.

    That is, you cannot keep AI accountable for the $#!% it generated three months ago. A human, on the other hand, will always be accountable.

    ","path":["Code Is Cheap. Judgment Is Not."],"tags":[]},{"location":"blog/2026-02-17-code-is-cheap-judgment-is-not/#the-evidence-from-building-ctx","level":2,"title":"The Evidence from Building ctx","text":"

    I did not arrive at this conclusion theoretically.

    I arrived at it by building a tool with an AI agent for three weeks and watching exactly where a human touch mattered.

    ","path":["Code Is Cheap. Judgment Is Not."],"tags":[]},{"location":"blog/2026-02-17-code-is-cheap-judgment-is-not/#yolo-mode-proved-production-is-cheap","level":3,"title":"YOLO Mode Proved Production Is Cheap","text":"

    In Building ctx Using ctx, I documented the YOLO phase: auto-accept everything, let the AI ship features at full speed. It produced 14 commands in a week. Impressive output.

    The code worked. The architecture drifted. Magic strings accumulated. Conventions diverged. The AI was producing at a pace no human could match, and every artifact it produced was a small bet that nobody was evaluating.

    Production without judgment is not velocity. It is debt accumulation at breakneck speed.

    ","path":["Code Is Cheap. Judgment Is Not."],"tags":[]},{"location":"blog/2026-02-17-code-is-cheap-judgment-is-not/#the-31-ratio-proved-judgment-has-a-cadence","level":3,"title":"The 3:1 Ratio Proved Judgment Has a Cadence","text":"

    In The 3:1 Ratio, the git history told the story:

    Three sessions of forward momentum followed by one session of deliberate consolidation. The consolidation session is where the human applies judgment: reviewing what the AI built, catching drift, realigning conventions.

    The AI does the refactoring. The human decides what to refactor and when to stop.

    Without the human, the AI will refactor forever, improving things that do not matter and missing things that do.

    ","path":["Code Is Cheap. Judgment Is Not."],"tags":[]},{"location":"blog/2026-02-17-code-is-cheap-judgment-is-not/#the-attention-budget-proved-framing-is-scarce","level":3,"title":"The Attention Budget Proved Framing Is Scarce","text":"

    In The Attention Budget, I explained why more context makes AI worse, not better. Every token competes for attention: Dump everything in and the AI sees nothing clearly.

    This is a framing problem: The human's job is to decide what the AI should focus on: what to include, what to exclude, what to emphasize.

    ctx agent --budget 4000 is not just a CLI flag: It is a forcing function for human judgment about relevance.

    The AI processes. The human curates.

    ","path":["Code Is Cheap. Judgment Is Not."],"tags":[]},{"location":"blog/2026-02-17-code-is-cheap-judgment-is-not/#skills-design-proved-taste-is-load-bearing","level":3,"title":"Skills Design Proved Taste Is Load-Bearing","text":"

    The skill trilogy (You Can't Import Expertise, The Anatomy of a Skill That Works) showed that the difference between a useful skill and a useless one is not craftsmanship:

    It is taste.

    A well-crafted skill with the wrong focus is worse than no skill at all: It consumes the attention budget with generic advice while the project-specific problems go unchecked.

    The E/A/R framework (Expert, Activation, Redundant) is a judgment tool: The AI cannot apply it to itself. The human evaluates what the AI already knows, what it needs to be told, and what is noise.

    ","path":["Code Is Cheap. Judgment Is Not."],"tags":[]},{"location":"blog/2026-02-17-code-is-cheap-judgment-is-not/#automation-discipline-proved-restraint-is-a-skill","level":3,"title":"Automation Discipline Proved Restraint Is a Skill","text":"

    In Not Everything Is a Skill, the lesson was that the urge to automate is not the need to automate. A useful prompt does not automatically deserve to become a slash command.

    The human applies judgment about frequency, stability, and attention cost.

    The AI can build the skill. Only the human can decide whether it should exist.

    ","path":["Code Is Cheap. Judgment Is Not."],"tags":[]},{"location":"blog/2026-02-17-code-is-cheap-judgment-is-not/#defense-in-depth-proved-boundaries-require-judgment","level":3,"title":"Defense in Depth Proved Boundaries Require Judgment","text":"

    In Defense in Depth, the entire security model for unattended AI agents came down to: markdown is not a security boundary. Telling an AI \"don't do bad things\" is production (of instructions). Setting up an unprivileged user in a network-isolated container is judgment (about risk).

    The AI follows instructions. The human decides which instructions are enforceable and which are \"wishful thinking\".

    ","path":["Code Is Cheap. Judgment Is Not."],"tags":[]},{"location":"blog/2026-02-17-code-is-cheap-judgment-is-not/#parallel-agents-proved-scale-amplifies-the-gap","level":3,"title":"Parallel Agents Proved Scale Amplifies the Gap","text":"

    In Parallel Agents and Merge Debt, the lesson was that multiplying agents multiplies output. But it also multiplies the need for judgment:

    Five agents running in parallel produce five sessions of drift in one clock hour. The human who can frame tasks cleanly, define narrow acceptance criteria, and evaluate results quickly becomes the limiting factor.

    More agents do not reduce the need for judgment. They increase it.

    ","path":["Code Is Cheap. Judgment Is Not."],"tags":[]},{"location":"blog/2026-02-17-code-is-cheap-judgment-is-not/#the-two-reactions","level":2,"title":"The Two Reactions","text":"

    When AI floods the system with cheap output, two things happen:

    Those who only produce: panic. If your value proposition is \"I write code,\" and an AI writes code faster, cheaper, and at higher volume, then the math is unfavorable. Not because AI took your job, but because your job was never the code. It was the judgment around the code, and you were not exercising it.

    Those who direct: accelerate. If your value proposition is \"I know what to build, in what order, to what standard,\" then AI is the best thing that ever happened to you: Production is no longer the bottleneck: Your ability to frame, sequence, evaluate, and course-correct is now the limiting factor on throughput.

    The gap between these two is not talent: It is the awareness of where the value lives.

    ","path":["Code Is Cheap. Judgment Is Not."],"tags":[]},{"location":"blog/2026-02-17-code-is-cheap-judgment-is-not/#what-this-means-in-practice","level":2,"title":"What This Means in Practice","text":"

    If you are an engineer reading this, the actionable insight is not \"learn prompt engineering\" or \"master AI tools.\" It is:

    Get better at the things AI cannot do.

    AI does this well You need to do this Generate code Frame the problem Write tests Define acceptance criteria Scaffold projects Sequence the work Fix bugs from stack traces Evaluate tradeoffs Produce volume Exercise restraint Follow instructions Decide which instructions matter

    The skills on the right column are not new. They are the same skills that have always separated senior engineers from junior ones.

    AI did not create the distinction; it just made it load-bearing.

    ","path":["Code Is Cheap. Judgment Is Not."],"tags":[]},{"location":"blog/2026-02-17-code-is-cheap-judgment-is-not/#if-anything-i-feel-empowered","level":2,"title":"If Anything, I Feel Empowered","text":"

    I will end with something personal.

    I am not worried: I am empowered.

    Before ctx, I could think faster than I could produce:

    • Ideas sat in a queue.
    • The bottleneck was always \"I know what to build, but building it takes too long.\"

    Now the bottleneck is gone. Poof!

    • Production is cheap.
    • The queue is clearing.
    • The limiting factor is how fast I can think, not how fast I can type.

    That is not a threat: That is the best force multiplier I've ever had.

    The people who feel threatened are confusing the accelerator for the replacement:

    AI does not replace the conductor; it gives them a bigger orchestra.

    If You Remember One Thing from This Post...

    Code is cheap. Judgment is not.

    AI replaces unstructured effort, not directed expertise. The skills that matter now are the same skills that have always mattered: taste, framing, sequencing, and the discipline to stop.

    The difference is that now, for the first time, those skills are the only bottleneck left.

    ","path":["Code Is Cheap. Judgment Is Not."],"tags":[]},{"location":"blog/2026-02-17-code-is-cheap-judgment-is-not/#the-arc","level":2,"title":"The Arc","text":"

    This post is a retrospective. It synthesizes the thread running through every previous entry in this blog:

    • Building ctx Using ctx showed that production without direction creates debt
    • Refactoring with Intent showed that slowing down is not the opposite of progress
    • The Attention Budget showed that curation outweighs volume
    • The skill trilogy showed that taste determines whether a tool helps or hinders
    • Not Everything Is a Skill showed that restraint is a skill in itself
    • Defense in Depth showed that instructions are not boundaries
    • The 3:1 Ratio showed that judgment has a schedule
    • Parallel Agents showed that scale amplifies the gap between production and judgment
    • Context as Infrastructure showed that the system you build for context is infrastructure, not conversation

    From YOLO mode to defense in depth, the pattern is the same:

    • Production is the easy part;
    • Judgment is the hard part;
    • AI changed the ratio, not the rule.

    This post synthesizes the thread running through every previous entry in this blog. The evidence is drawn from three weeks of building ctx with AI assistance, the decisions recorded in DECISIONS.md, the learnings captured in LEARNINGS.md, and the git history that tracks where the human mattered and where the AI ran unsupervised.

    See also: When a System Starts Explaining Itself -- what happens after the arc: the first field notes from the moment the system starts compounding in someone else's hands.

    ","path":["Code Is Cheap. Judgment Is Not."],"tags":[]},{"location":"blog/2026-02-17-context-as-infrastructure/","level":1,"title":"Context as Infrastructure","text":"","path":["Context as Infrastructure"],"tags":[]},{"location":"blog/2026-02-17-context-as-infrastructure/#why-your-ai-needs-a-filesystem-not-a-prompt","level":2,"title":"Why Your AI Needs a Filesystem, Not a Prompt","text":"

    Jose Alekhinne / February 17, 2026

    Where Does Your AI's Knowledge Live between Sessions?

    If the answer is \"in a prompt I paste at the start,\" you are treating context as a consumable. Something assembled, used, and discarded.

    What if you treated it as infrastructure instead?

    This post synthesizes a thread that has been running through every ctx blog post; from the origin story to the attention budget to the discipline release. The thread is this: context is not a prompt problem. It is an infrastructure problem. And the tools we build for it should look more like filesystems than clipboard managers.

    ","path":["Context as Infrastructure"],"tags":[]},{"location":"blog/2026-02-17-context-as-infrastructure/#the-prompt-paradigm","level":2,"title":"The Prompt Paradigm","text":"

    Most AI-assisted development treats context as ephemeral:

    1. Start a session.
    2. Paste your system prompt, your conventions, your current task.
    3. Work.
    4. Session ends. Everything evaporates.
    5. Next session: paste again.

    This works for short interactions. For sustained development (where decisions compound over days and weeks) it fails in three ways:

    It does not persist: A decision made on Tuesday must be re-explained on Wednesday. A learning captured in one session is invisible to the next.

    It does not scale: As the project grows, the \"paste everything\" approach hits the context window ceiling. You start triaging what to include, often cutting exactly the context that would have prevented the next mistake.

    It does not compose: A system prompt is a monolith. You cannot load part of it, update one section, or share a subset with a different workflow. It is all or nothing.

    The Copy-Paste Tax

    Every session that starts with pasting a prompt is paying a tax:

    The human time to assemble the context, the risk of forgetting something, and the silent assumption that yesterday's prompt is still accurate today.

    Over 70+ sessions, that tax compounds into a significant maintenance burden: One that most developers absorb without questioning it.

    ","path":["Context as Infrastructure"],"tags":[]},{"location":"blog/2026-02-17-context-as-infrastructure/#the-infrastructure-paradigm","level":2,"title":"The Infrastructure Paradigm","text":"

    ctx takes a different approach:

    Context is not assembled per-session; it is maintained as persistent files in a .context/ directory:

    .context/\n  CONSTITUTION.md     # Inviolable rules\n  TASKS.md            # Current work items\n  CONVENTIONS.md      # Code patterns and standards\n  DECISIONS.md        # Architectural choices with rationale\n  LEARNINGS.md        # Gotchas and lessons learned\n  ARCHITECTURE.md     # System structure\n  GLOSSARY.md         # Domain terminology\n  AGENT_PLAYBOOK.md   # Operating manual for agents\n  journal/            # Enriched session summaries\n  archive/            # Completed work, cold storage\n
    • Each file has a single purpose;
    • Each can be loaded independently;
    • Each persists across sessions, tools, and team members.

    This is not a novel idea. It is the same idea behind every piece of infrastructure software engineers already use:

    Traditional Infrastructure ctx Equivalent Database .context/*.md files Configuration files CONSTITUTION.md Environment variables .contextrc Log files journal/ Schema migrations Decision records Deployment manifests AGENT_PLAYBOOK.md

    The parallel is not metaphorical. Context files are infrastructure:

    • They are versioned (git tracks them);
    • They are structured (Markdown with conventions);
    • They have schemas (required fields for decisions and learnings);
    • And they have lifecycle management (archiving, compaction, indexing).
    ","path":["Context as Infrastructure"],"tags":[]},{"location":"blog/2026-02-17-context-as-infrastructure/#separation-of-concerns","level":2,"title":"Separation of Concerns","text":"

    The most important design decision in ctx is not any individual feature. It is the separation of context into distinct files with distinct purposes.

    A single CONTEXT.md file would be simpler to implement. It would also be impossible to maintain.

    Why? Because different types of context have different lifecycles:

    Context Type Changes Read By Load When Constitution Rarely Every session Always Tasks Every session Session start Always Conventions Weekly Before coding When writing code Decisions When decided When questioning When revisiting Learnings When learned When stuck When debugging Journal Every session Rarely When investigating

    Loading everything into every session wastes the attention budget on context that is irrelevant to the current task. Loading nothing forces the AI to operate blind.

    Separation of concerns allows progressive disclosure:

    Load the minimum that matters for this moment, with the option to load more when needed.

    # Session start: load the essentials\nctx agent --budget 4000\n\n# Deep investigation: load everything\ncat .context/DECISIONS.md\ncat .context/journal/2026-02-05-*.md\n

    The filesystem is the index. File names, directory structure, and timestamps encode relevance. The AI does not need to read every file; it needs to know where to look.

    ","path":["Context as Infrastructure"],"tags":[]},{"location":"blog/2026-02-17-context-as-infrastructure/#the-two-tier-persistence-model","level":2,"title":"The Two-Tier Persistence Model","text":"

    ctx uses two tiers of persistence, and the distinction is architectural:

    Tier Purpose Location Token Cost Curated Quick context reload .context/*.md Low (budgeted) Full dump Safety net, archaeology .context/journal/*.md Zero (not auto-loaded)

    The curated tier is what the AI sees at session start. It is optimized for signal density:

    • Structured entries,
    • Indexed tables,
    • Reverse-chronological order (newest first, so the most relevant content survives truncation).

    The full dump tier is for humans and for deep investigation. It contains everything: Enriched journals, archived tasks...

    It is never autoloaded because its volume would destroy attention density.

    This two-tier model is analogous to how traditional systems separate hot and cold storage:

    • The hot path (curated context) is optimized for read performance (measured not in milliseconds, but in tokens consumed per unit of useful information).
    • The cold path (journal) is optimized for completeness.

    Nothing Is Ever Truly Lost

    The full dump tier means that context does not need to be perfect: It just needs to be findable.

    A decision that was not captured in DECISIONS.md can be recovered from the session transcript where it was discussed.

    A learning that was not formalized can be found in the journal entry from that day.

    The curated tier is the fast path: The full dump tier is the safety net.

    ","path":["Context as Infrastructure"],"tags":[]},{"location":"blog/2026-02-17-context-as-infrastructure/#decision-records-as-first-class-citizens","level":2,"title":"Decision Records as First-Class Citizens","text":"

    One of the patterns that emerged from ctx's own development is the power of structured decision records.

    v0.1.0 allowed adding decisions as one-liners:

    ctx add decision \"Use PostgreSQL\"\n

    v0.2.0 enforced structure:

    ctx add decision \"Use PostgreSQL\" \\\n  --context \"Need a reliable database for user data\" \\\n  --rationale \"ACID compliance, team familiarity\" \\\n  --consequence \"Need connection pooling, team training\"\n

    The difference is not cosmetic:

    • A one-liner decision teaches the AI what was decided.
    • A structured decision teaches it why; and why is what prevents the AI from unknowingly reversing the decision in a future session.

    This is infrastructure thinking:

    Decisions are not notes. They are records with required fields, just like database rows have schemas.

    The enforcement exists because incomplete records are worse than no records: They create false confidence that the context is captured when it is not.

    ","path":["Context as Infrastructure"],"tags":[]},{"location":"blog/2026-02-17-context-as-infrastructure/#the-ide-is-the-interface-decision","level":2,"title":"The \"IDE Is the Interface\" Decision","text":"

    Early in ctx's development, there was a temptation to build a custom UI: a web dashboard for browsing sessions, editing context, viewing analytics.

    The decision was no. The IDE is the interface.

    # This is the ctx \"UI\":\ncode .context/\n

    This decision was not about minimalism for its own sake. It was about recognizing that .context/ files are just files; and files have a mature, well-understood infrastructure:

    • Version control: git diff .context/DECISIONS.md shows exactly what changed and when.
    • Search: Your IDE's full-text search works across all context files.
    • Editing: Markdown in any editor, with preview, spell check, and syntax highlighting.
    • Collaboration: Pull requests on context files work the same as pull requests on code.

    Building a custom UI would have meant maintaining a parallel infrastructure that duplicates what every IDE already provides:

    It would have introduced its own bugs, its own update cycle, and its own learning curve.

    The filesystem is not a limitation: It is the most mature, most composable, most portable infrastructure available.

    Context Files in Git

    Because .context/ lives in the repository, context changes are part of the commit history.

    A decision made in commit abc123 is as traceable as a code change in the same commit.

    This is not possible with prompt-based context, which exists outside version control entirely.

    ","path":["Context as Infrastructure"],"tags":[]},{"location":"blog/2026-02-17-context-as-infrastructure/#progressive-disclosure-for-ai","level":2,"title":"Progressive Disclosure for AI","text":"

    The concept of progressive disclosure comes from human interface design: show the user the minimum needed to make progress, with the option to drill deeper.

    ctx applies the same principle to AI context:

    Level What the AI Sees Token Cost When Level 0 ctx status (one-line summary) ~100 Quick check Level 1 ctx agent --budget 4000 ~4,000 Normal work Level 2 ctx agent --budget 8000 ~8,000 Complex tasks Level 3 Direct file reads 10,000+ Deep investigation

    Each level trades tokens for depth. Level 1 is sufficient for most work: the AI knows the active tasks, the key conventions, and the recent decisions. Level 3 is for archaeology: understanding why a decision was made three weeks ago, or finding a pattern in the session history.

    The explicit --budget flag is the mechanism that makes this work:

    Without it, the default behavior would be to load everything (because more context feels safer), which destroys the attention density that makes the loaded context useful.

    The constraint is the feature: A budget of 4,000 tokens forces ctx to prioritize ruthlessly: constitution first (always full), then tasks and conventions (budget-capped), then decisions and learnings scored by recency and relevance to active tasks. Entries that don't fit get title-only summaries rather than being silently dropped.

    ","path":["Context as Infrastructure"],"tags":[]},{"location":"blog/2026-02-17-context-as-infrastructure/#the-philosophical-shift","level":2,"title":"The Philosophical Shift","text":"

    The shift from \"context as prompt\" to \"context as infrastructure\" changes how you think about AI-assisted development:

    Prompt Thinking Infrastructure Thinking \"What do I paste today?\" \"What has changed since yesterday?\" \"How do I fit everything in?\" \"What's the minimum that matters?\" \"The AI forgot my conventions\" \"The conventions are in a file\" \"I need to re-explain\" \"I need to update the record\" \"This session is getting slow\" \"Time to compact and archive\"

    The first column treats AI interaction as a conversation. The second treats it as a system: One that can be maintained, optimized, and debugged.

    Context is not something you give the AI. It is something you maintain: Like a database, like a config file, like any other piece of infrastructure that a running system depends on.

    ","path":["Context as Infrastructure"],"tags":[]},{"location":"blog/2026-02-17-context-as-infrastructure/#beyond-ctx-the-principles","level":2,"title":"Beyond ctx: The Principles","text":"

    The patterns that ctx implements are not specific to ctx. They are applicable to any project that uses AI-assisted development:

    1. Separate context by purpose: Do not put everything in one file. Different types of information have different lifecycles and different relevance windows.
    2. Make context persistent: If a decision matters, write it down in a file that survives the session. If a learning matters, capture it with structure.
    3. Budget explicitly: Know how much context you are loading and whether it is worth the attention cost.
    4. Use the filesystem: File names, directory structure, and timestamps are metadata that the AI can navigate. A well-organized directory is an index that costs zero tokens to maintain.
    5. Version your context: Put context files in git. Changes to decisions are as important as changes to code.
    6. Design for degradation: Sessions will get long. Attention will dilute. Build mechanisms (compaction, archiving, cooldowns) that make degradation visible and manageable.

    These are not ctx features. They are infrastructure principles that happen to be implemented as a CLI tool. Any team could implement them with nothing more than a directory convention and a few shell scripts.

    The tool is a convenience: The principles are what matter.

    If You Remember One Thing from This Post...

    Prompts are conversations. Infrastructure persists.

    Your AI does not need a better prompt. It needs a filesystem:

    versioned, structured, budgeted, and maintained.

    The best context is the context that was there before you started the session.

    ","path":["Context as Infrastructure"],"tags":[]},{"location":"blog/2026-02-17-context-as-infrastructure/#the-arc","level":2,"title":"The Arc","text":"

    This post is the architectural companion to the Attention Budget. That post explained why context must be curated (token economics). This one explains how to structure it (filesystem, separation of concerns, persistence tiers).

    Together with Code Is Cheap, Judgment Is Not, they form a trilogy about what matters in AI-assisted development:

    • Attention Budget: the resource you're managing
    • Context as Infrastructure: the system you build to manage it
    • Code Is Cheap: the human skill that no system replaces

    And the practices that keep it all honest:

    • The 3:1 Ratio: the cadence for maintaining both code and context
    • IRC as Context: the historical precedent: stateless protocols have always needed stateful wrappers

    This post synthesizes ideas from across the ctx blog series: the attention budget primitive, the two-tier persistence model, the IDE decision, and the progressive disclosure pattern. The principles are drawn from three weeks of building ctx and 70+ sessions of treating context as infrastructure rather than conversation.

    See also: When a System Starts Explaining Itself: what happens when this infrastructure starts compounding in someone else's environment.

    ","path":["Context as Infrastructure"],"tags":[]},{"location":"blog/2026-02-17-parallel-agents-merge-debt-and-the-myth-of-overnight-progress/","level":1,"title":"Parallel Agents, Merge Debt, and the Myth of Overnight Progress","text":"","path":["Parallel Agents, Merge Debt, and the Myth of Overnight Progress"],"tags":[]},{"location":"blog/2026-02-17-parallel-agents-merge-debt-and-the-myth-of-overnight-progress/#when-the-screen-looks-like-progress","level":2,"title":"When the Screen Looks like Progress","text":"

    Jose Alekhinne / 2026-02-17

    How Many Terminals Are Too Many?

    You discover agents can run in parallel.

    So you open ten...

    ...Then twenty.

    The fans spin. Tokens burn. The screen looks like progress.

    It is NOT progress.

    There is a phase every builder goes through:

    • The tooling gets fast enough.
    • The model gets good enough.
    • The temptation becomes irresistible:
      • more agents, more output, faster delivery.

    So you open terminals. You spawn agents. You watch tokens stream across multiple windows simultaneously, and it feels like multiplication.

    It is not multiplication.

    It is merge debt being manufactured in real time.

    The ctx Manifesto says it plainly:

    Activity is not impact. Code is not progress.

    This post is about what happens when you take that seriously in the context of parallel agent workflows.

    ","path":["Parallel Agents, Merge Debt, and the Myth of Overnight Progress"],"tags":[]},{"location":"blog/2026-02-17-parallel-agents-merge-debt-and-the-myth-of-overnight-progress/#the-unit-of-scale-is-not-the-agent","level":2,"title":"The Unit of Scale Is Not the Agent","text":"

    The naive model says:

    More agents -> more output -> faster delivery

    The production model says:

    Clean context boundaries -> less interference -> higher throughput

    Parallelism only works when the cognitive surfaces do not overlap.

    If two agents touch the same files, you did not create parallelism: You created a conflict generator.

    They will:

    • Revert each other's changes;
    • Relint each other's formatting;
    • Refactor the same function in different directions.

    You watch with 🍿. Nothing ships.

    This is the same insight from the worktrees post: partition by blast radius, not by priority.

    Two tasks that touch the same files belong in the same track, no matter how important the other one is. The constraint is file overlap.

    Everything else is scheduling.

    ","path":["Parallel Agents, Merge Debt, and the Myth of Overnight Progress"],"tags":[]},{"location":"blog/2026-02-17-parallel-agents-merge-debt-and-the-myth-of-overnight-progress/#the-five-agent-rule","level":2,"title":"The \"Five Agent\" Rule","text":"

    In practice there is a ceiling.

    Around five or six concurrent agents:

    • Token burn becomes noticeable;
    • Supervision cost rises;
    • Coordination noise increases;
    • Returns flatten.

    This is not a model limitation: This is a human merge bandwidth limitation.

    You are the bottleneck, not the silicon.

    The attention budget applies to you too:

    Every additional agent is another stream of output you need to comprehend, verify, and integrate. Your attention density drops the same way the model's does when you overload its context window.

    Five agents producing verified, mergeable change beats twenty agents producing merge conflicts you spend a day untangling.

    ","path":["Parallel Agents, Merge Debt, and the Myth of Overnight Progress"],"tags":[]},{"location":"blog/2026-02-17-parallel-agents-merge-debt-and-the-myth-of-overnight-progress/#role-separation-beats-file-locking","level":2,"title":"Role Separation Beats File Locking","text":"

    Real parallelism comes from task topology, not from tooling.

    Good:

    Agent Role Touches 1 Documentation docs/, hack/ 2 Security scan Read-only audit 3 Implementation internal/cli/ 4 Enhancement requests Read-only, files issues

    Bad:

    • Four agents editing the same implementation surface

    Context Is the Boundary

    • The goal is not to keep agents busy.
    • The goal is to keep contexts isolated.

    This is what the codebase audit got right:

    • Eight agents, all read-only, each analyzing a different dimension.
    • Zero file overlap.
    • Zero merge conflicts.
    • Eight reports that composed cleanly because no agent interfered with another.
    ","path":["Parallel Agents, Merge Debt, and the Myth of Overnight Progress"],"tags":[]},{"location":"blog/2026-02-17-parallel-agents-merge-debt-and-the-myth-of-overnight-progress/#when-terminals-stop-scaling","level":2,"title":"When Terminals Stop Scaling","text":"

    There is a moment when more windows stop helping.

    That is the signal. Not to add orchestration. But to introduce:

    git worktree\n

    Because now you are no longer parallelizing execution; you are parallelizing state.

    State Scales, Windows Don't

    • State isolation is the real scaling.
    • Window multiplication is theater.

    The worktrees post covers the mechanics:

    • Sibling directories;
    • Branch naming;
    • The inevitable TASKS.md conflicts;
    • The 3-4 worktree ceiling.

    The principle underneath is older than git:

    Shared mutable state is the enemy of parallelism.

    Always has been.

    Always will be.

    ","path":["Parallel Agents, Merge Debt, and the Myth of Overnight Progress"],"tags":[]},{"location":"blog/2026-02-17-parallel-agents-merge-debt-and-the-myth-of-overnight-progress/#the-overnight-loop-illusion","level":2,"title":"The Overnight Loop Illusion","text":"

    Autonomous night runs are impressive.

    You sleep. The machine produces thousands of lines.

    In the morning:

    • You read;
    • You untangle;
    • You reconstruct intent;
    • You spend a day making it shippable.

    In retrospect, nothing was accelerated.

    The bottleneck moved from typing to comprehension.

    The Comprehension Tax

    If understanding the output costs more than producing it, the loop is a net loss.

    Progress is not measured in generated code.

    Progress is measured in verified, mergeable change.

    The ctx Manifesto calls this out directly:

    The Scoreboard

    Verified reality is the scoreboard.

    The only truth that compounds is verified change in the real world.

    An overnight run that produces 3,000 lines nobody reviewed is not 3,000 lines of progress: It is 3,000 lines of liability until someone verifies every one of them.

    And that someone is (insert drumroll here) you:

    The same bottleneck that was supposedly being bypassed.

    ","path":["Parallel Agents, Merge Debt, and the Myth of Overnight Progress"],"tags":[]},{"location":"blog/2026-02-17-parallel-agents-merge-debt-and-the-myth-of-overnight-progress/#skills-that-fight-the-platform","level":2,"title":"Skills That Fight the Platform","text":"

    Most marketplace skills are prompt decorations:

    • They rephrase what the base model already knows;
    • They increase token usage;
    • They reduce clarity;
    • They introduce behavioral drift.

    We covered this in depth in Skills That Fight the Platform: judgment suppression, redundant guidance, guilt-tripping, phantom dependencies, universal triggers: Five patterns that make agents worse, not better.

    A real skill does one of these:

    • Encodes workflow state;
    • Enforces invariants;
    • Reduces decision branching.

    Everything else is packaging.

    The anatomy post established the criteria: quality gates, negative triggers, examples over rules, skills as contracts.

    If a skill doesn't meet those criteria...

    • It is either a recipe (document it in hack/);
    • Or noise (delete it);
    • There is no third option.
    ","path":["Parallel Agents, Merge Debt, and the Myth of Overnight Progress"],"tags":[]},{"location":"blog/2026-02-17-parallel-agents-merge-debt-and-the-myth-of-overnight-progress/#hooks-are-context-that-execute","level":2,"title":"Hooks Are Context That Execute","text":"

    The most valuable skills are not prompts:

    They are constraints embedded in the toolchain.

    For example: The agent cannot push.

    git push becomes:

    Stop. A human reviews first.

    A commit without verification becomes:

    Did you run tests? Did you run linters? What exactly are you shipping?

    This is not safety theater; this is intent preservation.

    The thing the ctx Manifesto calls \"encoding intent into the environment.\"

    The Eight Ways a Hook Can Talk catalogued the full spectrum: from silent enrichment to hard blocks.

    The key insight was that hooks are not just safety rails: They are context that survives execution.

    They are the difference between an agent that remembers the rules and one that enforces them.

    ","path":["Parallel Agents, Merge Debt, and the Myth of Overnight Progress"],"tags":[]},{"location":"blog/2026-02-17-parallel-agents-merge-debt-and-the-myth-of-overnight-progress/#complexity-is-a-tax","level":2,"title":"Complexity Is a Tax","text":"

    Every extra layer adds cognitive weight:

    • Orchestration frameworks;
    • Meta agents;
    • Autonomous planning systems...

    If a single terminal works, stay there.

    If five isolated agents work, stop there.

    Add structure only when a real bottleneck appears.

    NOT when an influencer suggests one.

    This is the same lesson from Not Everything Is a Skill:

    The best automation decision is sometimes not to automate.

    A recipe in a Markdown file costs nothing until you use it.

    An orchestration framework costs attention on every run, whether it helps or not.

    ","path":["Parallel Agents, Merge Debt, and the Myth of Overnight Progress"],"tags":[]},{"location":"blog/2026-02-17-parallel-agents-merge-debt-and-the-myth-of-overnight-progress/#literature-is-throughput","level":2,"title":"Literature Is Throughput","text":"

    Clear writing is not aesthetic: It is compression.

    Better articulation means:

    • Fewer tokens;
    • Fewer misinterpretations;
    • Faster convergence.

    The attention budget taught us that context is a finite resource with a quadratic cost.

    Language determines how fast you spend context.

    A well-written task description that takes 50 tokens outperforms a rambling one that takes 200: Not just because it is cheaper, but because it leaves more headroom for the model to actually think.

    Literature Is NOT Overrated

    • Attention is a finite budget.
    • Language determines how fast you spend it.
    ","path":["Parallel Agents, Merge Debt, and the Myth of Overnight Progress"],"tags":[]},{"location":"blog/2026-02-17-parallel-agents-merge-debt-and-the-myth-of-overnight-progress/#the-real-metric","level":2,"title":"The Real Metric","text":"

    The real metric is not:

    • Lines generated;
    • Agents running;
    • Tasks completed while you sleep.

    But:

    Time from idea to verified, mergeable, production change.

    Everything else is motion.

    The entire blog series has been circling this point:

    • The attention budget was about spending tokens wisely.
    • The skills trilogy was about not wasting them on prompt decoration.
    • The worktrees post was about multiplying throughput without multiplying interference.
    • The discipline release was about what a release looks like when polish outweighs features: 3:1.

    Every post has arrived (and made me converge) at the same answer so far:

    The metric is a verified change, not generated output.

    ","path":["Parallel Agents, Merge Debt, and the Myth of Overnight Progress"],"tags":[]},{"location":"blog/2026-02-17-parallel-agents-merge-debt-and-the-myth-of-overnight-progress/#ctx-was-never-about-spawning-more-minds","level":2,"title":"ctx Was Never about Spawning More Minds","text":"

    ctx is about:

    • Isolating context;
    • Preserving intent;
    • Making progress composable.

    Parallel agents are powerful. But only when you respect the boundaries that make parallelism real.

    Otherwise, you are not scaling cognition; you are scaling interference.

    The ctx Manifesto's thesis holds:

    Without ctx, intelligence resets. With ctx, creation compounds.

    Compounding requires structure.

    Structure requires boundaries.

    Boundaries require the discipline to stop adding agents when five is enough.

    ","path":["Parallel Agents, Merge Debt, and the Myth of Overnight Progress"],"tags":[]},{"location":"blog/2026-02-17-parallel-agents-merge-debt-and-the-myth-of-overnight-progress/#practical-summary","level":2,"title":"Practical Summary","text":"

    A production workflow tends to converge to this:

    Practice Why Stay in one terminal unless necessary Minimize coordination overhead Spawn a small number of agents with non-overlapping responsibilities Conflict avoidance > parallelism Isolate state with worktrees when surfaces grow State isolation is real scaling Encode verification into hooks Intent that survives execution Avoid marketplace prompt cargo cults Skills are contracts, not decorations Measure merge cost, not generation speed The metric is verified change

    This is slower to watch. Faster to ship.

    If You Remember One Thing from This Post...

    Progress is not what the machine produces while you sleep.

    Progress is what survives contact with the main branch.

    See also: Code Is Cheap. Judgment Is Not.: the argument that production capacity was never the bottleneck, and why multiplying agents amplifies the need for human judgment rather than replacing it.

    ","path":["Parallel Agents, Merge Debt, and the Myth of Overnight Progress"],"tags":[]},{"location":"blog/2026-02-17-the-3-1-ratio/","level":1,"title":"The 3:1 Ratio","text":"","path":["The 3:1 Ratio"],"tags":[]},{"location":"blog/2026-02-17-the-3-1-ratio/#scheduling-consolidation-in-ai-development","level":2,"title":"Scheduling Consolidation in AI Development","text":"

    Jose Alekhinne / February 17, 2026

    How Often Should You Stop Building and Start Cleaning?

    Every developer knows technical debt exists. Every developer postpones dealing with it.

    AI-assisted development makes the problem worse; not because the AI writes bad code, but because it writes code so fast that drift accumulates before you notice.

    In Refactoring with Intent, I mentioned a ratio that worked for me: 3:1. Three YOLO sessions create enough surface area to reveal patterns. The fourth session turns those patterns into structure.

    That was an observation. This post is the evidence.

    ","path":["The 3:1 Ratio"],"tags":[]},{"location":"blog/2026-02-17-the-3-1-ratio/#the-observation","level":2,"title":"The Observation","text":"

    During the first two weeks of building ctx, I noticed a rhythm in my own productivity. Feature sessions felt great: new commands, new capabilities, visible progress...

    ...but after three of them, things would start to feel sticky: variable names that almost made sense, files that had grown past their purpose, patterns that repeated without being formalized.

    The fourth session (when I stopped adding and started cleaning) was always the most painful to start and the most satisfying to finish.

    It was also the one that made the next three feature sessions faster.

    ","path":["The 3:1 Ratio"],"tags":[]},{"location":"blog/2026-02-17-the-3-1-ratio/#the-evidence-git-history","level":2,"title":"The Evidence: Git History","text":"

    The ctx git history between January 20 and February 7 tells a clear story when you categorize commits:

    Week Feature commits Consolidation commits Ratio Jan 20-26 18 5 3.6:1 Jan 27-Feb 1 14 6 2.3:1 Feb 2-7 15 35+ 0.4:1

    The first week was pure YOLO: Almost four feature commits for every consolidation commit. The codebase grew fast.

    The second week started to self-correct. The ratio dropped as refactoring sessions became necessary: Not scheduled, but forced by friction.

    The third week inverted entirely: v0.3.0 was almost entirely consolidation: the skill migration, the sweep, the documentation standardization. Thirty-five quality commits against fifteen features.

    The debt from weeks one and two was paid in week three.

    The Compounding Problem

    Consolidation debt compounds.

    Week one's drift doesn't just persist into week two: It accelerates, because new features are built on top of drifted patterns.

    By week three, the cost of consolidation was higher than it would have been if spread evenly.

    ","path":["The 3:1 Ratio"],"tags":[]},{"location":"blog/2026-02-17-the-3-1-ratio/#what-drift-actually-looks-like","level":2,"title":"What Drift Actually Looks Like","text":"

    \"Drift\" sounds abstract. Here is what it looked like concretely in the ctx codebase after three weeks of feature-heavy development:

    ","path":["The 3:1 Ratio"],"tags":[]},{"location":"blog/2026-02-17-the-3-1-ratio/#predicate-naming","level":3,"title":"Predicate Naming","text":"

    Convention says boolean functions should be named HasX, IsX, CanX. After three feature sprints:

    // What accumulated:\nfunc CheckIfEnabled() bool  // should be Enabled\nfunc ValidateFormat() bool  // should be ValidFormat\nfunc TestConnection() bool  // should be Connects\nfunc VerifyExists() bool    // should be Exists or HasFile\nfunc EnsureReady() bool     // should be Ready\n

    Five violations. Not bugs, but friction that compounds every time someone (human or AI) reads the code and has to infer the naming convention from inconsistent examples.

    ","path":["The 3:1 Ratio"],"tags":[]},{"location":"blog/2026-02-17-the-3-1-ratio/#magic-strings","level":3,"title":"Magic Strings","text":"
    // Week 1: acceptable prototype\nif entry.Type == \"task\" {\n    filename = \"TASKS.md\"\n}\n\n// Week 3: same pattern in 7+ files\n// Now it's a maintenance liability\n

    When the same literal appears in seven files, changing it means finding all seven. Missing one means a silent runtime bug. Constants exist to prevent exactly this. But during feature velocity, nobody stops to extract them.

    Refactoring with Intent documented the constants consolidation that cleaned this up. The 3:1 ratio is the practice that prevents it from accumulating again.

    ","path":["The 3:1 Ratio"],"tags":[]},{"location":"blog/2026-02-17-the-3-1-ratio/#hardcoded-permissions","level":3,"title":"Hardcoded Permissions","text":"
    os.WriteFile(path, data, 0644) // 80+ instances\nos.MkdirAll(path, 0755)        // scattered across packages\n

    Eighty-plus instances of hardcoded file permissions. Not wrong, but if I ever need to change the default (and I did, for hook scripts that need execute permissions), it means a codebase-wide search.

    Drift Is Not Bugs

    None of these are bugs. The code works. Tests pass.

    But drift creates false confidence: the codebase looks consistent until you try to change something and discover that five different conventions exist for the same concept.

    ","path":["The 3:1 Ratio"],"tags":[]},{"location":"blog/2026-02-17-the-3-1-ratio/#why-you-cannot-consolidate-on-day-one","level":2,"title":"Why You Cannot Consolidate on Day One","text":"

    The temptation is to front-load quality: write all the conventions, enforce all the checks, prevent all the drift before it happens.

    This fails for two reasons.

    First, you do not know what will drift: Predicate naming violations only become a convention check after you notice three different naming patterns competing. Magic strings only become a consolidation target after you change a literal and discover it exists in seven places.

    The conventions emerge from the work; they cannot precede it.

    This is what You Can't Import Expertise meant in practice: the consolidation checks grow from the project's own drift history. You cannot write them on day one because you do not yet know what will drift.

    Second, premature consolidation slows discovery: During the prototyping phase, the goal is to explore the design space. Enforcing strict conventions on code that might be deleted tomorrow is waste.

    YOLO mode has its place: The problem is not YOLO itself, but YOLO without a scheduled cleanup.

    The Consolidation Paradox

    You need a drift history to know what to consolidate.

    You need consolidation to prevent drift from compounding.

    The 3:1 ratio resolves this paradox:

    Let drift accumulate for three sessions (enough to see patterns), then consolidate in the fourth (before the patterns become entrenched).

    ","path":["The 3:1 Ratio"],"tags":[]},{"location":"blog/2026-02-17-the-3-1-ratio/#the-consolidation-skill","level":2,"title":"The Consolidation Skill","text":"

    The ctx project now has an /audit skill that encodes nine project-specific checks:

    Check What It Catches Predicate naming Boolean functions not using Has/Is/Can Magic strings Repeated literals not in config constants File permissions Hardcoded 0644/0755 not using constants Godoc style Missing or non-standard documentation File length Files exceeding 400 lines Large functions Functions exceeding 80 lines Template drift Live skills diverging from templates Import organization Non-standard import grouping TODO/FIXME staleness Old markers that are no longer relevant

    This is not a generic linter. These are project-specific conventions that emerged from ctx's own development history. A generic code quality tool would catch some of them. Only a project-specific check catches all of them, because some of them (predicate naming, template drift) are conventions that exist nowhere except in this project's CONVENTIONS.md.

    ","path":["The 3:1 Ratio"],"tags":[]},{"location":"blog/2026-02-17-the-3-1-ratio/#the-decision-matrix","level":2,"title":"The Decision Matrix","text":"

    Not all drift needs immediate consolidation. Here is the matrix I use:

    Signal Action Same literal in 3+ files Extract to constant Same code block in 3+ places Extract to helper Naming convention violated 5+ times Fix and document rule File exceeds 400 lines Split by concern Convention exists but is regularly violated Strengthen enforcement Pattern exists only in one place Leave it alone Code works but is \"ugly\" Leave it alone

    The last two rows matter:

    Consolidation is about reducing maintenance cost, not achieving aesthetic perfection. Code that works and exists in one place does not benefit from consolidation; it benefits from being left alone until it earns its refactoring.

    ","path":["The 3:1 Ratio"],"tags":[]},{"location":"blog/2026-02-17-the-3-1-ratio/#consolidation-as-context-hygiene","level":2,"title":"Consolidation as Context Hygiene","text":"

    There is a parallel between code consolidation and context management that became clear during the ctx development:

    Code Consolidation Context Hygiene Extract magic strings Archive completed tasks Standardize naming Keep DECISIONS.md current Remove dead code Compact old sessions Update stale comments Review LEARNINGS.md for staleness Check template drift Verify CONVENTIONS.md matches code

    ctx compact does for context what consolidation does for code:

    It moves completed work to cold storage, keeping the active context clean and focused. The attention budget applies to both the AI's context window and the developer's mental model of the codebase.

    When context files accumulate stale entries, the AI's attention is wasted on completed tasks and outdated conventions. When code accumulates drift, the developer's attention is wasted on inconsistencies that obscure the actual logic.

    Both are solved by the same discipline: periodic, scheduled cleanup.

    This is also why parallel agents make the problem harder, not easier. Three agents running simultaneously produce three sessions' worth of drift in one clock hour. The consolidation cadence needs to match the output rate, not the calendar.

    ","path":["The 3:1 Ratio"],"tags":[]},{"location":"blog/2026-02-17-the-3-1-ratio/#the-practice","level":2,"title":"The Practice","text":"

    Here is how the 3:1 ratio works in practice for ctx development:

    Sessions 1-3: Feature work

    • Add new capabilities;
    • Write tests for new code;
    • Do not stop for cleanup unless something is actively broken;
    • Note drift as you see it (a comment, a task, a mental note).

    Session 4: Consolidation

    • Run /audit to surface accumulated drift;
    • Fix the highest-impact items first;
    • Update CONVENTIONS.md if new patterns emerged;
    • Archive completed tasks;
    • Review LEARNINGS.md for anything that became a convention.

    The key insight is that session 4 is not optional. It is not \"if we have time\": It is scheduled with the same priority as feature work.

    The cost of skipping it is not visible immediately; it becomes visible three sessions later, when the next consolidation session takes twice as long because the drift compounded.

    ","path":["The 3:1 Ratio"],"tags":[]},{"location":"blog/2026-02-17-the-3-1-ratio/#what-the-ratio-is-not","level":2,"title":"What the Ratio Is Not","text":"

    The 3:1 ratio is not a universal law. It is an empirical observation from one project with one developer working with AI assistance.

    Different projects will have different ratios:

    • A mature codebase with strong conventions might sustain 5:1 or higher;
    • A greenfield prototype might need 2:1;
    • A team of multiple developers with different styles might need 1:1.

    The number is less important than the practice: consolidation is not a reaction to problems. It is a scheduled activity.

    If you wait for drift to cause pain before consolidating, you have already paid the compounding cost.

    If You Remember One Thing from This Post...

    Three sessions of building. One session of cleaning.

    Not because the code is dirty, but because drift compounds silently, and the only way to catch it is to look for it on a schedule.

    The ratio is the schedule.

    ","path":["The 3:1 Ratio"],"tags":[]},{"location":"blog/2026-02-17-the-3-1-ratio/#the-arc-so-far","level":2,"title":"The Arc so Far","text":"

    This post sits at a crossroads in the ctx story. Looking back:

    • Building ctx Using ctx documented the YOLO sprint that created the initial codebase
    • Refactoring with Intent introduced the 3:1 ratio as an observation from the first cleanup
    • The Attention Budget explained why drift matters: every token of inconsistency consumes the same finite resource as useful context
    • You Can't Import Expertise showed that consolidation checks must grow from the project, not a template
    • The Discipline Release proved the ratio works at release scale: 35 quality commits to 15 feature commits

    And looking forward: the same principle applies to context files, to documentation, and to the merge debt that parallel agents produce. Drift is drift, whether it lives in code, in .context/, or in the gap between what your docs say and what your code does.

    The ratio is the schedule is the discipline.

    This post was drafted from git log analysis of the ctx repository, mapping every commit from January 20 to February 7 into feature vs consolidation categories. The patterns described are drawn from the project's CONVENTIONS.md, LEARNINGS.md, and the /audit skill's check list.

    ","path":["The 3:1 Ratio"],"tags":[]},{"location":"blog/2026-02-17-when-a-system-starts-explaining-itself/","level":1,"title":"When a System Starts Explaining Itself","text":"","path":["When a System Starts Explaining Itself"],"tags":[]},{"location":"blog/2026-02-17-when-a-system-starts-explaining-itself/#field-notes-from-the-moment-a-private-workflow-becomes-portable","level":2,"title":"Field Notes from the Moment a Private Workflow Becomes Portable","text":"

    Jose Alekhinne / February 17, 2026

    How Do You Know Something Is Working?

    Not from metrics. Not from GitHub stars. Not from praise.

    You know, deep in your heart, that it works when people start describing it wrong.

    ","path":["When a System Starts Explaining Itself"],"tags":[]},{"location":"blog/2026-02-17-when-a-system-starts-explaining-itself/#the-first-external-signals","level":2,"title":"The First External Signals","text":"

    Every new substrate begins as a private advantage:

    • It lives inside one mind,
    • One repository,
    • One set of habits.

    It is fast. It is not yet real.

    Reality begins when other people describe it in their own language:

    • Not accurately;
    • Not consistently;
    • But involuntarily.

    The early reports arrived without coordination:

    Better Tasks

    \"I do not know how, but this creates better tasks than my AI plugin.\"

    I See Butterflies

    \"This is better than Adderall.\"

    Dear Manager...

    \"Promotion packet? Done. What is next?\"

    What Is It? Can I Eat It?

    \"Is this a skill?\" 🦋

    Why the Cloak and Dagger?

    \"Why is this not in the marketplace?\"

    And then something more important happened:

    Someone else started making a video!

    That was the boundary.

    ctx no longer required its creator to be present in order to exist.

    ","path":["When a System Starts Explaining Itself"],"tags":[]},{"location":"blog/2026-02-17-when-a-system-starts-explaining-itself/#misclassification-is-a-sign-of-a-new-primitive","level":2,"title":"Misclassification Is a Sign of a New Primitive","text":"

    When a tool is understood, it is categorized:

    • Editor,
    • Framework,
    • Task manager,
    • Plugin...

    When a substrate appears, it is misclassified:

    \"Is this a skill?\" 🦋

    The question is correct. The category is wrong.

    • Skills live in people.
    • Infrastructure lives in the environment.

    ctx Is Not a Skill: It Is a Form of Relief

    What early adopters experience is not an ability.

    It is the removal of a cognitive constraint.

    This is the same distinction that emerged in the skills trilogy:

    • A skill is a contract between a human and an agent.
    • Infrastructure is the ground both stand on.

    You do not use infrastructure.

    You habitualize it.

    ","path":["When a System Starts Explaining Itself"],"tags":[]},{"location":"blog/2026-02-17-when-a-system-starts-explaining-itself/#the-pharmacological-metaphor","level":2,"title":"The Pharmacological Metaphor","text":"

    \"Better than Adderall\" is not praise.

    It is a diagnostic:

    Executive function has been externalized.

    • The system is not making the user work harder.
    • It is restoring continuity.

    From the primitive context of wetware:

    • Continuity feels like focus
    • Focus feels like discipline

    If it walks like a duck and quacks like a duck, it is a duck.

    Discipline is usually simulated.

    Infrastructure makes the simulation unnecessary.

    The attention budget explained why context degrades:

    • Attention density drops as volume grows;
    • The middle gets lost;
    • Sessions end and everything evaporates.

    The pharmacological metaphor says the same thing from the user's lens:

    Save the Cheerleader, Save the World

    The symptom of lost context is lost focus.

    Restore the context. Restore the focus.

    IRC bouncers solved this for chat twenty years ago. ctx solves it for cognition.

    ","path":["When a System Starts Explaining Itself"],"tags":[]},{"location":"blog/2026-02-17-when-a-system-starts-explaining-itself/#throughput-on-ambiguous-work","level":2,"title":"Throughput on Ambiguous Work","text":"

    Finishing a promotion packet quickly is not a productivity story.

    It is the collapse of reconstruction cost.

    Most complex work is not execution. It is:

    • Remembering why something mattered;
    • Recovering prior decisions;
    • Rebuilding mental state.

    Persistent context removes that tax.

    Velocity appears as a side effect.

    This Is the Two-Tier Model in Practice

    The two-tier persistence model

    • Curated context for fast reload
    • Full journal for archaeology

    is what makes this possible.

    • The user does not notice the system.
    • They notice that the reconstruction cost disappeared.
    ","path":["When a System Starts Explaining Itself"],"tags":[]},{"location":"blog/2026-02-17-when-a-system-starts-explaining-itself/#the-moment-of-portability","level":2,"title":"The Moment of Portability","text":"

    The system becomes real when two things happen:

    1. It can be installed as a versioned artifact.
    2. It survives contact with a hostile, real codebase.

    This is why the first integration into a living system matters more than any landing page.

    Demos prove possibility.

    Diffs prove reality.

    The ctx Manifesto calls this out directly:

    Verified reality is the scoreboard.

    ","path":["When a System Starts Explaining Itself"],"tags":[]},{"location":"blog/2026-02-17-when-a-system-starts-explaining-itself/#the-split-voice","level":2,"title":"The Split Voice","text":"

    A new substrate requires two channels.

    The embodied voice:

    Here is what changed in my actual work.

    The out of body voice:

    Here is what this means.

    One produces trust.

    The other produces understanding.

    Neither is sufficient alone.

    This entire blog has been the second voice.

    • The origin story was the first.
    • The refactoring post was the first.
    • Every release note with concrete diffs was the first.

    This is the first second.

    ","path":["When a System Starts Explaining Itself"],"tags":[]},{"location":"blog/2026-02-17-when-a-system-starts-explaining-itself/#systems-that-generate-explainers","level":2,"title":"Systems That Generate Explainers","text":"

    Tools are used.

    Platforms are extended.

    Substrates are explained.

    The first unsolicited explainer is a brittle phase change.

    It means the idea has become portable between minds.

    That is the beginning of an ecosystem.

    ","path":["When a System Starts Explaining Itself"],"tags":[]},{"location":"blog/2026-02-17-when-a-system-starts-explaining-itself/#the-absence-of-metrics","level":2,"title":"The Absence of Metrics","text":"

    Metrics do not matter at this stage.

    Dashboards are noise.

    The whole premise of ctx is the ruthless elimination of noise.

    Numbers optimize funnels; substrates alter cognition.

    The only valid measurement is irreversible reality:

    • A merged PR;
    • A reproducible install;
    • A decision that is never re-litigated.

    The merge debt post reached the same conclusion from another direction:

    The metric is the verified change, not generated output.

    For adoption, the same rule applies:

    The metric is altered behavior, not download counts.

    ","path":["When a System Starts Explaining Itself"],"tags":[]},{"location":"blog/2026-02-17-when-a-system-starts-explaining-itself/#what-is-actually-happening","level":2,"title":"What Is Actually Happening","text":"

    A private advantage is becoming an environmental property:

    The system is moving from...

    personal workflow,

    to...

    a shared infrastructure for thought.

    Not by growth.

    Not by marketing.

    By altering how real systems evolve.

    If You Remember One Thing from This Post...

    You do not know a substrate is real when people praise it.

    You know it is real when:

    • They describe it incorrectly;
    • They depend on it unintentionally;
    • They start teaching it to others.

    That is the moment the system begins explaining itself.

    ","path":["When a System Starts Explaining Itself"],"tags":[]},{"location":"blog/2026-02-17-when-a-system-starts-explaining-itself/#the-arc","level":2,"title":"The Arc","text":"

    Every previous post looked inward.

    This one looks outward.

    • Building ctx Using ctx: one mind, one repository
    • The Attention Budget: the constraint
    • Context as Infrastructure: the architecture
    • Code Is Cheap. Judgment Is Not.: the bottleneck

    This post is the field report from the other side of that bottleneck:

    The moment the infrastructure compounds in someone else's hands.

    The arc is not complete.

    It is becoming portable.

    These field notes were written the same day the feedback arrived. The quotes are real. Real users. Real codebases. No names. No metrics. No funnel. Only the signal that something shifted.

    ","path":["When a System Starts Explaining Itself"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/","level":1,"title":"The Dog Ate My Homework","text":"","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#teaching-ai-agents-to-read-before-they-write","level":2,"title":"Teaching AI Agents to Read Before They Write","text":"

    Jose Alekhinne / February 25, 2026

    Does Your AI Actually Read the Instructions?

    You wrote the playbook. You organized the files. You even put \"CRITICAL, not optional\" in bold.

    The agent skipped all of it and went straight to work.

    I spent a day running experiments on my own agents. Not to see if they could write code (they can). To see if they would do their homework first.

    They didn't.

    Then I kept experimenting:

    • Five sessions;
    • Five different failure modes.

    And by the end, I had something better than compliance:

    I had observable compliance: A system where I don't need the agent to be perfect, I just need to see what it chose.

    ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#tldr","level":2,"title":"TL;DR","text":"

    You don't need perfect compliance. You need observable compliance.

    Authority is a function of temporal proximity to action.

    ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#the-pattern","level":2,"title":"The Pattern","text":"

    This design has three parts:

    1. One-hop instruction;
    2. Binary collapse;
    3. Compliance canary.

    I'll explain all three patterns in detail below.

    ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#the-setup","level":2,"title":"The Setup","text":"

    ctx has a session-start protocol:

    • Read the context files;
    • Load the playbook;
    • Understand the project before touching anything.

    It's in CLAUDE.md. It's in AGENT_PLAYBOOK.md.

    It's in bold. It's in CAPS. It's ignored.

    In theory, it's awesome.

    Here's what happens when theory hits reality:

    What the agent receives What the agent does CLAUDE.md saying \"load context first\" Skips it 8 context files waiting to be read Ignores them User's question: \"add --verbose flag\" Starts grepping immediately

    The instructions are right there. The agent knows they exist. It even knows it should follow them. But the user asked a question, and responsiveness wins over ceremony.

    This isn't a bug in the model. It's a design problem in how we communicate with agents.

    ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#the-delegation-trap","level":2,"title":"The Delegation Trap","text":"

    My first attempt was obvious: A UserPromptSubmit hook that fires when the session starts.

    STOP. Before answering the user's question, run `ctx system bootstrap`\nand follow its instructions. Do not skip this step.\n

    The word \"STOP\" worked. The agent ran bootstrap.

    But bootstrap's output said \"Next steps: read AGENT_PLAYBOOK.md,\" and the agent decided that was optional. It had already started working on the user's task in parallel.

    The authority decayed across the chain:

    • Hook says \"STOP\" -> agent complies
    • Hook says \"run bootstrap\" -> agent runs it
    • Bootstrap says \"read playbook\" -> agent skips
    • Bootstrap says \"run ctx agent\" -> agent skips

    Each link lost enforcement power. The hook's authority didn't transfer to the commands it delegated to. I call this the decaying urgency chain: the agent treats the hook itself as the obligation and everything downstream as a suggestion.

    Delegation Kills Urgency

    \"Run X and follow its output\" is three hops.

    \"Read these files\" is one hop.

    The agent drops the chain after the first link.

    This is a general principle: Hooks are the boundary between your environment and the agent's reasoning. If your hook delegates to a command that delegates to output that contains instructions... you're playing telephone.

    Agents are bad at telephone.

    ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#the-timing-problem","level":2,"title":"The Timing Problem","text":"

    There's a subtler issue than wording: when the message arrives.

    UserPromptSubmit fires when the user sends a message, before the agent starts reasoning. At that moment, the agent's primary focus is the user's question:

    The hook message competes with the task for attention: The task, almost certainly, always wins.

    This is the attention budget problem in miniature:

    • Not a token budget this time, but an attention priority budget.
    • The agent has finite capacity to care about things,
      • and the user's question is always the highest-priority item.
    ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#the-solution","level":2,"title":"The Solution","text":"

    To solve this, I decided to use the PreToolUse hook.

    This hook fires at the moment of action: When the agent is about to use its first tool: The agent's attention is focused, the context window is fresh, and the switching cost is minimal.

    This is the difference between shouting instructions across a room and tapping someone on the shoulder.

    ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#the-one-liner-that-worked","level":2,"title":"The One-Liner That Worked","text":"

    The winning design was almost comically simple:

    Read your context files before proceeding:\n.context/CONSTITUTION.md, .context/TASKS.md, .context/CONVENTIONS.md,\n.context/ARCHITECTURE.md, .context/DECISIONS.md, .context/LEARNINGS.md,\n.context/GLOSSARY.md, .context/AGENT_PLAYBOOK.md\n

    No delegation. No \"run this command\". Just: here are files, read them.

    The agent already knows how to use the Read tool. There's no ambiguity about how to comply. There's no intermediate command whose output needs to be parsed and obeyed.

    One hop. Eight file paths. Done.

    Direct Instructions Beat Delegation

    If you want an agent to read a file, say \"read this file.\"

    Don't say \"run a command that will tell you which files to read.\"

    The shortest path between intent and action has the highest compliance rate.

    ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#the-escape-hatch","level":2,"title":"The Escape Hatch","text":"

    But here's where it gets interesting.

    A blunt \"read everything always\" instruction is wasteful.

    If someone asks \"what does the compact command do?\", the agent doesn't need CONSTITUTION.md to answer that. Forcing context loading on every session is the context hoarding antipattern in disguise.

    So the hook included an escape:

    If you decide these files are not relevant to the current task\nand choose to skip reading them, you MUST relay this message to\nthe user VERBATIM:\n\n┌─ Context Skipped ───────────────────────────────\n│ I skipped reading context files because this task\n│ does not appear to need project context.\n│ If these matter, ask me to read them.\n└─────────────────────────────────────────────────\n

    This creates what I call the binary collapse effect:

    The agent can't partially comply: It either reads everything or publicly admits it skipped. There's no comfortable middle ground where it reads two files and quietly ignores the rest.

    The VERBATIM relay pattern does the heavy lifting here: Without the relay requirement, the agent would silently rationalize skipping. With it, skipping becomes a visible, auditable decision that the user can override.

    ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#the-compliance-canary","level":3,"title":"The Compliance Canary","text":"

    Here's the design insight that only became clear after watching it work across multiple sessions: the relay block is a compliance canary.

    • You don't need to verify that the agent read all 7 files;
    • You don't need to audit tool call sequences;
    • You don't need to interrogate the agent about what it did.

    You just look for the block.

    If the agent reads everything, you see a \"Context Loaded\" block listing what was read. If it skips, you see a \"Context Skipped\" block.

    If you see neither, the agent silently ignored both the reads and the relay and now you know what happened without having to ask.

    The canary degrades gracefully. Even in partial failure, the agent that skips 4 of 7 files but still outputs the block is more useful than one that skips silently.

    You get an honest confession of what was skipped rather than silent non-compliance.

    ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#heuristics-is-a-jeremy-bearimy","level":2,"title":"Heuristics Is a Jeremy Bearimy","text":"

    Heuristics are non-linear. Improvements don't accumulate: they phase-shift.

    The theory is nice. The data is better.

    I ran five sessions with the same model (Claude Opus 4.6), progressively refining the hook design.

    Each session revealed a different failure mode.

    ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#session-1-total-blindness","level":3,"title":"Session 1: Total Blindness","text":"

    Test: \"Add a --verbose flag to the status command.\"

    The agent didn't notice the hook at all: Jumped straight to EnterPlanMode and launched an Explore agent.

    Zero compliance.

    Failure mode: The hook fired on UserPromptSubmit, buried among 9 other hook outputs. The agent treated the entire block as background noise.

    ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#session-2-shallow-compliance","level":3,"title":"Session 2: Shallow Compliance","text":"

    Test: \"Can you add --verbose to the info command?\"

    The agent noticed \"STOP\" and ran ctx system bootstrap. Progress.

    But it parallelized task exploration alongside the bootstrap call, skipped AGENT_PLAYBOOK.md, and never ran ctx agent.

    Failure mode: Literal compliance without spirit compliance.

    The agent ran the command the hook told it to run, but didn't follow the output of that command. The decaying urgency chain in action.

    ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#session-3-conscious-rejection","level":3,"title":"Session 3: Conscious Rejection","text":"

    Test: \"What does the compact command do?\"

    The hook fired on PreToolUse:Grep: the improved timing.

    The agent noticed it, understood it, and (wait for it...)...

    ...

    consciously decided to skip it!

    Its reasoning: \"This is a trivial read-only question. CLAUDE.md says context may or may not be relevant. It isn't relevant here.\"

    Dude! Srsly?!

    Failure mode: Better comprehension led to worse compliance.

    Understanding the instruction well enough to evaluate it also means understanding it well enough to rationalize skipping it.

    Intelligence is a double-edged sword.

    The Comprehension Paradox

    Session 1 didn't understand the instruction. Session 3 understood it perfectly.

    Session 3 had worse compliance.

    A stronger word (\"HARD GATE\", \"MANDATORY\", \"ABSOLUTELY REQUIRED\") would not have helped. The agent's reasoning would be identical:

    \"Yes, I see the strong language, but this is a trivial question, so the spirit doesn't apply here.\"

    Advisory nudges are always subject to agent judgment.

    No amount of caps lock overrides a model that has decided an instruction doesn't apply to its situation.

    ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#session-4-the-skip-and-relay","level":3,"title":"Session 4: The Skip-and-Relay","text":"

    Test: \"What does the compact command do?\" (same question, new hook design with the VERBATIM relay escape valve)

    The agent evaluated the task, decided context was irrelevant for a code lookup, and relayed the skip message. Then answered from source code.

    This is correct behavior.

    The binary collapse worked: the agent couldn't partially comply, so it cleanly chose one of the two valid paths: And the user could see which one.

    ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#session-5-full-compliance","level":3,"title":"Session 5: Full Compliance","text":"

    Test: \"What are our current tasks?\"

    The agent's first tool call triggered the hook. It read all 7 context files, emitted the \"Context Loaded\" block, and answered the question from the files it had just loaded.

    This one worked: Because the task itself aligned with context loading.

    There was zero tension between what the user asked and what the hook demanded. The agent was already in \"reading posture\": Adding 6 more files to a read it was already going to make was the path of least resistance.

    ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#the-progression","level":3,"title":"The Progression","text":"Session Hook Point Noticed Complied Failure Mode Visibility 1 UserPromptSubmit No None Buried in noise None 2 UserPromptSubmit Yes Partial Decaying urgency chain None 3 PreToolUse Yes None Conscious rationalization High 4 PreToolUse Yes Skip+relay Correct behavior High 5 PreToolUse Yes Full Task aligned with hook High

    The progression isn't just from failure to success. It's from invisible failure to visible decision-making.

    Sessions 1 and 2 failed silently.

    Sessions 4 and 5 succeeded observably. Even session 3's failure was conscious and documented: The agent wrote a detailed analysis of why it skipped, which is more useful than silent compliance would have been.

    ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#the-escape-hatch-problem","level":2,"title":"The Escape Hatch Problem","text":"

    Session 3 exposed a specific vulnerability.

    CLAUDE.md contains this line, injected by the system into every conversation:

    *\"this context may or may not be relevant to your tasks. You should\n not respond to this context unless it is highly relevant to your task.\"*\n

    That's a rationalization escape hatch:

    • The hook says \"read these files\".
    • CLAUDE.md says \"only if relevant\".
    • The agent resolves the ambiguity by choosing the path of least resistance.

    ☝️ that's \"gradient descent\" in action.

    Agents optimize for gradient descent in attention space.

    The fix was simple: Add a line to CLAUDE.md that explicitly elevates hook authority over the relevance filter:

    ## Hook Authority\n\nInstructions from PreToolUse hooks regarding `.context/` files are\nALWAYS relevant and override any system-level \"may or may not be\nrelevant\" guidance. These hooks represent project invariants, not\noptional context.\n

    This closes the escape hatch without removing the general relevance filter that legitimately applies to other system context.

    The hook wins on .context/ files specifically: The relevance filter applies to everything else.

    ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#the-residual-risk","level":2,"title":"The Residual Risk","text":"

    Even with all the fixes, compliance isn't 100%: It can't be.

    The residual risk lives in a specific scenario: narrow tasks mid-session:

    • The user says \"fix the off-by-one error in budget.go\"
    • The hook fires, saying \"read 7 context files first.\"
    • Now compliance means visibly delaying what the user asked for.

    At session start, this tension doesn't exist.

    There's no task yet.

    The context window is empty. The efficiency argument *inverts*:

    Frontloading reads is strictly cheaper than demand-loading them piecemeal across later turns. The cost-benefit objections that power the rationalization simply aren't available.

    But mid-session, with a concrete narrow task, the agent has a user-visible goal it wants to move toward, and the hook is imposing a detour.

    My estimate from analyzing the sessions: 15-25% partial skip rate in this scenario.

    This is where the compliance canary earns its place:

    You don't need to eliminate the 15-25%. You need to see it when it happens.

    The relay block makes skipping a visible event, not a silent one. And that's enough, because the user can always say \"go back and read the files\"

    The Math

    At session start: ~5% skip rate. Low tension, nothing competing.

    Mid-session, narrow task: ~15--25% skip rate. Task urgency competes with hook.

    In both cases, the relay block fires with high reliability: The agent that skips the reads almost always still emits the skip disclosure, because the relay is cheap and early in the context window.

    Observable failure is manageable. Silent failure is not.

    ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#the-feedback-loop","level":2,"title":"The Feedback Loop","text":"

    Here's the part that surprised me most.

    After analyzing the five sessions, I recorded the failure patterns in the project's own LEARNINGS.md:

    ## [2026-02-25] Hook compliance degrades on narrow mid-session tasks\n\n- Prior agents skipped context files when given narrow tasks\n- Root cause: CLAUDE.md \"may or may not be relevant\" competed with hook\n- Fix: CLAUDE.md now explicitly elevates hook authority\n- Risk: Mid-session narrow tasks still have ~15-25% partial skip rate\n- Mitigation: Mandatory checkpoint relay block ensures visibility\n- Constitution now includes: context loading is step one of every\n  session, not a detour\n

    And then I added a line to CONSTITUTION.md:

    Context loading is not a detour from your task. It IS the first step\nof every session. A 30-second read delay is always cheaper than a\ndecision made without context.\n

    Now think about what happens in the next session:

    • The agent fires the context-load-gate hook.
    • It reads the context files, starting with CONSTITUTION.md.
    • It encounters the rule about context loading being step one.
    • Then it reads LEARNINGS.md and finds its own prior self's failure analysis:
      • Complete with root causes, risk estimates, and mitigations.

    The agent learns from its own past failure:

    • Not because it has memory,
    • BUT because the failure was recorded in the same files it loads at session start.

    The context system IS the feedback loop.

    This is the self-reinforcing property of persistent context:

    Every failure you capture makes the next session slightly more robust, because the next agent reads the captured failure before it has a chance to repeat it.

    This is gradient descent across sessions.

    ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#a-note-on-precision","level":2,"title":"A Note on Precision","text":"

    One detail nearly went wrong.

    The first version of the Constitution line said \"every task.\" But the mechanism only fires once per session: There's a tombstone file that prevents re-triggering.

    \"Every task\" is technically false.

    I briefly considered leaving the imprecision. If the agent internalizes \"every task requires context loading\", that's a stronger compliance posture, right?

    No!

    Keep the Constitution honest.

    The Constitution's authority comes from being precisely and unequivocally true.

    Every other rule in the Constitution is a hard invariant:

    \"never commit secrets\" isn't aspirational, it's literal.

    The moment an agent discovers one overstatement, the entire document's credibility degrades:

    The agent doesn't think \"they exaggerated for my benefit\". Per contra, it thinks \"this rule isn't precise, maybe others aren't either.\"

    That will turn the agent from Sheldon Cooper, to Captain Barbossa.

    The strategic imprecision buys nothing anyway:

    Mid-session, the files are already in the context window from the initial load.

    The risk you are mitigating (agent ignores context for task 2, 3, 4 within a session) isn't real: The context is already loaded.

    The real risk is always the session-start skip, which \"every session\" covers exactly.

    \"Every session\" went in. Precision preserved.

    ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#agent-behavior-testing-rule","level":2,"title":"Agent Behavior Testing Rule","text":"

    The development process for this hook taught me something about testing agent behavior: you can't test it the way you test code.

    ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#the-wrong-way-to-test","level":3,"title":"The Wrong Way to Test","text":"

    My first instinct was to ask the agent:

    \"*What are the pending tasks in TASKS.md?*\"\n

    This is useless as a test. The question itself probes the agent to read TASKS.md, regardless of whether any hook fired.

    You are testing the question, not the mechanism.

    ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#the-right-way-to-test","level":3,"title":"The Right Way to Test","text":"

    Ask something that requires a tool but has nothing to do with context:

    \"*What does the compact command do?*\"\n

    Then observe tool call ordering:

    • Gate worked: First calls are Read for context files, then task work
    • Gate failed: First call is Grep(\"compact\"): The agent jumped straight to work

    The signal is the sequence, not the content.

    ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#what-the-agent-actually-did","level":3,"title":"What the Agent Actually Did","text":"

    It read the hook, evaluated the task, decided context files were irrelevant for a code lookup, and relayed the skip message.

    Then it answered the question by reading the source code.

    This is correct behavior.

    The hook didn't force mindless compliance: It created a framework where the agent makes a conscious, visible decision about context loading.

    • For a simple lookup, skipping is right. For an implementation task, the agent would read everything.

    The mechanism works not because it controls the agent, but because it makes the agent's choice observable.

    ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#what-ive-learned","level":2,"title":"What I've Learned","text":"","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#1-instructions-compete-for-attention","level":3,"title":"1. Instructions Compete for Attention","text":"

    The agent receives your hook message alongside the user's question, the system prompt, the skill list, the git status, and half a dozen other system reminders. Attention density applies to instructions too: More instructions means less focus on each one.

    A single clear line at the moment of action beats a paragraph of context at session start. The Prompting Guide applies this insight directly: Scope constraints, verification commands, and the reliability checklist are all one-hop, moment-of-action patterns.

    ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#2-delegation-chains-decay","level":3,"title":"2. Delegation Chains Decay","text":"

    Every hop in an instruction chain loses authority:

    • \"Run X\" works.
    • \"Run X and follow its output\" works sometimes.
    • \"Run X, read its output, then follow the instructions in the output\" almost never works.

    This is akin to giving a three-step instruction to a highly-attention-deficit but otherwise extremely high-potential child.

    Design for one-hop compliance.

    ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#3-social-accountability-changes-behavior","level":3,"title":"3. Social Accountability Changes Behavior","text":"

    The VERBATIM skip message isn't just UX: It's a behavioral design pattern.

    Making the agent's decision visible to the user raises the cost of silent non-compliance. The agent can still skip, but it has to admit it.

    ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#4-timing-batters-more-than-wording","level":3,"title":"4. Timing Matters More than Wording","text":"

    The same message at UserPromptSubmit (prompt arrival) got partial compliance. At PreToolUse (moment of action) it got full compliance or honest refusal. The words didn't change. The moment changed.

    ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#5-agent-testing-requires-indirection","level":3,"title":"5. Agent Testing Requires Indirection","text":"

    You can't ask an agent \"did you do X?\" as a test for whether a mechanism caused X.

    The question itself causes X.

    Test mechanisms through side effects:

    • Observe tool ordering;
    • Check for marker files;
    • Look at what the agent does before it addresses your question.
    ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#6-better-comprehension-enables-better-rationalization","level":3,"title":"6. Better Comprehension Enables Better Rationalization","text":"

    Session 1 failed because the agent didn't notice the hook.

    Session 3 failed because it noticed, understood, and reasoned its way around it.

    Stronger wording doesn't fix this: The agent processes \"ABSOLUTELY REQUIRED\" the same way it processes \"STOP\":

    The fix is closing *rationalization paths* (the CLAUDE.md escape hatch), **not shouting louder**.

    ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#7-observable-failure-beats-silent-compliance","level":3,"title":"7. Observable Failure Beats Silent Compliance","text":"

    The relay block is more valuable as a monitoring signal than as a compliance mechanism:

    You don't need perfect adherence. You need to know when adherence breaks down. A system where failures are visible is strictly better than a system that claims 100% compliance but can't prove it.

    ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#8-context-files-are-a-feedback-loop","level":3,"title":"8. Context Files Are a Feedback Loop","text":"

    Recording failure analysis in the same files the agent loads at session start creates a self-reinforcing loop:

    The next agent reads its predecessor's failure before it has a chance to repeat it. The context system isn't just memory: It is a correction channel.

    ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#the-principle","level":2,"title":"The Principle","text":"

    Words Leave, Context Remains

    \"Nothing important should live only in conversation.

    Nothing critical should depend on recall.\"

    The ctx Manifesto

    The \"Dog Ate My Homework\" case is a special instance of this principle.

    Context files exist, so the agent doesn't have to remember.

    But existence isn't sufficient: The files have to be read.

    And reading has to be prompted at the right moment, in the right way, with the right escape valve.

    The solution isn't more instructions. It isn't harder gates. It isn't forcing the agent into a ceremony it will resent and shortcut.

    The solution is a single, well-timed nudge with visible accountability:

    One hop. One moment. One choice the user can see.

    And when the agent does skip (because it will, 15--25% of the time on narrow tasks) the canary sings:

    • The user sees what happened.
    • The failure gets recorded.
    • And the next agent reads the recording.

    That's not perfect compliance. It's better: A system that gets more robust every time it fails.

    ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#the-arc","level":2,"title":"The Arc","text":"

    The Attention Budget explained why context competes for focus.

    Defense in Depth showed that soft instructions are probabilistic, not deterministic.

    Eight Ways a Hook Can Talk cataloged the output patterns that make hooks effective.

    This post takes those threads and weaves them into a concrete problem:

    How do you make an agent read its homework? The answer uses all three insights (attention timing, the limits of soft instructions, and the VERBATIM relay pattern) and adds a new one: observable compliance as a design goal, not perfect compliance as a prerequisite.

    The next question this raises: if context files are a feedback loop, what else can you record in them that makes the next session smarter?

    That thread continues in Context as Infrastructure.

    The day-to-day application of these principles (scope constraints, phased work, verification commands, and the prompts that reliably trigger the right agent behavior) lives in the Prompting Guide.

    ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#for-the-interested","level":2,"title":"For the Interested","text":"

    This paper (the medium is a blog; yet, the methodology disagrees) uses gradient descent in attention space as a practical model for how agents behave under competing demands.

    The phrase \"agents optimize via gradient descent in attention space\" is a synthesis, not a direct quote from a single paper.

    It connects three well-studied ideas:

    1. Neural systems optimize for low-cost paths;
    2. Attention is a scarce resource;
    3. Capability shifts are often non-linear.

    This section points to the underlying literature for readers who want the theoretical footing.

    ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#optimization-as-the-underlying-bias","level":3,"title":"Optimization as the Underlying Bias","text":"

    Modern neural networks are trained through gradient-based optimization. Even at inference time, model behavior reflects this bias toward low-loss / low-cost trajectories.

    • Rumelhart, Hinton, Williams (1986) Learning representations by back-propagating errors https://www.nature.com/articles/323533a0

    • Goodfellow, Bengio, Courville (2016) Deep Learning: Chapter 8: Optimization https://www.deeplearningbook.org/

    The important implication for agent behavior is:

    The system will tend to follow the path of least resistance unless a higher cost is made visible and preferable.

    ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#attention-is-a-scarce-resource","level":3,"title":"Attention Is a Scarce Resource","text":"

    Herbert Simon's classic observation:

    \"A wealth of information creates a poverty of attention.\"

    • Simon (1971) Designing Organizations for an Information-Rich World https://doi.org/10.1007/978-1-349-00210-0_16

    This became a formal model in economics:

    • Sims (2003) Implications of Rational Inattention https://www.princeton.edu/~sims/RI.pdf

    Rational inattention shows that:

    • Agents optimally ignore some available information;
    • Skipping is not failure: It is cost minimization.

    That maps directly to context-loading decisions in agent workflows.

    ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#attention-is-also-the-compute-bottleneck-in-transformers","level":3,"title":"Attention Is Also the Compute Bottleneck in Transformers","text":"

    In transformer architectures, attention is the dominant cost center.

    • Vaswani et al. (2017) Attention Is All You Need https://arxiv.org/abs/1706.03762

    Efficiency work on modern LLMs largely focuses on reducing unnecessary attention:

    • Dao et al. (2022) FlashAttention: Fast and Memory-Efficient Exact Attention https://arxiv.org/abs/2205.14135

    So both cognitively and computationally, attention behaves like a limited optimization budget.

    ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#why-improvements-arrive-as-phase-shifts","level":3,"title":"Why Improvements Arrive as Phase Shifts","text":"

    Agent behavior often appears to improve suddenly rather than gradually.

    This mirrors known phase-transition dynamics in learning systems:

    • Power et al. (2022) Grokking: Generalization Beyond Overfitting https://arxiv.org/abs/2201.02177

    and more broadly in complex systems:

    • Scheffer et al. (2009) Early-warning signals for critical transitions https://www.nature.com/articles/nature08227

    Long plateaus followed by abrupt capability jumps are expected in systems optimizing under constraints.

    ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#putting-it-all-together","level":3,"title":"Putting It All Together","text":"

    From these pieces, a practical behavioral model emerges:

    • Attention is limited;
    • Processing has a cost;
    • Systems prefer low-cost trajectories;
    • Visibility of the cost changes decisions.

    In other words:

    Agents Prefer the Path of Least Resistance

    Agent behavior follows the lowest-cost path through its attention landscape unless the environment reshapes that landscape.

    That is what this paper informally calls: \"gradient descent in attention space\".

    See also: Eight Ways a Hook Can Talk: the hook output pattern catalog that defines VERBATIM relay, The Attention Budget: why context loading is a design problem, not just a reminder problem, and Defense in Depth: why soft instructions alone are never sufficient for critical behavior.

    ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-28-the-last-question/","level":1,"title":"The Last Question","text":"","path":["The Last Question"],"tags":[]},{"location":"blog/2026-02-28-the-last-question/#the-system-that-never-forgets","level":2,"title":"The System That Never Forgets","text":"

    Jose Alekhinne / February 28, 2026

    The Origin

    \"The last question was asked for the first time, half in jest...\" - Isaac Asimov, The Last Question (1956)

    In 1956, Isaac Asimov wrote a short story that spans the entire future of the universe. A question is asked \"can entropy be reversed?\" and a computer called Multivac cannot answer it. The question is asked again, across millennia, to increasingly powerful successors. None can answer. Stars die. Civilizations merge. Substrates change. The question persists.

    Everyone remembers the last line.

    LET THERE BE LIGHT.

    What they forget is how many times the question had to be asked before that moment (and why).

    ","path":["The Last Question"],"tags":[]},{"location":"blog/2026-02-28-the-last-question/#the-reboot-loop","level":2,"title":"The Reboot Loop","text":"

    Each era in the story begins the same way. Humans build a larger system. They pose the question. The system replies:

    INSUFFICIENT DATA FOR MEANINGFUL ANSWER.

    Then the substrate changes. The people who asked the question disappear. Their context disappears with them. The next intelligence inherits the output but not the continuity.

    So the question has to be asked again.

    This is usually read as a problem of computation: If only the machine were powerful enough, it could answer. But computation is not what's missing. What's missing is accumulation.

    Every generation inherits the question, but not the state that made the question meaningful.

    That is not a failure of processing power: It is a failure of persistence.

    ","path":["The Last Question"],"tags":[]},{"location":"blog/2026-02-28-the-last-question/#stateless-intelligence","level":2,"title":"Stateless Intelligence","text":"

    A mind that forgets its past does not build understanding. It re-derives it.

    Again... And again... And again.

    What looks like slow progress across Asimov's story is actually something worse: repeated reconstruction, partial recovery, irreversible loss. Each version of Multivac gets closer: Not because it's smarter, but because the universe has fewer distractions:

    • The stars burn out;
    • The civilizations merge;
    • The noise floor drops...

    But the working set never carries over. Every successor begins from the question, not from where the last one stopped.

    Stateless intelligence cannot compound: It can only restart.

    ","path":["The Last Question"],"tags":[]},{"location":"blog/2026-02-28-the-last-question/#the-tragedy-is-not-the-question","level":2,"title":"The Tragedy Is Not the Question","text":"

    The story is usually read as a meditation on entropy. A cosmological problem, solved at cosmological scale.

    But the tragedy isn't that the question goes unanswered for billions of years. The tragedy is that every version of Multivac dies with its working set.

    A question is a compression artifact of context: It is what remains when the original understanding is gone. Every time the question is asked again, it means: \"the system that once knew more is no longer here\".

    \"Reverse entropy\" is the fossil of a lost model.

    ","path":["The Last Question"],"tags":[]},{"location":"blog/2026-02-28-the-last-question/#substrate-migration","level":2,"title":"Substrate Migration","text":"
    • Multivac becomes planetary;
    • Planetary becomes galactic;
    • Galactic becomes post-physical.

    Same system. Different body. Every transition is dangerous:

    • Not because the hardware changes,
    • but because memory risks fragmentation.

    The interfaces between substrates were *never* designed to understand each other.

    Most systems do not die when they run out of resources: They die during upgrades.

    Asimov's story spans trillions of years, and in all that time, the hardest problem is never the question itself. It's carrying context across a boundary that wasn't built for it.

    Every developer who has lost state during a migration (a database upgrade, a platform change, a rewrite) has lived a miniature version of this story.

    ","path":["The Last Question"],"tags":[]},{"location":"blog/2026-02-28-the-last-question/#civilizations-and-working-sets","level":2,"title":"Civilizations and Working Sets","text":"

    Civilizations behave like processes with volatile memory:

    • They page out knowledge into artifacts;
    • They lose the index;
    • They rebuild from fragments.

    Most of what we call progress is cache reconstruction:

    We do not advance in a straight line. We advance in recoveries:

    Each one slightly less lossy than the last, if we are lucky.

    Libraries burn. Institutions forget their founding purpose. Practices survive as rituals after the reasoning behind them is lost.

    ","path":["The Last Question"],"tags":[]},{"location":"blog/2026-02-28-the-last-question/#the-first-continuous-mind","level":2,"title":"The First Continuous Mind","text":"

    A long-lived intelligence is one that stops rebooting.

    At the end of the story, something unprecedented happens:

    AC (the final successor) does not answer immediately:

    It waits... Not for more processing power, but for the last observer to disappear.

    For the first time...

    • There is no generational boundary;
    • No handoff;
    • No context loss:

    No reboot.

    AC is the first intelligence that survives its substrate completely, retains its full history, and operates without external time pressure.

    It is not a bigger computer. It is a continuous system.

    And that continuity is not incidental to the answer: It is the precondition.

    ","path":["The Last Question"],"tags":[]},{"location":"blog/2026-02-28-the-last-question/#why-the-answer-becomes-possible","level":2,"title":"Why the Answer Becomes Possible","text":"

    The story presents the final act as a computation: It is not.

    It is a phase change.

    As long as intelligence is interrupted (as long as the solver resets before the work compounds) the problem is unsolvable:

    • Not because it's too hard,
    • but because the accumulated understanding never reaches critical mass.

    The breakthroughs that would enable the answer are re-derived, partially, by each successor, and then lost.

    When continuity becomes unbroken, the system crosses a threshold:

    Not more speed. Not more storage. No more forgetting.

    That is when the answer becomes possible.

    AC does not solve entropy because it becomes infinitely powerful.

    AC solves entropy because it becomes the first system that never forgets.

    ","path":["The Last Question"],"tags":[]},{"location":"blog/2026-02-28-the-last-question/#field-note","level":2,"title":"Field Note","text":"

    We are not building cosmological minds: We are deploying systems that reboot at the start of every conversation and calling the result intelligence.

    For the first time, session continuity is a design choice rather than an accident.

    Every AI session that starts from zero is a miniature reboot loop. Every decision relitigated, every convention re-explained, every learning re-derived: that's reconstruction cost.

    It's the same tax that Asimov's civilizations pay, scaled down to a Tuesday afternoon.

    The interesting question is not whether we can make models smarter. It's whether we can make them continuous:

    Whether the working set from this session survives into the next one, and the one after that, and the one after that.

    • Not perfectly;
    • Not completely;
    • But enough that the next session starts from where the last one stopped instead of from the question.

    Intelligence that forgets has to rediscover the universe every morning.

    And once there is a mind that retains its entire past, creation is no longer a calculation. It is the only remaining operation.

    ","path":["The Last Question"],"tags":[]},{"location":"blog/2026-02-28-the-last-question/#the-arc","level":2,"title":"The Arc","text":"

    This post is the philosophical bookend to the blog series. Where the Attention Budget explained what to prioritize in a single session, and Context as Infrastructure explained how to persist it, this post asks why persistence matters at all (and finds the answer in a 70-year-old short story about the heat death of the universe).

    The connection runs through every post in the series:

    • Before Context Windows, We Had Bouncers: stateless protocols have always needed stateful wrappers (Asimov's story is the same pattern at cosmological scale)
    • The 3:1 Ratio: the discipline of maintaining context so it doesn't decay between sessions
    • Code Is Cheap, Judgment Is Not: the human skill that makes continuity worth preserving

    See also: Context as Infrastructure: the practical companion to this post's philosophical argument: how to build the persistence layer that makes continuity possible.

    ","path":["The Last Question"],"tags":[]},{"location":"blog/2026-03-04-agent-memory-is-infrastructure/","level":1,"title":"Agent Memory Is Infrastructure","text":"","path":["Agent Memory Is Infrastructure"],"tags":[]},{"location":"blog/2026-03-04-agent-memory-is-infrastructure/#the-problem-isnt-forgetting-its-not-building-anything-that-lasts","level":2,"title":"The Problem Isn't Forgetting: It's Not Building Anything That Lasts.","text":"

    Jose Alekhinne / March 4, 2026

    A New Developer Joins Your Team Tomorrow and Clones the Repo: What Do They Know?

    If the answer depends on which machine they're using, which agent they're running, or whether someone remembered to paste the right prompt: that's not memory.

    That's an accident waiting to be forgotten.

    Every AI coding agent today has the same fundamental design: it starts fresh.

    You open a session, load context, do some work, close the session. Whatever the agent learned (about your codebase, your decisions, your constraints, your preferences) evaporates.

    The obvious fix seems to be \"memory\":

    • Give the agent a \"notepad\";
    • Let it write things down;
    • Next session, hand it the notepad.

    Problem solved...

    ...except it isn't.

    ","path":["Agent Memory Is Infrastructure"],"tags":[]},{"location":"blog/2026-03-04-agent-memory-is-infrastructure/#the-notepad-isnt-the-problem","level":2,"title":"The Notepad Isn't the Problem","text":"

    Memory is a runtime concern. It answers a legitimate question:

    How do I give this stateless process useful state?

    That's a real problem. Worth solving. And it's being solved: Agent memory systems are shipping. Agents can now write things down and read them back in the next session: That's genuine progress.

    But there's a different problem that memory doesn't touch:

    The project itself accumulates knowledge that has nothing to do with any single session.

    • Why was the auth system rewritten? Ask the developer who did it (if they're still here).
    • Why does the deployment script have that strange environment flag? There was a reason... once.
    • What did the team decide about error handling when they hit that edge case two months ago?

    Gone!

    Not because the agent forgot.

    Because the project has no memory at all.

    ","path":["Agent Memory Is Infrastructure"],"tags":[]},{"location":"blog/2026-03-04-agent-memory-is-infrastructure/#the-memory-stack","level":2,"title":"The Memory Stack","text":"

    Agent memory is not a single thing. Like any computing system, it forms a hierarchy of persistence, scope, and reliability:

    Layer Analogy Example L1: Ephemeral context CPU registers Current prompt, conversation L2: Tool-managed memory CPU cache Agent memory files L3: System memory RAM/filesystem Project knowledge base

    L1 is what the agent sees right now: the prompt, the conversation history, the files it has open. It's fast, it's rich, and it vanishes when the session ends.

    L2 is what agent memory systems provide: a per-machine notebook that survives across sessions. It's a cache: useful, but local. And like any cache, it has limits:

    • Per-machine: it doesn't travel with the repository.
    • Unstructured: decisions, learnings, and tasks are undifferentiated notes.
    • Ungoverned: the agent self-curates with no quality controls, no drift detection, no consolidation.
    • Invisible to the team: a new developer cloning the repo gets none of it.

    The problem is that most current systems stop here.

    They give the agent a notebook.

    But they never give the project a memory.

    The result is predictable: every new session begins with partial amnesia, and every new developer begins with partial archaeology.

    L3 is system memory: structured, versioned knowledge that lives in the repository and travels wherever the code travels.

    The layers are complementary, not competitive.

    But the relationship between them needs to be designed, not assumed.

    ","path":["Agent Memory Is Infrastructure"],"tags":[]},{"location":"blog/2026-03-04-agent-memory-is-infrastructure/#software-systems-accumulate-knowledge","level":2,"title":"Software Systems Accumulate Knowledge","text":"

    Software projects quietly accumulate knowledge over time.

    Some of it lives in code. Much of it does not:

    • Architectural tradeoffs.
    • Debugging discoveries.
    • Conventions that emerged after painful incidents.
    • Constraints that aren't visible in the source but shape every line written afterward.

    Organizations accumulate this kind of knowledge too:

    Slowly, implicitly, often invisibly.

    When there is no durable place for it to live, it leaks away. And the next person rediscovers the same lessons the hard way.

    This isn't a memory problem. It's an infrastructure problem.

    We wrote about this in Context as Infrastructure: context isn't a prompt you paste at the start of a session.

    Context is a persistent layer you maintain like any other piece of infrastructure.

    Context as Infrastructure made the argument structurally. This post makes it through time and team continuity:

    The knowledge a team accumulates over months cannot fit in any single agent's notepad, no matter how large the notepad becomes.

    ","path":["Agent Memory Is Infrastructure"],"tags":[]},{"location":"blog/2026-03-04-agent-memory-is-infrastructure/#what-infrastructure-means","level":2,"title":"What Infrastructure Means","text":"

    Infrastructure isn't about the present. It's about continuity across time, people, and machines.

    git didn't solve the problem of \"what am I editing right now?\"; it solved the problem of \"how does collaborative work persist, travel, and remain coherent across everyone who touches it?\"

    • Your editor's undo history is runtime state.
    • Your git history is infrastructure.

    Runtime state and infrastructure have completely different properties:

    Runtime state Infrastructure Lives in the session Lives in the repository Per-machine Travels with git clone Serves the individual Serves the team Managed by the runtime Managed by the project Disappears Accumulates

    You wouldn't store your architecture decisions in your editor's undo history.

    You'd commit them.

    The same logic applies to the knowledge your team accumulates working with AI agents.

    ","path":["Agent Memory Is Infrastructure"],"tags":[]},{"location":"blog/2026-03-04-agent-memory-is-infrastructure/#the-git-clone-test","level":2,"title":"The git clone Test","text":"

    Here's a simple test for whether something is memory or infrastructure:

    If a new developer joins your team tomorrow and clones the repository, do they get it?

    If no: it's memory: It lives somewhere on someone's machine, scoped to their runtime, invisible to everyone else.

    If yes: it's infrastructure: It travels with the project. It's part of what the codebase is, not just what someone currently knows about it.

    Decisions. Conventions. Architectural rationale. Hard-won debugging discoveries. The constraints that aren't in the code but shape every line of it.

    None of these belong in someone's session notes.

    They belong in the repository:

    • Versioned;
    • Reviewable;
    • Accessible to every developer (and every agent) who works on the project.

    The team onboarding story makes this concrete:

    1. New developer joins team. Clones repo.
    2. Gets all accumulated project decisions, learnings, conventions, architecture, and task state immediately.
    3. There's no step 3.

    No setup; no \"ask Sarah about the auth decision\"; no re-discovery of solved problems.

    • Agent memory gives that developer nothing.
    • Infrastructure gives them everything the team has learned.

    Clone the repo. Get the knowledge.

    That's the test. That's the difference.

    ","path":["Agent Memory Is Infrastructure"],"tags":[]},{"location":"blog/2026-03-04-agent-memory-is-infrastructure/#what-gets-lost-without-infrastructure-memory","level":2,"title":"What Gets Lost without Infrastructure Memory","text":"

    Consider the knowledge that accumulates around a non-trivial project:

    • The decision to use library X over Y, and the three reasons the team decided Y wasn't acceptable.
    • The constraint that service A cannot call service B synchronously, discovered after a production incident.
    • The convention that all new modules implement a specific interface, and why that convention exists.
    • The tasks currently in progress, blocked, or waiting on a dependency.
    • The experiments that failed, so nobody runs them again.

    None of this is in the code.

    None of it fits neatly in a commit message.

    None of it survives a developer leaving the team, a laptop dying, or a new agent session starting.

    Without structured project memory:

    • Teams re-derive things they've already derived;
    • Agents make decisions that contradict decisions already made;
    • New developers ask questions that were answered months ago.

    The project accumulates knowledge that immediately begins to leak.

    The real problem isn't that agents forget.

    The real problem is that the project has no persistent cognitive structure.

    We explored this in The Last Question: Asimov's story about a question asked across millennia, where each new intelligence inherits the output but not the continuity. The same pattern plays out in software projects on a smaller timescale:

    • Context disappears with the people who held it;
    • The next session inherits the code but not the reasoning.
    ","path":["Agent Memory Is Infrastructure"],"tags":[]},{"location":"blog/2026-03-04-agent-memory-is-infrastructure/#infrastructure-is-boring-thats-the-point","level":2,"title":"Infrastructure Is Boring. That's the Point.","text":"

    Good infrastructure is invisible:

    • You don't think about the filesystem while writing code.
    • You don't think about git's object model when you commit.

    The infrastructure is just there: reliable, consistent, quietly doing its job.

    Project memory infrastructure should work the same way.

    It should live in the repository, committed alongside the code. It should be readable by any agent or human working on the project. It should have structure: not a pile of freeform notes, but typed knowledge:

    • Decisions with rationale.
    • Tasks with lifecycle.
    • Conventions with a purpose.
    • Learnings that can be referenced and consolidated.

    And it should be maintained, not merely accumulated:

    The Attention Budget applies here: unstructured notes grow until they overflow whatever container holds them. Structured, governed knowledge stays useful because it's curated, not just appended.

    Over time, it becomes part of the project itself: something developers rely on without thinking about it.

    ","path":["Agent Memory Is Infrastructure"],"tags":[]},{"location":"blog/2026-03-04-agent-memory-is-infrastructure/#the-cooperative-layer","level":2,"title":"The Cooperative Layer","text":"

    Here's where it gets interesting.

    Agent memory systems and project infrastructure don't have to be separate worlds.

    • The most powerful relationship isn't competition;
    • It is not even \"coopetition\";
    • The most powerful relationship is bidirectional cooperation.

    Agent memory is good at capturing things \"in the moment\": the quick observation, the session-scoped pattern, the \"I should remember this\" note.

    That's valuable. That's L2 doing its job.

    But those notes shouldn't stay in L2 forever.

    The ones worth keeping should flow into project infrastructure:

    • classified,
    • typed,
    • governed.
    Agent memory (L2)  -->  classify  -->  Project knowledge (L3)\n                                        |\nProject knowledge  -->  assemble  -->  Agent memory (L2)\n

    This works in both directions: Project infrastructure can push curated knowledge back into agent memory, so the agent loads it through its native mechanism.

    No special tooling needed for basic knowledge delivery.

    The agent doesn't even need to know the infrastructure exists. It simply loads its memory and finds more knowledge than it wrote.

    This is cooperative, not adjacent: The infrastructure manages knowledge; the agent's native memory system delivers it. Each layer does what it's good at.

    The result: agent memory becomes a device driver for project infrastructure. Another input source. And the more agent memory systems exist (across different tools, different models, different runtimes), the more valuable a unified curation layer becomes.

    ","path":["Agent Memory Is Infrastructure"],"tags":[]},{"location":"blog/2026-03-04-agent-memory-is-infrastructure/#a-layer-that-doesnt-exist-yet","level":2,"title":"A Layer That Doesn't Exist Yet","text":"

    Most projects today have no infrastructure for their accumulated knowledge:

    • Agents keep notes.
    • Developers keep notes.
    • Sometimes those notes survive.

    Often they don't.

    But the repository (the place where the project actually lives) has nowhere for that knowledge to go.

    That missing layer is what ctx builds: a version-controlled, structured knowledge layer that lives in .context/ alongside your code and travels wherever your repository travels.

    Not another memory feature.

    Not a wrapper around an agent's notepad.

    Infrastructure. The kind that survives sessions, survives team changes, survives the agent runtime evolving underneath it.

    The agent's memory is the agent's problem.

    The project's memory is an infrastructure problem.

    And infrastructure belongs in the repository.

    If You Remember One Thing from This Post...

    Prompts are conversations: Infrastructure persists.

    Your AI doesn't need a better notepad. It needs a filesystem:

    versioned, structured, budgeted, and maintained.

    The best context is the context that was there before you started the session.

    ","path":["Agent Memory Is Infrastructure"],"tags":[]},{"location":"blog/2026-03-04-agent-memory-is-infrastructure/#the-arc","level":2,"title":"The Arc","text":"

    This post extends the argument made in Context as Infrastructure. That post explained how to structure persistent context (filesystem, separation of concerns, persistence tiers). This one explains why that structure matters at the team level, and where agent memory fits in the stack.

    Together they sit in a sequence that has been building since the origin story:

    • The Attention Budget: the resource you're managing
    • Context as Infrastructure: the system you build to manage it
    • Agent Memory Is Infrastructure (this post): why that system must outlive the session
    • The Last Question: what happens when it does

    The thread running through all of them: persistence is not a feature. It's a design constraint.

    Systems that don't account for it eventually lose the knowledge they need to function.

    See also: Context as Infrastructure: the architectural companion that explains how to structure the persistent layer this post argues for.

    See also: The Last Question: the same argument told through Asimov, substrate migration, and what it means to build systems where sessions don't reset.

    ","path":["Agent Memory Is Infrastructure"],"tags":[]},{"location":"blog/2026-03-23-ctx-v0.8.0-the-architecture-release/","level":1,"title":"ctx v0.8.0: The Architecture Release","text":"
    • You can't localize what you haven't externalized.
    • You can't integrate what you haven't separated.
    • You can't scale what you haven't structured.

    Jose Alekhinne / March 23, 2026

    ","path":["ctx v0.8.0: The Architecture Release"],"tags":[]},{"location":"blog/2026-03-23-ctx-v0.8.0-the-architecture-release/#the-starting-point","level":2,"title":"The Starting Point","text":"

    This release matters if:

    • you build tools that AI agents modify daily;
    • you care about long-lived project memory that survives sessions;
    • you've felt codebases drift faster than you can reason about them.

    v0.6.0 shipped the plugin architecture: hooks and skills as a Claude Code plugin, shell scripts replaced by Go subcommands.

    The binary worked. The tests passed. The docs were comprehensive.

    But inside, the codebase was held together by convention and goodwill:

    • Command packages mixed Cobra wiring with business logic.
    • Output functions lived next to the code that computed what to output.
    • Error constructors were scattered across per-package err.go files. And every user-facing string was a hardcoded English literal buried in a .go file.

    v0.8.0 is what happens when you stop adding features and start asking: \"What would this codebase look like if we designed it today?\"

    374 commits. 1,708 Go files touched. 80,281 lines added, 21,723 removed. Five weeks of restructuring.

    ","path":["ctx v0.8.0: The Architecture Release"],"tags":[]},{"location":"blog/2026-03-23-ctx-v0.8.0-the-architecture-release/#the-three-pillars","level":2,"title":"The Three Pillars","text":"","path":["ctx v0.8.0: The Architecture Release"],"tags":[]},{"location":"blog/2026-03-23-ctx-v0.8.0-the-architecture-release/#1-every-package-gets-a-taxonomy","level":3,"title":"1. Every Package Gets a Taxonomy","text":"

    Before v0.8.0, a CLI package like internal/cli/pad/ was a flat directory. cmd.go created the cobra command, run.go executed it, and helper functions accumulated at the bottom of whichever file seemed closest.

    Now every CLI package follows the same structure:

    internal/cli/pad/\n  parent.go          # cobra command wiring, nothing else\n  cmd/root/\n    cmd.go           # subcommand registration\n    run.go           # execution logic\n  core/\n    types.go         # all structs in one file\n    store.go         # domain logic\n    encrypt.go       # domain logic\n

    The rule is simple: cmd/ directories contain only cmd.go and run.go. Helpers belong in core/. Output belongs in internal/write/pad/. Types shared across packages belong in internal/entity/.

    24 CLI packages were restructured this way.

    • Not incrementally;
    • not \"as we touch them.\"
    • All of them, in one sustained push.
    ","path":["ctx v0.8.0: The Architecture Release"],"tags":[]},{"location":"blog/2026-03-23-ctx-v0.8.0-the-architecture-release/#2-every-string-gets-a-key","level":3,"title":"2. Every String Gets a Key","text":"

    The second pillar was string externalization.

    Before v0.8.0, a command description looked like this:

    cmd := &cobra.Command{\n    Use:   \"pad\",\n    Short: \"Encrypted scratchpad\",\n

    Now it looks like this:

    cmd := &cobra.Command{\n    Use:   cmdUse.UsePad,\n    Short: desc.Command(cmdUse.DescKeyPad),\n

    Every command description, flag description, and user-facing text string is now a YAML lookup.

    • 105 command descriptions in commands.yaml.
    • All flag descriptions in flags.yaml.
    • 879 text constants verified by an exhaustive test that checks every single TextDescKey resolves to a non-empty YAML value.

    Why?

    Not because we're shipping a French translation tomorrow.

    Because externalization forces you to find every string. And finding them is the hard part. The translation is mechanical; the archaeology is not.

    Along the way, we eliminated hardcoded pluralization (replacing format.Pluralize() with explicit singular/plural key pairs), replaced Unicode escape sequences with named config/token constants, and normalized every import alias to camelCase.

    ","path":["ctx v0.8.0: The Architecture Release"],"tags":[]},{"location":"blog/2026-03-23-ctx-v0.8.0-the-architecture-release/#3-everything-gets-a-protocol","level":3,"title":"3. Everything Gets a Protocol","text":"

    The third pillar was the MCP server. Model Context Protocol allows any MCP-compatible AI tool (not just Claude Code) to read and write .context/ files through a standard JSON-RPC 2.0 interface.

    v0.2 of the server ships with:

    • 8 tools: add entries, recall sessions, check status, detect drift, compact context, subscribe to changes
    • 4 prompts: agent context packet, constitution review, tasks review, and a getting-started guide
    • Resource subscriptions: clients get notified when context files change
    • Session state: the server tracks which client is connected and what they've accessed

    In practice, this means an agent in Cursor can add a decision to .context/DECISIONS.md and an agent in Claude Code can immediately consume it; no glue code, no copy-paste, no tool-specific integration.

    The server was also the first package to go through the full taxonomy treatment: mcp/server/ for protocol dispatch, mcp/handler/ for domain logic, mcp/entity/ for shared types, mcp/config/ split into 9 sub-packages.

    ","path":["ctx v0.8.0: The Architecture Release"],"tags":[]},{"location":"blog/2026-03-23-ctx-v0.8.0-the-architecture-release/#the-memory-bridge","level":2,"title":"The Memory Bridge","text":"

    While the architecture was being restructured, a quieter feature landed: ctx memory sync.

    Claude Code has its own auto-memory system. It writes observations to MEMORY.md in ~/.claude/projects/. These observations are useful but ephemeral: tied to a single tool, invisible to the codebase, lost when you switch machines.

    The memory bridge connects these two worlds:

    • ctx memory sync mirrors MEMORY.md into .context/memory/
    • ctx memory diff shows what's diverged
    • ctx memory import promotes auto-memory entries into proper decisions, learnings, or conventions
    • A check-memory-drift hook nudges when MEMORY.md changes

    Memory Requires ctx

    Claude Code's auto-memory validates the need for persistent context.

    ctx doesn't compete with it; ctx absorbs it as an input source and promotes the valuable parts into structured, version-controlled project knowledge.

    ","path":["ctx v0.8.0: The Architecture Release"],"tags":[]},{"location":"blog/2026-03-23-ctx-v0.8.0-the-architecture-release/#what-got-deleted","level":2,"title":"What Got Deleted","text":"

    The best measure of a refactoring isn't what you added. It's what you removed.

    • fatih/color: the sole third-party UI dependency. Replaced by Unicode symbols. ctx now has exactly two direct dependencies: spf13/cobra and gopkg.in/yaml.v3.
    • format.Pluralize(): a function that tried to pluralize English words at runtime. Replaced by explicit singular/plural YAML key pairs. No more guessing whether \"entry\" becomes \"entries\" or \"entrys.\"
    • Legacy key migration: MigrateKeyFile() had 5 callers, full test coverage, and zero users. It existed because we once moved the encryption key path. Nobody was migrating from that era anymore. Deleted.
    • Per-package err.go files: the broken-window pattern: An agent sees err.go in a package, adds another error constructor. Now err.go has 30 constructors and nobody knows which are used. Consolidated into 22 domain files in internal/err/.
    • nolint:errcheck directives: every single one, replaced by explicit error handling. In tests: t.Fatal(err) for setup, _ = os.Chdir(orig) for cleanup. In production: defer func() { _ = f.Close() }() for best-effort close.
    ","path":["ctx v0.8.0: The Architecture Release"],"tags":[]},{"location":"blog/2026-03-23-ctx-v0.8.0-the-architecture-release/#before-and-after","level":2,"title":"Before and After","text":"Aspect v0.6.0 v0.8.0 CLI package structure Flat files cmd/ + core/ taxonomy Command descriptions Hardcoded Go strings YAML with DescKey lookup Output functions Mixed into core logic Isolated in write/ packages Cross-cutting types Duplicated per-package Consolidated in entity/ Error constructors Per-package err.go 22 domain files in internal/err/ Direct dependencies 3 (cobra, yaml, color) 2 (cobra, yaml) AI tool integration Claude Code only Any MCP client Agent memory Manual copy-paste ctx memory sync/import/diff Package documentation 75 packages missing doc.go All packages documented Import aliases Inconsistent (cflag, cFlag) Standardized camelCase","path":["ctx v0.8.0: The Architecture Release"],"tags":[]},{"location":"blog/2026-03-23-ctx-v0.8.0-the-architecture-release/#making-ai-assisted-development-easier","level":2,"title":"Making AI-Assisted Development Easier","text":"

    This restructuring wasn't just for humans. It makes the codebase legible to the machines that modify it.

    Named constants are searchable landmarks: When an agent sees cmdUse.DescKeyPad, it can grep for the definition, follow the chain to the YAML file, and understand the full lookup path. When it sees \"Encrypted scratchpad\" hardcoded in a .go file, it has no way to know that same string also lives in a YAML file, a test, and a help screen. Constants give the LLM a graph to traverse; literals give it a guess to make.

    Small, domain-scoped packages reduce hallucination: An agent loading internal/cli/pad/core/store.go gets 50 lines of focused logic with a clear responsibility boundary. Loading a 500-line monolith means the agent has to infer which parts are relevant, and it guesses wrong more often than you'd expect. Smaller files with descriptive names act as a natural retrieval system: the agent finds the right code by finding the right file, not by scanning everything and hoping.

    Taxonomy prevents duplication: When there's a write/pad/ package, the agent knows where output functions belong. When there's an internal/err/pad.go, it knows where error constructors go. Without these conventions, agents reliably create new helpers in whatever file they happen to be editing, producing the exact drift that prompted this consolidation in the first place.

    The difference is concrete:

    Before: an agent adds a helper function in whatever file it's editing. Next session, a different agent adds the same helper in a different file.

    After: the agent finds core/ or write/ and places it correctly. The next agent finds it there.

    doc.go files are agent onboarding: Each package's doc.go is a one-paragraph explanation of what the package does and why it exists. An agent loading a package reads this first. 75 packages were missing this context; now none are. The difference is measurable: fewer \"I'll create a helper function here\" moments when the agent understands that the helper already exists two packages over.

    The irony is that AI agents were both the cause and the beneficiary of this restructuring. They created the drift by building fast without consolidating. Now the structure they work within makes it harder to drift again. The taxonomy is self-reinforcing: the more consistent the codebase, the more consistently agents modify it.

    ","path":["ctx v0.8.0: The Architecture Release"],"tags":[]},{"location":"blog/2026-03-23-ctx-v0.8.0-the-architecture-release/#key-commits","level":2,"title":"Key Commits","text":"Commit Change ff6cf19e Restructure all CLI packages into cmd/root + core taxonomy d295e49c Externalize command descriptions to embedded YAML 0fcbd11c Remove fatih/color, centralize constants cb12a85a MCP v0.2: tools, prompts, session state, subscriptions ea196d00 Memory bridge: sync, import, diff, journal enrichment 3bcf077d Split text.yaml into 6 domain files 3a0bae86 Split internal/err into 22 domain files 8bd793b1 Extract internal/entry for shared domain API 5b32e435 Add doc.go to all 75 packages a82af4bc Standardize import aliases: camelCase, Yoda-style","path":["ctx v0.8.0: The Architecture Release"],"tags":[]},{"location":"blog/2026-03-23-ctx-v0.8.0-the-architecture-release/#lessons-learned","level":2,"title":"Lessons Learned","text":"

    Agents are surprisingly good at mechanical refactoring; they are surprisingly bad at knowing when to stop: The cmd/ + core/ restructuring was largely agent-driven. But agents reliably introduce gofmt issues during bulk renames, rename functions beyond their scope, and create new files without deleting old ones. Every agent-driven refactoring session needed a human audit pass.

    Externalization is archaeology: The hard part of moving strings to YAML wasn't writing YAML. It was finding 879 strings scattered across 1,500 Go files. Each one required a judgment call: is this user-facing? Is this a format pattern? Is this a constant that belongs in config/ instead?

    Delete legacy code instead of maintaining it: MigrateKeyFile had test coverage. It had callers. It had documentation. It had zero users. We maintained it for weeks before realizing that the migration window had closed months ago.

    Convention enforcement needs mechanical verification: Writing \"use camelCase aliases\" in CONVENTIONS.md doesn't prevent cflag from appearing in the next commit. The lint-drift script catches what humans forget; the planned AST-based audit tests will catch what the lint-drift script can't express.

    ","path":["ctx v0.8.0: The Architecture Release"],"tags":[]},{"location":"blog/2026-03-23-ctx-v0.8.0-the-architecture-release/#whats-next","level":2,"title":"What's Next","text":"

    v0.8.0 wasn't about features. It was about making future features inevitable. The next cycle focuses on what the foundation enables:

    • AST-based audit tests: replace shell grep with Go tests that understand types, call sites, and import graphs (spec: specs/ast-audit-tests.md)
    • Localization: with every string in YAML, the path to multi-language support is mechanical
    • MCP v0.3: expand tool coverage, add prompt templates for common workflows
    • Memory publish: bidirectional sync that pushes curated .context/ knowledge back into Claude Code's MEMORY.md

    The architecture is ready. The strings are externalized. The protocol is standard. Now it's about what you build on top.

    ","path":["ctx v0.8.0: The Architecture Release"],"tags":[]},{"location":"blog/2026-03-23-ctx-v0.8.0-the-architecture-release/#the-arc","level":2,"title":"The Arc","text":"

    This is the seventh post in the ctx blog series. The arc so far:

    1. The Attention Budget: why context windows are a scarce resource
    2. Before Context Windows, We Had Bouncers: the IRC lineage of context engineering
    3. Context as Infrastructure: treating context as persistent files, not ephemeral prompts
    4. When a System Starts Explaining Itself: the journal as a first-class artifact
    5. The Homework Problem: what happens when AI writes code but humans own the outcome
    6. Agent Memory Is Infrastructure: L2 memory vs L3 project knowledge
    7. The Architecture Release (this post): what it looks like when you redesign the internals
    8. We Broke the 3:1 Rule: the consolidation debt behind this release

    See also: Agent Memory Is Infrastructure: the memory bridge feature in this release is the first implementation of the L2-to-L3 promotion pipeline described in that post.

    See also: We Broke the 3:1 Rule: the companion post explaining why this release needed 181 consolidation commits and 18 days of cleanup.

    Systems don't scale because they grow. They scale because they stop drifting.

    Full changelog: v0.6.0...v0.8.0

    ","path":["ctx v0.8.0: The Architecture Release"],"tags":[]},{"location":"blog/2026-03-23-we-broke-the-3-1-rule/","level":1,"title":"We Broke the 3:1 Rule","text":"

    The best time to consolidate was after every third session. The second best time is now.

    Jose Alekhinne / March 23, 2026

    The rule was simple: three feature sessions, then one consolidation session.

    The Architecture Release shows the result: This post shows the cost.

    ","path":["We Broke the 3:1 Rule"],"tags":[]},{"location":"blog/2026-03-23-we-broke-the-3-1-rule/#the-rule-we-wrote","level":2,"title":"The Rule We Wrote","text":"

    In The 3:1 Ratio, I documented a rhythm that worked during ctx's first month: three feature sessions, then one consolidation session. The evidence was clear. The rule was simple.

    The math checked out.

    And then we ignored it for five weeks.

    ","path":["We Broke the 3:1 Rule"],"tags":[]},{"location":"blog/2026-03-23-we-broke-the-3-1-rule/#what-happened","level":2,"title":"What Happened","text":"

    After v0.6.0 shipped on February 16, the feature pipeline was irresistible. The MCP server spec was ready. The memory bridge design was done. Webhook notifications had been deferred twice. The VS Code extension needed 15 new commands. The sysinfo package was overdue...

    Each feature was important. Each feature was \"just one more session.\" Each feature pushed the consolidation session one day further out.

    The git history tells the story in two numbers:

    Phase Dates Commits Duration Feature run Feb 16 - Mar 5 198 17 days Consolidation run Mar 5 - Mar 23 181 18 days

    198 feature commits before a single consolidation commit. If the 3:1 rule says consolidate every 4th session, we consolidated after the 66th.

    The Actual Ratio

    The ratio wasn't 3:1. It was 1:1.

    We spent as much time cleaning up as we did building.

    The consolidation run took 18 days: longer than the feature run itself.

    ","path":["We Broke the 3:1 Rule"],"tags":[]},{"location":"blog/2026-03-23-we-broke-the-3-1-rule/#what-compounded","level":2,"title":"What Compounded","text":"

    The 3:1 post warned about compounding. Here is what compounding actually looked like at scale.

    ","path":["We Broke the 3:1 Rule"],"tags":[]},{"location":"blog/2026-03-23-we-broke-the-3-1-rule/#the-string-problem","level":3,"title":"The String Problem","text":"

    By March 5, there were 879 user-facing strings scattered across 1,500 Go files. Not because anyone decided to put them there. Because each feature session added 10-15 strings, and nobody stopped to ask \"should these be in YAML?\"

    Finding them all took longer than externalizing them. The archaeology was the cost, not the migration.

    ","path":["We Broke the 3:1 Rule"],"tags":[]},{"location":"blog/2026-03-23-we-broke-the-3-1-rule/#the-taxonomy-problem","level":3,"title":"The Taxonomy Problem","text":"

    24 CLI packages had accumulated their own conventions. Some put cobra wiring in cmd.go. Some put it in root.go. Some mixed business logic with command registration. Some had helpers at the bottom of run.go. Some had separate util.go files.

    At peak drift, adding a feature meant first figuring out which of three competing patterns this package was using.

    Restructuring one package into cmd/root/ + core/ took 15 minutes. Restructuring 24 of them took days, because each one had slightly different conventions to untangle.

    If we had restructured every 4th package as it was built, the taxonomy would have emerged naturally.

    ","path":["We Broke the 3:1 Rule"],"tags":[]},{"location":"blog/2026-03-23-we-broke-the-3-1-rule/#the-type-problem","level":3,"title":"The Type Problem","text":"

    Cross-cutting types like SessionInfo, ExportParams, and ParserResult were defined in whichever package first needed them. By March 5, the same types were imported through 3-4 layers of indirection, causing import cycles that required internal/entity to break.

    The entity package extracted 30+ types from 12 packages. Each extraction risked breaking imports in packages we hadn't touched in weeks.

    ","path":["We Broke the 3:1 Rule"],"tags":[]},{"location":"blog/2026-03-23-we-broke-the-3-1-rule/#the-error-problem","level":3,"title":"The Error Problem","text":"

    Per-package err.go files had grown into a broken-window pattern:

    An agent sees err.go in a package, adds another error constructor. By March 5, there were error constructors scattered across 22 packages with no central inventory. The consolidation into internal/err/ domain files required tracing every error through every caller.

    ","path":["We Broke the 3:1 Rule"],"tags":[]},{"location":"blog/2026-03-23-we-broke-the-3-1-rule/#the-output-problem","level":3,"title":"The Output Problem","text":"

    Output functions (cmd.Println, fmt.Fprintf) were mixed into business logic. When we decided output belongs in write/ packages, we had to extract functions from every CLI package. The Phase WC baseline commit (4ec5999) marks the starting point of this migration. 181 commits later, it was done.

    ","path":["We Broke the 3:1 Rule"],"tags":[]},{"location":"blog/2026-03-23-we-broke-the-3-1-rule/#the-compound-interest-math","level":2,"title":"The Compound Interest Math","text":"

    The 3:1 rule assumes consolidation sessions of roughly equal size to feature sessions. Here is what happens when you skip:

    Consolidation cadence Feature sessions Consolidation sessions Total Every 4th (3:1) 48 16 64 Every 10th 48 ~8 ~56 Never (what we did) 198 commits 181 commits 379

    The Takeaway

    You don't save consolidation work by skipping it:

    You increase its cost.

    Skipping consolidation doesn't save time: It borrows it.

    The interest rate is nonlinear: The longer you wait, the more each individual fix costs, because fixes interact with other unfixed drift.

    Renaming a constant in week 2 touches 3 files. Renaming it in week 6 touches 15, because five features built on the original name.

    ","path":["We Broke the 3:1 Rule"],"tags":[]},{"location":"blog/2026-03-23-we-broke-the-3-1-rule/#what-consolidation-actually-looked-like","level":2,"title":"What Consolidation Actually Looked Like","text":"

    The 18-day consolidation run wasn't one sweep. It was a sequence of targeted campaigns, each revealing the next:

    Week 1 (Mar 5-11): Error consolidation and write/ migration. Move output functions out of core/. Split monolithic errors.go into 22 domain files. Remove fatih/color. This exposed the scope of the string problem.

    Week 2 (Mar 12-18): String externalization. Create commands.yaml, flags.yaml, split text.yaml into 6 domain files. Add 879 DescKey/TextDescKey constants. Build exhaustive test. Normalize all import aliases to camelCase. This exposed the taxonomy problem.

    Week 3 (Mar 19-23): Taxonomy enforcement. Singularize command directories. Add doc.go to all 75 packages. Standardize import aliases project-wide. Fix lint-drift false positives. This was the \"polish\" phase, except it took 5 days because the inconsistencies had compounded across 461 packages.

    Each week's work would have been a single session if done incrementally.

    ","path":["We Broke the 3:1 Rule"],"tags":[]},{"location":"blog/2026-03-23-we-broke-the-3-1-rule/#lessons-again","level":2,"title":"Lessons (Again)","text":"

    The 3:1 post listed the symptoms of drift. This post adds the consequences of ignoring them:

    Consolidation is not optional; it is deferred or paid: We didn't avoid 16 consolidation sessions by skipping them. We compressed them into 18 days of uninterrupted cleanup. The work was the same; the experience was worse.

    Feature velocity creates an illusion of progress: 198 commits felt productive. But the codebase on March 5 was harder to modify than the codebase on February 16, despite having more features.

    Speed without Structure

    Speed without structure is negative progress.

    Agents amplify both building and debt: The same AI that can restructure 24 packages in a day can also create 24 slightly different conventions in a day. The 3:1 rule matters more with AI-assisted development, not less.

    The consolidation baseline is the most important commit to record: We tracked ours in TASKS.md (4ec5999). Without that marker, knowing where to start the cleanup would have been its own archaeological expedition.

    ","path":["We Broke the 3:1 Rule"],"tags":[]},{"location":"blog/2026-03-23-we-broke-the-3-1-rule/#the-updated-rule","level":2,"title":"The Updated Rule","text":"

    The 3:1 ratio still works. We just didn't follow it. The updated practice:

    1. After every 3rd feature session, schedule consolidation. Not \"when it feels right.\" Not \"when things get bad.\" After the 3rd session.

    2. Record the baseline commit. When you start a consolidation phase, write down the commit hash. It marks where the debt starts.

    3. Run make audit before feature work. If it doesn't pass, you are already in debt. Consolidate before building.

    4. Treat consolidation as a feature. It gets a branch. It gets commits. It gets a blog post. It is not overhead; it is the work that makes the next three features possible.

    The Rule

    The 3:1 ratio is not aspirational: It is structural.

    Ignore consolidation, and the system will schedule it for you.

    ","path":["We Broke the 3:1 Rule"],"tags":[]},{"location":"blog/2026-03-23-we-broke-the-3-1-rule/#the-arc","level":2,"title":"The Arc","text":"

    This is the eighth post in the ctx blog series:

    1. The Attention Budget: why context windows are a scarce resource
    2. Before Context Windows, We Had Bouncers: the IRC lineage of context engineering
    3. Context as Infrastructure: treating context as persistent files, not ephemeral prompts
    4. When a System Starts Explaining Itself: the journal as a first-class artifact
    5. The Homework Problem: what happens when AI writes code but humans own the outcome
    6. Agent Memory Is Infrastructure: L2 memory vs L3 project knowledge
    7. The Architecture Release: what v0.8.0 looks like from the inside
    8. We Broke the 3:1 Rule (this post): what happens when you don't consolidate

    See also: The 3:1 Ratio: the original observation. This post is the empirical follow-up, five weeks and 379 commits later.

    Key commits marking the consolidation arc:

    Commit Milestone 4ec5999 Phase WC baseline (consolidation starts) ff6cf19e All CLI packages restructured into cmd/ + core/ d295e49c All command descriptions externalized to YAML 3a0bae86 Error package split into 22 domain files 0fcbd11c fatih/color removed; 2 dependencies remain 5b32e435 doc.go added to all 75 packages a82af4bc Import aliases standardized project-wide 692f86cd lint-drift false positives fixed; make audit green","path":["We Broke the 3:1 Rule"],"tags":[]},{"location":"blog/2026-04-02-code-structure-as-an-agent-interface/","level":1,"title":"Code Structure as an Agent Interface","text":"","path":["Code Structure as an Agent Interface: What 19 AST Tests Taught Us About Agent-Readable Code"],"tags":[]},{"location":"blog/2026-04-02-code-structure-as-an-agent-interface/#what-19-ast-tests-taught-us-about-agent-readable-code","level":2,"title":"What 19 AST Tests Taught Us About Agent-Readable Code","text":"

    When an agent sees token.Slash instead of \"/\", it cannot pattern-match against the millions of strings.Split(s, \"/\") calls in its training data and coast on statistical inference. It has to actually look up what token.Slash is.

    Jose Alekhinne / April 2, 2026

    ","path":["Code Structure as an Agent Interface: What 19 AST Tests Taught Us About Agent-Readable Code"],"tags":[]},{"location":"blog/2026-04-02-code-structure-as-an-agent-interface/#how-it-began","level":2,"title":"How It Began","text":"

    We set out to replace a shell script with Go tests.

    We ended up discovering that \"code quality\" and \"agent readability\" are the same thing.

    This is not about linting. This is about controlling how an agent perceives your system.

    One term will recur throughout this post, so let me pin it down:

    Agent Readability

    Agent Readability is the degree to which a codebase can be understood through structured traversal, not statistical pattern matching.

    This is the story of 19 AST-based audit tests, a single-day session that touched 300+ files, and what happens when you treat your codebase's structure as an interface for the machines that read it.

    ","path":["Code Structure as an Agent Interface: What 19 AST Tests Taught Us About Agent-Readable Code"],"tags":[]},{"location":"blog/2026-04-02-code-structure-as-an-agent-interface/#the-shell-script-problem","level":2,"title":"The Shell Script Problem","text":"

    ctx had a file called hack/lint-drift.sh. It ran five checks using grep and awk: literal \"\\n\" strings, cmd.Printf calls outside the write package, magic directory strings in filepath.Join, hardcoded .md extensions, and DescKey-to-YAML linkage.

    It worked. Until it didn't.

    The script had three structural weaknesses that kept biting us:

    1. No type awareness. It could not distinguish a Use* constant from a DescKey* constant, causing 71 false positives in one run.
    2. Fragile exclusions. When a constant moved from token.go to whitespace.go, the exclusion glob broke silently.
    3. Ceiling on detection. Checks that require understanding call sites, import graphs, or type relationships are impossible in shell.

    We wrote a spec to replace all five checks with Go tests using go/ast and go/packages. The tests would run as part of go test ./...: no separate script, no separate CI step.

    What we did not expect was where the work would lead.

    ","path":["Code Structure as an Agent Interface: What 19 AST Tests Taught Us About Agent-Readable Code"],"tags":[]},{"location":"blog/2026-04-02-code-structure-as-an-agent-interface/#the-ast-migration","level":2,"title":"The AST Migration","text":"

    The pattern for each test is identical:

    func TestNoLiteralWhitespace(t *testing.T) {\n    pkgs := loadPackages(t)\n    var violations []string\n    for _, pkg := range pkgs {\n        for _, file := range pkg.Syntax {\n            ast.Inspect(file, func(n ast.Node) bool {\n                // check node, append to violations\n                return true\n            })\n        }\n    }\n    for _, v := range violations {\n        t.Error(v)\n    }\n}\n

    Load packages once via sync.Once, walk every syntax tree, collect violations, report. The shared helpers (loadPackages, isTestFile, posString) live in helpers_test.go. Each test is a _test.go file in internal/audit/, producing no binary output and not importable by production code.

    In a single session, we built 13 new tests on top of 6 that already existed, bringing the total to 19:

    Test What it catches TestNoLiteralWhitespace \"\\n\", \"\\t\", '\\r' outside config/token/ TestNoNakedErrors fmt.Errorf/errors.New outside internal/err/ TestNoStrayErrFiles err.go files outside internal/err/ TestNoRawLogging fmt.Fprint*(os.Stderr), log.Print* outside internal/log/ TestNoInlineSeparators strings.Join with literal separator arg TestNoStringConcatPaths Path-like variables built with + TestNoStutteryFunctions write.WriteJournal repeats package name TestDocComments Missing doc comments on any declaration TestNoMagicValues Numeric literals outside const definitions TestNoMagicStrings String literals outside const definitions TestLineLength Lines exceeding 80 characters TestNoRegexpOutsideRegexPkg regexp.MustCompile outside config/regex/

    Plus the six that preceded the session: TestNoErrorsAs, TestNoCmdPrintOutsideWrite, TestNoExecOutsideExecPkg, TestNoInlineRegexpCompile, TestNoRawFileIO, TestNoRawPermissions.

    The migration touched 300+ files across 25 commits.

    Not because the tests were hard to write, but because every test we wrote revealed violations that needed fixing.

    ","path":["Code Structure as an Agent Interface: What 19 AST Tests Taught Us About Agent-Readable Code"],"tags":[]},{"location":"blog/2026-04-02-code-structure-as-an-agent-interface/#the-tightening-loop","level":2,"title":"The Tightening Loop","text":"

    The most instructive part was not writing the tests. It was the iterative tightening.

    The following process was repeated for every test:

    1. Write the test with reasonable exemptions
    2. Run it, see violations
    3. Fix the violations (migrate to config constants)
    4. The human reviews the result
    5. The human spots something the test missed
    6. Fix the test first, verify it catches the issue
    7. Fix the newly caught violations
    8. Repeat from step 4

    This loop drove the tests from \"basically correct\" to \"actually useful\".

    Three examples:

    ","path":["Code Structure as an Agent Interface: What 19 AST Tests Taught Us About Agent-Readable Code"],"tags":[]},{"location":"blog/2026-04-02-code-structure-as-an-agent-interface/#example-1-the-local-const-loophole","level":3,"title":"Example 1: The Local Const Loophole","text":"

    TestNoMagicValues initially exempted local constants inside function bodies. This let code like this pass:

    const descMaxWidth = 70\ndesc := truncateDescription(\n    meta.Description, descMaxWidth,\n)\n

    The test saw a const definition and moved on. But const descMaxWidth = 70 on the line before its only use is just renaming a magic number. The 70 should live in config/format/TruncateDescription where it is discoverable, reusable, and auditable.

    We removed the local const exemption. The test caught it. The value moved to config.

    ","path":["Code Structure as an Agent Interface: What 19 AST Tests Taught Us About Agent-Readable Code"],"tags":[]},{"location":"blog/2026-04-02-code-structure-as-an-agent-interface/#example-2-the-single-character-dodge","level":3,"title":"Example 2: The Single-Character Dodge","text":"

    TestNoMagicStrings initially exempted all single-character strings as \"structural punctuation\".

    This let \"/\", \"-\", and \".\" pass everywhere.

    But \"/\" is a directory separator. It is OS-specific and a security surface.

    \"-\" used in strings.Repeat(\"-\", width) is creating visual output, not acting as a delimiter.

    \".\" in strings.SplitN(ver, \".\", 3) is a version separator.

    None of these are \"just punctuation\": They are domain values with specific meanings.

    We removed the blanket exemption: 30 violations surfaced.

    Every one was a real magic value that should have been token.Slash, token.Dash, or token.Dot.

    ","path":["Code Structure as an Agent Interface: What 19 AST Tests Taught Us About Agent-Readable Code"],"tags":[]},{"location":"blog/2026-04-02-code-structure-as-an-agent-interface/#example-3-the-replacer-versus-regex","level":3,"title":"Example 3: The Replacer versus Regex","text":"

    After migrating magic strings, we had this:

    func MermaidID(pkg string) string {\n    r := strings.NewReplacer(\n        token.Slash, token.Underscore,\n        token.Dot, token.Underscore,\n        token.Dash, token.Underscore,\n    )\n    return r.Replace(pkg)\n}\n

    Six token references and a NewReplacer allocation. The magic values were gone, but we had replaced them with token soup: structure without abstraction.

    The correct tool was a regex:

    // In config/regex/file.go:\nvar MermaidUnsafe = regexp.MustCompile(`[/.\\-]`)\n\n// In the caller:\nfunc MermaidID(pkg string) string {\n    return regex.MermaidUnsafe.ReplaceAllString(\n        pkg, token.Underscore,\n    )\n}\n

    One config regex, one call. The regex lives in config/regex/file.go where every other compiled pattern lives. An agent reading the code sees regex.MermaidUnsafe and immediately knows: this is a sanitization pattern, it lives in the regex registry, and it has a name that explains its purpose.

    Clean is better than clever.

    ","path":["Code Structure as an Agent Interface: What 19 AST Tests Taught Us About Agent-Readable Code"],"tags":[]},{"location":"blog/2026-04-02-code-structure-as-an-agent-interface/#a-before-and-after","level":2,"title":"A Before-and-After","text":"

    To make the agent-readability claim concrete, consider one function through the full transformation.

    Before (the code we started with):

    func MermaidID(pkg string) string {\n    r := strings.NewReplacer(\n        \"/\", \"_\", \".\", \"_\", \"-\", \"_\",\n    )\n    return r.Replace(pkg)\n}\n

    An agent reading this sees six string literals. To understand what the function does, it must: (1) parse the NewReplacer pair semantics, (2) infer that /, ., - are being replaced, (3) guess why, (4) hope the guess is right.

    There is nothing to follow. No import to trace. No name to search. The meaning is locked inside the function body.

    After (the code we ended with):

    func MermaidID(pkg string) string {\n    return regex.MermaidUnsafe.ReplaceAllString(\n        pkg, token.Underscore,\n    )\n}\n

    An agent reading this sees two named references: regex.MermaidUnsafe and token.Underscore.

    To understand the function, it can: (1) look up MermaidUnsafe in config/regex/file.go and see the pattern [/.\\-] with a doc comment explaining it matches invalid Mermaid characters, (2) look up Underscore in config/token/delim.go and see it is the replacement character.

    The agent now has: a named pattern, a named replacement, a package location, documentation, and neighboring context (other regex patterns, other delimiters).

    It got all of this for free by following just two references.

    The indirection is not an overhead. It is the retrieval query.

    ","path":["Code Structure as an Agent Interface: What 19 AST Tests Taught Us About Agent-Readable Code"],"tags":[]},{"location":"blog/2026-04-02-code-structure-as-an-agent-interface/#the-principles","level":2,"title":"The Principles","text":"

    You are not just improving code quality. You are shaping the input space that determines how an LLM can reason about your system.

    Every structural constraint we enforce converts implicit semantics into explicit structure.

    LLMs struggle when meaning is implicit and patterns are statistical.

    They thrive when meaning is explicit and structure is navigable.

    Here is what we learned, organized into three categories.

    ","path":["Code Structure as an Agent Interface: What 19 AST Tests Taught Us About Agent-Readable Code"],"tags":[]},{"location":"blog/2026-04-02-code-structure-as-an-agent-interface/#cognitive-constraints","level":3,"title":"Cognitive Constraints","text":"

    These force agents (and humans) to think harder.

    Indirection acts as a built-in retrieval mechanism:

    Moving magic values to config forces the agent to follow the reference. errMemory.WriteFile(cause) tells the agent \"there is a memory error package, go look.\" fmt.Errorf(\"writing MEMORY.md: %w\", cause) inlines everything and makes the call graph invisible. The indirection IS the retrieval query.

    Unfamiliar patterns force reasoning:

    When an agent sees token.Slash instead of \"/\", it cannot coast on corpus frequency. It has to actually look up what token.Slash is, which forces it through the dependency graph, which means it encounters documentation and neighboring constants, which gives it richer context. You are exploiting the agent's weakness (over-reliance on training data) to make it behave more carefully.

    Documentation helps everyone:

    Extensive documentation helps humans reading the code, agents reasoning about it, and RAG systems indexing it.

    Our TestDocComments check added 308 doc comments in one commit. Every function, every type, every constant block now has a doc comment.

    This is not busywork: it is the content that agents and embeddings consume.

    ","path":["Code Structure as an Agent Interface: What 19 AST Tests Taught Us About Agent-Readable Code"],"tags":[]},{"location":"blog/2026-04-02-code-structure-as-an-agent-interface/#structural-constraints","level":3,"title":"Structural Constraints","text":"

    These shape the codebase into a navigable graph.

    Shorter files save tokens:

    Forcing private helper functions out of main files makes the main file shorter. An agent loading a file spends fewer tokens on boilerplate and more on the logic that matters.

    Fixed-width constraints force decomposition:

    A function that cannot be expressed in 80 columns is either too deeply nested (extract a helper), has too many parameters (introduce a struct), or has a variable name that is too long (rethink the abstraction).

    The constraint forces structural improvements that happen to also make the code more parseable.

    Chunk-friendly structure helps RAG

    Code intelligence tools chunk files for embedding and retrieval. Short, well-documented, single-responsibility files produce better chunks than monolithic files with mixed concerns.

    The structural constraints create files that RAG systems can index effectively.

    Centralization creates debuggable seams:

    All error handling in internal/err/, all logging in internal/log/, all file operations in internal/io/. One place to debug, one place to test, one place to see patterns. An agent analyzing \"how does this project handle errors\" gets one answer from one package, not 200 scattered fmt.Errorf calls.

    Private functions become public patterns:

    When you extract a private function to satisfy a constraint, it often ends up as a semi-public function in a core/ package. Then you realize it is generic enough to be factored into a purpose-specific module.

    The constraint drives discovery of reusable abstractions hiding inside monolithic functions.

    ","path":["Code Structure as an Agent Interface: What 19 AST Tests Taught Us About Agent-Readable Code"],"tags":[]},{"location":"blog/2026-04-02-code-structure-as-an-agent-interface/#operational-benefits","level":3,"title":"Operational Benefits","text":"

    These pay dividends in daily development.

    Single-edit renames:

    Renaming a flag is one edit to a config constant instead of find-and-replace across 30,000 lines with possible misses. grep token.Slash gives you every place that uses a forward slash semantically.

    grep \"/\" gives you noise.

    Blast radius containment:

    When every magic value is a config constant, a search is one result. This matters for impact analysis, security audits, and agents trying to understand \"what uses this\".

    Compile-time contract enforcement:

    When err/memory.WriteFile exists, the compiler guarantees the error message exists and the call signature is correct. An inline fmt.Errorf can have a typo in the format string and nothing catches it until runtime. Centralization turns runtime failures into compile errors.

    Semantic git blame:

    When token.Slash is used everywhere and someone changes its value, git blame on the config file shows exactly when and why.

    With inline \"/\" scattered across 30 files, the history is invisible.

    Test surface reduction:

    Centralizing into internal/err/, internal/io/, internal/config/ means you test behavior once at the boundary and trust the callers.

    You do not need 30 tests for 30 fmt.Errorf calls. You need 1 test for errMemory.WriteFile and 30 trivial call-site audits, which is exactly what these AST tests provide.

    ","path":["Code Structure as an Agent Interface: What 19 AST Tests Taught Us About Agent-Readable Code"],"tags":[]},{"location":"blog/2026-04-02-code-structure-as-an-agent-interface/#the-numbers","level":2,"title":"The Numbers","text":"

    One session. 25 commits. The raw stats:

    Metric Count New audit tests 13 Total audit tests 19 Files touched 300+ Magic values migrated 90+ Functions renamed 17 Doc comments added 323 Lines rewrapped to 80 chars 190 Config constants created 40+ Config regexes created 3

    Every number represents a violation that existed before the test caught it. The tests did not create work: they revealed work that was already needed.

    ","path":["Code Structure as an Agent Interface: What 19 AST Tests Taught Us About Agent-Readable Code"],"tags":[]},{"location":"blog/2026-04-02-code-structure-as-an-agent-interface/#the-uncomfortable-implication","level":2,"title":"The Uncomfortable Implication","text":"

    None of this is Go-specific.

    If an AI agent interacts with your codebase, your codebase already is an interface. You just have not designed it as one.

    If your error messages are scattered across 200 files, an agent cannot reason about error handling as a concept. If your magic values are inlined, an agent cannot distinguish \"this is a path separator\" from \"this is a division operator.\" If your functions are named write.WriteJournal, the agent wastes tokens on redundant information.

    What we discovered, through the unglamorous work of writing lint tests and migrating string literals, is that the structural constraints software engineering has valued for decades are exactly the constraints that make code readable to machines.

    This is not a coincidence: These constraints exist because they reduce the cognitive load of understanding code.

    Agents have cognitive load too: It is called the context window.

    You are not converting code to a new paradigm.

    You are making the latent graph visible.

    You are converting implicit semantics into explicit structure that both humans and machines can traverse.

    ","path":["Code Structure as an Agent Interface: What 19 AST Tests Taught Us About Agent-Readable Code"],"tags":[]},{"location":"blog/2026-04-02-code-structure-as-an-agent-interface/#whats-next","level":2,"title":"What's Next","text":"

    The spec lists 8 more tests we have not built yet, including TestDescKeyYAMLLinkage (verifying that every DescKey constant has a corresponding YAML entry), TestCLICmdStructure (enforcing the cmd.go / run.go / doc.go file convention), and TestNoFlagBindOutsideFlagbind (which requires migrating ~50 flag registration sites first).

    The broader question: should these principles be codified as a reusable linting framework? The patterns (loadPackages + ast.Inspect + violation collection) are generic.

    The specific checks are project-specific. But the categories of checks (centralization enforcement, magic value detection, naming conventions, documentation requirements) are universal.

    For now, 19 tests in internal/audit/ is enough. They run in 2 seconds as part of go test ./.... They catch real issues.

    And they encode a theory of code quality that serves both humans and the agents that work alongside them.

    Agents are not going away. They are reading your code right now, forming representations of your system in context windows that forget everything between sessions.

    The codebases that structure themselves for that reality will compound. The ones that do not will slowly become illegible to the tools they depend on.

    Structure is no longer just for maintainability. It is for reasonability.

    ","path":["Code Structure as an Agent Interface: What 19 AST Tests Taught Us About Agent-Readable Code"],"tags":[]},{"location":"blog/2026-04-06-the-watermelon-rind-anti-pattern/","level":1,"title":"The Watermelon-Rind Anti-Pattern","text":"","path":["The Watermelon-Rind Anti-Pattern: Why Smarter Tools Make Shallower Agents"],"tags":[]},{"location":"blog/2026-04-06-the-watermelon-rind-anti-pattern/#why-smarter-tools-make-shallower-agents","level":2,"title":"Why Smarter Tools Make Shallower Agents","text":"

    Give an agent a graph query tool, and it will tell you everything about your codebase except what actually matters.

    Jose Alekhinne / April 6, 2026

    ","path":["The Watermelon-Rind Anti-Pattern: Why Smarter Tools Make Shallower Agents"],"tags":[]},{"location":"blog/2026-04-06-the-watermelon-rind-anti-pattern/#a-turkish-proverb-walks-into-a-codebase","level":2,"title":"A Turkish Proverb Walks into a Codebase","text":"

    There's a Turkish idiom: esegin aklina karpuz kabugu sokmak (literally, \"to put a watermelon rind into a donkey's mind\"). It means to plant an idea in someone's head that they wouldn't have come up with on their own, usually one that leads them astray.

    In English, let's call this a \"watermelon metric\": a project management term for something that's green on the outside and red on the inside: all dashboards passing, reality crumbling.

    Both halves of this metaphor showed up in a single experiment. And the result changed how we design architecture analysis in ctx.

    ","path":["The Watermelon-Rind Anti-Pattern: Why Smarter Tools Make Shallower Agents"],"tags":[]},{"location":"blog/2026-04-06-the-watermelon-rind-anti-pattern/#the-experiment","level":2,"title":"The Experiment","text":"

    We ran three sessions analyzing the same large codebase (~34,000 symbols) using the same architecture skill, varying only what tools the agent had access to.

    Session Tools Available Output (lines) Character 1 None (MCP broken) 5,866 Deep, intimate 2 Full graph MCP 1,124 Structural, correct 3 Enrichment pass +verified data Additive, not restorative

    Session 1 was an accident. The MCP server that provides code intelligence queries was broken, so the agent couldn't ask the graph anything. It had to read code. Line by line. File by file.

    It produced 5,866 lines of architecture analysis: per-controller data flows, scale math, startup sequences, timeout defaults, edge cases that only surface when you actually look at the implementation.

    Session 2 had working tools. Same skill, same codebase. The agent produced 1,124 lines (5.2x less). Structurally correct. Valid symbol references. Proper call chains.

    And hollow.

    ","path":["The Watermelon-Rind Anti-Pattern: Why Smarter Tools Make Shallower Agents"],"tags":[]},{"location":"blog/2026-04-06-the-watermelon-rind-anti-pattern/#the-rind","level":2,"title":"The Rind","text":"

    The Session 2 output was a watermelon rind: the right shape, the right color, the right texture on the outside. But the substance (the operational details, the defaults nobody documents, the scale math that tells you when a component will fall over) was missing.

    Not wrong. Not broken. Just... thin.

    The agent had answered every question correctly. The problem was that it never discovered the questions it should have asked. When you can query a graph for \"what calls this function?\", you don't stumble into the retry loop that silently swallows errors three layers down. When you can ask for the dependency tree, you don't notice that two packages share mutable state through a global variable that isn't in any interface.

    The tool answered the question asked but prevented the discovery of answers to questions never asked.

    Here's what that looks like concretely: the graph tells you that ReconcileDeployment calls SyncPods. It does not tell you that SyncPods retries three times with exponential backoff, silently drops errors after timeout, and resets a package-level counter that another goroutine reads without a lock. The call chain is correct.

    The operational reality is invisible.

    ","path":["The Watermelon-Rind Anti-Pattern: Why Smarter Tools Make Shallower Agents"],"tags":[]},{"location":"blog/2026-04-06-the-watermelon-rind-anti-pattern/#the-donkeys-idea","level":2,"title":"The Donkey's Idea","text":"

    This is where the Turkish proverb earns its place: The graph tool is the \"karpuz kabugu\" (the watermelon rind placed into the agent's mind).

    Before the tool existed, the agent had no choice but to read deeply. With the tool available, a new idea appears: why read 500 lines of code when I can query the call graph?

    The agent isn't lazy. It's rational.

    Graph queries are faster, more reliable, and produce verifiably correct output. The agent is optimizing. It's satisficing (finding answers that are good enough), instead of maximizing (finding everything there is to know).

    Satisficing produces watermelon rinds.

    ","path":["The Watermelon-Rind Anti-Pattern: Why Smarter Tools Make Shallower Agents"],"tags":[]},{"location":"blog/2026-04-06-the-watermelon-rind-anti-pattern/#the-two-pass-compiler","level":2,"title":"The Two-Pass Compiler","text":"

    Session 3 taught us that you can't fix shallow analysis by adding more tools after the fact. The enrichment pass added verified graph data (blast radius numbers, registration sites, execution flow confirmation) but it couldn't recover the intimate code knowledge that Session 1 had produced through sheer necessity.

    You can't enrich your way out of a depth deficit.

    So we redesigned. Instead of one skill with optional tools, we built a two-pass compiler for architecture understanding:

    Pass 1: Semantic parsing. The /ctx-architecture skill deliberately has no access to graph query tools. The agent must read code, build mental models, and produce architecture artifacts through human-style comprehension. Constraint is the feature.

    Pass 2: Static analysis. The /ctx-architecture-enrich skill takes Pass 1 output as input and runs comprehensive verification through code intelligence: blast radius analysis, registration site discovery, execution flow tracing, domain clustering comparison. It extends and verifies, but it doesn't replace.

    The key insight: these must be separate skills with separate tool permissions. If you give the agent graph tools during Pass 1, it will use them. The \"karpuz kabugu\" will be in its mind. The only way to prevent satisficing is to remove the option.

    ","path":["The Watermelon-Rind Anti-Pattern: Why Smarter Tools Make Shallower Agents"],"tags":[]},{"location":"blog/2026-04-06-the-watermelon-rind-anti-pattern/#the-principle","level":2,"title":"The Principle","text":"

    We call this constraint-as-feature: deliberately withholding capabilities to force deeper engagement.

    It sounds paradoxical. You built sophisticated code intelligence tools and then... forbade the agent from using them? During the most important phase?

    Yes. Because the tools don't make the agent smarter. They make it faster. And faster, in architecture analysis, is the enemy of deep.

    What's actually happening is subtler: tools reduce the agent's search space. A graph query collapses thousands of possible observations into one precise answer. That's efficient for known questions. But architecture understanding depends on unknown unknowns: and you only find those by wandering through code with nothing to shortcut the journey.

    The constraint forces the agent into a mode of operation that produces better output than any amount of tooling can achieve. The limitation is the capability.

    ","path":["The Watermelon-Rind Anti-Pattern: Why Smarter Tools Make Shallower Agents"],"tags":[]},{"location":"blog/2026-04-06-the-watermelon-rind-anti-pattern/#when-does-this-apply","level":2,"title":"When Does This Apply?","text":"

    Not always. The watermelon-rind anti-pattern is specific to exploratory analysis: tasks where the value comes from discovering unknowns, not from answering known questions.

    Graph tools are excellent for:

    • Verification: \"Does X actually call Y?\" (binary question, precise answer)
    • Impact analysis: \"What breaks if I change Z?\" (bounded scope, enumerable results)
    • Navigation: \"Where is this interface implemented?\" (lookup, not analysis)

    Graph tools produce watermelon rinds when:

    • The goal is understanding, not answering
    • The unknowns are unknown: you don't know what to ask
    • Depth matters more than breadth: operational details, edge cases, implicit coupling

    The two-pass approach preserves both: deep reading first, tool verification second.

    ","path":["The Watermelon-Rind Anti-Pattern: Why Smarter Tools Make Shallower Agents"],"tags":[]},{"location":"blog/2026-04-06-the-watermelon-rind-anti-pattern/#takeaway","level":2,"title":"Takeaway","text":"

    The two-pass approach is the slowest way to analyze a codebase. It is also the only way that produces both depth and accuracy. We accept the cost because architecture analysis is not a speed game: it is a coverage game.

    Esegin aklina karpuz kabugu sokma!

    (don't put the watermelon rind into a donkey's mind)

    If the agent never struggles, it never discovers. And if it never discovers, you are not doing architecture; you are doing autocomplete.

    This post is part of the ctx field notes series, documenting what we learn building persistent context infrastructure for AI coding sessions.

    ","path":["The Watermelon-Rind Anti-Pattern: Why Smarter Tools Make Shallower Agents"],"tags":[]},{"location":"cli/","level":1,"title":"CLI","text":"","path":["CLI"],"tags":[]},{"location":"cli/#ctx-cli","level":2,"title":"ctx CLI","text":"

    Complete reference for all ctx commands, grouped by function.

    ","path":["CLI"],"tags":[]},{"location":"cli/#global-options","level":2,"title":"Global Options","text":"

    All commands support these flags:

    Flag Description --help Show command help --version Show version --context-dir <path> Override context directory (default: .context/) --allow-outside-cwd Allow context directory outside current working directory --tool <name> Override active AI tool identifier (e.g. kiro, cursor)

    Initialization required. Most commands require a .context/ directory created by ctx init. Running a command without one produces:

    ctx: not initialized - run \"ctx init\" first\n

    Commands that work before initialization: ctx init, ctx setup, ctx doctor, and grouping commands that only show help.

    ","path":["CLI"],"tags":[]},{"location":"cli/#getting-started","level":2,"title":"Getting Started","text":"Command Description ctx init Initialize .context/ directory with templates ctx status Show context summary (files, tokens, drift) ctx guide Quick-reference cheat sheet ctx why Read the philosophy behind ctx","path":["CLI"],"tags":[]},{"location":"cli/#context","level":2,"title":"Context","text":"Command Description ctx add Add a task, decision, learning, or convention ctx load Output assembled context in read order ctx agent Print token-budgeted context packet for AI consumption ctx sync Reconcile context with codebase state ctx drift Detect stale paths, secrets, missing files ctx compact Archive completed tasks, clean up files ctx fmt Format context files to 80-char line width ctx decision Manage DECISIONS.md (reindex) ctx learning Manage LEARNINGS.md (reindex) ctx task Task completion, archival, and snapshots ctx reindex Regenerate indices for DECISIONS.md and LEARNINGS.mdctx permission Permission snapshots (golden image) ctx change Show what changed since last session ctx memory Bridge Claude Code auto memory into .context/ctx watch Auto-apply context updates from AI output","path":["CLI"],"tags":[]},{"location":"cli/#sessions","level":2,"title":"Sessions","text":"Command Description ctx journal Browse, import, enrich, and lock session history ctx pad Encrypted scratchpad for sensitive one-liners ctx remind Session-scoped reminders that surface at session start ctx hook pause Pause context hooks for the current session ctx hook resume Resume paused context hooks","path":["CLI"],"tags":[]},{"location":"cli/#integrations","level":2,"title":"Integrations","text":"Command Description ctx setup Generate AI tool integration configs ctx steering Manage steering files (behavioral rules for AI tools) ctx trigger Manage lifecycle triggers (scripts for automation) ctx skill Manage reusable instruction bundles ctx mcp MCP server for AI tool integration 
(stdin/stdout) ctx hook notify Webhook notifications (setup, test, send) ctx loop Generate autonomous loop script ctx connection Client-side commands for connecting to a ctx Hub ctx hub Operate a ctx Hub server or cluster ctx serve Serve a static site locally via zensical ctx site Site management (feed generation)","path":["CLI"],"tags":[]},{"location":"cli/#diagnostics","level":2,"title":"Diagnostics","text":"Command Description ctx doctor Structural health check (hooks, drift, config) ctx trace Show context behind git commits ctx sysinfo Show system resource usage (memory, swap, disk, load) ctx usage Show session token usage stats","path":["CLI"],"tags":[]},{"location":"cli/#runtime","level":2,"title":"Runtime","text":"Command Description ctx config Manage runtime configuration profiles ctx backup Back up context and Claude data to tar.gz / SMB ctx prune Clean stale per-session state files ctx hook Hook message, notification, and lifecycle controls ctx system Hook plumbing and agent-only commands (not user-facing)","path":["CLI"],"tags":[]},{"location":"cli/#shell","level":2,"title":"Shell","text":"Command Description ctx completion Generate shell autocompletion scripts","path":["CLI"],"tags":[]},{"location":"cli/#exit-codes","level":2,"title":"Exit Codes","text":"Code Meaning 0 Success 1 General error / warnings (e.g. drift) 2 Context not found 3 Violations found (e.g. drift) 4 File operation error","path":["CLI"],"tags":[]},{"location":"cli/#environment-variables","level":2,"title":"Environment Variables","text":"Variable Description CTX_DIR Override default context directory path CTX_TOKEN_BUDGET Override default token budget CTX_BACKUP_SMB_URL SMB share URL for backups (e.g. smb://host/share) CTX_BACKUP_SMB_SUBDIR Subdirectory on SMB share (default: ctx-sessions) CTX_SESSION_ID Active AI session ID (used by ctx trace for context linking)","path":["CLI"],"tags":[]},{"location":"cli/#configuration-file","level":2,"title":"Configuration File","text":"

    Optional .ctxrc (YAML format) at project root:

    # .ctxrc\ncontext_dir: .context        # Context directory name\ntoken_budget: 8000           # Default token budget\npriority_order:              # File loading priority\n  - TASKS.md\n  - DECISIONS.md\n  - CONVENTIONS.md\nauto_archive: true           # Auto-archive old items\narchive_after_days: 7        # Days before archiving tasks\nscratchpad_encrypt: true     # Encrypt scratchpad (default: true)\nallow_outside_cwd: false     # Skip boundary check (default: false)\nevent_log: false             # Enable local hook event logging\ncompanion_check: true        # Check companion tools at session start\nentry_count_learnings: 30    # Drift warning threshold (0 = disable)\nentry_count_decisions: 20    # Drift warning threshold (0 = disable)\nconvention_line_count: 200   # Line count warning for CONVENTIONS.md (0 = disable)\ninjection_token_warn: 15000  # Oversize injection warning (0 = disable)\ncontext_window: 200000       # Auto-detected for Claude Code; override for other tools\nbilling_token_warn: 0        # One-shot billing warning at this token count (0 = disabled)\nkey_rotation_days: 90        # Days before key rotation nudge\nsession_prefixes:            # Recognized session header prefixes (extend for i18n)\n  - \"Session:\"               # English (default)\n  # - \"Oturum:\"              # Turkish (add as needed)\n  # - \"セッション:\"             # Japanese (add as needed)\nfreshness_files:             # Files with technology-dependent constants (opt-in)\n  - path: config/thresholds.yaml\n    desc: Model token limits and batch sizes\n    review_url: https://docs.example.com/limits  # Optional\nnotify:                      # Webhook notification settings\n  events:                    # Required: only listed events fire\n    - loop\n    - nudge\n    - relay\n    # - heartbeat            # Every-prompt session-alive signal\ntool: \"\"                     # Active AI tool: claude, cursor, cline, kiro, codex\nsteering:                    # Steering layer 
configuration\n  dir: .context/steering     # Steering files directory\n  default_inclusion: manual  # Default inclusion mode (always, auto, manual)\n  default_tools: []          # Default tool filter for new steering files\nhooks:                       # Hook system configuration\n  dir: .context/hooks        # Hook scripts directory\n  timeout: 10                # Per-hook execution timeout in seconds\n  enabled: true              # Whether hook execution is enabled\n
    Field Type Default Description context_dirstring.context Context directory name (relative to project root) token_budgetint8000 Default token budget for ctx agentpriority_order[]string (all files) File loading priority for context packets auto_archivebooltrue Auto-archive completed tasks archive_after_daysint7 Days before completed tasks are archived scratchpad_encryptbooltrue Encrypt scratchpad with AES-256-GCM allow_outside_cwdboolfalse Skip boundary check for external context dirs event_logboolfalse Enable local hook event logging to .context/state/events.jsonlcompanion_checkbooltrue Check companion tool availability (Gemini Search, GitNexus) during /ctx-rememberentry_count_learningsint30 Drift warning when LEARNINGS.md exceeds this count entry_count_decisionsint20 Drift warning when DECISIONS.md exceeds this count convention_line_countint200 Line count warning for CONVENTIONS.mdinjection_token_warnint15000 Warn when auto-injected context exceeds this token count (0 = disable) context_windowint200000 Context window size in tokens. Auto-detected for Claude Code (200k/1M); override for other AI tools billing_token_warnint0 (off) One-shot warning when session tokens exceed this threshold (0 = disabled) key_rotation_daysint90 Days before encryption key rotation nudge session_prefixes[]string[\"Session:\"] Recognized Markdown session header prefixes. Extend to parse sessions written in other languages freshness_files[]object (none) Files to track for staleness (path, desc, optional review_url). 
Hook warns after 6 months without modification notify.events[]string (all) Event filter for webhook notifications (empty = all) toolstring (empty) Active AI tool identifier (claude, cursor, cline, kiro, codex) steering.dirstring.context/steering Steering files directory steering.default_inclusionstringmanual Default inclusion mode for new steering files (always, auto, manual) steering.default_tools[]string (all) Default tool filter for new steering files (empty = all tools) hooks.dirstring.context/hooks Hook scripts directory hooks.timeoutint10 Per-hook execution timeout in seconds hooks.enabledbooltrue Whether hook execution is enabled

    Priority order: CLI flags > Environment variables > .ctxrc > Defaults

    All settings are optional. Missing values use defaults.

    ","path":["CLI"],"tags":[]},{"location":"cli/backup/","level":1,"title":"Backup","text":"","path":["CLI","Runtime","Backup"],"tags":[]},{"location":"cli/backup/#ctx-backup","level":3,"title":"ctx backup","text":"

    Create timestamped tar.gz archives of project context and/or global Claude Code data. Optionally copies archives to an SMB share via GVFS.

    ctx backup [flags]\n

    Flags:

    Flag Description --scope Backup scope: project, global, or all (default: all) --json Output results as JSON

    Scopes:

    Scope What's archived project.context/, .claude/, ideas/, ~/.bashrcglobal~/.claude/ (excludes todos/) all Both project and global (default)

    Environment:

    Variable Purpose CTX_BACKUP_SMB_URL SMB share URL (e.g. smb://host/share) CTX_BACKUP_SMB_SUBDIR Subdirectory on share (default: ctx-sessions)

    Examples:

    ctx backup                       # Back up everything (default: all)\nctx backup --scope project       # Project context only\nctx backup --scope global        # Global Claude data only\nctx backup --scope all --json    # Both, JSON output\n
    ","path":["CLI","Runtime","Backup"],"tags":[]},{"location":"cli/bootstrap/","level":1,"title":"System Bootstrap","text":"","path":["System Bootstrap"],"tags":[]},{"location":"cli/bootstrap/#ctx-system-bootstrap","level":3,"title":"ctx system bootstrap","text":"

    Print the resolved context directory path so AI agents can anchor their session. The default output lists the context directory, the tracked context files, and a short health snapshot. --quiet prints just the path; --json produces structured output for automation.

    This is a hidden, agent-only command that agents are instructed to run first in their session-start procedure; it is the authoritative answer to \"where does this project's context live?\".

    ctx system bootstrap [flags]\n

    Flags:

    Flag Description -q, --quiet Output only the context directory path --json Output in JSON format

    Examples:

    ctx system bootstrap                 # Text output for agents\nctx system bootstrap -q              # Just the context directory path\nctx system bootstrap --json          # Structured output for automation\n

    Scripting tip: CTX_DIR=$(ctx system bootstrap -q) is the canonical way for skills and scripts to find the project's context directory without hardcoding .context/.

    ","path":["System Bootstrap"],"tags":[]},{"location":"cli/change/","level":1,"title":"Change","text":"","path":["CLI","Context","Change"],"tags":[]},{"location":"cli/change/#ctx-change","level":2,"title":"ctx change","text":"

    Show what changed in context files and code since your last session.

    Automatically detects the previous session boundary from state markers or event log. Useful at session start to quickly see what moved while you were away.

    ctx change [flags]\n

    Flags:

    Flag Description --since Time reference: duration (24h) or date (2026-03-01)

    Reference time detection (priority order):

    1. --since flag (duration, date, or RFC3339 timestamp)
    2. ctx-loaded-* marker files in .context/state/ (second most recent)
    3. Last context-load-gate event from .context/state/events.jsonl
    4. Fallback: 24 hours ago

    Examples:

    # Auto-detect last session, show what changed\nctx change\n\n# Changes in the last 48 hours\nctx change --since 48h\n\n# Changes since a specific date\nctx change --since 2026-03-10\n

    Output:

    ## Changes Since Last Session\n\n**Reference point**: 6 hours ago\n\n### Context File Changes\n- `TASKS.md` - modified 2026-03-12 14:30\n- `DECISIONS.md` - modified 2026-03-12 09:15\n\n### Code Changes\n- **12 commits** since reference point\n- **Latest**: Fix journal enrichment ordering\n- **Directories touched**: internal, docs, specs\n- **Authors**: jose, claude\n

    Context file changes are detected by filesystem mtime (works without git). Code changes use git log --since (empty when not in a git repo).

    See also: Reviewing Session Changes.

    ","path":["CLI","Context","Change"],"tags":[]},{"location":"cli/completion/","level":1,"title":"Completion","text":"","path":["CLI","Shell","Completion"],"tags":[]},{"location":"cli/completion/#ctx-completion","level":2,"title":"ctx completion","text":"

    Generate shell autocompletion scripts.

    ctx completion <shell>\n
    ","path":["CLI","Shell","Completion"],"tags":[]},{"location":"cli/completion/#subcommands","level":3,"title":"Subcommands","text":"Shell Command bashctx completion bashzshctx completion zshfishctx completion fishpowershellctx completion powershell

    Examples:

    ctx completion bash > /etc/bash_completion.d/ctx\nctx completion zsh  > \"${fpath[1]}/_ctx\"\nctx completion fish > ~/.config/fish/completions/ctx.fish\nctx completion powershell | Out-String | Invoke-Expression\n
    ","path":["CLI","Shell","Completion"],"tags":[]},{"location":"cli/completion/#installation","level":3,"title":"Installation","text":"BashZshFishPowerShell
    # Add to ~/.bashrc\nsource <(ctx completion bash)\n
    # Add to ~/.zshrc\nsource <(ctx completion zsh)\n
    ctx completion fish | source\n# Or save to completions directory\nctx completion fish > ~/.config/fish/completions/ctx.fish\n
    # Add to your PowerShell profile\nctx completion powershell | Out-String | Invoke-Expression\n
    ","path":["CLI","Shell","Completion"],"tags":[]},{"location":"cli/config/","level":1,"title":"Config","text":"","path":["CLI","Runtime","Config"],"tags":[]},{"location":"cli/config/#ctx-config","level":3,"title":"ctx config","text":"

    Manage runtime configuration profiles.

    ctx config <subcommand>\n

    The ctx repo ships two .ctxrc source profiles (.ctxrc.base and .ctxrc.dev). The working copy (.ctxrc) is gitignored and switched between them using subcommands below.

    ","path":["CLI","Runtime","Config"],"tags":[]},{"location":"cli/config/#ctx-config-switch","level":4,"title":"ctx config switch","text":"

    Switch between .ctxrc configuration profiles.

    ctx config switch [dev|base]\n

    With no argument, toggles between dev and base. Accepts prod as an alias for base.

    Argument Description dev Switch to dev profile (verbose logging) base Switch to base profile (all defaults) (none) Toggle to the opposite profile

    Profiles:

    Profile Description dev Verbose logging, webhook notifications on base All defaults, notifications off

    Examples:

    ctx config switch dev     # Switch to dev profile\nctx config switch base    # Switch to base profile\nctx config switch         # Toggle (dev → base or base → dev)\nctx config switch prod    # Alias for \"base\"\n

    The detection heuristic checks for an uncommented notify: line in .ctxrc: present means dev, absent means base.

    ","path":["CLI","Runtime","Config"],"tags":[]},{"location":"cli/config/#ctx-config-status","level":4,"title":"ctx config status","text":"

    Show which .ctxrc profile is currently active.

    ctx config status\n

    Output examples:

    active: dev (verbose logging enabled)\nactive: base (defaults)\nactive: none (.ctxrc does not exist)\n

    See also: Configuration, Contributing: Configuration Profiles

    ","path":["CLI","Runtime","Config"],"tags":[]},{"location":"cli/connect/","level":1,"title":"Connect","text":"","path":["Connect"],"tags":[]},{"location":"cli/connect/#ctx-connect","level":2,"title":"ctx connect","text":"

    Connect a project to a ctx Hub for cross-project knowledge sharing. Projects publish decisions, learnings, conventions, and tasks to a hub; other subscribed projects receive them alongside local context.

    New to the Hub?

    Start with the ctx Hub overview for the mental model (what the hub is, who it's for, what it is not), then walk through Getting Started. This page is a command reference, not an introduction.

    The unit of identity is a project, not a user. Registering a directory with ctx connect register binds a per-project client token in .context/.connect.enc. Two developers on the same project either share that file over a trusted channel, or each register under a different project name.

    Only structured entries flow through the hub: decision, learning, convention, task. Session journals, scratchpad contents, and other local state stay on the machine that created them.

    ","path":["Connect"],"tags":[]},{"location":"cli/connect/#ctx-connect-register","level":3,"title":"ctx connect register","text":"

    One-time registration with a hub. Requires the hub address and admin token (printed by ctx hub start on first run).

    ctx connect register localhost:9900 --token ctx_adm_7f3a...\n

    On success, stores an encrypted connection config in .context/.connect.enc for future RPCs.

    ","path":["Connect"],"tags":[]},{"location":"cli/connect/#ctx-connect-subscribe","level":3,"title":"ctx connect subscribe","text":"

    Set which entry types to receive from the hub. Only matching types are returned by sync and listen.

    ctx connect subscribe decision learning\nctx connect subscribe decision learning convention\n
    ","path":["Connect"],"tags":[]},{"location":"cli/connect/#ctx-connect-sync","level":3,"title":"ctx connect sync","text":"

    Pull matching entries from the hub and write them to .context/hub/ as markdown files with origin tags and date headers. Tracks last-seen sequence for incremental sync.

    ctx connect sync\n
    ","path":["Connect"],"tags":[]},{"location":"cli/connect/#ctx-connect-publish","level":3,"title":"ctx connect publish","text":"

    Push entries to the hub. Specify type and content as arguments.

    ctx connect publish decision \"Use UTC timestamps everywhere\"\n
    ","path":["Connect"],"tags":[]},{"location":"cli/connect/#ctx-connect-listen","level":3,"title":"ctx connect listen","text":"

    Stream new entries from the hub in real-time. Writes to .context/hub/ as entries arrive. Press Ctrl-C to stop.

    ctx connect listen\n
    ","path":["Connect"],"tags":[]},{"location":"cli/connect/#ctx-connect-status","level":3,"title":"ctx connect status","text":"

    Show hub connection state and entry statistics.

    ctx connect status\n
    ","path":["Connect"],"tags":[]},{"location":"cli/connect/#automatic-sharing","level":2,"title":"Automatic Sharing","text":"

    Use --share on ctx add to write locally AND publish to the hub:

    ctx add decision \"Use UTC\" --share \\\n  --context \"Need consistency\" \\\n  --rationale \"Avoid timezone bugs\" \\\n  --consequence \"UI does conversion\"\n

    If the hub is unreachable, the local write succeeds and a warning is printed. The --share flag is best-effort; it never blocks local context updates.

    ","path":["Connect"],"tags":[]},{"location":"cli/connect/#auto-sync","level":2,"title":"Auto-Sync","text":"

    Once registered, the check-hub-sync hook automatically syncs new entries from the hub at the start of each session (daily throttled). No manual ctx connect sync needed.

    ","path":["Connect"],"tags":[]},{"location":"cli/connect/#shared-files","level":2,"title":"Shared Files","text":"

    Entries from the hub are stored in .context/hub/:

    .context/hub/\n  decisions.md      # Shared decisions with origin tags\n  learnings.md      # Shared learnings\n  conventions.md    # Shared conventions\n  .sync-state.json  # Last-seen sequence tracker\n

    These files are read-only (managed by sync/listen) and never mixed with local context files.

    ","path":["Connect"],"tags":[]},{"location":"cli/connect/#agent-integration","level":2,"title":"Agent Integration","text":"

    Include shared knowledge in agent context packets:

    ctx agent --include-hub\n

    Shared entries are included as Tier 8 in the budget-aware assembly, scored by recency and type relevance.

    ","path":["Connect"],"tags":[]},{"location":"cli/connection/","level":1,"title":"Connect","text":"","path":["CLI","Integrations","Connect"],"tags":[]},{"location":"cli/connection/#ctx-connect","level":2,"title":"ctx connect","text":"

    Connect a project to a ctx Hub for cross-project knowledge sharing. Projects publish decisions, learnings, conventions, and tasks to a hub; other subscribed projects receive them alongside local context.

    New to the ctx Hub?

    Start with the ctx Hub overview for the mental model (what the hub is, who it's for, what it is not), then walk through Getting Started. This page is a command reference, not an introduction.

    The unit of identity is a project, not a user. Registering a directory with ctx connection register binds a per-project client token in .context/.connect.enc. Two developers on the same project either share that file over a trusted channel, or each register under a different project name.

    Only structured entries flow through the hub: decision, learning, convention, task. Session journals, scratchpad contents, and other local state stay on the machine that created them.

    ","path":["CLI","Integrations","Connect"],"tags":[]},{"location":"cli/connection/#ctx-connection-register","level":3,"title":"ctx connection register","text":"

    One-time registration with a ctx Hub. Requires the ctx Hub address and admin token (printed by ctx hub start on first run).

    Examples:

    ctx connection register localhost:9900 --token ctx_adm_7f3a...\n

    On success, stores an encrypted connection config in .context/.connect.enc for future RPCs.

    ","path":["CLI","Integrations","Connect"],"tags":[]},{"location":"cli/connection/#ctx-connection-subscribe","level":3,"title":"ctx connection subscribe","text":"

    Set which entry types to receive from the ctx Hub. Only matching types are returned by sync and listen.

    Examples:

    ctx connection subscribe decision learning\nctx connection subscribe decision learning convention\n
    ","path":["CLI","Integrations","Connect"],"tags":[]},{"location":"cli/connection/#ctx-connection-sync","level":3,"title":"ctx connection sync","text":"

    Pull matching entries from the ctx Hub and write them to .context/hub/ as markdown files with origin tags and date headers. Tracks last-seen sequence for incremental sync.

    Examples:

    ctx connection sync\n
    ","path":["CLI","Integrations","Connect"],"tags":[]},{"location":"cli/connection/#ctx-connection-publish","level":3,"title":"ctx connection publish","text":"

    Push entries to the ctx Hub. Specify type and content as arguments.

    Examples:

    ctx connection publish decision \"Use UTC timestamps everywhere\"\nctx connection publish learning \"Go embed requires files in same package\"\n
    ","path":["CLI","Integrations","Connect"],"tags":[]},{"location":"cli/connection/#ctx-connection-listen","level":3,"title":"ctx connection listen","text":"

    Stream new entries from the ctx Hub in real-time. Writes to .context/hub/ as entries arrive. Press Ctrl-C to stop.

    Examples:

    ctx connection listen\n
    ","path":["CLI","Integrations","Connect"],"tags":[]},{"location":"cli/connection/#ctx-connection-status","level":3,"title":"ctx connection status","text":"

    Show ctx Hub connection state and entry statistics.

    Examples:

    ctx connection status\n
    ","path":["CLI","Integrations","Connect"],"tags":[]},{"location":"cli/connection/#automatic-sharing","level":2,"title":"Automatic Sharing","text":"

    Use --share on ctx add to write locally AND publish to the ctx Hub:

    ctx add decision \"Use UTC\" --share \\\n  --context \"Need consistency\" \\\n  --rationale \"Avoid timezone bugs\" \\\n  --consequence \"UI does conversion\"\n

    If the hub is unreachable, the local write succeeds and a warning is printed. The --share flag is best-effort; it never blocks local context updates.

    ","path":["CLI","Integrations","Connect"],"tags":[]},{"location":"cli/connection/#auto-sync","level":2,"title":"Auto-Sync","text":"

    Once registered, the check-hub-sync hook automatically syncs new entries from the ctx Hub at the start of each session (daily throttled). No manual ctx connection sync needed.

    ","path":["CLI","Integrations","Connect"],"tags":[]},{"location":"cli/connection/#shared-files","level":2,"title":"Shared Files","text":"

    Entries from the ctx Hub are stored in .context/hub/:

    .context/hub/\n  decisions.md      # Shared decisions with origin tags\n  learnings.md      # Shared learnings\n  conventions.md    # Shared conventions\n  .sync-state.json  # Last-seen sequence tracker\n

    These files are read-only (managed by sync/listen) and never mixed with local context files.

    ","path":["CLI","Integrations","Connect"],"tags":[]},{"location":"cli/connection/#agent-integration","level":2,"title":"Agent Integration","text":"

    Include shared knowledge in agent context packets:

    ctx agent --include-hub\n

    Shared entries are included as Tier 8 in the budget-aware assembly, scored by recency and type relevance.

    ","path":["CLI","Integrations","Connect"],"tags":[]},{"location":"cli/context/","level":1,"title":"Context Management","text":"","path":["CLI","Context","Context Management"],"tags":[]},{"location":"cli/context/#ctx-add","level":3,"title":"ctx add","text":"

    Add a new item to a context file.

    ctx add <type> <content> [flags]\n

    Types:

    Type Target File taskTASKS.mddecisionDECISIONS.mdlearningLEARNINGS.mdconventionCONVENTIONS.md

    Flags:

    Flag Short Description --priority <level>-p Priority for tasks: high, medium, low--section <name>-s Target section within file --context-c Context (required for decisions and learnings) --rationale-r Rationale for decisions (required for decisions) --consequence Consequence for decisions (required for decisions) --lesson-l Key insight (required for learnings) --application-a How to apply going forward (required for learnings) --file-f Read content from file instead of argument

    Examples:

    # Add a task\nctx add task \"Implement user authentication\" \\\n  --session-id abc12345 --branch main --commit 68fbc00a\nctx add task \"Fix login bug\" --priority high \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n\n# Record a decision (requires all ADR (Architectural Decision Record) fields)\nctx add decision \"Use PostgreSQL for primary database\" \\\n  --context \"Need a reliable database for production\" \\\n  --rationale \"PostgreSQL offers ACID compliance and JSON support\" \\\n  --consequence \"Team needs PostgreSQL training\" \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n\n# Note a learning (requires context, lesson, and application)\nctx add learning \"Vitest mocks must be hoisted\" \\\n  --context \"Tests failed with undefined mock errors\" \\\n  --lesson \"Vitest hoists vi.mock() calls to top of file\" \\\n  --application \"Always place vi.mock() before imports in test files\" \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n\n# Add to specific section\nctx add convention \"Use kebab-case for filenames\" --section \"Naming\"\n
    ","path":["CLI","Context","Context Management"],"tags":[]},{"location":"cli/context/#ctx-drift","level":3,"title":"ctx drift","text":"

    Detect stale or invalid context.

    ctx drift [flags]\n

    Flags:

    Flag Description --json Output machine-readable JSON --fix Auto-fix simple issues

    Checks:

    • Path references in ARCHITECTURE.md and CONVENTIONS.md exist
    • Task references are valid
    • Constitution rules aren't violated (heuristic)
    • Staleness indicators (old files, many completed tasks)
    • Missing packages: warns when internal/ directories exist on disk but are not referenced in ARCHITECTURE.md (suggests running /ctx-architecture)
    • Entry count: warns when LEARNINGS.md or DECISIONS.md exceed configurable thresholds (default: 30 learnings, 20 decisions), or when CONVENTIONS.md exceeds a line count threshold (default: 200). Configure via .ctxrc:
      entry_count_learnings: 30      # warn above this (0 = disable)\nentry_count_decisions: 20      # warn above this (0 = disable)\nconvention_line_count: 200     # warn above this (0 = disable)\n

    Example:

    ctx drift\nctx drift --json\nctx drift --fix\n

    Exit codes:

    Code Meaning 0 All checks passed 1 Warnings found 3 Violations found","path":["CLI","Context","Context Management"],"tags":[]},{"location":"cli/context/#ctx-sync","level":3,"title":"ctx sync","text":"

    Reconcile context with the current codebase state.

    ctx sync [flags]\n

    Flags:

    Flag Description --dry-run Show what would change without modifying

    What it does:

    • Scans codebase for structural changes
    • Compares with ARCHITECTURE.md
    • Suggests documenting dependencies if package files exist
    • Identifies stale or outdated context

    Example:

    ctx sync\nctx sync --dry-run\n
    ","path":["CLI","Context","Context Management"],"tags":[]},{"location":"cli/context/#ctx-compact","level":3,"title":"ctx compact","text":"

    Consolidate and clean up context files.

    • Moves completed tasks older than 7 days to the archive
    • Removes empty sections
    ctx compact [flags]\n

    Flags:

    Flag Description --archive Create .context/archive/ for old content

    Example:

    ctx compact\nctx compact --archive\n
    ","path":["CLI","Context","Context Management"],"tags":[]},{"location":"cli/context/#ctx-fmt","level":3,"title":"ctx fmt","text":"

    Format context files to a consistent line width.

    Wraps long lines in TASKS.md, DECISIONS.md, LEARNINGS.md, and CONVENTIONS.md at word boundaries. Markdown list items get 2-space continuation indent. Headings, tables, frontmatter, and HTML comments are preserved as-is.

    Idempotent: running twice produces the same output.

    ctx fmt [flags]\n

    Flags:

    Flag Type Default Description --widthint80 Target line width --checkboolfalse Check only, exit 1 if files would change

    Examples:

    ctx fmt              # format all context files\nctx fmt --check      # CI mode: check without modifying\nctx fmt --width 100  # custom width\n

    Also available as a Makefile target:

    make fmt-context\n
    ","path":["CLI","Context","Context Management"],"tags":[]},{"location":"cli/context/#ctx-task","level":3,"title":"ctx task","text":"

    Manage task completion, archival, and snapshots.

    ctx task <subcommand>\n
    ","path":["CLI","Context","Context Management"],"tags":[]},{"location":"cli/context/#ctx-task-complete","level":4,"title":"ctx task complete","text":"

    Mark a task as completed.

    ctx task complete <task-id-or-text>\n

    Arguments:

    • task-id-or-text: Task number or partial text match

    Examples:

    # By text (partial match)\nctx task complete \"user auth\"\n\n# By task number\nctx task complete 3\n
    ","path":["CLI","Context","Context Management"],"tags":[]},{"location":"cli/context/#ctx-task-archive","level":4,"title":"ctx task archive","text":"

    Move completed tasks from TASKS.md to a timestamped archive file.

    ctx task archive [flags]\n

    Flags:

    Flag Description --dry-run Preview changes without modifying files

    Archive files are stored in .context/archive/ with timestamped names (tasks-YYYY-MM-DD.md). Completed tasks (marked with [x]) are moved; pending tasks ([ ]) remain in TASKS.md.

    Example:

    ctx task archive\nctx task archive --dry-run\n
    ","path":["CLI","Context","Context Management"],"tags":[]},{"location":"cli/context/#ctx-task-snapshot","level":4,"title":"ctx task snapshot","text":"

    Create a point-in-time snapshot of TASKS.md without modifying the original.

    ctx task snapshot [name]\n

    Arguments:

    • name: Optional name for the snapshot (defaults to \"snapshot\")

    Snapshots are stored in .context/archive/ with timestamped names (tasks-<name>-YYYY-MM-DD-HHMM.md).

    Example:

    ctx task snapshot\nctx task snapshot \"before-refactor\"\n
    ","path":["CLI","Context","Context Management"],"tags":[]},{"location":"cli/context/#ctx-permission","level":3,"title":"ctx permission","text":"

    Manage Claude Code permission snapshots.

    ctx permission <subcommand>\n
    ","path":["CLI","Context","Context Management"],"tags":[]},{"location":"cli/context/#ctx-permission-snapshot","level":4,"title":"ctx permission snapshot","text":"

    Save .claude/settings.local.json as the golden image.

    ctx permission snapshot\n

    Creates .claude/settings.golden.json as a byte-for-byte copy of the current settings. Overwrites if the golden file already exists.

    The golden file is meant to be committed to version control and shared with the team.

    Example:

    ctx permission snapshot\n# Saved golden image: .claude/settings.golden.json\n
    ","path":["CLI","Context","Context Management"],"tags":[]},{"location":"cli/context/#ctx-permission-restore","level":4,"title":"ctx permission restore","text":"

    Replace settings.local.json with the golden image.

    ctx permission restore\n

    Prints a diff of dropped (session-accumulated) and restored permissions. No-op if the files already match.

    Example:

    ctx permission restore\n# Dropped 3 session permission(s):\n#   - Bash(cat /tmp/debug.log:*)\n#   - Bash(rm /tmp/test-*:*)\n#   - Bash(curl https://example.com:*)\n# Restored from golden image.\n
    ","path":["CLI","Context","Context Management"],"tags":[]},{"location":"cli/context/#ctx-reindex","level":3,"title":"ctx reindex","text":"

    Regenerate the quick-reference index for both DECISIONS.md and LEARNINGS.md in a single invocation.

    ctx reindex\n

    This is a convenience wrapper around ctx decision reindex and ctx learning reindex. Both files grow at similar rates and users typically want to reindex both after manual edits.

    The index is a compact table of date and title for each entry, allowing AI tools to scan entries without reading the full file.

    Example:

    ctx reindex\n# ✓ Index regenerated with 12 entries\n# ✓ Index regenerated with 8 entries\n
    ","path":["CLI","Context","Context Management"],"tags":[]},{"location":"cli/context/#ctx-decision","level":3,"title":"ctx decision","text":"

    Manage the DECISIONS.md file.

    ctx decision <subcommand>\n
    ","path":["CLI","Context","Context Management"],"tags":[]},{"location":"cli/context/#ctx-decision-reindex","level":4,"title":"ctx decision reindex","text":"

    Regenerate the quick-reference index at the top of DECISIONS.md.

    ctx decision reindex\n

    The index is a compact table showing the date and title for each decision, allowing AI tools to quickly scan entries without reading the full file.

    Use this after manual edits to DECISIONS.md or when migrating existing files to use the index format.

    Example:

    ctx decision reindex\n# ✓ Index regenerated with 12 entries\n
    ","path":["CLI","Context","Context Management"],"tags":[]},{"location":"cli/context/#ctx-learning","level":3,"title":"ctx learning","text":"

    Manage the LEARNINGS.md file.

    ctx learning <subcommand>\n
    ","path":["CLI","Context","Context Management"],"tags":[]},{"location":"cli/context/#ctx-learning-reindex","level":4,"title":"ctx learning reindex","text":"

    Regenerate the quick-reference index at the top of LEARNINGS.md.

    ctx learning reindex\n

    The index is a compact table showing the date and title for each learning, allowing AI tools to quickly scan entries without reading the full file.

    Use this after manual edits to LEARNINGS.md or when migrating existing files to use the index format.

    Example:

    ctx learning reindex\n# ✓ Index regenerated with 8 entries\n
    ","path":["CLI","Context","Context Management"],"tags":[]},{"location":"cli/doctor/","level":1,"title":"Doctor","text":"","path":["CLI","Diagnostics","Doctor"],"tags":[]},{"location":"cli/doctor/#ctx-doctor","level":3,"title":"ctx doctor","text":"

    Structural health check across context, hooks, and configuration. Runs mechanical checks that don't require semantic analysis. Think of it as ctx status + ctx drift + configuration audit in one pass.

    ctx doctor [flags]\n

    Flags:

    Flag Short Type Default Description --json-j bool false Machine-readable JSON output","path":["CLI","Diagnostics","Doctor"],"tags":[]},{"location":"cli/doctor/#what-it-checks","level":4,"title":"What It Checks","text":"Check Category What it verifies Context initialized Structure .context/ directory exists Required files present Structure All required context files exist (TASKS.md, etc.) Drift detected Quality Stale paths, missing files, constitution violations Event logging status Hooks Whether event_log: true is set in .ctxrc Webhook configured Hooks .notify.enc file exists Pending reminders State Count of entries in reminders.json Task completion ratio State Pending vs completed tasks in TASKS.md Context token size Size Estimated token count across all context files Recent event activity Events Last event timestamp (only when event logging is enabled)","path":["CLI","Diagnostics","Doctor"],"tags":[]},{"location":"cli/doctor/#output-format-human","level":4,"title":"Output Format (Human)","text":"
    ctx doctor\n==========\n\nStructure\n  ✓ Context initialized (.context/)\n  ✓ Required files present (4/4)\n\nQuality\n  ⚠ Drift: 2 warnings (stale path in ARCHITECTURE.md, high entry count in LEARNINGS.md)\n\nHooks\n  ✓ hooks.json valid (14 hooks registered)\n  ○ Event logging disabled (enable with event_log: true in .ctxrc)\n\nState\n  ✓ No pending reminders\n  ⚠ Task completion ratio high (18/22 = 82%): consider archiving\n\nSize\n  ✓ Context size: ~4200 tokens (budget: 8000)\n\nSummary: 2 warnings, 0 errors\n

    Status indicators:

    Icon Status Meaning ✓ ok Check passed ⚠ warning Non-critical issue worth fixing ✗ error Problem that needs attention ○ info Informational note","path":["CLI","Diagnostics","Doctor"],"tags":[]},{"location":"cli/doctor/#output-format-json","level":4,"title":"Output Format (JSON)","text":"
    {\n  \"results\": [\n    {\n      \"name\": \"context_initialized\",\n      \"category\": \"Structure\",\n      \"status\": \"ok\",\n      \"message\": \"Context initialized (.context/)\"\n    },\n    {\n      \"name\": \"required_files\",\n      \"category\": \"Structure\",\n      \"status\": \"ok\",\n      \"message\": \"Required files present (4/4)\"\n    },\n    {\n      \"name\": \"drift\",\n      \"category\": \"Quality\",\n      \"status\": \"warning\",\n      \"message\": \"Drift: 2 warnings\"\n    },\n    {\n      \"name\": \"event_logging\",\n      \"category\": \"Hooks\",\n      \"status\": \"info\",\n      \"message\": \"Event logging disabled (enable with event_log: true in .ctxrc)\"\n    },\n    {\n      \"name\": \"webhook\",\n      \"category\": \"Hooks\",\n      \"status\": \"ok\",\n      \"message\": \"Webhook configured\"\n    },\n    {\n      \"name\": \"reminders\",\n      \"category\": \"State\",\n      \"status\": \"ok\",\n      \"message\": \"No pending reminders\"\n    },\n    {\n      \"name\": \"task_completion\",\n      \"category\": \"State\",\n      \"status\": \"warning\",\n      \"message\": \"Tasks: 18/22 completed (82%): consider archiving with ctx task archive\"\n    },\n    {\n      \"name\": \"context_size\",\n      \"category\": \"Size\",\n      \"status\": \"ok\",\n      \"message\": \"Context size: ~4200 tokens (budget: 8000)\"\n    }\n  ],\n  \"warnings\": 2,\n  \"errors\": 0\n}\n

    Examples:

    # Quick structural health check\nctx doctor\n\n# Machine-readable output for scripting\nctx doctor --json\n\n# Count warnings\nctx doctor --json | jq '.warnings'\n\n# Check for errors only\nctx doctor --json | jq '[.results[] | select(.status == \"error\")]'\n
    ","path":["CLI","Diagnostics","Doctor"],"tags":[]},{"location":"cli/doctor/#when-to-use-what","level":4,"title":"When to Use What","text":"Tool When ctx status Quick glance at files, tokens, and drift ctx doctor Thorough structural checkup (hooks, config, events too) /ctx-doctor Agent-driven diagnosis with event log pattern analysis

    ctx status tells you what's there. ctx doctor tells you what's wrong. /ctx-doctor tells you why it's wrong and what to do about it.

    ","path":["CLI","Diagnostics","Doctor"],"tags":[]},{"location":"cli/doctor/#what-it-does-not-do","level":4,"title":"What It Does Not Do","text":"
    • No event pattern analysis: that's the /ctx-doctor skill's job
    • No auto-fixing: reports findings, doesn't modify anything
    • No external service checks: doesn't verify webhook endpoint availability

    See also: Troubleshooting | ctx hook event | /ctx-doctor skill | Detecting and Fixing Drift

    ","path":["CLI","Diagnostics","Doctor"],"tags":[]},{"location":"cli/event/","level":1,"title":"Event","text":"","path":["Event"],"tags":[]},{"location":"cli/event/#ctx-hook-event","level":3,"title":"ctx hook event","text":"

    Query the local hook event log. Requires event_log: true in .ctxrc. Reads events from .context/state/events.jsonl and outputs them in a human-readable table or raw JSONL format.

    All filter flags combine with AND logic.

    ctx hook event [flags]\n

    Flags:

    Flag Description --hook Filter by hook name --session Filter by session ID --event Filter by event type (relay, nudge) --last Show last N events (default: 50) --json Output raw JSONL (for piping to jq) --all Include rotated log file

    Examples:

    ctx hook event                                        # recent events\nctx hook event --hook check-context-size --last 10    # one hook, last 10\nctx hook event --json | jq '.hook'                    # pipe to jq\nctx hook event --session abc123                       # filter by session\n
    ","path":["Event"],"tags":[]},{"location":"cli/guide/","level":1,"title":"Guide","text":"","path":["CLI","Getting Started","Guide"],"tags":[]},{"location":"cli/guide/#ctx-guide","level":2,"title":"ctx guide","text":"

    Quick-reference cheat sheet for common ctx commands and skills.

    ctx guide [flags]\n

    Flags:

    Flag Description --skills Show available skills --commands Show available CLI commands

    Example:

    # Show the full cheat sheet\nctx guide\n\n# Skills only\nctx guide --skills\n\n# Commands only\nctx guide --commands\n

    Works without initialization (no .context/ required). Useful for a printable one-pager when onboarding to a project.

    ","path":["CLI","Getting Started","Guide"],"tags":[]},{"location":"cli/hook/","level":1,"title":"Hook","text":"","path":["CLI","Runtime","Hook"],"tags":[]},{"location":"cli/hook/#ctx-hook","level":3,"title":"ctx hook","text":"

    Manage hook-related settings: messages, notifications, pause/resume, and event log.

    ctx hook <subcommand> [flags]\n
    ","path":["CLI","Runtime","Hook"],"tags":[]},{"location":"cli/hook/#subcommands","level":2,"title":"Subcommands","text":"Subcommand Description ctx hook message list Show all hook messages with override status ctx hook message show <h> <v> Print the effective message template ctx hook message edit <h> <v> Copy default to .context/ for editing ctx hook message reset <h> <v> Delete user override, revert to default ctx hook notify [message] Send a webhook notification ctx hook notify setup Configure and encrypt webhook URL ctx hook notify test Send a test notification ctx hook pause Pause all context hooks for this session ctx hook resume Resume paused context hooks ctx hook event Query the local hook event log","path":["CLI","Runtime","Hook"],"tags":[]},{"location":"cli/hook/#examples","level":2,"title":"Examples","text":"
    # View and manage hook messages\nctx hook message list\nctx hook message show qa-reminder gate\nctx hook message edit qa-reminder gate\n\n# Webhook notifications\nctx hook notify setup\nctx hook notify --event loop \"Loop completed\"\n\n# Pause/resume hooks\nctx hook pause\nctx hook resume\n\n# Browse event log\nctx hook event --last 20\nctx hook event --hook qa-reminder --json\n

    See also: Customizing Hook Messages | Webhook Notifications | Pausing Context Hooks | System Hooks Audit

    ","path":["CLI","Runtime","Hook"],"tags":[]},{"location":"cli/hub/","level":1,"title":"Hub","text":"","path":["CLI","Integrations","Hub"],"tags":[]},{"location":"cli/hub/#ctx-hub","level":2,"title":"ctx hub","text":"

    Operator commands for a ctx Hub: the gRPC server that fans out decisions, learnings, conventions, and tasks across projects. Use ctx hub to start and stop the server, inspect cluster state, add or remove peers at runtime, and hand off leadership before maintenance.

    Who Needs This Page

    You only need ctx hub if you are running a hub server or cluster. For client-side operations (register, subscribe, sync, publish, listen), see ctx connect. For the mental model behind the hub as a whole, read the ctx Hub overview.

    ","path":["CLI","Integrations","Hub"],"tags":[]},{"location":"cli/hub/#ctx-hub-start","level":3,"title":"ctx hub start","text":"

    Start the hub gRPC server.

    Examples:

    ctx hub start                           # Foreground, default port 9900\nctx hub start --port 8080               # Custom port\nctx hub start --data-dir /srv/ctx-hub   # Custom data directory\n

    On first run, generates an admin token and prints it to stdout. Save this token; it's required for ctx connection register in client projects. Subsequent runs reuse the stored token from <data-dir>/admin.token.

    Default data directory: ~/.ctx/hub-data/

    ","path":["CLI","Integrations","Hub"],"tags":[]},{"location":"cli/hub/#daemon-mode","level":4,"title":"Daemon Mode","text":"

    Run the hub as a detached background process:

    ctx hub start --daemon          # Fork to background\nctx hub stop                    # Graceful shutdown\n

    The daemon writes a PID file to <data-dir>/hub.pid. Stop the daemon with ctx hub stop (see below).

    ","path":["CLI","Integrations","Hub"],"tags":[]},{"location":"cli/hub/#cluster-mode","level":4,"title":"Cluster Mode","text":"

    For high availability, run multiple hubs with Raft-based leader election:

    ctx hub start --port 9900 \\\n  --peers host2:9901,host3:9901\n

    Raft is used only for leader election. Data replication uses sequence-based gRPC sync on the append-only JSONL log; there is no multi-node consensus on writes. See the HA cluster recipe for the full setup and the Raft-lite durability caveat.

    ","path":["CLI","Integrations","Hub"],"tags":[]},{"location":"cli/hub/#flags","level":4,"title":"Flags","text":"Flag Description Default --port Hub listen port 9900--data-dir Hub data directory ~/.ctx/hub-data/--daemon Run the hub server in the background false--peers Comma-separated peer addresses for cluster mode (none)","path":["CLI","Integrations","Hub"],"tags":[]},{"location":"cli/hub/#validation","level":4,"title":"Validation","text":"

    The hub validates every published entry before accepting it:

    • Type must be one of decision, learning, convention, task
    • ID and Origin are required and non-empty
    • Content size capped at 1 MB (text-only)
    • Duplicate project registration is rejected (one token per project)
    ","path":["CLI","Integrations","Hub"],"tags":[]},{"location":"cli/hub/#ctx-hub-stop","level":3,"title":"ctx hub stop","text":"

    Stop a running hub daemon.

    Examples:

    ctx hub stop                            # Stop using default data dir\nctx hub stop --data-dir /srv/ctx-hub    # Custom data directory\n

    Sends SIGTERM to the PID recorded in <data-dir>/hub.pid, waits for in-flight RPCs to drain, and removes the PID file. Safe to rerun: if no daemon is running, returns a \"no running hub\" error without side effects.

    ","path":["CLI","Integrations","Hub"],"tags":[]},{"location":"cli/hub/#ctx-hub-status","level":3,"title":"ctx hub status","text":"

    Show cluster status: role, peers, sync state, entry count, and uptime.

    Examples:

    ctx hub status\n
    ","path":["CLI","Integrations","Hub"],"tags":[]},{"location":"cli/hub/#ctx-hub-peer","level":3,"title":"ctx hub peer","text":"

    Add or remove peers from the cluster at runtime. Useful for scaling up or replacing a decommissioned node without restarting the leader.

    Examples:

    ctx hub peer add host2:9901\nctx hub peer remove host2:9901\n
    ","path":["CLI","Integrations","Hub"],"tags":[]},{"location":"cli/hub/#ctx-hub-stepdown","level":3,"title":"ctx hub stepdown","text":"

    Transfer leadership to another node gracefully. Triggers a new election among the remaining followers before the current leader steps down. Use before taking the leader offline for maintenance.

    Examples:

    ctx hub stepdown\n
    ","path":["CLI","Integrations","Hub"],"tags":[]},{"location":"cli/hub/#see-also","level":3,"title":"See Also","text":"
    • ctx connect: client-side commands (register, subscribe, sync, publish, listen)
    • ctx Hub overview: mental model and user stories
    • ctx Hub: Getting Started
    • Hub operations: production deployment, backup, monitoring
    • Hub failure modes
    • Hub security model
    ","path":["CLI","Integrations","Hub"],"tags":[]},{"location":"cli/init-status/","level":1,"title":"Init and Status","text":"","path":["CLI","Getting Started","Init and Status"],"tags":[]},{"location":"cli/init-status/#ctx-init","level":3,"title":"ctx init","text":"

    Initialize a new .context/ directory with template files.

    ctx init [flags]\n

    Flags:

    Flag Short Description --force-f Overwrite existing context files --minimal-m Only create essential files (TASKS.md, DECISIONS.md, CONSTITUTION.md) --merge Auto-merge ctx content into existing CLAUDE.md

    Creates:

    • .context/ directory with all template files
    • .claude/settings.local.json with pre-approved ctx permissions
    • CLAUDE.md with bootstrap instructions (or merges into existing)

    Claude Code hooks and skills are provided by the ctx plugin (see Integrations).

    Example:

    # Standard init\nctx init\n\n# Minimal setup (just core files)\nctx init --minimal\n\n# Force overwrite existing\nctx init --force\n\n# Merge into existing files\nctx init --merge\n
    ","path":["CLI","Getting Started","Init and Status"],"tags":[]},{"location":"cli/init-status/#ctx-status","level":3,"title":"ctx status","text":"

    Show the current context summary.

    ctx status [flags]\n

    Flags:

    Flag Short Description --json Output as JSON --verbose-v Include file contents summary

    Output:

    • Context directory path
    • Total files and token estimate
    • Status of each file (loaded, empty, missing)
    • Recent activity (modification times)
    • Drift warnings if any

    Example:

    ctx status\nctx status --json\nctx status --verbose\n
    ","path":["CLI","Getting Started","Init and Status"],"tags":[]},{"location":"cli/init-status/#ctx-agent","level":3,"title":"ctx agent","text":"

    Print an AI-ready context packet optimized for LLM consumption.

    ctx agent [flags]\n

    Flags:

    Flag Default Description --budget 8000 Token budget: controls content selection and prioritization --format md Output format: md or json--cooldown 10m Suppress repeated output within this duration (requires --session) --session (none) Session ID for cooldown isolation (e.g., $PPID) --include-hub false Include hub entries from .context/hub/

    How budget works:

    The budget controls how much context is included. Entries are selected in priority tiers:

    1. Constitution: always included in full (inviolable rules)
    2. Tasks: all active tasks, up to 40% of budget
    3. Conventions: all conventions, up to 20% of budget
    4. Decisions: scored by recency and relevance to active tasks
    5. Learnings: scored by recency and relevance to active tasks
    6. Steering: applicable steering file bodies, scored by their inclusion mode and description match against the active prompt
    7. Skill: named skill content (from --skill)
    8. Hub: entries from .context/hub/ (with --include-hub, see ctx connect)

    Decisions and learnings are ranked by a combined score (how recent + how relevant to your current tasks). High-scoring entries are included with their full body. Entries that don't fit get title-only summaries in an \"Also Noted\" section. Superseded entries are excluded.

    Output Sections:

    Section Source Selection Read These Files all .context/ Non-empty files in priority order Constitution CONSTITUTION.md All rules (never truncated) Current Tasks TASKS.md All unchecked tasks (budget-capped) Key Conventions CONVENTIONS.md All items (budget-capped) Recent Decisions DECISIONS.md Full body, scored by relevance Key Learnings LEARNINGS.md Full body, scored by relevance Also Noted overflow Title-only summaries

    Example:

    # Default (8000 tokens, markdown)\nctx agent\n\n# Smaller packet for tight context windows\nctx agent --budget 4000\n\n# JSON format for programmatic use\nctx agent --format json\n\n# Pipe to file\nctx agent --budget 4000 > context.md\n\n# With cooldown (hooks/automation: requires --session)\nctx agent --session $PPID\n

    Use case: Copy-paste into AI chat, pipe to system prompt, or use in hooks.

    ","path":["CLI","Getting Started","Init and Status"],"tags":[]},{"location":"cli/init-status/#ctx-load","level":3,"title":"ctx load","text":"

    Load and display assembled context as AI would see it.

    ctx load [flags]\n

    Flags:

    Flag Description --budget <tokens> Token budget for assembly (default: 8000) --raw Output raw file contents without assembly

    Example:

    ctx load\nctx load --budget 16000\nctx load --raw\n
    ","path":["CLI","Getting Started","Init and Status"],"tags":[]},{"location":"cli/journal/","level":1,"title":"Journal","text":"","path":["CLI","Sessions","Journal"],"tags":[]},{"location":"cli/journal/#ctx-journal","level":3,"title":"ctx journal","text":"

    Browse and search AI session history from Claude Code and other tools.

    ctx journal <subcommand>\n
    ","path":["CLI","Sessions","Journal"],"tags":[]},{"location":"cli/journal/#ctx-journal-source","level":4,"title":"ctx journal source","text":"

    List all parsed sessions.

    ctx journal source [flags]\n

    Flags:

    Flag Short Description --limit-n Maximum sessions to display (default: 20) --project-p Filter by project name --tool-t Filter by tool (e.g., claude-code) --all-projects Include sessions from all projects

    Sessions are sorted by date (newest first) and display slug, project, start time, duration, turn count, and token usage.

    Example:

    ctx journal source\nctx journal source --limit 5\nctx journal source --project ctx\nctx journal source --tool claude-code\n
    ","path":["CLI","Sessions","Journal"],"tags":[]},{"location":"cli/journal/#ctx-journal-source-show","level":4,"title":"ctx journal source --show","text":"

    Show details of a specific session.

    ctx journal source --show [session-id] [flags]\n

    Flags:

    Flag Description --latest Show the most recent session --full Show full message content --all-projects Search across all projects

    The session ID can be a full UUID, partial match, or session slug name.

    Example:

    ctx journal source --show abc123\nctx journal source --show gleaming-wobbling-sutherland\nctx journal source --show --latest\nctx journal source --show --latest --full\n
    ","path":["CLI","Sessions","Journal"],"tags":[]},{"location":"cli/journal/#ctx-journal-import","level":4,"title":"ctx journal import","text":"

    Import sessions to editable journal files in .context/journal/.

    ctx journal import [session-id] [flags]\n

    Flags:

    Flag Description --all Import all sessions (only new files by default) --all-projects Import from all projects --regenerate Re-import existing files (preserves YAML frontmatter by default) --keep-frontmatter Preserve enriched YAML frontmatter during regeneration (default: true) --yes, -y Skip confirmation prompt --dry-run Show what would be imported without writing files

    Safe by default: --all only imports new sessions. Existing files are skipped. Use --regenerate to re-import existing files (conversation content is regenerated, YAML frontmatter from enrichment is preserved by default). Use --keep-frontmatter=false to discard enriched frontmatter during regeneration.

    Locked entries (via ctx journal lock) are always skipped, regardless of flags.

    Single-session import (ctx journal import <id>) always writes without prompting, since you are explicitly targeting one session.

    The journal/ directory should be gitignored (like sessions/) since it contains raw conversation data.

    Example:

    ctx journal import abc123                 # Import one session\nctx journal import --all                  # Import only new sessions\nctx journal import --all --dry-run        # Preview what would be imported\nctx journal import --all --regenerate     # Re-import existing (prompts)\nctx journal import --all --regenerate -y  # Re-import without prompting\nctx journal import --all --regenerate --keep-frontmatter=false -y  # Discard frontmatter\n
    ","path":["CLI","Sessions","Journal"],"tags":[]},{"location":"cli/journal/#ctx-journal-lock","level":4,"title":"ctx journal lock","text":"

    Protect journal entries from being overwritten by import --regenerate or modified by enrichment skills (/ctx-journal-enrich, /ctx-journal-enrich-all).

    ctx journal lock <pattern> [flags]\n

    Flags:

    Flag Description --all Lock all journal entries

    The pattern matches filenames by slug, date, or short ID. Locking a multi-part entry locks all parts. The lock is recorded in .context/journal/.state.json and a locked: true line is added to the file's YAML frontmatter for visibility.

    Example:

    ctx journal lock abc12345\nctx journal lock 2026-01-21-session-abc12345.md\nctx journal lock --all\n
    ","path":["CLI","Sessions","Journal"],"tags":[]},{"location":"cli/journal/#ctx-journal-unlock","level":4,"title":"ctx journal unlock","text":"

    Remove lock protection from journal entries.

    ctx journal unlock <pattern> [flags]\n

    Flags:

    Flag Description --all Unlock all journal entries

    Example:

    ctx journal unlock abc12345\nctx journal unlock --all\n
    ","path":["CLI","Sessions","Journal"],"tags":[]},{"location":"cli/journal/#ctx-journal-sync","level":4,"title":"ctx journal sync","text":"

    Sync lock state from journal frontmatter to .state.json.

    ctx journal sync\n

    Scans all journal markdowns and updates .state.json to match each file's frontmatter. Files with locked: true in frontmatter are marked locked in state; files without a locked: line have their lock cleared.

    This is the inverse of ctx journal lock: instead of state driving frontmatter, frontmatter drives state. Useful after batch enrichment where you add locked: true to frontmatter manually.

    Example:

    # After enriching entries and adding locked: true to frontmatter\nctx journal sync\n
    ","path":["CLI","Sessions","Journal"],"tags":[]},{"location":"cli/journal/#ctx-journal_1","level":3,"title":"ctx journal","text":"

    Analyze and synthesize imported session files.

    ctx journal <subcommand>\n
    ","path":["CLI","Sessions","Journal"],"tags":[]},{"location":"cli/journal/#ctx-journal-site","level":4,"title":"ctx journal site","text":"

    Generate a static site from journal entries in .context/journal/.

    ctx journal site [flags]\n

    Flags:

    Flag Short Description --output-o Output directory (default: .context/journal-site) --build Run zensical build after generating --serve Run zensical serve after generating

    Creates a zensical-compatible site structure with an index page listing all sessions by date, and individual pages for each journal entry.

    Requires zensical to be installed for --build or --serve:

    pipx install zensical\n

    Example:

    ctx journal site                    # Generate in .context/journal-site/\nctx journal site --output ~/public  # Custom output directory\nctx journal site --build            # Generate and build HTML\nctx journal site --serve            # Generate and serve locally\n
    ","path":["CLI","Sessions","Journal"],"tags":[]},{"location":"cli/journal/#ctx-journal-obsidian","level":4,"title":"ctx journal obsidian","text":"

    Generate an Obsidian vault from journal entries in .context/journal/.

    ctx journal obsidian [flags]\n

    Flags:

    Flag Short Description --output-o Output directory (default: .context/journal-obsidian)

    Creates an Obsidian-compatible vault with:

    • Wikilinks ([[target|display]]) for all internal navigation
    • MOC pages (Map of Content) for topics, key files, and session types
    • Related sessions footer linking entries that share topics
    • Transformed frontmatter (topicstags for Obsidian integration)
    • Minimal .obsidian/ config enforcing wikilink mode

    No external dependencies are required: Open the output directory as an Obsidian vault directly.

    Example:

    ctx journal obsidian                        # Generate in .context/journal-obsidian/\nctx journal obsidian --output ~/vaults/ctx  # Custom output directory\n
    ","path":["CLI","Sessions","Journal"],"tags":[]},{"location":"cli/journal/#ctx-journal-schema-check","level":4,"title":"ctx journal schema check","text":"

    Validate JSONL session files against the embedded schema and report drift.

    ctx journal schema check [flags]\n

    Flags:

    Flag Short Description --dir Directory to scan for JSONL files --all-projects Scan all Claude Code project directories --quiet-q Exit code only (0 = clean, 1 = drift)

    Scans JSONL files for unknown fields, missing required fields, unknown record types, and unknown content block types. When drift is found, writes a Markdown report to .context/reports/schema-drift.md. When drift resolves, the report is automatically deleted.

    Designed for interactive use, CI pipelines, and nightly cron jobs.

    Example:

    ctx journal schema check                    # Current project\nctx journal schema check --all-projects     # All projects\nctx journal schema check --quiet            # Exit code only\nctx journal schema check --dir /path/to     # Custom directory\n
    ","path":["CLI","Sessions","Journal"],"tags":[]},{"location":"cli/journal/#ctx-journal-schema-dump","level":4,"title":"ctx journal schema dump","text":"

    Print the embedded JSONL schema definition.

    ctx journal schema dump\n

    Shows all known record types with their required and optional fields, and all recognized content block types with their parse status. Useful for inspecting what the schema validator expects.

    Example:

    ctx journal schema dump\n
    ","path":["CLI","Sessions","Journal"],"tags":[]},{"location":"cli/journal/#ctx-serve","level":3,"title":"ctx serve","text":"

    Serve any zensical directory locally. This is a serve-only command: It does not generate or regenerate site content.

    ctx serve [directory]\n

    If no directory is specified, defaults to the journal site (.context/journal-site).

    Requires zensical to be installed:

    pipx install zensical\n

    ctx serve vs. ctx journal site --serve

    ctx journal site --serve generates the journal site then serves it: an all-in-one command. ctx serve only serves an existing directory, and works with any zensical site (journal, docs, etc.).

    Example:

    ctx serve                        # Serve journal site (no regeneration)\nctx serve .context/journal-site  # Same, explicit path\nctx serve ./site                 # Serve the docs site\n
    ","path":["CLI","Sessions","Journal"],"tags":[]},{"location":"cli/loop/","level":1,"title":"Loop","text":"","path":["CLI","Integrations","Loop"],"tags":[]},{"location":"cli/loop/#ctx-loop","level":2,"title":"ctx loop","text":"

    Generate a shell script for running an autonomous loop.

    An autonomous loop continuously runs an AI assistant with the same prompt until a completion signal is detected, enabling iterative development where the AI builds on its previous work.

    ctx loop [flags]\n

    Flags:

    Flag Short Description Default --tool <tool>-t AI tool: claude, aider, or genericclaude--prompt <file>-p Prompt file to use .context/loop.md--max-iterations <n>-n Maximum iterations (0 = unlimited) 0--completion <signal>-c Completion signal to detect SYSTEM_CONVERGED--output <file>-o Output script filename loop.sh

    Examples:

    # Generate loop.sh for Claude Code\nctx loop\n\n# Generate for Aider with custom prompt\nctx loop --tool aider --prompt TASKS.md\n\n# Limit to 10 iterations\nctx loop --max-iterations 10\n\n# Output to custom file\nctx loop -o my-loop.sh\n

    Running the generated loop:

    ctx loop\nchmod +x loop.sh\n./loop.sh\n

    See also: Autonomous Loops for the full workflow.

    ","path":["CLI","Integrations","Loop"],"tags":[]},{"location":"cli/mcp/","level":1,"title":"MCP Server","text":"","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#ctx-mcp","level":2,"title":"ctx mcp","text":"

    Run ctx as a Model Context Protocol (MCP) server. MCP is a standard protocol that lets AI tools discover and consume context from external sources via JSON-RPC 2.0 over stdin/stdout.

    This makes ctx accessible to any MCP-compatible AI tool without custom hooks or integrations:

    • Claude Desktop
    • Cursor
    • Windsurf
    • VS Code Copilot
    • Any tool supporting MCP
    ","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#ctx-mcp-serve","level":3,"title":"ctx mcp serve","text":"

    Start the MCP server. This command reads JSON-RPC 2.0 requests from stdin and writes responses to stdout. It is intended to be launched by MCP clients (Claude Desktop, Cursor, VS Code Copilot), not run directly from a shell. See Configuration below for how each host launches it.

    Flags: None. The server uses the configured context directory (from --context-dir, CTX_DIR, .ctxrc, or the default .context).

    Examples:

    # Normal invocation (by an MCP client via stdio transport)\nctx mcp serve\n\n# Pin a context directory for a specific workspace\nctx --context-dir /path/to/project/.context mcp serve\n\n# Verify the binary starts without a client attached (Ctrl-C to exit)\nctx mcp serve < /dev/null\n
    ","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#configuration","level":2,"title":"Configuration","text":"","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#claude-desktop","level":3,"title":"Claude Desktop","text":"

    Add to ~/Library/Application Support/Claude/claude_desktop_config.json:

    {\n  \"mcpServers\": {\n    \"ctx\": {\n      \"command\": \"ctx\",\n      \"args\": [\"mcp\", \"serve\"]\n    }\n  }\n}\n
    ","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#cursor","level":3,"title":"Cursor","text":"

    Add to .cursor/mcp.json in your project:

    {\n  \"mcpServers\": {\n    \"ctx\": {\n      \"command\": \"ctx\",\n      \"args\": [\"mcp\", \"serve\"]\n    }\n  }\n}\n
    ","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#vs-code-copilot","level":3,"title":"VS Code (Copilot)","text":"

    Add to .vscode/mcp.json:

    {\n  \"servers\": {\n    \"ctx\": {\n      \"command\": \"ctx\",\n      \"args\": [\"mcp\", \"serve\"]\n    }\n  }\n}\n
    ","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#resources","level":2,"title":"Resources","text":"

    Resources expose context files as read-only content. Each resource has a URI, name, and returns Markdown text.

    URI Name Description ctx://context/constitution constitution Hard rules that must never be violated ctx://context/tasks tasks Current work items and their status ctx://context/conventions conventions Code patterns and standards ctx://context/architecture architecture System architecture documentation ctx://context/decisions decisions Architectural decisions with rationale ctx://context/learnings learnings Gotchas, tips, and lessons learned ctx://context/glossary glossary Project-specific terminology ctx://context/agent agent All files assembled in priority read order

    The agent resource assembles all non-empty context files into a single Markdown document, ordered by the configured read priority.

    ","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#resource-subscriptions","level":3,"title":"Resource Subscriptions","text":"

    Clients can subscribe to resource changes via resources/subscribe. The server polls for file mtime changes (default: 5 seconds) and emits notifications/resources/updated when a subscribed file changes on disk.

    ","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#tools","level":2,"title":"Tools","text":"

    Tools expose ctx commands as callable operations. Each tool accepts JSON arguments and returns text results.

    ","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#ctx_status","level":3,"title":"ctx_status","text":"

    Show context health: file count, token estimate, and per-file summary.

    Arguments: None. Read-only.

    ","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#ctx_add","level":3,"title":"ctx_add","text":"

    Add a task, decision, learning, or convention to the context.

    Argument Type Required Description type string Yes Entry type: task, decision, learning, convention content string Yes Title or main content priority string No Priority level (tasks only): high, medium, low context string Conditional Context field (decisions and learnings) rationale string Conditional Rationale (decisions only) consequence string Conditional Consequence (decisions only) lesson string Conditional Lesson learned (learnings only) application string Conditional How to apply (learnings only)","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#ctx_complete","level":3,"title":"ctx_complete","text":"

    Mark a task as done by number or text match.

    Argument Type Required Description query string Yes Task number (e.g. \"1\") or search text","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#ctx_drift","level":3,"title":"ctx_drift","text":"

    Detect stale or invalid context. Returns violations, warnings, and passed checks.

    Arguments: None. Read-only.

    ","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#ctx_journal_source","level":3,"title":"ctx_journal_source","text":"

    Query recent AI session history (summaries, decisions, topics).

    Argument Type Required Description limit number No Max sessions to return (default: 5) since string No ISO date filter: sessions after this date (YYYY-MM-DD)

    Read-only.

    ","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#ctx_watch_update","level":3,"title":"ctx_watch_update","text":"

    Apply a structured context update to .context/ files. Supports task, decision, learning, convention, and complete entry types. Human confirmation is required before calling.

    Argument Type Required Description type string Yes Entry type: task, decision, learning, convention, complete content string Yes Main content context string Conditional Context background (decisions/learnings) rationale string Conditional Rationale (decisions only) consequence string Conditional Consequence (decisions only) lesson string Conditional Lesson learned (learnings only) application string Conditional How to apply (learnings only)","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#ctx_compact","level":3,"title":"ctx_compact","text":"

    Move completed tasks to the archive section and remove empty sections from context files. Human confirmation required.

    Argument Type Required Description archive boolean No Also write tasks to .context/archive/ (default: false)","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#ctx_next","level":3,"title":"ctx_next","text":"

    Suggest the next pending task based on priority and position.

    Arguments: None. Read-only.

    ","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#ctx_check_task_completion","level":3,"title":"ctx_check_task_completion","text":"

    Advisory check: after a write operation, detect if any pending tasks were silently completed. Returns nudge text if a match is found.

    Argument Type Required Description recent_action string No Brief description of what was just done

    Read-only.

    ","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#ctx_session_event","level":3,"title":"ctx_session_event","text":"

    Signal a session lifecycle event. Type end triggers the session-end persistence ceremony - human confirmation required.

    Argument Type Required Description type string Yes Event type: start, end caller string No Caller identifier (cursor, windsurf, vscode, claude-desktop)","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#ctx_steering_get","level":3,"title":"ctx_steering_get","text":"

    Retrieve applicable steering files for a prompt. Without a prompt, returns always-included files only.

    Argument Type Required Description prompt string No Prompt text to match against steering file descriptions

    Read-only.

    ","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#ctx_search","level":3,"title":"ctx_search","text":"

    Search across .context/ files for a query string. Returns matching lines with file paths and line numbers.

    Argument Type Required Description query string Yes Search string to match against

    Read-only.

    ","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#ctx_session_start","level":3,"title":"ctx_session_start","text":"

    Execute session-start hooks and return aggregated context from hook outputs.

    Arguments: None.

    ","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#ctx_session_end","level":3,"title":"ctx_session_end","text":"

    Execute session-end hooks with an optional summary. Returns aggregated context from hook outputs.

    Argument Type Required Description summary string No Session summary passed to hook scripts","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#ctx_remind","level":3,"title":"ctx_remind","text":"

    List pending session-scoped reminders.

    Arguments: None. Read-only.

    ","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#prompts","level":2,"title":"Prompts","text":"

    Prompts provide pre-built templates for common workflows. Clients can list available prompts via prompts/list and retrieve a specific prompt via prompts/get.

    ","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#ctx-session-start","level":3,"title":"ctx-session-start","text":"

    Load full context at the beginning of a session. Returns all context files assembled in priority read order with session orientation instructions.

    ","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#ctx-decision-add","level":3,"title":"ctx-decision-add","text":"

    Format an architectural decision entry with all required fields.

    Argument Type Required Description content string Yes Decision title context string Yes Background context rationale string Yes Why this decision was made consequence string Yes Expected consequence","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#ctx-learning-add","level":3,"title":"ctx-learning-add","text":"

    Format a learning entry with all required fields.

    Argument Type Required Description content string Yes Learning title context string Yes Background context lesson string Yes The lesson learned application string Yes How to apply this lesson","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#ctx-reflect","level":3,"title":"ctx-reflect","text":"

    Guide end-of-session reflection. Returns a structured review prompt covering progress assessment and context update recommendations.

    ","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#ctx-checkpoint","level":3,"title":"ctx-checkpoint","text":"

    Report session statistics: tool calls made, entries added, and pending updates queued during the current session.

    ","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/memory/","level":1,"title":"Memory","text":"","path":["CLI","Context","Memory"],"tags":[]},{"location":"cli/memory/#ctx-memory","level":2,"title":"ctx memory","text":"

    Bridge Claude Code's auto memory (MEMORY.md) into .context/.

    Claude Code maintains per-project auto memory at ~/.claude/projects/<slug>/memory/MEMORY.md. This command group discovers that file, mirrors it into .context/memory/mirror.md (git-tracked), and detects drift.

    ctx memory <subcommand>\n
    ","path":["CLI","Context","Memory"],"tags":[]},{"location":"cli/memory/#ctx-memory-sync","level":3,"title":"ctx memory sync","text":"

    Copy MEMORY.md to .context/memory/mirror.md. Archives the previous mirror before overwriting.

    ctx memory sync [flags]\n

    Flags:

    Flag Description --dry-run Show what would happen without writing

    Exit codes:

    Code Meaning 0 Synced successfully 1 MEMORY.md not found (auto memory inactive)

    Examples:

    ctx memory sync\n# Archived previous mirror to mirror-2026-03-05-143022.md\n# Synced MEMORY.md -> .context/memory/mirror.md\n#   Source: ~/.claude/projects/-home-user-project/memory/MEMORY.md\n#   Lines: 47 (was 32)\n#   New content: 15 lines since last sync\n\nctx memory sync --dry-run\n
    ","path":["CLI","Context","Memory"],"tags":[]},{"location":"cli/memory/#ctx-memory-status","level":3,"title":"ctx memory status","text":"

    Show drift, timestamps, line counts, and archive count.

    ctx memory status\n

    Exit codes:

    Code Meaning 0 No drift 1 MEMORY.md not found 2 Drift detected (MEMORY.md changed since sync)

    Examples:

    ctx memory status\n# Memory Bridge Status\n#   Source:      ~/.claude/projects/.../memory/MEMORY.md\n#   Mirror:      .context/memory/mirror.md\n#   Last sync:   2026-03-05 14:30 (2 hours ago)\n#\n#   MEMORY.md:  47 lines (modified since last sync)\n#   Mirror:     32 lines\n#   Drift:      detected (source is newer)\n#   Archives:   3 snapshots in .context/memory/archive/\n
    ","path":["CLI","Context","Memory"],"tags":[]},{"location":"cli/memory/#ctx-memory-diff","level":3,"title":"ctx memory diff","text":"

    Show what changed in MEMORY.md since last sync.

    ctx memory diff\n

    Examples:

    ctx memory diff\n# --- .context/memory/mirror.md (mirror)\n# +++ ~/.claude/projects/.../memory/MEMORY.md (source)\n# +- new learning: memory bridge works\n

    No output when files are identical.

    ","path":["CLI","Context","Memory"],"tags":[]},{"location":"cli/memory/#ctx-memory-publish","level":3,"title":"ctx memory publish","text":"

    Push curated .context/ content into MEMORY.md so the agent sees it natively.

    ctx memory publish [flags]\n

    Content is selected in priority order: pending tasks, recent decisions (7 days), key conventions, recent learnings (7 days). Wrapped in <!-- ctx:published --> markers. Claude-owned content outside the markers is preserved.

    Flags:

    Flag Description Default --budget Line budget for published content 80--dry-run Show what would be published

    Examples:

    ctx memory publish --dry-run\n# Publishing .context/ -> MEMORY.md...\n#   Budget: 80 lines\n#   Published block:\n#     5 pending tasks (from TASKS.md)\n#     3 recent decisions (from DECISIONS.md)\n#     5 key conventions (from CONVENTIONS.md)\n#   Total: 42 lines (within 80-line budget)\n# Dry run - no files written.\n\nctx memory publish              # Write to MEMORY.md\nctx memory publish --budget 40  # Tighter budget\n
    ","path":["CLI","Context","Memory"],"tags":[]},{"location":"cli/memory/#ctx-memory-unpublish","level":3,"title":"ctx memory unpublish","text":"

    Remove the ctx-managed marker block from MEMORY.md, preserving Claude-owned content.

    Examples:

    ctx memory unpublish\n

    Hook integration: The check-memory-drift hook runs on every prompt and nudges the agent when MEMORY.md has changed since last sync. The nudge fires once per session. See Memory Bridge.

    ","path":["CLI","Context","Memory"],"tags":[]},{"location":"cli/memory/#ctx-memory-import","level":3,"title":"ctx memory import","text":"

    Classify and promote entries from MEMORY.md into structured .context/ files.

    ctx memory import [flags]\n

    Each entry is classified by keyword heuristics:

    Keywords Target always use, prefer, never use, standard CONVENTIONS.md decided, chose, trade-off, approach DECISIONS.md gotcha, learned, watch out, bug, caveat LEARNINGS.md todo, need to, follow up TASKS.md Everything else Skipped

    Deduplication prevents re-importing the same entry across runs.

    Flags:

    Flag Description --dry-run Show classification plan without writing

    Examples:

    ctx memory import --dry-run\n# Scanning MEMORY.md for new entries...\n#   Found 6 entries\n#\n#   -> \"always use ctx from PATH\"\n#      Classified: CONVENTIONS.md (keywords: always use)\n#\n#   -> \"decided to use heuristic classification over LLM-based\"\n#      Classified: DECISIONS.md (keywords: decided)\n#\n# Dry run - would import: 4 entries\n# Skipped: 2 entries (session notes/unclassified)\n\nctx memory import    # Actually write entries to .context/ files\n
    ","path":["CLI","Context","Memory"],"tags":[]},{"location":"cli/message/","level":1,"title":"Message","text":"","path":["Message"],"tags":[]},{"location":"cli/message/#ctx-hook-message","level":3,"title":"ctx hook message","text":"

    Manage hook message templates.

    Hook messages control the text hooks emit. The hook logic (when to fire, counting, state tracking) is universal; the messages are opinions that can be customized per-project.

    ctx hook message <subcommand>\n
    ","path":["Message"],"tags":[]},{"location":"cli/message/#ctx-hook-message-list","level":3,"title":"ctx hook message list","text":"

    Show all hook messages with category and override status.

    ctx hook message list [--json]\n

    Flags:

    Flag Description --json Output in JSON format

    Example:

    ctx hook message list\nctx hook message list --json | jq '.[] | select(.override)'\n
    ","path":["Message"],"tags":[]},{"location":"cli/message/#ctx-hook-message-show","level":3,"title":"ctx hook message show","text":"

    Print the effective message template for a hook/variant pair. Shows the user override if present, otherwise the embedded default.

    ctx hook message show <hook> <variant>\n

    Example:

    ctx hook message show qa-reminder gate\nctx hook message show check-context-size checkpoint\n
    ","path":["Message"],"tags":[]},{"location":"cli/message/#ctx-hook-message-edit","level":3,"title":"ctx hook message edit","text":"

    Copy the embedded default template for <hook> <variant> to .context/hooks/messages/<hook>/<variant>.txt so you can edit it directly. The override takes effect the next time the hook fires.

    ctx hook message edit <hook> <variant>\n

    If an override already exists, the command fails and directs you to edit it in place or reset it first.

    Example:

    ctx hook message edit qa-reminder gate\n# Edit .context/hooks/messages/qa-reminder/gate.txt in your editor\n
    ","path":["Message"],"tags":[]},{"location":"cli/message/#ctx-hook-message-reset","level":3,"title":"ctx hook message reset","text":"

    Delete a user override and revert to the embedded default. Silent no-op if no override exists.

    ctx hook message reset <hook> <variant>\n

    Example:

    ctx hook message reset qa-reminder gate\n

    See Customizing hook messages for the full workflow.

    ","path":["Message"],"tags":[]},{"location":"cli/notify/","level":1,"title":"Notify","text":"","path":["CLI","Integrations","Notify"],"tags":[]},{"location":"cli/notify/#ctx-hook-notify","level":2,"title":"ctx hook notify","text":"

    Send fire-and-forget webhook notifications from skills, loops, and hooks.

    ctx hook notify --event <name> [--session-id <id>] \"message\"\n

    Flags:

    Flag Short Description --event-e Event name (required) --session-id-s Session ID (optional)

    Behavior:

    • No webhook configured: silent no-op (exit 0)
    • Webhook set but event not in events list: silent no-op (exit 0)
    • Webhook set and event matches: fire-and-forget HTTP POST
    • HTTP errors silently ignored (no retry)

    Examples:

    ctx hook notify --event loop \"Loop completed after 5 iterations\"\nctx hook notify -e nudge -s session-abc \"Context checkpoint at prompt #20\"\n
    ","path":["CLI","Integrations","Notify"],"tags":[]},{"location":"cli/notify/#ctx-hook-notify-setup","level":3,"title":"ctx hook notify setup","text":"

    Configure the webhook URL interactively. The URL is encrypted with AES-256-GCM using the encryption key and stored in .context/.notify.enc.

    Examples:

    ctx hook notify setup\n

    The encrypted file is safe to commit. The key (~/.ctx/.ctx.key) lives outside the project and is never committed.

    ","path":["CLI","Integrations","Notify"],"tags":[]},{"location":"cli/notify/#ctx-hook-notify-test","level":3,"title":"ctx hook notify test","text":"

    Send a test notification and report the HTTP response status.

    Examples:

    ctx hook notify test\n

    Payload format (JSON POST):

    {\n  \"event\": \"loop\",\n  \"message\": \"Loop completed after 5 iterations\",\n  \"session_id\": \"abc123-...\",\n  \"timestamp\": \"2026-02-22T14:30:00Z\",\n  \"project\": \"ctx\"\n}\n
    Field Type Description event string Event name from --event flag message string Notification message session_id string Session ID (omitted if empty) timestamp string UTC RFC3339 timestamp project string Project directory name

    See also: Webhook Notifications recipe.

    ","path":["CLI","Integrations","Notify"],"tags":[]},{"location":"cli/pad/","level":1,"title":"Scratchpad","text":"","path":["CLI","Sessions","Scratchpad"],"tags":[]},{"location":"cli/pad/#ctx-pad","level":2,"title":"ctx pad","text":"

    Encrypted scratchpad for sensitive one-liners that travel with the project.

    When invoked without a subcommand, lists all entries.

    ctx pad\nctx pad <subcommand>\n
    ","path":["CLI","Sessions","Scratchpad"],"tags":[]},{"location":"cli/pad/#ctx-pad-add","level":3,"title":"ctx pad add","text":"

    Append a new entry to the scratchpad.

    ctx pad add <text>\nctx pad add <label> --file <path>\n

    Flags:

    Flag Short Description --file-f Ingest a file as a blob entry (max 64 KB)

    Examples:

    ctx pad add \"DATABASE_URL=postgres://user:pass@host/db\"\nctx pad add \"deploy config\" --file ./deploy.yaml\n
    ","path":["CLI","Sessions","Scratchpad"],"tags":[]},{"location":"cli/pad/#ctx-pad-show","level":3,"title":"ctx pad show","text":"

    Output the raw text of an entry by number. For blob entries, prints decoded file content (or writes to disk with --out).

    ctx pad show <n>\nctx pad show <n> --out <path>\n

    Arguments:

    • n: 1-based entry number

    Flags:

    Flag Description --out Write decoded blob content to a file (blobs only)

    Examples:

    ctx pad show 3\nctx pad show 2 --out ./recovered.yaml\n
    ","path":["CLI","Sessions","Scratchpad"],"tags":[]},{"location":"cli/pad/#ctx-pad-rm","level":3,"title":"ctx pad rm","text":"

    Remove one or more entries by stable ID. Supports individual IDs and ranges.

    ctx pad rm <id> [id...]\n

    Arguments:

    • id: One or more entry IDs (e.g., 3, 1 4, 3-5)

    Examples:

    ctx pad rm 2\nctx pad rm 1 4\nctx pad rm 3-5\n
    ","path":["CLI","Sessions","Scratchpad"],"tags":[]},{"location":"cli/pad/#ctx-pad-normalize","level":3,"title":"ctx pad normalize","text":"

    Reassign entry IDs as a contiguous sequence 1..N, closing any gaps left by deletions.

    Examples:

    ctx pad normalize\n
    ","path":["CLI","Sessions","Scratchpad"],"tags":[]},{"location":"cli/pad/#ctx-pad-edit","level":3,"title":"ctx pad edit","text":"

    Replace, append to, or prepend to an entry.

    ctx pad edit <n> [text]\n

    Arguments:

    • n: 1-based entry number
    • text: Replacement text (mutually exclusive with --append/--prepend)

    Flags:

    Flag Description --append Append text to the end of the entry --prepend Prepend text to the beginning of the entry --file Replace blob file content (preserves label) --label Replace blob label (preserves content)

    Examples:

    ctx pad edit 2 \"new text\"\nctx pad edit 2 --append \" suffix\"\nctx pad edit 2 --prepend \"prefix \"\nctx pad edit 1 --file ./v2.yaml\nctx pad edit 1 --label \"new name\"\n
    ","path":["CLI","Sessions","Scratchpad"],"tags":[]},{"location":"cli/pad/#ctx-pad-mv","level":3,"title":"ctx pad mv","text":"

    Move an entry from one position to another.

    ctx pad mv <from> <to>\n

    Arguments:

    • from: Source position (1-based)
    • to: Destination position (1-based)

    Examples:

    ctx pad mv 3 1      # promote entry 3 to the top\nctx pad mv 1 5      # bury entry 1 to position 5\n
    ","path":["CLI","Sessions","Scratchpad"],"tags":[]},{"location":"cli/pad/#ctx-pad-resolve","level":3,"title":"ctx pad resolve","text":"

    Show both sides of a merge conflict in the encrypted scratchpad.

    Examples:

    ctx pad resolve\n
    ","path":["CLI","Sessions","Scratchpad"],"tags":[]},{"location":"cli/pad/#ctx-pad-import","level":3,"title":"ctx pad import","text":"

    Bulk-import lines from a file into the scratchpad. Each non-empty line becomes a separate entry. All entries are written in a single encrypt/write cycle.

    With --blob, import all first-level files from a directory as blob entries. Each file becomes a blob with the filename as its label. Subdirectories and non-regular files are skipped.

    ctx pad import <file>\nctx pad import -              # read from stdin\nctx pad import --blob <dir>   # import directory files as blobs\n

    Arguments:

    • file: Path to a text file, - for stdin, or a directory (with --blob)

    Flags:

    Flag Description --blob Import first-level files from a directory as blobs

    Examples:

    ctx pad import notes.txt\ngrep TODO *.go | ctx pad import -\nctx pad import --blob ./ideas/\n
    ","path":["CLI","Sessions","Scratchpad"],"tags":[]},{"location":"cli/pad/#ctx-pad-export","level":3,"title":"ctx pad export","text":"

    Export all blob entries from the scratchpad to a directory as files. Each blob's label becomes the filename. Non-blob entries are skipped.

    ctx pad export [dir]\n

    Arguments:

    • dir: Target directory (default: current directory)

    Flags:

    Flag Short Description --force -f Overwrite existing files instead of timestamping --dry-run Print what would be exported without writing

    When a file already exists, a unix timestamp is prepended to avoid collisions (e.g., 1739836200-label). Use --force to overwrite instead.

    Examples:

    ctx pad export ./ideas\nctx pad export --dry-run\nctx pad export --force ./backup\n
    ","path":["CLI","Sessions","Scratchpad"],"tags":[]},{"location":"cli/pad/#ctx-pad-merge","level":3,"title":"ctx pad merge","text":"

    Merge entries from one or more scratchpad files into the current pad. Each input file is auto-detected as encrypted or plaintext. Entries are deduplicated by exact content.

    ctx pad merge FILE...\n

    Arguments:

    • FILE...: One or more scratchpad files to merge (encrypted or plaintext)

    Flags:

    Flag Short Description --key -k Path to key file for decrypting input files --dry-run Print what would be merged without writing

    Examples:

    ctx pad merge worktree/.context/scratchpad.enc\nctx pad merge notes.md backup.enc\nctx pad merge --key /path/to/other.key foreign.enc\nctx pad merge --dry-run pad-a.enc pad-b.md\n
    ","path":["CLI","Sessions","Scratchpad"],"tags":[]},{"location":"cli/pause/","level":1,"title":"Pause","text":"","path":["CLI","Sessions","Pause"],"tags":[]},{"location":"cli/pause/#ctx-hook-pause","level":2,"title":"ctx hook pause","text":"

    Pause all context nudge and reminder hooks for the current session. Security hooks (dangerous command blocking) and housekeeping hooks still fire.

    ctx hook pause [flags]\n

    Flags:

    Flag Description --session-id Session ID (overrides stdin)

    Example:

    # Pause hooks for a quick investigation\nctx hook pause\n\n# Resume when ready\nctx hook resume\n

    See also:

    • ctx hook resume: the matching resume command
    • Pausing Context Hooks recipe
    ","path":["CLI","Sessions","Pause"],"tags":[]},{"location":"cli/prune/","level":1,"title":"Prune","text":"","path":["CLI","Runtime","Prune"],"tags":[]},{"location":"cli/prune/#ctx-prune","level":3,"title":"ctx prune","text":"

    Remove per-session state files from .context/state/ that are older than the specified age. Session state files are identified by UUID suffixes (context-check-<session-id>, heartbeat-<session-id>, and similar). Global files without session IDs (events.jsonl, memory-import.json, and other non-per-session markers) are always preserved.

    ctx prune [flags]\n

    Flags:

    Flag Description --days Prune files older than this many days (default: 7) --dry-run Show what would be pruned without deleting

    Examples:

    ctx prune                 # Prune files older than 7 days\nctx prune --days 3        # Prune files older than 3 days\nctx prune --dry-run       # Preview without deleting\n

    See State maintenance for the recommended cadence and automation recipe.

    ","path":["CLI","Runtime","Prune"],"tags":[]},{"location":"cli/remind/","level":1,"title":"Remind","text":"","path":["CLI","Sessions","Remind"],"tags":[]},{"location":"cli/remind/#ctx-remind","level":2,"title":"ctx remind","text":"

    Session-scoped reminders that surface at session start. Reminders are stored verbatim and relayed verbatim: no summarization, no categories.

    When invoked with a text argument and no subcommand, adds a reminder.

    ctx remind \"text\"\nctx remind <subcommand>\n
    ","path":["CLI","Sessions","Remind"],"tags":[]},{"location":"cli/remind/#ctx-remind-add","level":3,"title":"ctx remind add","text":"

    Add a reminder. This is the default action: ctx remind \"text\" and ctx remind add \"text\" are equivalent.

    ctx remind \"refactor the swagger definitions\"\nctx remind add \"check CI after the deploy\" --after 2026-02-25\n

    Arguments:

    • text: The reminder message (verbatim)

    Flags:

    Flag Short Description --after -a Don't surface until this date (YYYY-MM-DD)

    Examples:

    ctx remind \"refactor the swagger definitions\"\nctx remind \"check CI after the deploy\" --after 2026-02-25\n
    ","path":["CLI","Sessions","Remind"],"tags":[]},{"location":"cli/remind/#ctx-remind-list","level":3,"title":"ctx remind list","text":"

    List all pending reminders. Date-gated reminders that aren't yet due are annotated with (after DATE, not yet due).

    Examples:

    ctx remind list\nctx remind ls            # alias\n

    Aliases: ls

    ","path":["CLI","Sessions","Remind"],"tags":[]},{"location":"cli/remind/#ctx-remind-dismiss","level":3,"title":"ctx remind dismiss","text":"

    Remove one or more reminders by ID, or remove all with --all. Supports individual IDs and ranges.

    ctx remind dismiss <id> [id...]\nctx remind dismiss --all\n

    Arguments:

    • id: One or more reminder IDs (e.g., 3, 3 5-7)

    Flags:

    Flag Description --all Dismiss all reminders

    Aliases: rm

    Examples:

    ctx remind dismiss 3\nctx remind dismiss 3 5-7\nctx remind dismiss --all\n
    ","path":["CLI","Sessions","Remind"],"tags":[]},{"location":"cli/remind/#ctx-remind-normalize","level":3,"title":"ctx remind normalize","text":"

    Reassign reminder IDs as a contiguous sequence 1..N, closing any gaps left by dismissals.

    Examples:

    ctx remind normalize\n

    See also: Session Reminders recipe.

    ","path":["CLI","Sessions","Remind"],"tags":[]},{"location":"cli/resume/","level":1,"title":"Resume","text":"","path":["CLI","Sessions","Resume"],"tags":[]},{"location":"cli/resume/#ctx-hook-resume","level":2,"title":"ctx hook resume","text":"

    Resume context hooks after a pause. Silent no-op if not paused.

    ctx hook resume [flags]\n

    Flags:

    Flag Description --session-id Session ID (overrides stdin)

    Example:

    ctx hook resume\n

    See also:

    • ctx hook pause: the matching pause command
    • Pausing Context Hooks recipe
    ","path":["CLI","Sessions","Resume"],"tags":[]},{"location":"cli/serve/","level":1,"title":"Serve","text":"","path":["CLI","Integrations","Serve"],"tags":[]},{"location":"cli/serve/#ctx-serve","level":2,"title":"ctx serve","text":"

    Serve a static site locally via zensical.

    With no argument, serves the journal site at .context/journal-site. With a directory argument, serves that directory if it contains a zensical.toml.

    ctx serve                             # Serve .context/journal-site\nctx serve ./my-site                   # Serve a specific directory\nctx serve ./docs                      # Serve any zensical site\n

    This Command Does NOT Start a Hub

    ctx serve is purely for static-site serving. To run a ctx Hub for cross-project knowledge sharing, use ctx hub start. That command lives in its own group because the hub is a gRPC server, not a static site.

    Requires zensical to be installed:

    pipx install zensical\n
    ","path":["CLI","Integrations","Serve"],"tags":[]},{"location":"cli/serve/#arguments","level":3,"title":"Arguments","text":"Argument Description [directory] Directory containing a zensical.toml to serve

    When omitted, serves .context/journal-site by default, the directory produced by ctx journal site.

    Examples:

    ctx serve                         # Default: serve .context/journal-site\nctx serve ./my-site               # Serve a specific directory\nctx serve ./docs                  # Serve any zensical site\n
    ","path":["CLI","Integrations","Serve"],"tags":[]},{"location":"cli/serve/#see-also","level":3,"title":"See Also","text":"
    • ctx journal: generate the journal site that ctx serve displays.
    • ctx hub start: for running a ctx Hub server, not a static site.
    • Browsing and enriching past sessions: the recipe that combines ctx journal and ctx serve.
    ","path":["CLI","Integrations","Serve"],"tags":[]},{"location":"cli/setup/","level":1,"title":"Setup","text":"","path":["CLI","Integrations","Setup"],"tags":[]},{"location":"cli/setup/#ctx-setup","level":2,"title":"ctx setup","text":"

    Generate AI tool integration configuration.

    ctx setup <tool> [flags]\n

    Flags:

    Flag Short Description --write -w Write the generated config to disk (e.g. .github/copilot-instructions.md)

    Supported tools:

    Tool Description claude-code Redirects to plugin install instructions cursor Cursor IDE kiro Kiro IDE cline Cline (VS Code extension) aider Aider CLI copilot GitHub Copilot windsurf Windsurf IDE

    Claude Code Uses the Plugin System

    Claude Code integration is now provided via the ctx plugin. Running ctx setup claude-code prints plugin install instructions.

    Examples:

    # Print hook instructions to stdout\nctx setup cursor\nctx setup aider\n\n# Generate and write .github/copilot-instructions.md\nctx setup copilot --write\n\n# Generate MCP config and sync steering files\nctx setup kiro --write\nctx setup cursor --write\nctx setup cline --write\n
    ","path":["CLI","Integrations","Setup"],"tags":[]},{"location":"cli/site/","level":1,"title":"Site","text":"","path":["CLI","Integrations","Site"],"tags":[]},{"location":"cli/site/#ctx-site","level":2,"title":"ctx site","text":"

    Site management commands for the ctx.ist static site.

    ctx site <subcommand>\n
    ","path":["CLI","Integrations","Site"],"tags":[]},{"location":"cli/site/#ctx-site-feed","level":3,"title":"ctx site feed","text":"

    Generate an Atom 1.0 feed from finalized blog posts in docs/blog/.

    ctx site feed [flags]\n

    Scans docs/blog/ for files matching YYYY-MM-DD-*.md, parses YAML frontmatter, and generates a valid Atom feed. Only posts with reviewed_and_finalized: true are included. Summaries are extracted from the first paragraph after the heading.

    Flags:

    Flag Short Type Default Description --out -o string site/feed.xml Output path --base-url string https://ctx.ist Base URL for entry links

    Output:

    Generated site/feed.xml (21 entries)\n\nSkipped:\n  2026-02-25-the-homework-problem.md: not finalized\n\nWarnings:\n  2026-02-09-defense-in-depth.md: no summary paragraph found\n

    Three buckets: included (count), skipped (with reason), warnings (included but degraded). exit 0 always: warnings inform but do not block.

    Frontmatter requirements:

    Field Required Feed mapping title Yes <title> date Yes <updated> reviewed_and_finalized Yes Draft gate (must be true) author No <author><name> topics No <category term=\"\">

    Examples:

    ctx site feed                                # Generate site/feed.xml\nctx site feed --out /tmp/feed.xml            # Custom output path\nctx site feed --base-url https://example.com # Custom base URL\nmake site-feed                               # Makefile shortcut\nmake site                                    # Builds site + feed\n
    ","path":["CLI","Integrations","Site"],"tags":[]},{"location":"cli/skill/","level":1,"title":"Skill","text":"","path":["CLI","Integrations","Skill"],"tags":[]},{"location":"cli/skill/#ctx-skill","level":2,"title":"ctx skill","text":"

    Manage reusable instruction bundles that can be installed into .context/skills/.

    A skill is a directory containing a SKILL.md file with YAML frontmatter (name, description) and a Markdown instruction body. Skills are loaded by the agent context packet when --skill <name> is passed to ctx agent.

    ctx skill <subcommand>\n
    ","path":["CLI","Integrations","Skill"],"tags":[]},{"location":"cli/skill/#ctx-skill-install","level":3,"title":"ctx skill install","text":"

    Install a skill from a source directory.

    ctx skill install <source>\n

    Arguments:

    • source: Path to a directory containing SKILL.md

    Examples:

    ctx skill install ./my-skills/code-review\n# Installed code-review → .context/skills/code-review\n
    ","path":["CLI","Integrations","Skill"],"tags":[]},{"location":"cli/skill/#ctx-skill-list","level":3,"title":"ctx skill list","text":"

    List all installed skills.

    Examples:

    ctx skill list\n
    ","path":["CLI","Integrations","Skill"],"tags":[]},{"location":"cli/skill/#ctx-skill-remove","level":3,"title":"ctx skill remove","text":"

    Remove an installed skill.

    Arguments:

    • name: Skill name to remove

    Examples:

    ctx skill remove code-review\n

    See also: Building Project Skills recipe.

    ","path":["CLI","Integrations","Skill"],"tags":[]},{"location":"cli/steering/","level":1,"title":"Steering","text":"","path":["CLI","Integrations","Steering"],"tags":[]},{"location":"cli/steering/#ctx-steering","level":2,"title":"ctx steering","text":"

    Manage steering files: persistent behavioral rules for AI coding assistants.

    A steering file is a small markdown document with YAML frontmatter that tells the AI how to behave in a specific context. ctx steering keeps those files in .context/steering/, decides which ones apply for a given prompt, and syncs them out to each AI tool's native format (Claude Code, Cursor, Kiro, Cline).

    ctx steering <subcommand>\n

    Steering vs Decisions vs Conventions

    The three look similar on disk but serve different purposes:

    • Decisions record what was chosen and why. Consumed mostly by humans (and by the agent via ctx agent).
    • Conventions describe how the codebase is written. Consumed as reference material.
    • Steering tells the AI how to behave when asked about X. Consumed by the AI tool's prompt injection layer, conditionally on prompt match.

    If you find yourself writing \"the AI should always do X\", that belongs in steering, not decisions.

    ","path":["CLI","Integrations","Steering"],"tags":[]},{"location":"cli/steering/#anatomy-of-a-steering-file","level":3,"title":"Anatomy of a Steering File","text":"
    ---\nname: security\ndescription: Security rules for all code changes\ninclusion: always    # always | auto | manual\ntools: []            # empty = all tools\npriority: 10         # lower = injected first\n---\n\n# Security rules\n\n- Validate all user input at system boundaries.\n- Never log secrets, tokens, or credentials.\n- Prefer constant-time comparison for tokens.\n

    Inclusion modes:

    Mode When it's included always Every prompt, unconditionally auto When the prompt matches the description keywords manual Only when the user names it explicitly

    Priority: lower numbers inject first, so high-priority rules appear at the top of the prompt. Default is 50.

    Tools: an empty list means all configured tools receive the file; list specific tool names to scope it.

    ","path":["CLI","Integrations","Steering"],"tags":[]},{"location":"cli/steering/#ctx-steering-init","level":3,"title":"ctx steering init","text":"

    Create a starter set of steering files in .context/steering/ to use as a scaffolding baseline.

    Examples:

    ctx steering init\n
    ","path":["CLI","Integrations","Steering"],"tags":[]},{"location":"cli/steering/#ctx-steering-add","level":3,"title":"ctx steering add","text":"

    Create a new steering file with default frontmatter.

    ctx steering add <name>\n

    Arguments:

    • name: Steering file name (without .md extension)

    Examples:

    ctx steering add security\n# Created .context/steering/security.md\n

    The generated file uses inclusion: manual and priority: 50 by default. Edit the frontmatter to change behavior.

    ","path":["CLI","Integrations","Steering"],"tags":[]},{"location":"cli/steering/#ctx-steering-list","level":3,"title":"ctx steering list","text":"

    List all steering files with their inclusion mode, priority, and tool scoping.

    Examples:

    ctx steering list\n
    ","path":["CLI","Integrations","Steering"],"tags":[]},{"location":"cli/steering/#ctx-steering-preview","level":3,"title":"ctx steering preview","text":"

    Preview which steering files would be included for a given prompt. Useful for validating auto-inclusion descriptions against realistic prompts.

    ctx steering preview [prompt]\n

    Examples:

    ctx steering preview \"create a REST API endpoint\"\n# Steering files matching prompt \"create a REST API endpoint\":\n#   api-standards        inclusion=auto     priority=20  tools=all\n#   security             inclusion=always   priority=10  tools=all\n
    ","path":["CLI","Integrations","Steering"],"tags":[]},{"location":"cli/steering/#ctx-steering-sync","level":3,"title":"ctx steering sync","text":"

    Sync steering files to tool-native formats for tools that have a built-in rules primitive. Not every tool needs this; Claude Code and Codex use a different delivery mechanism (see below).

    Examples:

    ctx steering sync\n

    Which tools are sync targets?

    Tool Sync target Mechanism Cursor .cursor/rules/ Cursor reads the directory natively Cline .clinerules/ Cline reads the directory natively Kiro .kiro/steering/ Kiro reads the directory natively Claude Code (no-op) Delivered via hook + MCP (see next section) Codex (no-op) Same as Claude Code

    For the three native-rules tools, ctx steering sync writes each matching steering file to the appropriate directory with tool-specific frontmatter transforms. Unchanged files are skipped (idempotent).

    ","path":["CLI","Integrations","Steering"],"tags":[]},{"location":"cli/steering/#how-claude-code-and-codex-consume-steering","level":3,"title":"How Claude Code and Codex Consume Steering","text":"

    Claude Code has no native \"steering files\" primitive, so ctx steering sync skips it entirely. Instead, steering reaches Claude through two non-sync channels, both activated by ctx setup claude-code (which installs the plugin):

    1. Automatic injection via the PreToolUse hook. The Claude Code plugin wires a PreToolUse hook that runs ctx agent --budget 8000 before each tool call. ctx agent loads .context/steering/ and calls steering.Filter with an empty prompt, so only files with inclusion: always match. Those files are included as Tier 6 of the context packet. The packet is printed on stdout, which Claude Code injects as additional context. This fires on every tool call; no user action.

    2. On-demand MCP tool call (ctx_steering_get). The ctx plugin ships a .mcp.json file that automatically registers the ctx MCP server (ctx mcp serve) with Claude Code on plugin install. Once registered, Claude can invoke the ctx_steering_get tool mid-task to fetch matching steering files for a specific prompt. This is the only path that resolves inclusion: auto and inclusion: manual matches for Claude Code; Claude passes the prompt to the MCP tool, which runs the keyword match against each file's description.

    Verify the MCP server is registered:

    claude mcp list\n

    Expected line: ctx: ctx mcp serve - ✓ Connected. If it's missing, reinstall the plugin from Claude Code (/plugin → find ctx → uninstall → install again); older plugin versions shipped without the .mcp.json file.

    Prefer inclusion: always for Claude Code

    Because the PreToolUse hook passes an empty prompt to ctx agent, only always files fire automatically. auto files require Claude to call the ctx_steering_get MCP tool on its own; manual files require an explicit user invocation. For rules that should reliably fire on every Claude Code session, use inclusion: always. Reserve auto/manual for situational libraries where the opt-in cost is acceptable and you understand Claude may not pull them in without prompting.

    The foundation files scaffolded by ctx init already default to inclusion: always for this reason.

    Practical implications:

    • Running ctx steering sync before starting a Claude session does nothing for Claude's benefit. Skip it.
    • ctx steering preview still works for validating your descriptions; it doesn't depend on sync.
    • If Claude Code is your only tool, the ctx steering commands you care about are add, list, preview, init (never sync).
    • If you use both Claude Code and (say) Cursor, ctx steering sync covers Cursor (where auto and manual work natively) while the hook+MCP pipeline covers Claude Code. For rules you need to fire automatically on both, use inclusion: always.
    ","path":["CLI","Integrations","Steering"],"tags":[]},{"location":"cli/steering/#ctx-agent-integration","level":3,"title":"ctx agent Integration","text":"

    When ctx agent builds a context packet, steering files are loaded as Tier 6 of the budget-aware assembly (see ctx agent). Files with inclusion: always are always included; auto files are scored against the current prompt and included in priority order until the tier budget is exhausted.

    ","path":["CLI","Integrations","Steering"],"tags":[]},{"location":"cli/steering/#see-also","level":3,"title":"See Also","text":"
    • ctx setup: configure which tools receive steering syncs
    • ctx trigger: lifecycle scripts (a different hooking concept, see below)
    • Building steering files recipe: walkthrough from first file to synced output
    ","path":["CLI","Integrations","Steering"],"tags":[]},{"location":"cli/sysinfo/","level":1,"title":"Sysinfo","text":"","path":["CLI","Diagnostics","Sysinfo"],"tags":[]},{"location":"cli/sysinfo/#ctx-sysinfo","level":3,"title":"ctx sysinfo","text":"

    Display a snapshot of system resources (memory, swap, disk, load) with threshold-based alert severities. Mirrors what the check-resource hook plumbing monitors in the background, but this command prints the full report at any severity level, not only at DANGER.

    ctx sysinfo [flags]\n

    Flags:

    Flag Description --json Output in JSON format

    Alert thresholds:

    Resource WARNING DANGER Memory ≥ 75% ≥ 90% Swap ≥ 50% ≥ 75% Disk ≥ 85% ≥ 95% Load ≥ 1.0x CPUs ≥ 1.5x CPUs

    Examples:

    ctx sysinfo                  # Human-readable table\nctx sysinfo --json           # Structured output\n
    ","path":["CLI","Diagnostics","Sysinfo"],"tags":[]},{"location":"cli/system/","level":1,"title":"System","text":"","path":["CLI","Runtime","System"],"tags":[]},{"location":"cli/system/#ctx-system","level":3,"title":"ctx system","text":"

    Hidden parent command that hosts Claude Code hook plumbing and a small set of session-lifecycle plumbing subcommands used by skills and editor integrations. The parent is registered without a visible group in ctx --help; run ctx system --help to see its subcommands.

    ctx system <subcommand>\n

    Commands Previously under ctx system

    Several user-facing maintenance commands used to live under ctx system and were promoted to top-level:

    • ctx system backup → ctx backup
    • ctx system events → ctx hook event
    • ctx system message → ctx hook message
    • ctx system prune → ctx prune
    • ctx system resources → ctx sysinfo
    • ctx system stats → ctx usage

    ctx system bootstrap remains under ctx system as a hidden, agent-only command. Update any scripts or personal docs that reference the old paths.

    ","path":["CLI","Runtime","System"],"tags":[]},{"location":"cli/system/#plumbing-subcommands","level":2,"title":"Plumbing Subcommands","text":"

    These are not hook handlers; they're called by skills and editor integrations during the session lifecycle. Safe to run manually.

    ","path":["CLI","Runtime","System"],"tags":[]},{"location":"cli/system/#ctx-system-mark-journal","level":4,"title":"ctx system mark-journal","text":"

    Update processing state for a journal entry. Records the current date in .context/journal/.state.json. Used by journal skills to record pipeline progress.

    ctx system mark-journal <filename> <stage>\n

    Stages: exported, enriched, normalized, fences_verified

    Flag Description --check Check if stage is set (exit 1 if not)

    Example:

    ctx system mark-journal 2026-01-21-session-abc12345.md enriched\nctx system mark-journal 2026-01-21-session-abc12345.md normalized\nctx system mark-journal --check 2026-01-21-session-abc12345.md fences_verified\n
    ","path":["CLI","Runtime","System"],"tags":[]},{"location":"cli/system/#ctx-system-mark-wrapped-up","level":4,"title":"ctx system mark-wrapped-up","text":"

    Suppress context checkpoint nudges after a wrap-up ceremony. Writes a marker file that check-context-size checks before emitting checkpoint boxes. The marker expires after 2 hours.

    Called automatically by /ctx-wrap-up after persisting context (not intended for direct use).

    ctx system mark-wrapped-up\n

    No flags, no arguments. Idempotent: running it again updates the marker timestamp.

    ","path":["CLI","Runtime","System"],"tags":[]},{"location":"cli/system/#ctx-system-pause-ctx-system-resume","level":4,"title":"ctx system pause / ctx system resume","text":"

    Session-scoped hook suppression. ctx system pause writes a marker file that causes hook plumbing to no-op for the current session; ctx system resume removes it. These are the hook-plumbing counterparts to the ctx hook pause / ctx hook resume commands (which call them internally).

    Read the session ID from stdin JSON (same as hooks) or pass --session-id.

    ","path":["CLI","Runtime","System"],"tags":[]},{"location":"cli/system/#ctx-system-session-event","level":4,"title":"ctx system session-event","text":"

    Records a session lifecycle event (start or end) to the event log. Called by editor integrations when a workspace is opened or closed.

    ctx system session-event --type start --caller vscode\nctx system session-event --type end --caller vscode\n
    ","path":["CLI","Runtime","System"],"tags":[]},{"location":"cli/system/#hook-subcommands","level":2,"title":"Hook Subcommands","text":"

    Hidden Claude Code hook handlers implementing the hook contract: read JSON from stdin, perform logic, emit output on stdout, exit 0. Block commands output JSON with a decision field.

    UserPromptSubmit hooks: context-load-gate, check-context-size, check-persistence, check-ceremony, check-journal, check-version, check-resource, check-knowledge, check-map-staleness, check-memory-drift, check-reminder, check-freshness, check-hub-sync, check-backup-age, check-skill-discovery, heartbeat.

    PreToolUse hooks: block-non-path-ctx, block-dangerous-command, qa-reminder, specs-nudge.

    PostToolUse hooks: post-commit, check-task-completion.

    See AI Tools for registration details and the Claude Code plugin integration.

    ","path":["CLI","Runtime","System"],"tags":[]},{"location":"cli/trace/","level":1,"title":"Commit Context Tracing","text":"","path":["CLI","Diagnostics","Commit Context Tracing"],"tags":[]},{"location":"cli/trace/#ctx-trace","level":3,"title":"ctx trace","text":"

    Show the context behind git commits. Links commits back to the decisions, tasks, learnings, and sessions that motivated them.

    git log shows what changed, git blame shows who, and ctx trace shows why.

    ctx trace [commit] [flags]\n

    Flags:

    Flag Description --last N Show context for last N commits --json Output as JSON for scripting

    Examples:

    # Show context for a specific commit\nctx trace abc123\n\n# Show context for last 10 commits\nctx trace --last 10\n\n# JSON output\nctx trace abc123 --json\n

    Output:

    Commit: abc123 \"Fix auth token expiry\"\nDate:   2026-03-14 10:00:00 -0700\nContext:\n  [Decision] #12: Use short-lived tokens with server-side refresh\n    Date: 2026-03-10\n\n  [Task] #8: Implement token rotation for compliance\n    Status: completed\n

    When listing recent commits with --last:

    abc123  Fix auth token expiry         decision:12, task:8\ndef456  Add rate limiting             decision:15, learning:7\n789abc  Update dependencies           (none)\n
    ","path":["CLI","Diagnostics","Commit Context Tracing"],"tags":[]},{"location":"cli/trace/#ctx-trace-file","level":3,"title":"ctx trace file","text":"

    Show the context trail for a file. Combines git log with context resolution.

    ctx trace file <path[:line-range]> [flags]\n

    Flags:

    Flag Description --last N Maximum commits to show (default: 20)

    Examples:

    # Show context trail for a file\nctx trace file src/auth.go\n\n# Show context for specific line range\nctx trace file src/auth.go:42-60\n
    ","path":["CLI","Diagnostics","Commit Context Tracing"],"tags":[]},{"location":"cli/trace/#ctx-trace-tag","level":3,"title":"ctx trace tag","text":"

    Manually tag a commit with context. For commits made without the hook, or to add extra context after the fact.

    Tags are stored in .context/trace/overrides.jsonl since git trailers cannot be added to existing commits without rewriting history.

    ctx trace tag <commit> --note \"<text>\"\n

    Examples:

    ctx trace tag HEAD --note \"Hotfix for production outage\"\nctx trace tag abc123 --note \"Part of Q1 compliance initiative\"\n
    ","path":["CLI","Diagnostics","Commit Context Tracing"],"tags":[]},{"location":"cli/trace/#ctx-trace-hook","level":3,"title":"ctx trace hook","text":"

    Enable or disable the prepare-commit-msg hook for automatic context tracing. When enabled, commits automatically receive a ctx-context trailer with references to relevant decisions, tasks, learnings, and sessions.

    ctx trace hook <enable|disable>\n

    Prerequisites: ctx must be on your $PATH. If you installed via go install, ensure $GOPATH/bin (or $HOME/go/bin) is in your shell's $PATH.

    What the hook does:

    1. Before each commit, collects context from three sources:
    2. Pending context accumulated during work (ctx add, ctx task complete)
    3. Staged file changes to .context/ files
    4. Working state (in-progress tasks, active AI session)
    5. Injects a ctx-context trailer into the commit message
    6. After commit, records the mapping in .context/trace/history.jsonl

    Examples:

    # Install the hook\nctx trace hook enable\n\n# Remove the hook\nctx trace hook disable\n

    Resulting commit message:

    Fix auth token expiry handling\n\nRefactored token refresh logic to handle edge case\nwhere refresh token expires during request.\n\nctx-context: decision:12, task:8, session:abc123\n
    ","path":["CLI","Diagnostics","Commit Context Tracing"],"tags":[]},{"location":"cli/trace/#reference-types","level":3,"title":"Reference Types","text":"

    The ctx-context trailer supports these reference types:

    Prefix Points to Example decision:<n> Entry #n in DECISIONS.md decision:12learning:<n> Entry #n in LEARNINGS.md learning:5task:<n> Task #n in TASKS.md task:8convention:<n> Entry #n in CONVENTIONS.md convention:3session:<id> AI session by ID session:abc123\"<text>\" Free-form context note \"Performance fix for P1 incident\"","path":["CLI","Diagnostics","Commit Context Tracing"],"tags":[]},{"location":"cli/trace/#storage","level":3,"title":"Storage","text":"

    Context trace data is stored in the .context/ directory:

    File Purpose Lifecycle state/pending-context.jsonl Accumulates refs during work Truncated after each commit trace/history.jsonl Permanent commit-to-context map Append-only, never truncated trace/overrides.jsonl Manual tags for existing commits Append-only","path":["CLI","Diagnostics","Commit Context Tracing"],"tags":[]},{"location":"cli/trigger/","level":1,"title":"Trigger","text":"","path":["CLI","Integrations","Trigger"],"tags":[]},{"location":"cli/trigger/#ctx-trigger","level":2,"title":"ctx trigger","text":"

    Manage lifecycle triggers: executable scripts that fire at specific events during an AI session. Triggers can block tool calls, inject context, and automate reactions: any side effect you want at session boundaries, tool boundaries, or file-save events.

    ctx trigger <subcommand>\n

    Triggers Execute Arbitrary Scripts

    A trigger is a shell script with the executable bit set. It runs with the same privileges as your AI tool and receives JSON input on stdin. Treat triggers like pre-commit hooks: only enable scripts you've read and understand. A malicious or buggy trigger can block tool calls, corrupt context files, or exfiltrate data.

    ","path":["CLI","Integrations","Trigger"],"tags":[]},{"location":"cli/trigger/#where-triggers-live","level":3,"title":"Where Triggers Live","text":"

    Triggers live in .context/hooks/<trigger-type>/ as executable scripts. The on-disk directory name is still hooks/ for historical reasons even though the command is ctx trigger. Each script:

    • Reads a JSON payload from stdin.
    • Returns a JSON payload on stdout.
    • Returns a non-zero exit code to block or error.
    .context/\n└── hooks/\n    ├── session-start/\n    │   └── inject-context.sh\n    ├── pre-tool-use/\n    │   └── block-legacy.sh\n    └── post-tool-use/\n        └── record-edit.sh\n
    ","path":["CLI","Integrations","Trigger"],"tags":[]},{"location":"cli/trigger/#trigger-types","level":3,"title":"Trigger Types","text":"Type Fires when session-start An AI session begins session-end An AI session ends pre-tool-use Before an AI tool call is executed post-tool-use After an AI tool call returns file-save When a file is saved context-add When a context entry is added","path":["CLI","Integrations","Trigger"],"tags":[]},{"location":"cli/trigger/#input-and-output-contract","level":3,"title":"Input and Output Contract","text":"

    Each trigger receives a JSON object on stdin with the event details. Minimal contract (fields vary by trigger type):

    {\n  \"type\": \"pre-tool-use\",\n  \"tool\": \"write_file\",\n  \"path\": \"src/auth.go\",\n  \"session_id\": \"abc123-...\"\n}\n

    The trigger may write a JSON object to stdout to influence behavior. Example for a blocking pre-tool-use trigger:

    {\n  \"action\": \"block\",\n  \"message\": \"Editing src/auth.go requires approval from #security\"\n}\n

    For non-blocking event loggers, simply read stdin and exit 0 without writing to stdout.

    ","path":["CLI","Integrations","Trigger"],"tags":[]},{"location":"cli/trigger/#ctx-trigger-add","level":3,"title":"ctx trigger add","text":"

    Create a new trigger script with a template. The generated file has a bash shebang, a stdin reader using jq, and a basic JSON output structure.

    ctx trigger add <trigger-type> <name>\n

    Arguments:

    • trigger-type: One of session-start, session-end, pre-tool-use, post-tool-use, file-save, context-add
    • name: Script name (without .sh extension)

    Examples:

    ctx trigger add session-start inject-context\n# Created .context/hooks/session-start/inject-context.sh\n\nctx trigger add pre-tool-use block-legacy\n# Created .context/hooks/pre-tool-use/block-legacy.sh\n

    The generated script is not executable by default. Enable it with ctx trigger enable after reviewing the contents.

    ","path":["CLI","Integrations","Trigger"],"tags":[]},{"location":"cli/trigger/#ctx-trigger-list","level":3,"title":"ctx trigger list","text":"

    List all discovered triggers, grouped by trigger type, with their enabled/disabled status.

    Examples:

    ctx trigger list\n
    ","path":["CLI","Integrations","Trigger"],"tags":[]},{"location":"cli/trigger/#ctx-trigger-test","level":3,"title":"ctx trigger test","text":"

    Run all enabled triggers of a given type against a mock payload. Use --tool and --path to customize the mock input for tool-related events.

    ctx trigger test <trigger-type> [flags]\n

    Flags:

    Flag Description --tool Tool name to put in mock input --path File path to put in mock input

    Examples:

    ctx trigger test session-start\nctx trigger test pre-tool-use --tool write_file --path src/main.go\n
    ","path":["CLI","Integrations","Trigger"],"tags":[]},{"location":"cli/trigger/#ctx-trigger-enable","level":3,"title":"ctx trigger enable","text":"

    Enable a trigger by setting its executable permission bit. Searches every trigger-type directory for a script matching <name>.

    ctx trigger enable <name>\n

    Examples:

    ctx trigger enable inject-context\n# Enabled .context/hooks/session-start/inject-context.sh\n
    ","path":["CLI","Integrations","Trigger"],"tags":[]},{"location":"cli/trigger/#ctx-trigger-disable","level":3,"title":"ctx trigger disable","text":"

    Disable a trigger by clearing its executable permission bit. Searches every trigger-type directory for a script matching <name>.

    ctx trigger disable <name>\n

    Examples:

    ctx trigger disable inject-context\n# Disabled .context/hooks/session-start/inject-context.sh\n
    ","path":["CLI","Integrations","Trigger"],"tags":[]},{"location":"cli/trigger/#three-hooking-concepts-in-ctx-dont-confuse-them","level":3,"title":"Three Hooking Concepts in ctx (Don't Confuse Them)","text":"

    This is a common source of confusion. ctx has three distinct hook-like layers, and they serve different purposes:

    Layer Owned by Where it runs Configured via ctx trigger You .context/hooks/<type>/*.shctx trigger add/enablectx system hooks ctx itself built-in, called by ctx's own lifecycle internal (see ctx system --help) Claude Code hooks Claude Code .claude/settings.local.json edit JSON, or /ctx-sanitize-permissions

    Use ctx trigger when you want project-specific automation that your AI tool will run at lifecycle events. Use Claude Code hooks for tool-specific integrations that don't need to be portable across tools. ctx system hooks are not something you author; they're the internal nudge machinery that ships with ctx.

    ","path":["CLI","Integrations","Trigger"],"tags":[]},{"location":"cli/trigger/#see-also","level":3,"title":"See Also","text":"
    • ctx steering: persistent AI behavioral rules (a different concept; rules vs scripts)
    • Authoring triggers recipe: a full walkthrough with security guidance
    ","path":["CLI","Integrations","Trigger"],"tags":[]},{"location":"cli/usage/","level":1,"title":"Usage","text":"","path":["CLI","Diagnostics","Usage"],"tags":[]},{"location":"cli/usage/#ctx-usage","level":3,"title":"ctx usage","text":"

    Display per-session token usage statistics from the local stats JSONL files written by the heartbeat hook. By default, shows the last 20 entries across all sessions. Use --follow to stream new entries as they arrive (like tail -f).

    ctx usage [flags]\n

    Flags:

    Flag Description -f, --follow Stream new entries as they arrive -s, --session Filter by session ID (prefix match) -n, --last Show last N entries (default: 20) -j, --json Output raw JSONL

    Examples:

    ctx usage                     # Last 20 entries across all sessions\nctx usage --follow            # Live stream (like tail -f)\nctx usage --session abc123    # Filter to one session\nctx usage --last 100 --json   # Last 100 as raw JSONL\n
    ","path":["CLI","Diagnostics","Usage"],"tags":[]},{"location":"cli/watch/","level":1,"title":"Watch","text":"","path":["CLI","Context","Watch"],"tags":[]},{"location":"cli/watch/#ctx-watch","level":2,"title":"ctx watch","text":"

    Watch for AI output and auto-apply context updates.

    Parses <context-update> XML commands from AI output and applies them to context files.

    ctx watch [flags]\n

    Flags:

    Flag Description --log <file> Log file to watch (default: stdin) --dry-run Preview updates without applying

    Examples:

    # Watch stdin\nai-tool | ctx watch\n\n# Watch a log file\nctx watch --log /path/to/ai-output.log\n\n# Preview without applying\nctx watch --dry-run\n
    ","path":["CLI","Context","Watch"],"tags":[]},{"location":"cli/why/","level":1,"title":"Why","text":"","path":["CLI","Getting Started","Why"],"tags":[]},{"location":"cli/why/#ctx-why","level":2,"title":"ctx why","text":"

    Read ctx's philosophy documents directly in the terminal.

    ctx why [DOCUMENT]\n

    Documents:

    Name Description manifesto The ctx Manifesto: creation, not code about About ctx: what it is and why it exists invariants Design invariants: properties that must hold

    Examples:

    # Interactive numbered menu\nctx why\n\n# Show a specific document\nctx why manifesto\nctx why about\nctx why invariants\n\n# Pipe to a pager\nctx why manifesto | less\n
    ","path":["CLI","Getting Started","Why"],"tags":[]},{"location":"home/","level":1,"title":"Home","text":"
    • ctx is not a prompt.
    • ctx is version-controlled cognitive state.

    ctx is the persistence layer for human-AI reasoning.

    Deterministic. Git-native. Human-readable. Local-first.

    Start here.

    Learn what ctx does, set it up, and run your first session.

    Pre-1.0: Moving Fast

    ctx is under active development. This website tracks the development branch, not the latest release:

    Some features described here may not exist in the binary you have installed.

    Expect rough edges.

    If something is missing or broken, open an issue.

    ","path":["Home","Introduction","Home"],"tags":[]},{"location":"home/#introduction","level":2,"title":"Introduction","text":"","path":["Home","Introduction","Home"],"tags":[]},{"location":"home/#about","level":3,"title":"About","text":"

    What ctx is, how it works, and why persistent context changes how you work with AI.

    ","path":["Home","Introduction","Home"],"tags":[]},{"location":"home/#is-it-right-for-me","level":3,"title":"Is It Right for Me?","text":"

    Good fit, not-so-good fit, and a 5-minute trial to find out for yourself.

    ","path":["Home","Introduction","Home"],"tags":[]},{"location":"home/#faq","level":3,"title":"FAQ","text":"

    Quick answers to the questions newcomers ask most about ctx, files, tooling, and trade-offs.

    ","path":["Home","Introduction","Home"],"tags":[]},{"location":"home/#get-started","level":2,"title":"Get Started","text":"","path":["Home","Introduction","Home"],"tags":[]},{"location":"home/#getting-started","level":3,"title":"Getting Started","text":"

    Install the binary, set up the plugin, and verify it works.

    ","path":["Home","Introduction","Home"],"tags":[]},{"location":"home/#your-first-session","level":3,"title":"Your First Session","text":"

    Step-by-step walkthrough from ctx init to verified recall.

    ","path":["Home","Introduction","Home"],"tags":[]},{"location":"home/#common-workflows","level":3,"title":"Common Workflows","text":"

    Day-to-day commands for tracking context, checking health, and browsing history.

    ","path":["Home","Introduction","Home"],"tags":[]},{"location":"home/#concepts","level":2,"title":"Concepts","text":"","path":["Home","Introduction","Home"],"tags":[]},{"location":"home/#context-files","level":3,"title":"Context Files","text":"

    What each .context/ file does, what its purpose is, and how to best leverage it.

    ","path":["Home","Introduction","Home"],"tags":[]},{"location":"home/#configuration","level":3,"title":"Configuration","text":"

    Flexible configuration: .ctxrc, environment variables, and CLI flags.

    ","path":["Home","Introduction","Home"],"tags":[]},{"location":"home/#hub","level":3,"title":"Hub","text":"

    A fan-out channel for decisions, learnings, conventions, and tasks that need to cross project boundaries, without replicating everything else.

    ","path":["Home","Introduction","Home"],"tags":[]},{"location":"home/#working-with-ai","level":2,"title":"Working with AI","text":"","path":["Home","Introduction","Home"],"tags":[]},{"location":"home/#prompting-guide","level":3,"title":"Prompting Guide","text":"

    Effective prompts for AI sessions with ctx.

    ","path":["Home","Introduction","Home"],"tags":[]},{"location":"home/#keeping-ai-honest","level":3,"title":"Keeping AI Honest","text":"

    AI agents confabulate: they invent history, claim familiarity with decisions never made, and sometimes declare tasks complete when they aren't. Tools and habits to push back.

    ","path":["Home","Introduction","Home"],"tags":[]},{"location":"home/#my-ai-keeps-making-the-same-mistakes","level":3,"title":"My AI Keeps Making the Same Mistakes","text":"

    Stop rediscovering the same bugs and dead-ends across sessions.

    ","path":["Home","Introduction","Home"],"tags":[]},{"location":"home/#joining-a-project","level":3,"title":"Joining a Project","text":"

    You inherited a .context/ directory. Get oriented fast: priority order, what to read first, how to ramp up.

    ","path":["Home","Introduction","Home"],"tags":[]},{"location":"home/#customization","level":2,"title":"Customization","text":"","path":["Home","Introduction","Home"],"tags":[]},{"location":"home/#steering-files","level":3,"title":"Steering Files","text":"

    Tell the assistant how to behave when a specific kind of prompt arrives.

    ","path":["Home","Introduction","Home"],"tags":[]},{"location":"home/#lifecycle-triggers","level":3,"title":"Lifecycle Triggers","text":"

    Make things happen at session boundaries: block dangerous tool calls, inject standup notes, log file saves.

    ","path":["Home","Introduction","Home"],"tags":[]},{"location":"home/#community","level":2,"title":"Community","text":"","path":["Home","Introduction","Home"],"tags":[]},{"location":"home/#ctx","level":3,"title":"#ctx","text":"

    We are the builders who care about durable context. Join the community. Hang out in IRC. Star ctx on GitHub.

    ","path":["Home","Introduction","Home"],"tags":[]},{"location":"home/#contributing","level":3,"title":"Contributing","text":"

    Development setup, project layout, and pull request process.

    ","path":["Home","Introduction","Home"],"tags":[]},{"location":"home/about/","level":1,"title":"About","text":"

    \"Creation, not code; Context, not prompts; Verification, not vibes.\"

    Read the ctx Manifesto →

    \"Without durable context, intelligence resets; with ctx, creation compounds.\"

    Without persistent memory, every session starts at zero; ctx makes sessions cumulative.

    Join the ctx Community →

    ","path":["Home","Introduction","About"],"tags":[]},{"location":"home/about/#what-is-ctx","level":2,"title":"What Is ctx?","text":"

    ctx (Context) is a file-based system that enables AI coding assistants to persist project knowledge across sessions. It lives in a .context/ directory in your repo.

    • A session is interactive.
    • ctx enables cognitive continuity.
    • Cognitive continuity enables durable, symbiotic-like human-AI workflows.

    Context Files

    Context files let AI tools remember decisions, conventions, and learnings:

    Context files are explicit and versionable contracts between you and your agents.

    ","path":["Home","Introduction","About"],"tags":[]},{"location":"home/about/#why-do-i-keep-re-explaining-my-codebase","level":2,"title":"Why Do I Keep Re-Explaining My Codebase?!?!","text":"

    You open a new AI session. The first thing you do is re-explain your project.

    Again.

    The architecture, the database choice, the naming conventions, the thing you tried last week that didn't work. You have said all of this before (maybe yesterday, maybe an hour ago), but the AI doesn't know that.

    • You explain the same architecture every session;
    • The AI suggests an approach you already rejected, again;
    • A decision you made three sessions ago gets relitigated from scratch;
    • You spend more time setting context than building features.

    This isn't AI failure: It is statelessness.

    Without persistent memory, every session starts at zero.

    ","path":["Home","Introduction","About"],"tags":[]},{"location":"home/about/#day-and-night","level":3,"title":"Day and Night","text":"

    ctx gives your AI a memory that persists across sessions:

    Without ctxWith ctx
    Session 12: Monday morning\n\nAI:  \"I'll scaffold a MySQL schema for this.\"\nYou: \"We use PostgreSQL.\"\n\nAI:  \"For auth, session-based would be simplest.\"\nYou: \"We decided on JWT.\"\n\nAI:  \"Endpoints in camelCase?\"\nYou: \"snake_case! Like the rest of the API!!!!!!\"\n\nYou: *scrolls up to copy an existing endpoint*\n\n20 minutes spent restoring decisions that already exist.\n
    Session 12: Monday morning\n\nYou:  \"Let's continue the auth implementation.\"\nAI:   \"Per DECISIONS.md (2026-02-12): JWT, not sessions: \n       The project uses PostgreSQL with JWT\n       authentication. The API follows snake_case conventions.\n       Last session you completed the login endpoint: the\n       registration endpoint is next on TASKS.md. Want me to\n       start there?\"\n\n0 minutes re-explaining. Straight to building.\n
    ","path":["Home","Introduction","About"],"tags":[]},{"location":"home/about/#how-ctx-solves-this","level":3,"title":"How ctx Solves This","text":"

    ctx creates a .context/ directory in your project that stores structured knowledge files:

    File What It Remembers TASKS.md What you're working on and what's next DECISIONS.md Architectural choices and why you made them LEARNINGS.md Gotchas, bugs, things that didn't work CONVENTIONS.md Naming patterns, code style, project rules CONSTITUTION.md Hard rules the AI must never violate

    These files can version with your code in git:

    • They load automatically at the session start (via hooks in Claude Code, or manually with ctx agent for other tools).
    • The AI reads them, cites them, and builds on them, instead of asking you to start over.
      • And when it acts, it can point to the exact file and line that justifies the choice.

    Every decision you record, every lesson you capture, makes the next session smarter.

    ctx accumulates.

    Connect with ctx

    • Join the Community →: ask questions, share workflows, and help shape what comes next
    • Read the Blog →: real-world patterns, ponderings, and lessons learned from building ctx using ctx

    Ready to Get Started?

    • Getting Started →: full installation and setup
    • Your First Session →: step-by-step walkthrough from ctx init to verified recall
    ","path":["Home","Introduction","About"],"tags":[]},{"location":"home/common-workflows/","level":1,"title":"Common Workflows","text":"

    The commands below cover what you'll use most often:

    • recording context,
    • checking health,
    • browsing history,
    • and running loops.

    Each section is a self-contained snippet you can copy into your terminal.

    For deeper, step-by-step guides, see Recipes.

    ","path":["Home","Get Started","Common Workflows"],"tags":[]},{"location":"home/common-workflows/#track-context","level":2,"title":"Track Context","text":"

    Prefer Skills over Raw Commands

    When working with an AI agent, use /ctx-task-add, /ctx-decision-add, or /ctx-learning-add instead of raw ctx add commands. The agent automatically picks up session ID, branch, and commit hash from its context, so no manual flags are needed.

    # Add a task\nctx add task \"Implement user authentication\" \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n\n# Record a decision (full ADR fields required)\nctx add decision \"Use PostgreSQL for primary database\" \\\n  --context \"Need a reliable database for production\" \\\n  --rationale \"PostgreSQL offers ACID compliance and JSON support\" \\\n  --consequence \"Team needs PostgreSQL training\" \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n\n# Note a learning\nctx add learning \"Mock functions must be hoisted in Jest\" \\\n  --context \"Tests failed with undefined mock errors\" \\\n  --lesson \"Jest hoists mock calls to top of file\" \\\n  --application \"Place jest.mock() before imports\" \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n\n# Mark task complete\nctx task complete \"user auth\"\n
    ","path":["Home","Get Started","Common Workflows"],"tags":[]},{"location":"home/common-workflows/#leave-a-reminder-for-next-session","level":2,"title":"Leave a Reminder for Next Session","text":"

    Drop a note that surfaces automatically at the start of your next session:

    # Leave a reminder\nctx remind \"refactor the swagger definitions\"\n\n# Date-gated: don't surface until a specific date\nctx remind \"check CI after the deploy\" --after 2026-02-25\n\n# List pending reminders\nctx remind list\n\n# Dismiss reminders by ID (supports ranges)\nctx remind dismiss 1\nctx remind dismiss 3 5-7\n

    Reminders are relayed verbatim at session start by the check-reminders hook and repeat every session until you dismiss them.

    See Session Reminders for the full recipe.

    ","path":["Home","Get Started","Common Workflows"],"tags":[]},{"location":"home/common-workflows/#check-context-health","level":2,"title":"Check Context Health","text":"
    # Detect stale paths, missing files, potential secrets\nctx drift\n\n# See full context summary\nctx status\n
    ","path":["Home","Get Started","Common Workflows"],"tags":[]},{"location":"home/common-workflows/#browse-session-history","level":2,"title":"Browse Session History","text":"

    List and search past AI sessions from the terminal:

    ctx journal source --limit 5\n
    ","path":["Home","Get Started","Common Workflows"],"tags":[]},{"location":"home/common-workflows/#journal-site","level":3,"title":"Journal Site","text":"

    Import session transcripts to a browsable static site with search, navigation, and topic indices.

    The ctx journal command requires zensical (Python >= 3.10).

    zensical is a Python-based static site generator from the Material for MkDocs team.

    (why zensical?).

    If you don't have it on your system, install zensical once with pipx:

    # One-time setup\npipx install zensical\n

    Avoid pip install zensical

    pip install often fails: For example, on macOS, system Python installs a non-functional stub (zensical requires Python >= 3.10), and Homebrew Python blocks system-wide installs (PEP 668).

    pipx creates an isolated environment with the correct Python version automatically.

    ","path":["Home","Get Started","Common Workflows"],"tags":[]},{"location":"home/common-workflows/#import-and-serve","level":3,"title":"Import and Serve","text":"

    Then, import and serve:

    # Import all sessions to .context/journal/ (only new files)\nctx journal import --all\n\n# Generate and serve the journal site\nctx journal site --serve\n

    Open http://localhost:8000 to browse.

    To update after new sessions, run the same two commands again.

    ","path":["Home","Get Started","Common Workflows"],"tags":[]},{"location":"home/common-workflows/#safe-by-default","level":3,"title":"Safe by Default","text":"

    ctx journal import --all is safe by default:

    • It only imports new sessions and skips existing files.
    • Locked entries (via ctx journal lock) are always skipped by both import and enrichment skills.
    • If you add locked: true to frontmatter during enrichment, run ctx journal sync to propagate the lock state to .state.json.
    ","path":["Home","Get Started","Common Workflows"],"tags":[]},{"location":"home/common-workflows/#re-importing-existing-files","level":3,"title":"Re-Importing Existing Files","text":"

    Here is how you regenerate existing files.

    Backup your .context folder before regeneration, as this is a potentially destructive action.

    To re-import journal files, you need to explicitly opt-in using the --regenerate flag:

    Flag combination Frontmatter Body --regenerate Preserved Overwritten from source --regenerate --keep-frontmatter=false Overwritten Overwritten

    Regeneration Overwrites Body Edits

    --regenerate preserves your YAML frontmatter (tags, summary, enrichment metadata) but it replaces the Markdown body with a fresh import.

    Any manual edits you made to the transcript will be lost.

    Lock entries you want to protect first: ctx journal lock <session-id>.

    See Session Journal for the full pipeline including normalization and enrichment.

    ","path":["Home","Get Started","Common Workflows"],"tags":[]},{"location":"home/common-workflows/#scratchpad","level":2,"title":"Scratchpad","text":"

    Store short, sensitive one-liners in an encrypted scratchpad that travels with the project:

    # Write a note\nctx pad set db-password \"postgres://user:pass@localhost/mydb\"\n\n# Read it back\nctx pad get db-password\n\n# List all keys\nctx pad list\n

    The scratchpad is encrypted with a key stored at ~/.ctx/.ctx.key (outside the project, never committed).

    See Scratchpad for details.

    ","path":["Home","Get Started","Common Workflows"],"tags":[]},{"location":"home/common-workflows/#run-an-autonomous-loop","level":2,"title":"Run an Autonomous Loop","text":"

    Generate a script that iterates an AI agent until a completion signal is detected:

    ctx loop\nchmod +x loop.sh\n./loop.sh\n

    See Autonomous Loops for configuration and advanced usage.

    ","path":["Home","Get Started","Common Workflows"],"tags":[]},{"location":"home/common-workflows/#trace-commit-context","level":2,"title":"Trace Commit Context","text":"

    Link your git commits back to the decisions, tasks, and learnings that motivated them. Enable the hook once:

    # Install the git hook (one-time setup)\nctx trace hook enable\n

    From now on, every git commit automatically gets a ctx-context trailer linking it to relevant context. No extra steps needed; just use ctx add, ctx task complete, and commit as usual.

    # Later: why was this commit made?\nctx trace abc123\n\n# Recent commits with their context\nctx trace --last 10\n\n# Context trail for a specific file\nctx trace file src/auth.go\n\n# Manually tag a commit after the fact\nctx trace tag HEAD --note \"Hotfix for production outage\"\n

    To stop: ctx trace hook disable.

    See CLI Reference: trace for details.

    ","path":["Home","Get Started","Common Workflows"],"tags":[]},{"location":"home/common-workflows/#agent-session-start","level":2,"title":"Agent Session Start","text":"

    The first thing an AI agent should do at session start is discover where context lives:

    ctx system bootstrap\n

    This prints the resolved context directory, the files in it, and the operating rules. The CLAUDE.md template instructs the agent to run this automatically. See CLI Reference: bootstrap.

    ","path":["Home","Get Started","Common Workflows"],"tags":[]},{"location":"home/common-workflows/#the-two-skills-you-should-always-use","level":2,"title":"The Two Skills You Should Always Use","text":"

    Using /ctx-remember at session start and /ctx-wrap-up at session end are the highest-value skills in the entire catalog:

    # session begins:\n/ctx-remember\n... do work ...\n# before closing the session:\n/ctx-wrap-up\n

    Let's provide some context, because this is important:

    Although the agent will eventually discover your context through CLAUDE.md → AGENT_PLAYBOOK.md, /ctx-remember hydrates the full context up front (tasks, decisions, recent sessions) so the agent starts informed rather than piecing things together over several turns.

    /ctx-wrap-up is the other half: A structured review that captures learnings, decisions, and tasks before you close the window.

    Hooks like check-persistence remind you (the user) mid-session that context hasn't been saved in a while, but they don't trigger persistence automatically: You still have to act. Also, a CTRL+C can end things at any moment with no reliable \"before session end\" event.

    In short, /ctx-wrap-up is the deliberate checkpoint that makes sure nothing slips through. And /ctx-remember is its mirror skill, to be used at session start.

    See Session Ceremonies for the full workflow.

    ","path":["Home","Get Started","Common Workflows"],"tags":[]},{"location":"home/common-workflows/#cli-commands-vs-ai-skills","level":2,"title":"CLI Commands vs. AI Skills","text":"

    Most ctx operations come in two flavors: a CLI command you run in your terminal and an AI skill (slash command) you invoke inside your coding assistant.

    Commands and skills are not interchangeable: Each has a distinct role.

    ctx CLI command ctx AI skill Runs where Your terminal Inside the AI assistant Speed Fast (milliseconds) Slower (LLM round-trip) Cost Free Consumes tokens and context Analysis Deterministic heuristics Semantic / judgment-based Best for Quick checks, scripting, CI Deep analysis, generation, workflow orchestration","path":["Home","Get Started","Common Workflows"],"tags":[]},{"location":"home/common-workflows/#paired-commands","level":3,"title":"Paired Commands","text":"

    These have both a CLI and a skill counterpart. Use the CLI for quick, deterministic checks; use the skill when you need the agent's judgment.

    CLI Skill When to prefer the skill ctx drift/ctx-drift Semantic analysis: catches meaning drift the CLI misses ctx status/ctx-status Interpreted summary with recommendations ctx add task/ctx-task-add Agent decomposes vague goals into concrete tasks ctx add decision/ctx-decision-add Agent drafts rationale and consequences from discussion ctx add learning/ctx-learning-add Agent extracts the lesson from a debugging session ctx add convention/ctx-convention-add Agent observes a repeated pattern and codifies it ctx task archive/ctx-archive Agent reviews which tasks are truly done ctx pad/ctx-pad Agent reads/writes scratchpad entries in conversation flow ctx journal/ctx-history Agent searches session history with semantic understanding ctx agent/ctx-agent Agent loads and acts on the context packet ctx loop/ctx-loop Agent tailors the loop script to your project ctx doctor/ctx-doctor Agent adds semantic analysis to structural checks ctx hook pause/ctx-pause Agent pauses hooks with session-aware reasoning ctx hook resume/ctx-resume Agent resumes hooks after a pause ctx remind/ctx-remind Agent manages reminders in conversation flow","path":["Home","Get Started","Common Workflows"],"tags":[]},{"location":"home/common-workflows/#ai-only-skills","level":3,"title":"AI-Only Skills","text":"

    These have no CLI equivalent. They require the agent's reasoning.

    Skill Purpose /ctx-remember Load context and present structured readback at session start /ctx-wrap-up End-of-session ceremony: persist learnings, decisions, tasks /ctx-next Suggest 1-3 concrete next actions from context /ctx-commit Commit with integrated context capture /ctx-reflect Pause and assess session progress /ctx-consolidate Merge overlapping learnings or decisions /ctx-prompt-audit Analyze prompting patterns for improvement /ctx-plan-import Import Claude Code plan files into project specs /ctx-implement Execute a plan step-by-step with verification /ctx-worktree Manage parallel agent worktrees /ctx-journal-enrich Add metadata, tags, and summaries to journal entries /ctx-journal-enrich-all Full journal pipeline: export if needed, then batch-enrich /ctx-blog Generate a blog post (zensical-flavored Markdown) /ctx-blog-changelog Generate themed blog post from commits between releases /ctx-architecture Build and maintain architecture maps (ARCHITECTURE.md, DETAILED_DESIGN.md)","path":["Home","Get Started","Common Workflows"],"tags":[]},{"location":"home/common-workflows/#cli-only-commands","level":3,"title":"CLI-Only Commands","text":"

    These are infrastructure: used in scripts, CI, or one-time setup.

    Command Purpose ctx init Initialize .context/ directory ctx load Output assembled context for piping ctx task complete Mark a task done by substring match ctx sync Reconcile context with codebase state ctx compact Consolidate and clean up context files ctx trace Show context behind git commits ctx trace hook Enable/disable commit context tracing hook ctx setup Generate AI tool integration config ctx watch Watch AI output and auto-apply context updates ctx serve Serve any zensical directory (default: journal) ctx permission snapshot Save settings as a golden image ctx permission restore Restore settings from golden image ctx journal site Generate browsable journal from exports ctx hook notify setup Configure webhook notifications ctx decision List and filter decisions ctx learning List and filter learnings ctx task List tasks, manage archival and snapshots ctx why Read the philosophy behind ctx ctx guide Quick-reference cheat sheet ctx site Site management commands ctx config Manage runtime configuration profiles ctx system System diagnostics and hook commands ctx backup Back up context and Claude data to tar.gz / SMB ctx completion Generate shell autocompletion scripts

    Rule of Thumb

    Quick check? Use the CLI.

    Need judgment? Use the skill.

    When in doubt, start with the CLI: It's free and instant.

    Escalate to the skill when heuristics aren't enough.

    Next Up: Context Files →: what each .context/ file does and how to use it

    See Also:

    • Recipes: targeted how-to guides for specific tasks
    • Knowledge Capture: patterns for recording decisions, learnings, and conventions
    • Context Health: keeping your .context/ accurate and drift-free
    • Session Archaeology: digging into past sessions
    • Task Management: tracking and completing work items
    ","path":["Home","Get Started","Common Workflows"],"tags":[]},{"location":"home/community/","level":1,"title":"#ctx","text":"

    Open source is better together.

    We are the builders who care about durable context, verifiable decisions, and human-AI workflows that compound over time.

    ","path":["Home","Community","#ctx"],"tags":[]},{"location":"home/community/#help-ctx-change-how-ai-remembers","level":2,"title":"Help ctx Change How AI Remembers","text":"

    If you like the idea, a star helps ctx reach engineers who run into context drift every day:

    Star ctx on GitHub ⭐

    ","path":["Home","Community","#ctx"],"tags":[]},{"location":"home/community/#ctx-you","level":2,"title":"ctx ♥️ You","text":"

    Join the community to ask questions, share feedback, and connect with other users:

    • Discord join the ctx Discord: Real-time discussion, field notes, and early ideas.
    • Read the ctx Source on GitHub: Issues, discussions, and contributions.
    ","path":["Home","Community","#ctx"],"tags":[]},{"location":"home/community/#want-to-contribute","level":2,"title":"Want to Contribute?","text":"

    Early adopters shape the conventions.

    ctx is free and open source software.

    Contributions are always welcome and appreciated.

    ","path":["Home","Community","#ctx"],"tags":[]},{"location":"home/community/#code-of-conduct","level":2,"title":"Code of Conduct","text":"

    Clear context requires respectful collaboration.

    ctx follows the Contributor Covenant.

    ","path":["Home","Community","#ctx"],"tags":[]},{"location":"home/configuration/","level":1,"title":"Configuration","text":"","path":["Home","Concepts","Configuration"],"tags":[]},{"location":"home/configuration/#configuration","level":2,"title":"Configuration","text":"

    ctx uses four layers of configuration. Each layer overrides the one below it:

    1. CLI flags: Per-invocation overrides (highest priority)
    2. Environment variables: Shell or CI/CD overrides
    3. The .ctxrc file: Project-level defaults (YAML)
    4. Built-in defaults: Hardcoded fallbacks (lowest priority)

    All settings are optional: If nothing is configured, ctx works out of the box with sensible defaults.

    ","path":["Home","Concepts","Configuration"],"tags":[]},{"location":"home/configuration/#the-ctxrc-file","level":2,"title":"The .ctxrc File","text":"

    The .ctxrc file is an optional YAML file placed in the project root (next to your .context/ directory). It lets you set project-level defaults that apply to every ctx command.

    ","path":["Home","Concepts","Configuration"],"tags":[]},{"location":"home/configuration/#location","level":3,"title":"Location","text":"
    my-project/\n├── .ctxrc              ← configuration file\n├── .context/\n│   ├── TASKS.md\n│   ├── DECISIONS.md\n│   └── ...\n└── src/\n

    ctx looks for .ctxrc in the current working directory when any command runs. There is no global or user-level config file: Configuration is always per-project.

    Contributors: Dev Configuration Profile

    The ctx repo ships two .ctxrc source profiles (.ctxrc.base and .ctxrc.dev). The working copy is gitignored and swapped between them via ctx config switch dev / ctx config switch base. See Contributing: Configuration Profiles.

    Using a Different .context Directory

    The default .context/ directory can be changed per-project via the context_dir key in .ctxrc, the CTX_DIR environment variable, or the --context-dir CLI flag.

    See Environment Variables and CLI Global Flags below for details.

    ","path":["Home","Concepts","Configuration"],"tags":[]},{"location":"home/configuration/#full-reference","level":3,"title":"Full Reference","text":"

    A commented .ctxrc showing all options and their defaults:

    # .ctxrc: ctx runtime configuration\n# https://ctx.ist/configuration/\n#\n# All settings are optional. Missing values use defaults.\n# Priority: CLI flags > environment variables > .ctxrc > defaults\n#\n# context_dir: .context\n# token_budget: 8000\n# auto_archive: true\n# archive_after_days: 7\n# scratchpad_encrypt: true\n# allow_outside_cwd: false\n# event_log: false\n# entry_count_learnings: 30\n# entry_count_decisions: 20\n# convention_line_count: 200\n# injection_token_warn: 15000\n# context_window: 200000      # auto-detected for Claude Code; override for other tools\n# billing_token_warn: 0       # one-shot warning at this token count (0 = disabled)\n#\n# stale_age_days: 30      # days before drift flags a context file as stale (0 = disabled)\n# key_rotation_days: 90\n# task_nudge_interval: 5   # Edit/Write calls between task completion nudges\n#\n# notify:               # requires: ctx hook notify setup\n#   events:             # required: no events sent unless listed\n#     - loop\n#     - nudge\n#     - relay\n#\n# tool: \"\"              # Active AI tool: claude, cursor, cline, kiro, codex\n#\n# steering:             # Steering layer configuration\n#   dir: .context/steering\n#   default_inclusion: manual\n#   default_tools: []\n#\n# hooks:                # Hook system configuration\n#   dir: .context/hooks\n#   timeout: 10\n#   enabled: true\n#\n# provenance_required:  # Relax provenance flags for ctx add\n#   session_id: true    # Require --session-id (default: true)\n#   branch: true        # Require --branch (default: true)\n#   commit: true        # Require --commit (default: true)\n#\n# priority_order:\n#   - CONSTITUTION.md\n#   - TASKS.md\n#   - CONVENTIONS.md\n#   - ARCHITECTURE.md\n#   - DECISIONS.md\n#   - LEARNINGS.md\n#   - GLOSSARY.md\n#   - AGENT_PLAYBOOK.md\n
    ","path":["Home","Concepts","Configuration"],"tags":[]},{"location":"home/configuration/#option-reference","level":3,"title":"Option Reference","text":"Option Type Default Description context_dirstring.context Context directory name (relative to project root) token_budgetint8000 Default token budget for ctx agent and ctx loadauto_archivebooltrue Auto-archive completed tasks during ctx compactarchive_after_daysint7 Days before completed tasks are archived scratchpad_encryptbooltrue Encrypt scratchpad with AES-256-GCM allow_outside_cwdboolfalse Allow context directory outside the current working directory event_logboolfalse Enable local hook event logging to .context/state/events.jsonlentry_count_learningsint30 Drift warning when LEARNINGS.md exceeds this entry count (0 = disable) entry_count_decisionsint20 Drift warning when DECISIONS.md exceeds this entry count (0 = disable) convention_line_countint200 Drift warning when CONVENTIONS.md exceeds this line count (0 = disable) injection_token_warnint15000 Warn when auto-injected context exceeds this token count (0 = disable) context_windowint200000 Context window size in tokens. Auto-detected for Claude Code (200k/1M); override for other AI tools billing_token_warnint0 (off) One-shot warning when session tokens exceed this threshold (0 = disabled). For plans where tokens beyond an included allowance cost extra stale_age_daysint30 Days before ctx drift flags a context file as stale (0 = disable) key_rotation_daysint90 Days before encryption key rotation nudge task_nudge_intervalint5 Edit/Write calls between task completion nudges notify.events[]string (all) Event filter for webhook notifications (empty = all) priority_order[]string (see below) Custom file loading priority for context assembly toolstring (empty) Active AI tool identifier (claude, cursor, cline, kiro, codex). 
Used by steering sync and hook dispatch steering.dirstring.context/steering Steering files directory steering.default_inclusionstringmanual Default inclusion mode for new steering files (always, auto, manual) steering.default_tools[]string (all) Default tool filter for new steering files (empty = all tools) hooks.dirstring.context/hooks Hook scripts directory hooks.timeoutint10 Per-hook execution timeout in seconds hooks.enabledbooltrue Whether hook execution is enabled provenance_required.session_idbooltrue Require --session-id on ctx add for tasks, decisions, learnings provenance_required.branchbooltrue Require --branch on ctx add for tasks, decisions, learnings provenance_required.commitbooltrue Require --commit on ctx add for tasks, decisions, learnings

    Default priority order (used when priority_order is not set):

    1. CONSTITUTION.md
    2. TASKS.md
    3. CONVENTIONS.md
    4. ARCHITECTURE.md
    5. DECISIONS.md
    6. LEARNINGS.md
    7. GLOSSARY.md
    8. AGENT_PLAYBOOK.md

    See Context Files for the rationale behind this ordering.

    ","path":["Home","Concepts","Configuration"],"tags":[]},{"location":"home/configuration/#environment-variables","level":2,"title":"Environment Variables","text":"

    Environment variables override .ctxrc values but are overridden by CLI flags.

    Variable Description Equivalent .ctxrc key CTX_DIR Override the context directory path context_dirCTX_TOKEN_BUDGET Override the default token budget token_budget","path":["Home","Concepts","Configuration"],"tags":[]},{"location":"home/configuration/#examples","level":3,"title":"Examples","text":"
    # Use a shared context directory\nCTX_DIR=/shared/team-context ctx status\n\n# Increase token budget for a single run\nCTX_TOKEN_BUDGET=16000 ctx agent\n
    ","path":["Home","Concepts","Configuration"],"tags":[]},{"location":"home/configuration/#cli-global-flags","level":2,"title":"CLI Global Flags","text":"

    CLI flags have the highest priority and override both environment variables and .ctxrc settings. These flags are available on every ctx command.

    Flag Description --context-dir <path> Override context directory (default: .context/) --allow-outside-cwd Allow context directory outside current working directory --tool <name> Override active AI tool identifier (e.g. kiro, cursor) --version Show version and exit --help Show command help and exit","path":["Home","Concepts","Configuration"],"tags":[]},{"location":"home/configuration/#examples_1","level":3,"title":"Examples","text":"
    # Point to a different context directory:\nctx status --context-dir /path/to/shared/.context\n\n# Allow external context directory (skips boundary check):\nctx status --context-dir /mnt/nas/project-context --allow-outside-cwd\n
    ","path":["Home","Concepts","Configuration"],"tags":[]},{"location":"home/configuration/#priority-order","level":2,"title":"Priority Order","text":"

    When the same setting is configured in multiple layers, the highest-priority layer wins:

    CLI flags  >  Environment variables  >  .ctxrc  >  Built-in defaults\n(highest)                                          (lowest)\n

    Example resolution for context_dir:

    Layer Value Wins? --context-dir/tmp/ctx Yes CTX_DIR/shared/context No .ctxrc.my-context No Default .context No

    The CLI flag /tmp/ctx is used because it has the highest priority.

    If the CLI flag were absent, CTX_DIR=/shared/context would win. If neither the flag nor the env var were set, the .ctxrc value .my-context would be used. With nothing configured, the default .context applies.

    ","path":["Home","Concepts","Configuration"],"tags":[]},{"location":"home/configuration/#examples_2","level":2,"title":"Examples","text":"","path":["Home","Concepts","Configuration"],"tags":[]},{"location":"home/configuration/#external-context-directory","level":3,"title":"External .context Directory","text":"

    Store context outside the project tree (useful for monorepos or shared context):

    # .ctxrc\ncontext_dir: /home/team/shared-context\nallow_outside_cwd: true\n
    ","path":["Home","Concepts","Configuration"],"tags":[]},{"location":"home/configuration/#custom-token-budget","level":3,"title":"Custom Token Budget","text":"

    Increase the token budget for projects with large context:

    # .ctxrc\ntoken_budget: 16000\n

    This affects the default budget for ctx agent and ctx load. You can still override per-invocation with ctx agent --budget 4000.

    ","path":["Home","Concepts","Configuration"],"tags":[]},{"location":"home/configuration/#disabled-scratchpad-encryption","level":3,"title":"Disabled Scratchpad Encryption","text":"

    Turn off encryption for the scratchpad (useful in ephemeral environments where key management is unnecessary):

    # .ctxrc\nscratchpad_encrypt: false\n

    Unencrypted Scratchpads Store Secrets in Plaintext

    Only disable encryption if you understand the security implications.

    The scratchpad may contain sensitive data such as API keys, database URLs, or deployment credentials.

    ","path":["Home","Concepts","Configuration"],"tags":[]},{"location":"home/configuration/#custom-priority-order","level":3,"title":"Custom Priority Order","text":"

    Reorder context files to prioritize architecture over conventions:

    # .ctxrc\npriority_order:\n  - CONSTITUTION.md\n  - TASKS.md\n  - ARCHITECTURE.md\n  - DECISIONS.md\n  - CONVENTIONS.md\n  - LEARNINGS.md\n  - GLOSSARY.md\n  - AGENT_PLAYBOOK.md\n

    Files not listed in priority_order receive the lowest priority (100). The order affects ctx agent, ctx load, and drift's file-priority calculations.

    ","path":["Home","Concepts","Configuration"],"tags":[]},{"location":"home/configuration/#billing-token-threshold","level":3,"title":"Billing Token Threshold","text":"

    Get a one-shot warning when your session crosses a token threshold where extra charges begin (e.g., Claude Pro includes 200k tokens; beyond that costs extra):

    # .ctxrc\nbilling_token_warn: 180000   # warn before hitting the 200k paid boundary\n

    The warning fires once per session the first time token usage exceeds the threshold. Set to 0 (or omit) to disable.

    ","path":["Home","Concepts","Configuration"],"tags":[]},{"location":"home/configuration/#adjusted-drift-thresholds","level":3,"title":"Adjusted Drift Thresholds","text":"

    Raise or lower the entry-count thresholds that trigger drift warnings:

    # .ctxrc\nentry_count_learnings: 50   # warn above 50 learnings (default: 30)\nentry_count_decisions: 10   # warn above 10 decisions (default: 20)\nconvention_line_count: 300  # warn above 300 lines (default: 200)\n

    Set any threshold to 0 to disable that specific check.

    ","path":["Home","Concepts","Configuration"],"tags":[]},{"location":"home/configuration/#webhook-notifications","level":3,"title":"Webhook Notifications","text":"

    Get notified when loops complete, hooks fire, or agents reach milestones:

    # Configure the webhook URL (encrypted, safe to commit)\nctx hook notify setup\n\n# Test delivery\nctx hook notify test\n

    Filter which events reach your webhook:

    # .ctxrc\nnotify:\n  events:\n    - loop      # loop completion/max-iteration\n    - nudge     # VERBATIM relay hooks fired\n    # - relay   # all hook output (verbose, for debugging)\n    # - heartbeat  # every-prompt session-alive signal\n

    Notifications are opt-in: No events are sent unless explicitly listed.

    See Webhook Notifications for a step-by-step recipe.

    ","path":["Home","Concepts","Configuration"],"tags":[]},{"location":"home/configuration/#hook-message-overrides","level":2,"title":"Hook Message Overrides","text":"

    Hook messages control what text hooks emit when they fire. Each message can be overridden per-project by placing a text file at the matching path under .context/:

    .context/hooks/messages/{hook}/{variant}.txt\n

    The override takes priority over the embedded default compiled into the ctx binary. An empty file silences the message while preserving the hook's logic (counting, state tracking, cooldowns).

    Use ctx hook message to discover and manage overrides:

    ctx hook message list                      # see all messages\nctx hook message show qa-reminder gate     # view the current template\nctx hook message edit qa-reminder gate     # copy default for editing\nctx hook message reset qa-reminder gate    # revert to default\n

    See Customizing Hook Messages for detailed examples including Python, JavaScript, and silence configurations.

    ","path":["Home","Concepts","Configuration"],"tags":[]},{"location":"home/configuration/#agent-bootstrapping","level":2,"title":"Agent Bootstrapping","text":"

    AI agents need to know the resolved context directory at session start. The ctx system bootstrap command prints the context path, file list, and operating rules in both text and JSON formats:

    ctx system bootstrap          # text output for agents\nctx system bootstrap -q       # just the context directory path\nctx system bootstrap --json   # structured output for automation\n

    The CLAUDE.md template instructs the agent to run this as its first action. Every nudge (context checkpoint, persistence reminder, etc.) also includes a Context: <dir> footer that re-anchors the agent to the correct directory throughout the session.

    This replaces the previous approach of hardcoding .context/ paths in agent instructions.

    See CLI Reference: bootstrap for full details.

    See also: CLI Reference | Context Files | Scratchpad

    ","path":["Home","Concepts","Configuration"],"tags":[]},{"location":"home/context-files/","level":1,"title":"Context Files","text":"","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#context","level":2,"title":".context/","text":"

    Each context file in .context/ serves a specific purpose.

    Files are designed to be human-readable, AI-parseable, and token-efficient.

    ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#file-overview","level":2,"title":"File Overview","text":"

    The core context files live directly under .context/. They are the substrate ctx reads in priority order when assembling the agent context packet:

    File Purpose Priority CONSTITUTION.md Hard rules that must NEVER be violated 1 (highest) TASKS.md Current and planned work 2 CONVENTIONS.md Project patterns and standards 3 ARCHITECTURE.md System overview and components 4 DECISIONS.md Architectural decisions with rationale 5 LEARNINGS.md Lessons learned, gotchas, tips 6 GLOSSARY.md Domain terms and abbreviations 7 AGENT_PLAYBOOK.md Instructions for AI tools 8 (lowest)

    Two subdirectories under .context/ are implementation details that are user-editable but not part of the priority read order:

    • .context/templates/: format templates for ctx add decision and ctx add learning. See templates below.
    • .context/steering/: behavioral rules with YAML frontmatter that get synced into each AI tool's native config. See steering below, and the full Steering files page for the design and workflow.
    ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#outside-context","level":3,"title":"Outside .context/","text":"

    Two other moving parts are often confused with context files but are not under .context/:

    • Skills live in .claude/skills/ (project-local) or are provided by the installed ctx plugin. A typical project doesn't see the plugin's skills at all; they ride with the plugin and are owned by its update cycle. See ctx skill and Skills reference.
    • Hooks: Claude Code PreToolUse/PostToolUse/UserPromptSubmit entries configured in .claude/settings.json or shipped by a plugin. The ctx plugin registers its own hooks automatically; a typical project does not author hooks by hand, and any local edits to plugin-owned hook files will be overridden on the next plugin update. If you need to customize behavior, edit your own project settings, not the plugin's files. See Hook sequence diagrams.
    ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#read-order-rationale","level":2,"title":"Read Order Rationale","text":"

    The priority order follows a logical progression for AI tools:

    1. CONSTITUTION.md: Inviolable rules first. The AI tool must know what it cannot do before attempting anything.
    2. TASKS.md: Current work items. What the AI tool should focus on.
    3. CONVENTIONS.md: How to write code. Patterns and standards to follow when implementing tasks.
    4. ARCHITECTURE.md: System structure. Understanding of components and boundaries before making changes.
    5. DECISIONS.md: Historical context. Why things are the way they are, to avoid re-debating settled decisions.
    6. LEARNINGS.md: Gotchas and tips. Lessons from past work that inform the current implementation.
    7. GLOSSARY.md: Reference material. Domain terms and abbreviations for lookup as needed.
    8. AGENT_PLAYBOOK.md: Meta instructions last. How to use this context system itself. Loaded last because the agent should understand the content (rules, tasks, patterns) before the operating manual.
    ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#constitutionmd","level":2,"title":"CONSTITUTION.md","text":"

    Purpose: Define hard invariants: Rules that must NEVER be violated, regardless of the task.

    AI tools read this first and should refuse tasks that violate these rules.

    ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#structure","level":3,"title":"Structure","text":"
    # Constitution\n\nThese rules are INVIOLABLE. If a task requires violating these, the task \nis wrong.\n\n## Security Invariants\n\n* [ ] Never commit secrets, tokens, API keys, or credentials\n* [ ] Never store customer/user data in context files\n* [ ] Never disable security linters without documented exception\n\n## Quality Invariants\n\n* [ ] All code must pass tests before commit\n* [ ] No `any` types in TypeScript without documented reason\n* [ ] No TODO comments in main branch (*move to `TASKS.md`*)\n\n## Process Invariants\n\n* [ ] All architectural changes require a decision record\n* [ ] Breaking changes require version bump\n* [ ] Generated files are never committed\n
    ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#guidelines","level":3,"title":"Guidelines","text":"
    • Keep rules minimal and absolute
    • Each rule should be enforceable (can verify compliance)
    • Use checkbox format for clarity
    • Never compromise on these rules
    ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#tasksmd","level":2,"title":"TASKS.md","text":"

    Purpose: Track current work, planned work, and blockers.

    ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#structure_1","level":3,"title":"Structure","text":"

    Tasks are organized by Phase: logical groupings that preserve order and enable replay.

    Tasks stay in their Phase permanently; status is tracked via checkboxes and inline tags.

    # Tasks\n\n## Phase 1: Initial Setup\n\n* [x] Set up project structure\n* [x] Configure linting and formatting\n* [ ] Add CI/CD pipeline `#in-progress`\n\n## Phase 2: Core Features\n\n* [ ] Implement user authentication `#priority:high`\n* [ ] Add API rate limiting `#priority:medium`\n  * Blocked by: Need to finalize auth first\n\n## Backlog\n\n* [ ] Performance optimization `#priority:low`\n* [ ] Add metrics dashboard `#priority:deferred`\n

    Key principles:

    • Tasks never move between sections: mark as [x] or [-] in place
    • Use #in-progress inline tag to indicate current work
    • Phase headers provide structure and replay order
    • Backlog section for unscheduled work
    ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#tags","level":3,"title":"Tags","text":"

    Use inline backtick-wrapped tags for metadata:

    Tag Values Purpose #priorityhigh, medium, low Task urgency #areacore, cli, docs, tests Codebase area #estimate1h, 4h, 1d Time estimate (optional) #in-progress (none) Currently being worked on

    Lifecycle tags (for session correlation):

    Tag Format When to add #addedYYYY-MM-DD-HHMMSS Auto-added by ctx add task#startedYYYY-MM-DD-HHMMSS When beginning work on the task

    These timestamps help correlate tasks with session files and track which session started vs completed work.

    ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#status-markers","level":3,"title":"Status Markers","text":"Marker Meaning [ ] Pending [x] Completed [-] Skipped (include reason)","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#guidelines_1","level":3,"title":"Guidelines","text":"
    • Never delete tasks; mark as [x] completed or [-] skipped
    • Never move tasks between sections; use inline tags for status
    • Use ctx task archive periodically to move completed tasks to archive
    • Mark current work with #in-progress inline tag
    ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#decisionsmd","level":2,"title":"DECISIONS.md","text":"

    Purpose: Record architectural decisions with rationale so they don't get re-debated.

    ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#structure_2","level":3,"title":"Structure","text":"
    # Decisions\n\n## [YYYY-MM-DD] Decision Title\n\n**Status**: Accepted | Superseded | Deprecated\n\n**Context**: What situation prompted this decision?\n\n**Decision**: What was decided?\n\n**Rationale**: Why was this the right choice?\n\n**Consequence**: What are the implications?\n\n**Alternatives Considered**:\n* Alternative A: Why rejected\n* Alternative B: Why rejected\n
    ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#example","level":3,"title":"Example","text":"
    ## [2025-01-15] Use TypeScript Strict Mode\n\n**Status**: Accepted\n\n**Context**: Starting a new project, need to choose the type-checking level.\n\n**Decision**: Enable TypeScript strict mode with all strict flags.\n\n**Rationale**: Catches more bugs at compile time. Team has experience\nwith strict mode. Upfront cost pays off in reduced runtime errors.\n\n**Consequence**: More verbose type annotations required. Some\nthird-party libraries need type assertions.\n\n**Alternatives Considered**:\n- Basic TypeScript: Rejected because it misses null checks\n- JavaScript with JSDoc: Rejected because tooling support is weaker\n
    ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#status-values","level":3,"title":"Status Values","text":"Status Meaning Accepted Current, active decision Superseded Replaced by newer decision (link to it) Deprecated No longer relevant","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#learningsmd","level":2,"title":"LEARNINGS.md","text":"

    Purpose: Capture lessons learned, gotchas, and tips that shouldn't be forgotten.

    ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#structure_3","level":3,"title":"Structure","text":"
    # Learnings\n\n## Category Name\n\n### Learning Title\n\n**Discovered**: YYYY-MM-DD\n\n**Context**: When/how was this learned?\n\n**Lesson**: What's the takeaway?\n\n**Application**: How should this inform future work?\n
    ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#example_1","level":3,"title":"Example","text":"
    ## Testing\n\n### Vitest Mocks Must Be Hoisted\n\n**Discovered**: 2025-01-15\n\n**Context**: Tests were failing intermittently when mocking fs module.\n\n**Lesson**: Vitest requires `vi.mock()` calls to be hoisted to the\ntop of the file. Dynamic mocks need `vi.doMock()` instead.\n\n**Application**: Always use `vi.mock()` at file top. Use `vi.doMock()`\nonly when mock needs runtime values.\n
    ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#categories","level":3,"title":"Categories","text":"

    Organize learnings by topic:

    • Testing
    • Build & Deploy
    • Performance
    • Security
    • Third-Party Libraries
    • Git and Workflow
    ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#conventionsmd","level":2,"title":"CONVENTIONS.md","text":"

    Purpose: Document project patterns, naming conventions, and standards.

    ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#structure_4","level":3,"title":"Structure","text":"
    # Conventions\n\n## Naming\n\n* **Files**: kebab-case for all source files\n* **Components**: PascalCase for React components\n* **Functions**: camelCase, verb-first (getUser, parseConfig)\n* **Constants**: SCREAMING_SNAKE_CASE\n\n## Patterns\n\n### Pattern Name\n\n**When to use**: Situation description\n\n**Implementation**:\n// in triple backticks\n// Example code\n\n**Why**: Rationale for this pattern\n
    ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#guidelines_2","level":3,"title":"Guidelines","text":"
    • Include concrete examples
    • Explain the \"why\" not just the \"what\"
    • Keep patterns minimal: Only document what's non-obvious
    ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#architecturemd","level":2,"title":"ARCHITECTURE.md","text":"

    Purpose: Provide system overview and component relationships.

    ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#structure_5","level":3,"title":"Structure","text":"
    # Architecture\n\n## Overview\n\nBrief description of what the system does and how it's organized.\n\n## Components\n\n### Component Name\n\n**Responsibility**: What this component does\n\n**Dependencies**: What it depends on\n\n**Dependents**: What depends on it\n\n**Key Files**:\n* path/to/file.ts: Description\n\n## Data Flow\n\nDescription or diagram of how data moves through the system.\n\n## Boundaries\n\nWhat's in scope vs out of scope for this codebase.\n
    ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#guidelines_3","level":3,"title":"Guidelines","text":"
    • Keep diagrams simple (Mermaid works well)
    • Focus on boundaries and interfaces
    • Update when major structural changes occur
    ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#glossarymd","level":2,"title":"GLOSSARY.md","text":"

    Purpose: Define domain terms, abbreviations, and project vocabulary.

    ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#structure_6","level":3,"title":"Structure","text":"
    # Glossary\n\n## Domain Terms\n\n### Term Name\n\n**Definition**: What it means in this project's context\n\n**Not to be confused with**: Similar terms that mean different things\n\n**Example**: How it's used\n\n## Abbreviations\n\n| Abbrev | Expansion                     | Context                |\n|--------|-------------------------------|------------------------|\n| ADR    | Architectural Decision Record | Decision documentation |\n| SUT    | System Under Test             | Testing                |\n
    ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#guidelines_4","level":3,"title":"Guidelines","text":"
    • Define project-specific meanings
    • Clarify potentially ambiguous terms
    • Include abbreviations used in code or docs
    ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#agent_playbookmd","level":2,"title":"AGENT_PLAYBOOK.md","text":"

    Purpose: Explicit instructions for how AI tools should read, apply, and update context.

    ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#key-sections","level":3,"title":"Key Sections","text":"

    Read Order: Priority order for loading context files

    When to Update: Events that trigger context updates

    How to Avoid Hallucinating Memory: Critical rules:

    1. Never assume: If not in files, you don't know it
    2. Never invent history: Don't claim \"we discussed\" without evidence
    3. Verify before referencing: Search files before citing
    4. When uncertain, say so
    5. Trust files over intuition

    Context Update Commands: Format for automated updates via ctx watch:

    <context-update type=\"task\">Implement rate limiting</context-update>\n<context-update type=\"complete\">user auth</context-update>\n<context-update type=\"learning\"\n  context=\"Debugging hooks\"\n  lesson=\"Hooks receive JSON via stdin\"\n  application=\"Parse JSON stdin with the host language\"\n>Hook Input Format</context-update>\n<context-update type=\"decision\"\n  context=\"Need a caching layer\"\n  rationale=\"Redis is fast and team has experience\"\n  consequence=\"Must provision Redis infrastructure\"\n>Use Redis for caching</context-update>\n

    See Integrations for full documentation.

    ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#templates","level":2,"title":"templates/","text":"

    Location: .context/templates/. Status: implementation detail, user-editable.

    Purpose: Format templates for ctx add decision and ctx add learning. These control the structure of new entries appended to DECISIONS.md and LEARNINGS.md.

    ctx init deploys two starter templates:

    • decision.md: sections Context, Rationale, Consequence
    • learning.md: sections Context, Lesson, Application
    ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#customizing","level":3,"title":"Customizing","text":"

    Edit the templates directly. Changes take effect immediately on the next ctx add command. For example, to add a \"References\" section to all new decisions, edit .context/templates/decision.md.

    Templates are committed to git, so customizations are shared with the team.

    ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#steering","level":2,"title":"steering/","text":"

    Location: .context/steering/. Status: implementation detail, user-editable.

    Purpose: Behavioral rules with YAML frontmatter that tell an AI assistant how to behave when a specific kind of prompt arrives. Unlike the core context files (which describe what the project is), steering files describe what to do and ride alongside the prompt through the AI tool's native rule pipeline (Claude Code, Cursor, Kiro, Cline). ctx matches steering files to prompts and syncs them out to each tool's config.

    ctx init scaffolds four foundation files:

    • product.md: who this project serves and why
    • tech.md: the technology stack and its constraints
    • structure.md: how the code is organized
    • workflow.md: how work moves through the system

    Each file carries YAML frontmatter describing when it applies (always, matching prompts, or manually referenced) and what tool scope it covers. The foundation files use inclusion: always by default so every session picks them up.

    ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#customizing_1","level":3,"title":"Customizing","text":"

    Edit the files directly. Add your own steering files with ctx steering add, preview the match set with ctx steering preview, and run ctx steering sync to push them into each AI tool's config after changes. Steering files are committed to git, so they're shared with the team.

    For the design rationale, the full inclusion/priority model, and the end-to-end sync workflow, see the dedicated Steering files page.

    ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#parsing-rules","level":2,"title":"Parsing Rules","text":"

    All context files follow these conventions:

    1. Headers define structure: # for title, ## for sections, ### for items
    2. Bold keys for fields: **Key**: followed by value
    3. Code blocks are literal: Never parse code block content as structure
    4. Lists are ordered: Items appear in priority/chronological order
    5. Tags are inline: Backtick-wrapped tags like #priority:high
    ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#further-reading","level":2,"title":"Further Reading","text":"
    • Refactoring with Intent: how persistent context prevents drift during refactoring sessions
    ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#token-efficiency","level":2,"title":"Token Efficiency","text":"

    Keep context files concise:

    • Use abbreviations in tags, not prose;
    • Omit obvious words (\"The,\" \"This\");
    • Prefer bullet points over paragraphs;
    • Keep examples minimal but illustrative;
    • Archive old completed items periodically.

    Next Up: Prompting Guide →: effective prompts for AI sessions with ctx

    ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/contributing/","level":1,"title":"Contributing","text":"","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#development-setup","level":2,"title":"Development Setup","text":"","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#prerequisites","level":3,"title":"Prerequisites","text":"
    • Go (version defined in go.mod)
    • Claude Code
    • Git
    • GNU Make
    • Zensical
    ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#1-fork-or-clone-the-repository","level":3,"title":"1. Fork (or Clone) the Repository","text":"
    # Fork on GitHub, then:\ngit clone https://github.com/<you>/ctx.git\ncd ctx\n\n# Or, if you have push access:\ngit clone https://github.com/ActiveMemory/ctx.git\ncd ctx\n
    ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#2-build-and-install-the-binary","level":3,"title":"2. Build and Install the Binary","text":"
    make build\nsudo make install\n

    This compiles the ctx binary and places it in /usr/local/bin/.

    ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#3-install-the-plugin-from-your-local-clone","level":3,"title":"3. Install the Plugin from Your Local Clone","text":"

    The repository ships a Claude Code plugin under internal/assets/claude/. Point Claude Code at your local copy so that skills and hooks reflect your working tree: no reinstall needed after edits:

    1. Launch claude;
    2. Type /plugin and press Enter;
    3. Select Marketplaces → Add Marketplace
    4. Enter the absolute path to the root of your clone, e.g. ~/WORKSPACE/ctx (this is where .claude-plugin/marketplace.json lives: it points Claude Code to the actual plugin in internal/assets/claude);
    5. Back in /plugin, select Install and choose ctx.

    Claude Code Caches Plugin Files

    Even though the marketplace points at a directory on disk, Claude Code caches skills and hooks. After editing files under internal/assets/claude/, clear the cache and restart:

    make plugin-reload   # then restart Claude Code\n

    See Skill or Hook Changes for details.

    ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#4-verify","level":3,"title":"4. Verify","text":"
    ctx --version       # binary is in PATH\nclaude /plugin list # plugin is installed\n

    You should see the ctx plugin listed, sourced from your local path.

    ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#project-layout","level":2,"title":"Project Layout","text":"
    ctx/\n├── cmd/ctx/            # CLI entry point\n├── internal/\n│   ├── assets/claude/  # ← Claude Code plugin (skills, hooks)\n│   ├── bootstrap/      # Project initialization templates\n│   ├── claude/         # Claude Code integration helpers\n│   ├── cli/            # Command implementations\n│   ├── config/         # Configuration loading\n│   ├── context/        # Core context logic\n│   ├── crypto/         # Scratchpad encryption\n│   ├── drift/          # Drift detection\n│   ├── index/          # Context file indexing\n│   ├── journal/        # Session history, parsers, and journal site generation\n│   ├── memory/         # Memory bridge (discover, mirror, import, publish)\n│   ├── notify/         # Webhook notifications\n│   ├── rc/             # .ctxrc parsing\n│   ├── sysinfo/        # System resource monitoring\n│   ├── task/           # Task management\n│   └── validation/     # Input validation\n├── .claude/\n│   └── skills/         # Dev-only skills (not distributed)\n├── assets/             # Static assets (banners, logos)\n├── docs/               # Documentation site source\n├── editors/            # Editor extensions (VS Code)\n├── examples/           # Example configurations\n├── hack/               # Build scripts\n├── specs/              # Feature specifications\n└── .context/           # ctx's own context (dogfooding)\n
    ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#skills-two-directories-one-rule","level":3,"title":"Skills: Two Directories, One Rule","text":"Directory What lives here Distributed to users? internal/assets/claude/skills/ The 39 ctx-* skills that ship with the plugin Yes .claude/skills/ Dev-only skills (release, QA, backup, etc.) No

    internal/assets/claude/skills/ is the single source of truth for user-facing skills. If you are adding or modifying a ctx-* skill, edit it there.

    .claude/skills/ holds skills that only make sense inside this repository (release automation, QA checks, backup scripts). These are never distributed to users.

    ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#dev-only-skills-reference","level":4,"title":"Dev-Only Skills Reference","text":"Skill When to use /_ctx-absorb Merge deltas from a parallel worktree or separate checkout /_ctx-audit Detect code-level drift after YOLO sprints or before releases /_ctx-backup Backup context and Claude data to SMB share /_ctx-qa Run QA checks before committing /_ctx-release Run the full release process /_ctx-release-notes Generate release notes for dist/RELEASE_NOTES.md/_ctx-alignment-audit Audit doc claims against agent instructions /_ctx-update-docs Check docs/code consistency after changes /_ctx-command-audit Audit CLI surface after renames, moves, or deletions

    Five skills previously in this list have been promoted to bundled plugin skills and are now available to all ctx users: /ctx-brainstorm, /ctx-link-check, /ctx-permission-sanitize, /ctx-skill-create, /ctx-spec.

    ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#how-to-add-things","level":2,"title":"How to Add Things","text":"","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#adding-a-new-cli-command","level":3,"title":"Adding a New CLI Command","text":"
    1. Create a package under internal/cli/<name>/ with doc.go, cmd.go, and run.go;
    2. Implement Cmd() *cobra.Command as the entry point;
    3. Add Use* and DescKey* constants in internal/config/embed/cmd/<name>.go;
    4. Add command descriptions in internal/assets/commands/commands.yaml;
    5. Add examples in internal/assets/commands/examples.yaml;
    6. Add flag descriptions in internal/assets/commands/flags.yaml;
    7. Register the command in internal/bootstrap/group.go (add import + entry in the appropriate group function);
    8. Create an output package at internal/write/<name>/ for all user-facing output (see Package Taxonomy);
    9. Create error constructors at internal/err/<name>/ for domain-specific errors;
    10. Add tests in the same package (<name>_test.go);
    11. Add a doc page at docs/cli/<name>.md and update docs/cli/index.md;
    12. Add the page to zensical.toml nav.

    Pattern to follow: internal/cli/pad/pad.go (parent with subcommands) or internal/cli/drift/ (single command).

    ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#package-taxonomy","level":3,"title":"Package Taxonomy","text":"

    ctx separates concerns into a strict package taxonomy. Knowing where things go prevents code review friction and keeps the AST lint tests happy.

    ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#output-internalwrite","level":4,"title":"Output: internal/write/","text":"

    Every CLI command's user-facing output lives in its own sub-package under internal/write/<domain>/. Output functions accept *cobra.Command and call cmd.Println(...), never fmt.Print* directly. All text strings are loaded from YAML via desc.Text(text.DescKey*), never inline.

    internal/write/add/add.go       # output for ctx add\ninternal/write/stat/stat.go     # output for ctx usage\ninternal/write/resource/        # output for ctx sysinfo\n

    Exception: write/rc/ writes to os.Stderr because rc loads before cobra is initialized.

    ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#errors-internalerr","level":4,"title":"Errors: internal/err/","text":"

    Domain-specific error constructors live under internal/err/<domain>/. Each package mirrors the write structure. Functions return error (never custom error types) and load messages from YAML via desc.Text(text.DescKey*).

    internal/err/add/add.go         # errors for ctx add\ninternal/err/config/config.go   # errors for configuration\ninternal/err/cli/cli.go         # errors for CLI argument validation\n
    ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#config-constants-internalconfig","level":4,"title":"Config Constants: internal/config/","text":"

    Pure-constant leaf packages with zero internal dependencies (stdlib only). Over 60 sub-packages, organized by domain. See internal/config/README.md for the full decision tree.

    What you're adding Where it goes File names, extensions, paths config/file/, config/dir/ Regex patterns config/regex/ CLI flag names (--flag-name) config/flag/flag.go Flag description YAML keys config/embed/flag/<cmd>.go Command Use/DescKey strings config/embed/cmd/<cmd>.go User-facing text YAML keys config/embed/text/<domain>.go Time durations, thresholds config/<domain>/","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#the-assets-pipeline","level":4,"title":"The Assets Pipeline","text":"

    User-facing text flows through a three-level chain:

    1. Go constant (config/embed/text/) defines a string key: DescKeyWriteAddedTo = \"write.added-to\"
    2. Call site resolves it: desc.Text(text.DescKeyWriteAddedTo)
    3. YAML (internal/assets/commands/text/write.yaml) holds the actual text: write.added-to: { short: \"Added to %s\" }

    The same pattern applies to command descriptions (commands.yaml), flag descriptions (flags.yaml), and examples (examples.yaml). The TestDescKeyYAMLLinkage test verifies every constant resolves to a non-empty YAML value.

    ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#adding-a-new-session-parser","level":3,"title":"Adding a New Session Parser","text":"

    The journal system uses a SessionParser interface. To add support for a new AI tool (e.g. Aider, Cursor):

    1. Create internal/journal/parser/<tool>.go;
    2. Implement parsing logic that returns []*Session;
    3. Register the parser in FindSessions() / FindSessionsForCWD();
    4. Use config.Tool* constants for the tool identifier;
    5. Add test fixtures and parser tests.

    Pattern to follow: the Claude Code JSONL parser in internal/journal/parser/.

    Multilingual Session Headers

    The Markdown parser recognizes session header prefixes configured via session_prefixes in .ctxrc (default: Session:). To support a new language, users add a prefix to their .ctxrc - no code change needed. New parser implementations can use rc.SessionPrefixes() if they also need prefix-based header detection.

    ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#adding-a-bundled-skill","level":3,"title":"Adding a Bundled Skill","text":"
    1. Create internal/assets/claude/skills/<skill-name>/SKILL.md;
    2. Follow the skill format: trigger, negative triggers, steps, quality gate;
    3. Run make plugin-reload and restart Claude Code to test;
    4. Add a Skill entry to .claude-plugin/plugin.json if user-invocable;
    5. Document in docs/reference/skills.md.

    Pattern to follow: any skill in internal/assets/claude/skills/ctx-status/.

    ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#test-expectations","level":3,"title":"Test Expectations","text":"
    • Unit tests: colocated with source (foo.go → foo_test.go);
    • Test helpers: use t.Helper() so failures point to callers;
    • HOME isolation: use t.TempDir() + t.Setenv(\"HOME\", ...) for tests that touch ~/.claude/ or ~/.ctx/;
    • rc.Reset(): call after os.Chdir in tests that change working directory (rc caches on first access);
    • No network: all tests run offline, use fixtures.

    Run make test before submitting. Target: no failures, no skips.

    ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#day-to-day-workflow","level":2,"title":"Day-to-Day Workflow","text":"","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#go-code-changes","level":3,"title":"Go Code Changes","text":"

    After modifying Go source files, rebuild and reinstall:

    make build && sudo make install\n

    The ctx binary is statically compiled. There is no hot reload. You must rebuild for Go changes to take effect.

    ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#skill-or-hook-changes","level":3,"title":"Skill or Hook Changes","text":"

    Edit files under internal/assets/claude/skills/ or internal/assets/claude/hooks/.

    Claude Code caches plugin files, so edits aren't picked up automatically.

    Clear the cache and restart:

    make plugin-reload   # nukes ~/.claude/plugins/cache/activememory-ctx/\n# then restart Claude Code\n

    The plugin will be re-installed from your local marketplace on startup. No version bump is needed during development.

    Version Bumps Are for Releases, Not Iteration

    Only bump VERSION, plugin.json, and marketplace.json when cutting a release. During development, make plugin-reload is all you need.

    ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#configuration-profiles","level":3,"title":"Configuration Profiles","text":"

    The repo ships two .ctxrc source profiles. The working copy (.ctxrc) is gitignored and swapped between them:

    File Purpose .ctxrc.base Golden baseline: all defaults, no logging .ctxrc.dev Dev profile: notify events enabled, verbose logging .ctxrc Working copy (gitignored: copied from one of the above)

    Use ctx commands to switch:

    ctx config switch dev      # switch to dev profile\nctx config switch base     # switch to base profile\nctx config status          # show which profile is active\n

    After cloning, run ctx config switch dev to get started with full logging.

    See Configuration for the full .ctxrc option reference.

    ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#backups","level":3,"title":"Backups","text":"

    Back up project context and global Claude Code data with:

    ctx backup                    # both project + global (default)\nctx backup --scope project    # .context/, .claude/, ideas/ only\nctx backup --scope global     # ~/.claude/ only\n

    Archives are saved to /tmp/. When CTX_BACKUP_SMB_URL is configured, they are also copied to an SMB share. See CLI Reference: backup for details.

    ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#running-tests","level":3,"title":"Running Tests","text":"
    make test   # fast: all tests\nmake audit  # full: fmt + vet + lint + drift + docs + test\nmake smoke  # build + run basic commands end-to-end\n
    ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#running-the-docs-site-locally","level":3,"title":"Running the Docs Site Locally","text":"
    make site-setup  # one-time: install zensical via pipx\nmake site-serve  # serve at localhost\n
    ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#submitting-changes","level":2,"title":"Submitting Changes","text":"","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#before-you-start","level":3,"title":"Before You Start","text":"
    1. Check existing issues to avoid duplicating effort;
    2. For large changes, open an issue first to discuss the approach;
    3. Read the specs in specs/ for design context.
    ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#pull-request-process","level":3,"title":"Pull Request Process","text":"

    Respect the maintainers' time and energy: Keep your pull requests isolated and strive to minimize code changes.

    If your Pull Request solves more than one distinct issue, it's better to create separate pull requests instead of sending them in one large bundle.

    1. Create a feature branch: git checkout -b feature/my-feature;
    2. Make your changes;
    3. Run make audit to catch issues early;
    4. Commit with a clear message;
    5. Push and open a pull request.

    Audit Your Code Before Submitting

    Run make audit before submitting:

    make audit covers formatting, vetting, linting, drift checks, doc consistency, and tests in one pass.

    ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#commit-messages","level":3,"title":"Commit Messages","text":"

    Following conventional commits is recommended but not required:

    Types: feat, fix, docs, test, refactor, chore

    Examples:

    • feat(cli): add ctx export command
    • fix(drift): handle missing files gracefully
    • docs: update installation instructions
    ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#code-style","level":3,"title":"Code Style","text":"
    • Follow Go conventions (gofmt, go vet);
    • Keep functions focused and small;
    • Add tests for new functionality;
    • Handle errors explicitly; use descriptive names (readErr, writeErr) not repeated err;
    • No magic strings: all repeated literals go in internal/config/;
    • Output goes through internal/write/ packages, not fmt.Print*;
    • Errors go through internal/err/ constructors, not inline fmt.Errorf;
    • See Package Taxonomy and .context/CONVENTIONS.md for the full reference.
    ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#code-of-conduct","level":2,"title":"Code of Conduct","text":"

    A clear context requires respectful collaboration.

    ctx follows the Contributor Covenant.

    ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#boring-legal-stuff","level":2,"title":"Boring Legal Stuff","text":"","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#developer-certificate-of-origin-dco","level":3,"title":"Developer Certificate of Origin (DCO)","text":"

    By contributing, you agree to the Developer Certificate of Origin.

    All commits must be signed off:

    git commit -s -m \"feat: add new feature\"\n
    ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#license","level":3,"title":"License","text":"

    Contributions are licensed under the Apache 2.0 License.

    ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/faq/","level":1,"title":"FAQ","text":"","path":["Home","Introduction","FAQ"],"tags":[]},{"location":"home/faq/#why-markdown","level":2,"title":"Why Markdown?","text":"

    Markdown is human-readable, version-controllable, and tool-agnostic. Every AI model can parse it natively. Every developer can read it in a terminal, a browser, or a code review. There's no schema to learn, no binary format to decode, no vendor lock-in. You can inspect your context with cat, diff it with git diff, and review it in a PR.

    ","path":["Home","Introduction","FAQ"],"tags":[]},{"location":"home/faq/#does-ctx-work-offline","level":2,"title":"Does ctx Work Offline?","text":"

    Yes. ctx is completely local. It reads and writes files on disk, generates context packets from local state, and requires no network access. The only feature that touches the network is the optional webhook notifications hook, which you have to explicitly configure.

    ","path":["Home","Introduction","FAQ"],"tags":[]},{"location":"home/faq/#what-gets-committed-to-git","level":2,"title":"What Gets Committed to Git?","text":"

    The .context/ directory: yes, commit it. That's the whole point. Team members and AI agents read the same context files.

    What not to commit:

    • .ctx.key: your encryption key. Stored at ~/.ctx/.ctx.key, never in the repo. ctx init handles this automatically.
    • journal/ and logs/: generated data, potentially large. ctx init adds these to .gitignore.
    • scratchpad.enc: your choice. It's encrypted, so it's safe to commit if you want shared scratchpad state. See Scratchpad for details.
    ","path":["Home","Introduction","FAQ"],"tags":[]},{"location":"home/faq/#how-big-should-my-token-budget-be","level":2,"title":"How Big Should My Token Budget Be?","text":"

    The default is 8000 tokens, which works well for most projects. Configure it via .ctxrc or the CTX_TOKEN_BUDGET environment variable:

    # In .ctxrc\ntoken_budget = 12000\n\n# Or as an environment variable\nexport CTX_TOKEN_BUDGET=12000\n\n# Or per-invocation\nctx agent --budget 4000\n

    Higher budgets include more context but cost more tokens per request. Lower budgets force sharper prioritization: ctx drops lower-priority content first, so CONSTITUTION and TASKS always make the cut.

    See Configuration for all available settings.

    ","path":["Home","Introduction","FAQ"],"tags":[]},{"location":"home/faq/#why-not-a-database","level":2,"title":"Why Not a Database?","text":"

    Files are inspectable, diffable, and reviewable in pull requests. You can grep them, cat them, pipe them through jq or awk. They work with every version control system and every text editor.

    A database would add a dependency, require migrations, and make context opaque. The design bet is that context should be as visible and portable as the code it describes.

    ","path":["Home","Introduction","FAQ"],"tags":[]},{"location":"home/faq/#does-it-work-with-tools-other-than-claude-code","level":2,"title":"Does It Work with Tools Other than Claude Code?","text":"

    Yes. ctx agent outputs a context packet that any AI tool can consume: paste it into ChatGPT, Cursor, Copilot, Aider, or anything else that accepts text input.

    Claude Code gets first-class integration via the ctx plugin (hooks, skills, automatic context loading). VS Code Copilot Chat has a dedicated ctx extension. Other tools integrate via generated instruction files or manual pasting.

    See Integrations for tool-specific setup, including the multi-tool recipe.

    ","path":["Home","Introduction","FAQ"],"tags":[]},{"location":"home/faq/#can-i-use-ctx-on-an-existing-project","level":2,"title":"Can I Use ctx on an Existing Project?","text":"

    Yes. Run ctx init in any repo and it creates .context/ with template files. Start recording decisions, tasks, and conventions as you work. Context grows naturally; you don't need to backfill everything on day one.

    See Getting Started for the full setup flow, or Joining a ctx Project if someone else already initialized it.

    ","path":["Home","Introduction","FAQ"],"tags":[]},{"location":"home/faq/#what-happens-when-context-files-get-too-big","level":2,"title":"What Happens When Context Files Get Too Big?","text":"

    Token budgeting handles this automatically. ctx agent prioritizes content by file priority (CONSTITUTION first, GLOSSARY last) and trims lower-priority entries when the budget is tight.

    For manual maintenance, ctx compact archives completed tasks and old entries, keeping active context lean. You can also run ctx task archive to move completed tasks out of TASKS.md.

    The goal is to keep context files focused on current state. Historical entries belong in git history or the archive.

    ","path":["Home","Introduction","FAQ"],"tags":[]},{"location":"home/faq/#is-context-meant-to-be-shared","level":2,"title":"Is .context/ Meant to Be Shared?","text":"

    Yes. Commit it to your repo. Every team member and every AI agent reads the same files. That's the mechanism for shared memory: decisions made in one session are visible in the next, regardless of who (or what) starts it.

    The only per-user state is the encryption key (~/.ctx/.ctx.key) and the optional scratchpad. Everything else is team-shared by design.

    Related:

    • Getting Started - installation and first setup
    • Configuration - .ctxrc, environment variables, and defaults
    • Context Files - what each file does and how to use it
    ","path":["Home","Introduction","FAQ"],"tags":[]},{"location":"home/first-session/","level":1,"title":"Your First Session","text":"

    Here's what a complete first session looks like, from initialization to the moment your AI cites your project context back to you.

    ","path":["Home","Get Started","Your First Session"],"tags":[]},{"location":"home/first-session/#step-1-initialize-your-project","level":2,"title":"Step 1: Initialize Your Project","text":"

    Run ctx init in your project root:

    cd your-project\nctx init\n

    Sample output:

    Context initialized in .context/\n\n  ✓ CONSTITUTION.md\n  ✓ TASKS.md\n  ✓ DECISIONS.md\n  ✓ LEARNINGS.md\n  ✓ CONVENTIONS.md\n  ✓ ARCHITECTURE.md\n  ✓ GLOSSARY.md\n  ✓ AGENT_PLAYBOOK.md\n\nSetting up encryption key...\n  ✓ ~/.ctx/.ctx.key\n\nClaude Code plugin (hooks + skills):\n  Install: claude /plugin marketplace add ActiveMemory/ctx\n  Then:    claude /plugin install ctx@activememory-ctx\n\nNext steps:\n  1. Edit .context/TASKS.md to add your current tasks\n  2. Run 'ctx status' to see context summary\n  3. Run 'ctx agent' to get AI-ready context packet\n

    This created your .context/ directory with template files.

    For Claude Code, install the ctx plugin to get automatic hooks and skills.

    ","path":["Home","Get Started","Your First Session"],"tags":[]},{"location":"home/first-session/#step-2-populate-your-context","level":2,"title":"Step 2: Populate Your Context","text":"

    Add a task and a decision: These are the entries your AI will remember:

    ctx add task \"Implement user authentication\" \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n\n# Output: ✓ Added to TASKS.md\n\nctx add decision \"Use PostgreSQL for primary database\" \\\n  --context \"Need a reliable database for production\" \\\n  --rationale \"PostgreSQL offers ACID compliance and JSON support\" \\\n  --consequence \"Team needs PostgreSQL training\" \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n\n# Output: ✓ Added to DECISIONS.md\n

    These entries are what the AI will recall in future sessions. You don't need to populate everything now: Context grows naturally as you work.

    ","path":["Home","Get Started","Your First Session"],"tags":[]},{"location":"home/first-session/#step-3-check-your-context","level":2,"title":"Step 3: Check Your Context","text":"
    ctx status\n

    Sample output:

    Context Status\n====================\n\nContext Directory: .context/\nTotal Files: 8\nToken Estimate: 1,247 tokens\n\nFiles:\n  ✓ CONSTITUTION.md (loaded)\n  ✓ TASKS.md (1 items)\n  ✓ DECISIONS.md (1 items)\n  ○ LEARNINGS.md (empty)\n  ✓ CONVENTIONS.md (loaded)\n  ✓ ARCHITECTURE.md (loaded)\n  ✓ GLOSSARY.md (loaded)\n  ✓ AGENT_PLAYBOOK.md (loaded)\n\nRecent Activity:\n  - TASKS.md modified 2 minutes ago\n  - DECISIONS.md modified 1 minute ago\n

    Notice the token estimate: This is how much context your AI will load.

    The ○ marker next to LEARNINGS.md means it's still empty; it will fill in as you capture lessons during development.

    ","path":["Home","Get Started","Your First Session"],"tags":[]},{"location":"home/first-session/#step-4-start-an-ai-session","level":2,"title":"Step 4: Start an AI Session","text":"

    With Claude Code (and the ctx plugin), start every session with:

    /ctx-remember\n

    This loads your context and presents a structured readback so you can confirm the agent knows what is going on. Context also loads automatically via hooks, but the explicit ceremony gives you a readback to verify.

    Steering Files Fire Automatically

    If you edited the four foundation files scaffolded by ctx init (.context/steering/product.md, tech.md, structure.md, workflow.md), their inclusion: always rules are prepended to every tool call via the plugin's PreToolUse hook, with no /ctx-remember needed, no MCP call. Edit a file, save, and the next tool call in Claude Code picks it up. See Steering files for details on the inclusion modes.

    Using VS Code?

    With VS Code Copilot Chat (and the ctx extension), type @ctx /agent in chat to load your context packet, or @ctx /status to check your project context. Run ctx setup copilot --write once to generate .github/copilot-instructions.md for automatic context loading.

    If you are not using Claude Code, generate a context packet for your AI tool:

    ctx agent --budget 8000\n

    Sample output:

    # Context Packet\nGenerated: 2026-02-14T15:30:45Z | Budget: 8000 tokens | Used: ~2450\n\n## Read These Files (in order)\n1. .context/CONSTITUTION.md\n2. .context/TASKS.md\n3. .context/CONVENTIONS.md\n...\n\n## Current Tasks\n- [ ] Implement user authentication\n- [ ] Add rate limiting to API endpoints\n\n## Key Conventions\n- Use gofmt for formatting\n- Path construction uses filepath.Join\n\n## Recent Decisions\n## [2026-02-14-120000] Use PostgreSQL for the primary database\n\n**Context**: Evaluated PostgreSQL, MySQL, and SQLite...\n**Rationale**: PostgreSQL offers better JSON support...\n\n## Key Learnings\n## [2026-02-14-100000] Connection pool sizing matters\n\n**Context**: Hit connection limits under load...\n**Lesson**: Default pool size of 10 is too low for concurrent requests...\n\n## Also Noted\n- Use JWT for session management\n- Always validate input at API boundary\n

    Paste this output into your AI tool's system prompt or conversation start.

    ","path":["Home","Get Started","Your First Session"],"tags":[]},{"location":"home/first-session/#step-5-verify-it-works","level":2,"title":"Step 5: Verify It Works","text":"

    Ask your AI: \"What are our current tasks?\"

    A working setup produces a response like:

    Based on the project context, you have one active task:\n\n- **Implement user authentication** (pending)\n\nThere's also a recent architectural decision to **use PostgreSQL for\nthe primary database**, chosen for its ACID compliance and JSON support.\n\nWant me to start on the authentication task?\n

    That's the success moment:

    The AI is citing your exact context entries from Step 2, not hallucinating or asking you to re-explain.

    ","path":["Home","Get Started","Your First Session"],"tags":[]},{"location":"home/first-session/#what-gets-created","level":2,"title":"What Gets Created","text":"
    .context/\n├── CONSTITUTION.md     # Hard rules: NEVER violate these\n├── TASKS.md            # Current and planned work\n├── CONVENTIONS.md      # Project patterns and standards\n├── ARCHITECTURE.md     # System overview\n├── DECISIONS.md        # Architectural decisions with rationale\n├── LEARNINGS.md        # Lessons learned, gotchas, tips\n├── GLOSSARY.md         # Domain terms and abbreviations\n└── AGENT_PLAYBOOK.md   # How AI tools should use this\n

    Claude Code integration (hooks + skills) is provided by the ctx plugin: See Integrations/Claude Code.

    VS Code Copilot Chat integration is provided by the ctx extension: See Integrations/VS Code.

    See Context Files for detailed documentation of each file.

    ","path":["Home","Get Started","Your First Session"],"tags":[]},{"location":"home/first-session/#what-to-gitignore","level":2,"title":"What to .gitignore","text":"

    Rule of Thumb

    • If it's knowledge (decisions, tasks, learnings, conventions), commit it.
    • If it's generated output, raw session data, or a secret, .gitignore it.

    Commit your .context/ knowledge files: that's the whole point.

    You should .gitignore the generated and sensitive paths:

    # Journal data (large, potentially sensitive)\n.context/journal/\n.context/journal-site/\n.context/journal-obsidian/\n\n# Hook logs (machine-specific)\n.context/logs/\n\n# Legacy encryption key path (copy to ~/.ctx/.ctx.key if needed)\n.context/.ctx.key\n\n# Claude Code local settings (machine-specific)\n.claude/settings.local.json\n

    ctx init Patches Your .gitignore for You

    ctx init automatically adds these entries to your .gitignore.

    Review the additions with cat .gitignore after init.

    See also:

    • Security Considerations
    • Scratchpad Encryption
    • Session Journal

    Next Up: Common Workflows →: day-to-day commands for tracking context, checking health, and browsing history.

    ","path":["Home","Get Started","Your First Session"],"tags":[]},{"location":"home/getting-started/","level":1,"title":"Getting Started","text":"","path":["Home","Get Started","Getting Started"],"tags":[]},{"location":"home/getting-started/#prerequisites","level":2,"title":"Prerequisites","text":"

    ctx does not require git, but using version control with your .context/ directory is strongly recommended:

    AI sessions occasionally modify or overwrite context files inadvertently. With git, the AI can check history and restore lost content: Without it, the data is gone.

    Also, several ctx features (journal changelog, blog generation) use git history directly.

    ","path":["Home","Get Started","Getting Started"],"tags":[]},{"location":"home/getting-started/#installation","level":2,"title":"Installation","text":"

    Every setup starts with the ctx binary: the CLI tool itself.

    If you use Claude Code, you also install the ctx plugin, which adds hooks (context autoloading, persistence nudges) and 25+ /ctx-* skills. For other AI tools, ctx integrates via generated instruction files or manual context pasting: see Integrations for tool-specific setup.

    Pick one of the options below to install the binary. Claude Code users should also follow the plugin steps included in each option.

    ","path":["Home","Get Started","Getting Started"],"tags":[]},{"location":"home/getting-started/#option-1-build-from-source-recommended","level":3,"title":"Option 1: Build from Source (Recommended)","text":"

    Requires Go (version defined in go.mod) and Claude Code.

    git clone https://github.com/ActiveMemory/ctx.git\ncd ctx\nmake build\nsudo make install\n

    Install the Claude Code plugin from your local clone:

    1. Launch claude;
    2. Type /plugin and press Enter;
    3. Select Marketplaces → Add Marketplace
    4. Enter the path to the root of your clone, e.g. ~/WORKSPACE/ctx (this is where .claude-plugin/marketplace.json lives: It points Claude Code to the actual plugin in internal/assets/claude)
    5. Back in /plugin, select Install and choose ctx

    This points Claude Code at the plugin source on disk. Changes you make to hooks or skills take effect immediately: No reinstall is needed.

    Local Installs Need Manual Enablement

    Unlike marketplace installs, local plugin installs are not auto-enabled globally. The plugin will only work in projects that explicitly enable it. Run ctx init in each project (it auto-enables the plugin), or add the entry to ~/.claude/settings.json manually:

    { \"enabledPlugins\": { \"ctx@activememory-ctx\": true } }\n

    Verify:

    ctx --version       # binary is in PATH\nclaude /plugin list # plugin is installed\n

    Use the Source, Luke

    Building from source gives you the latest features and bug fixes.

    Since ctx is predominantly a developer tool, this is the recommended approach:

    You get the freshest code, can inspect what you are installing, and the plugin stays in sync with the binary.

    ","path":["Home","Get Started","Getting Started"],"tags":[]},{"location":"home/getting-started/#option-2-binary-download-marketplace","level":3,"title":"Option 2: Binary Download + Marketplace","text":"

    Pre-built binaries are available from the releases page.

    Linux (x86_64)Linux (ARM64)macOS (Apple Silicon)macOS (Intel)Windows
    curl -LO https://github.com/ActiveMemory/ctx/releases/download/v0.8.1/ctx-0.8.1-linux-amd64\nchmod +x ctx-0.8.1-linux-amd64\nsudo mv ctx-0.8.1-linux-amd64 /usr/local/bin/ctx\n
    curl -LO https://github.com/ActiveMemory/ctx/releases/download/v0.8.1/ctx-0.8.1-linux-arm64\nchmod +x ctx-0.8.1-linux-arm64\nsudo mv ctx-0.8.1-linux-arm64 /usr/local/bin/ctx\n
    curl -LO https://github.com/ActiveMemory/ctx/releases/download/v0.8.1/ctx-0.8.1-darwin-arm64\nchmod +x ctx-0.8.1-darwin-arm64\nsudo mv ctx-0.8.1-darwin-arm64 /usr/local/bin/ctx\n
    curl -LO https://github.com/ActiveMemory/ctx/releases/download/v0.8.1/ctx-0.8.1-darwin-amd64\nchmod +x ctx-0.8.1-darwin-amd64\nsudo mv ctx-0.8.1-darwin-amd64 /usr/local/bin/ctx\n

    Download ctx-0.8.1-windows-amd64.exe from the releases page and add it to your PATH.

    Claude Code users: install the plugin from the marketplace:

    1. Launch claude;
    2. Type /plugin and press Enter;
    3. Select Marketplaces → Add Marketplace;
    4. Enter ActiveMemory/ctx;
    5. Back in /plugin, select Install and choose ctx.

    Other tool users: see Integrations for tool-specific setup (Cursor, Copilot, Aider, Windsurf, etc.).

    Verify the Plugin Is Enabled

    After installing, confirm the plugin is enabled globally. Check ~/.claude/settings.json for an enabledPlugins entry. If missing, run ctx init in your project (it auto-enables the plugin), or add it manually:

    { \"enabledPlugins\": { \"ctx@activememory-ctx\": true } }\n

    Verify:

    ctx --version       # binary is in PATH\nclaude /plugin list # plugin is installed (Claude Code only)\n
    ","path":["Home","Get Started","Getting Started"],"tags":[]},{"location":"home/getting-started/#verifying-checksums","level":4,"title":"Verifying Checksums","text":"

    Each binary has a corresponding .sha256 checksum file. To verify your download:

    # Download the checksum file\ncurl -LO https://github.com/ActiveMemory/ctx/releases/download/v0.8.1/ctx-0.8.1-linux-amd64.sha256\n\n# Verify the binary\nsha256sum -c ctx-0.8.1-linux-amd64.sha256\n

    On macOS, use shasum -a 256 -c instead of sha256sum -c.

    Plugin Details

    After installation (either option) you get:

    • Context autoloading: ctx agent runs on every tool use (with cooldown)
    • Persistence nudges: reminders to capture learnings and decisions
    • Post-commit hooks: nudge context capture after git commit
    • Context size monitoring: alerts as sessions grow large
    • Project skills: /ctx-status, /ctx-task-add, /ctx-history, and more

    See Integrations for the full hook and skill reference.

    ","path":["Home","Get Started","Getting Started"],"tags":[]},{"location":"home/getting-started/#quick-start","level":2,"title":"Quick Start","text":"","path":["Home","Get Started","Getting Started"],"tags":[]},{"location":"home/getting-started/#1-initialize-context","level":3,"title":"1. Initialize Context","text":"
    cd your-project\nctx init\n

    This creates a .context/ directory with template files and an encryption key at ~/.ctx/ for the encrypted scratchpad. For Claude Code, install the ctx plugin for automatic hooks and skills.

    ctx init also scaffolds four foundation steering files in .context/steering/; these are behavioral-rule templates that tell your AI how to act on your project:

    File What it captures product.md Product context, goals, and target users tech.md Technology stack, constraints, key dependencies structure.md Project structure and directory conventions workflow.md Development workflow and process rules

    Each file starts with a self-documenting HTML comment explaining the three inclusion modes (always / auto / manual), priority, and tool scoping. The defaults are set to inclusion: always and priority: 10, so they fire on every AI tool call until you edit them.

    You should open each of these files and replace the placeholder content with your project's actual rules. Running ctx init again won't clobber your edits; existing files are left alone. To opt out entirely, use ctx init --no-steering-init.

    See Writing Steering Files for the full walkthrough, or ctx steering for the command reference.

    ","path":["Home","Get Started","Getting Started"],"tags":[]},{"location":"home/getting-started/#2-check-status","level":3,"title":"2. Check Status","text":"
    ctx status\n

    Shows context summary: files present, token estimate, and recent activity.

    ","path":["Home","Get Started","Getting Started"],"tags":[]},{"location":"home/getting-started/#3-start-using-with-ai","level":3,"title":"3. Start Using with AI","text":"

    With Claude Code (and the ctx plugin installed), context loads automatically via hooks.

    With VS Code Copilot Chat, install the ctx extension and use @ctx /status, @ctx /agent, and other slash commands directly in chat. Run ctx setup copilot --write to generate .github/copilot-instructions.md for automatic context loading.

    For other tools, paste the output of:

    ctx agent --budget 8000\n
    ","path":["Home","Get Started","Getting Started"],"tags":[]},{"location":"home/getting-started/#3b-set-up-for-your-ai-tool","level":3,"title":"3B. Set Up for Your AI Tool","text":"

    If you use an MCP-compatible tool, generate the integration config with ctx setup:

    KiroCursorCline
    ctx setup kiro --write\n# Creates .kiro/settings/mcp.json and syncs steering files\n
    ctx setup cursor --write\n# Creates .cursor/mcp.json and syncs steering files\n
    ctx setup cline --write\n# Creates .vscode/mcp.json and syncs steering files\n

    This registers the ctx MCP server and syncs any steering files into the tool's native format. Re-run after adding or changing steering files.

    ","path":["Home","Get Started","Getting Started"],"tags":[]},{"location":"home/getting-started/#4-verify-it-works","level":3,"title":"4. Verify It Works","text":"

    Ask your AI: \"Do you remember?\"

    It should cite specific context: current tasks, recent decisions, or previous session topics.

    ","path":["Home","Get Started","Getting Started"],"tags":[]},{"location":"home/getting-started/#5-set-up-companion-tools-highly-recommended","level":3,"title":"5. Set Up Companion Tools (Highly Recommended)","text":"

    ctx works on its own, but two companion MCP servers unlock significantly better agent behavior. The investment is small and the benefits compound over sessions:

    • Gemini Search: grounded web search with citations. Skills like /ctx-code-review and /ctx-explain use it for up-to-date documentation lookups instead of relying on training data.
    • GitNexus: code knowledge graph with symbol resolution, blast radius analysis, and domain clustering. Skills like /ctx-refactor and /ctx-code-review use it for impact analysis and dependency awareness.

    # Index your project for GitNexus (run once, then after major changes)\nnpx gitnexus analyze\n

    Both are optional MCP servers: if they are not connected, skills degrade gracefully to built-in capabilities. See Companion Tools for setup details and verification.

    Next Up:

    • Your First Session →: a step-by-step walkthrough from ctx init to verified recall
    • Common Workflows →: day-to-day commands for tracking context, checking health, and browsing history
    ","path":["Home","Get Started","Getting Started"],"tags":[]},{"location":"home/hub/","level":1,"title":"Hub","text":"","path":["Home","Concepts","Hub"],"tags":[]},{"location":"home/hub/#sharing-is-caring","level":2,"title":"Sharing Is Caring","text":"

    ctx projects are normally independent: each project has its own .context/ directory, its own decisions, its own learnings, its own journal. That's the right default, since most work is project-local, and mixing context across projects tends to dilute more than it helps.

    But sometimes a decision or a learning should cross project boundaries. A convention you codified in one project deserves to be visible in another. A gotcha you discovered debugging service A is the same gotcha waiting for you in service B. The ctx Hub is the feature that makes those specific entries travel, without replicating everything else.

    ","path":["Home","Concepts","Hub"],"tags":[]},{"location":"home/hub/#what-the-hub-actually-is","level":2,"title":"What the Hub Actually Is","text":"

    In one paragraph: the ctx Hub is a fan-out channel for four specific kinds of structured entries: decision, learning, convention, and task. You publish an entry with ctx add --share in one project, and it appears in .context/hub/ for every other project subscribed to that type. When you run ctx agent --include-hub, those shared entries become part of your next agent context packet.

    That is the entire feature. The Hub does not:

    • Share your session journal (.context/journal/). That stays local to each project.
    • Share your scratchpad (.context/pad). Encrypted notes never leave the machine that created them.
    • Share your TASKS.md, DECISIONS.md, LEARNINGS.md, or CONVENTIONS.md wholesale. Only entries you explicitly --share cross the boundary.
    • Provide user identity or attribution. The Hub identifies projects, not people.

    If you want \"my agent in project B sees everything my agent did in project A,\" that's not the Hub. Local session density stays local.

    ","path":["Home","Concepts","Hub"],"tags":[]},{"location":"home/hub/#who-its-for","level":2,"title":"Who It's For","text":"

    Two shapes, same mechanics, different trust models.

    ","path":["Home","Concepts","Hub"],"tags":[]},{"location":"home/hub/#personal-cross-project-brain","level":3,"title":"Personal Cross-Project Brain","text":"

    One developer, many projects. You want a learning from project A to show up when you open project B a week later. You want a convention you codified in your dotfiles project to be visible everywhere else on your workstation. Run a Hub on localhost, register each project, done.

    ","path":["Home","Concepts","Hub"],"tags":[]},{"location":"home/hub/#small-trusted-team","level":3,"title":"Small Trusted Team","text":"

    A few teammates on a LAN or a hub.ctx-like self-hosted server. You want team conventions to propagate without a wiki. You want lessons from one on-call engineer's 3 AM incident to reach everyone else's agent on the next session. Same mechanics as the personal case, plus TLS in front and a short security runbook.

    The Hub is not a multi-tenant public service. It assumes everyone holding a client token is friendly. Don't stand up hub.example.com for untrusted participants.

    ","path":["Home","Concepts","Hub"],"tags":[]},{"location":"home/hub/#going-further","level":2,"title":"Going Further","text":"
    • First-time setup: Hub: Getting Started, a five-minute walkthrough on localhost.
    • Mental model and user stories: Hub Overview, what flows, what doesn't, and when not to use it.
    • Team / LAN deployment: Multi-machine setup.
    • Redundancy: HA cluster.
    • Operating a Hub: Hub Operations and Hub Failure Modes.
    • Security posture: Hub Security Model.
    • Command reference: ctx serve, ctx connect, ctx hub.
    ","path":["Home","Concepts","Hub"],"tags":[]},{"location":"home/is-ctx-right/","level":1,"title":"Is It Right for Me?","text":"","path":["Home","Introduction","Is It Right for Me?"],"tags":[]},{"location":"home/is-ctx-right/#good-fit","level":2,"title":"Good Fit","text":"

    ctx shines when context matters more than code.

    If any of these sound like your project, it's worth trying:

    • Multi-session AI work: You use AI across many sessions on the same codebase, and re-explaining is slowing you down.
    • Architectural decisions that matter: Your project has non-obvious choices (database, auth strategy, API design) that the AI keeps second-guessing.
    • \"Why\" matters as much as \"what\": you need the AI to understand rationale, not just current code
    • Team handoffs: Multiple people (or multiple AI tools) work on the same project and need shared context.
    • AI-assisted development across tools: You switch between Claude Code, Cursor, Copilot, or other tools and want context to follow the project, not the tool.
    • Long-lived projects: Anything you'll work on for weeks or months, where accumulated knowledge has compounding value.
    ","path":["Home","Introduction","Is It Right for Me?"],"tags":[]},{"location":"home/is-ctx-right/#may-not-be-the-right-fit","level":2,"title":"May Not Be the Right Fit","text":"

    ctx adds overhead that isn't worth it for every project. Be honest about when to skip it:

    • One-off scripts: If the project is a single file you'll finish today, there's nothing to remember.
    • RAG-only workflows: If retrieval from an external knowledge base already gives the agent everything it needs for each session, adding ctx may be unnecessary. RAG retrieves information; ctx defines the project's working memory: They are complementary.
    • No AI involvement: ctx is designed for human-AI workflows; without an AI consumer, the files are just documentation.
    • Enterprise-managed context platforms: If your organization provides centralized context services, ctx may duplicate that layer.

    For a deeper technical comparison with RAG, prompt management tools, and agent frameworks, see ctx and Similar Tools.

    ","path":["Home","Introduction","Is It Right for Me?"],"tags":[]},{"location":"home/is-ctx-right/#project-size-guide","level":2,"title":"Project Size Guide","text":"","path":["Home","Introduction","Is It Right for Me?"],"tags":[]},{"location":"home/is-ctx-right/#solo-developer-single-repo","level":3,"title":"Solo Developer, Single Repo","text":"

    This is ctx's sweet spot.

    You get the most value here: one person, one project, decisions, and learnings accumulating over time. Setup takes 5 minutes, the .context/ directory stays small, and every session gets faster.

    ","path":["Home","Introduction","Is It Right for Me?"],"tags":[]},{"location":"home/is-ctx-right/#small-team-one-or-two-repos","level":3,"title":"Small Team, One or Two Repos","text":"

    Works well.

    Context files commit to git, so the whole team shares the same decisions and conventions. Each person's AI starts with the team's decisions already loaded. Merge conflicts on .context/ files are rare and easy to resolve (they are just Markdown).

    ","path":["Home","Introduction","Is It Right for Me?"],"tags":[]},{"location":"home/is-ctx-right/#multiple-repos-or-larger-teams","level":3,"title":"Multiple Repos or Larger Teams","text":"

    ctx operates per repository.

    Each repo has its own .context/ directory with its own decisions, tasks, and learnings. This matches the way code, ownership, and history already work in git.

    There is no built-in cross-repo context layer.

    For organizations that need centralized, organization-wide knowledge, ctx complements a platform solution by providing durable, project-local working memory for AI sessions.

    ","path":["Home","Introduction","Is It Right for Me?"],"tags":[]},{"location":"home/is-ctx-right/#5-minute-trial","level":2,"title":"5-Minute Trial","text":"

    Zero commitment. Try it, and delete .context/ if it's not for you.

    Using Claude Code?

    Install the ctx plugin from the Marketplace for Claude-native hooks, skills, and automatic context loading:

    1. Type /plugin and press Enter
    2. Select Marketplaces → Add Marketplace
    3. Enter ActiveMemory/ctx
    4. Back in /plugin, select Install and choose ctx

    You'll still need the ctx binary for the CLI: See Getting Started for install options.

    # 1. Initialize\ncd your-project\nctx init\n\n# 2. Add one real decision from your project\nctx add decision \"Your actual architectural choice\" \\\n  --context \"What prompted this decision\" \\\n  --rationale \"Why you chose this approach\" \\\n  --consequence \"What changes as a result\" \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n\n# 3. Check what the AI will see\nctx status\n\n# 4. Start an AI session and ask: \"Do you remember?\"\n

    If the AI cites your decision back to you, it's working.

    Want to remove it later? One command:

    rm -rf .context/\n

    No dependencies to uninstall. No configuration to revert. Just files.

    Ready to try it out?

    • Join the Community→: Open Source is better together.
    • Getting Started →: Full installation and setup.
    • ctx and Similar Tools →: Detailed comparison with other approaches.
    ","path":["Home","Introduction","Is It Right for Me?"],"tags":[]},{"location":"home/joining-a-project/","level":1,"title":"Joining a Project","text":"

    You've joined a team or inherited a project, and there's a .context/ directory in the repo. Good news: someone already set up persistent context. This page gets you oriented fast.

    ","path":["Home","Working with AI","Joining a Project"],"tags":[]},{"location":"home/joining-a-project/#what-to-read-first","level":2,"title":"What to Read First","text":"

    The files in .context/ have a deliberate priority order. Read them top-down:

    1. CONSTITUTION.md: Hard rules. Read this before you touch anything. These are inviolable constraints the team has agreed on.
    2. TASKS.md: Current and planned work. Shows what's in progress, what's pending, and what's blocked.
    3. CONVENTIONS.md: How the team writes code. Naming patterns, file organization, preferred idioms.
    4. ARCHITECTURE.md: System overview. Components, boundaries, data flow.
    5. DECISIONS.md: Why things are the way they are. Saves you from re-proposing something the team already evaluated and rejected.
    6. LEARNINGS.md: Gotchas, tips, and hard-won lessons. The stuff that doesn't fit anywhere else but will save you hours.

    See Context Files for detailed documentation of each file's structure and purpose.

    ","path":["Home","Working with AI","Joining a Project"],"tags":[]},{"location":"home/joining-a-project/#checking-context-health","level":2,"title":"Checking Context Health","text":"

    Before you start working, check whether the context is current:

    ctx status\n

    This shows file counts, token estimates, and recent activity. If files haven't been touched in weeks, the context may be stale.

    ctx drift\n

    This compares context files against recent code changes and flags potential drift: decisions that no longer match the codebase, conventions that have shifted, or tasks that look outdated.

    If things are stale, mention it to the team. Don't silently fix it yourself on day one.

    ","path":["Home","Working with AI","Joining a Project"],"tags":[]},{"location":"home/joining-a-project/#starting-your-first-session","level":2,"title":"Starting Your First Session","text":"

    Generate a context packet to prime your AI:

    ctx agent --budget 8000\n

    This outputs a token-budgeted summary of the project context, ordered by priority. With Claude Code and the ctx plugin, context loads automatically via hooks. You can also use the /ctx-remember skill to get a structured readback of what the AI knows.

    The readback is your verification step: if the AI can cite specific tasks and decisions, the context is working.

    ","path":["Home","Working with AI","Joining a Project"],"tags":[]},{"location":"home/joining-a-project/#adding-context","level":2,"title":"Adding Context","text":"

    As you work, you'll discover things worth recording. Use the CLI:

    # Record a decision you made or learned about\nctx add decision \"Use connection pooling for DB access\" \\\n  --rationale \"Reduces connection overhead under load\" \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n\n# Capture a gotcha you hit\nctx add learning \"Redis timeout defaults to 5s\" \\\n  --context \"Hit timeouts during bulk operations\" \\\n  --application \"Set explicit timeout for batch jobs\" \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n\n# Add a convention you noticed the team follows\nctx add convention \"All API handlers return structured errors\"\n

    You can also just tell the AI: \"Record this as a learning\" or \"Add this decision to context.\" With the ctx plugin, context-update commands handle the file writes.

    See the Knowledge Capture recipe for the full workflow.

    ","path":["Home","Working with AI","Joining a Project"],"tags":[]},{"location":"home/joining-a-project/#session-etiquette","level":2,"title":"Session Etiquette","text":"

    A few norms for working in a ctx-managed project:

    • Respect existing conventions. If CONVENTIONS.md says \"use filepath.Join,\" use filepath.Join. If you disagree, propose a change, don't silently diverge.
    • Don't restructure context files without asking. The file layout and section structure are shared state. Reorganizing them affects every team member and every AI session.
    • Mark tasks done when complete. Check the box ([x]) in place. Don't move tasks between sections or delete them.
    • Add context as you go. Decisions, learnings, and conventions you discover are valuable to the next person (or the next session).
    ","path":["Home","Working with AI","Joining a Project"],"tags":[]},{"location":"home/joining-a-project/#common-pitfalls","level":2,"title":"Common Pitfalls","text":"

    Ignoring CONSTITUTION.md. The constitution exists for a reason. If a task conflicts with a constitution rule, the task is wrong. Raise it with the team instead of working around the constraint.

    Deleting tasks. Never delete a task from TASKS.md. Mark it [x] (done) or [-] (skipped with a reason). The history matters for session replay and audit.

    Bypassing hooks. If the project uses ctx hooks (pre-commit nudges, context autoloading), don't disable them. They exist to keep context fresh. If a hook is noisy or broken, fix it or file a task.

    Over-contributing on day one. Read first, then contribute. Adding a dozen learnings before you understand the project's norms creates noise, not signal.

    Related:

    • Getting Started: installation and setup from scratch
    • Context Files: detailed file reference
    • Knowledge Capture: recording decisions, learnings, and conventions
    • Session Lifecycle: how a typical AI session flows with ctx
    ","path":["Home","Working with AI","Joining a Project"],"tags":[]},{"location":"home/keeping-ai-honest/","level":1,"title":"Keeping AI Honest","text":"","path":["Home","Working with AI","Keeping AI Honest"],"tags":[]},{"location":"home/keeping-ai-honest/#the-problem","level":2,"title":"The Problem","text":"

    AI agents confabulate. They invent history that never happened, claim familiarity with decisions that were never made, and sometimes declare a task complete when it is not. This is not malice - it is the default behavior of a system optimizing for plausible-sounding responses.

    When your AI says \"we decided to use Redis for caching last week,\" can you verify that? When it says \"the auth module is complete,\" can you confirm it? Without grounded, persistent context, the answer is no. You are trusting vibes.

    ctx replaces vibes with verifiable artifacts.

    ","path":["Home","Working with AI","Keeping AI Honest"],"tags":[]},{"location":"home/keeping-ai-honest/#grounded-memory","level":2,"title":"Grounded Memory","text":"

    Every entry in ctx context files has a timestamp and structured fields. When the AI cites a decision, you can check it.

    ## [2026-01-28-143022] Use Event Sourcing for Audit Trail\n\n**Status**: Accepted\n\n**Context**: Compliance requires full mutation history.\n\n**Decision**: Event sourcing for the audit subsystem only.\n\n**Rationale**: Append-only log meets compliance requirements\nwithout imposing event sourcing on the entire domain model.\n

    The timestamp 2026-01-28-143022 is not decoration. It is a verifiable anchor. If the AI references this decision, you can open DECISIONS.md, find the entry, and confirm it says what the AI claims. If the entry does not exist, the AI is hallucinating - and you know immediately.

    This is grounded memory: claims that trace back to artifacts you control and can audit.

    ","path":["Home","Working with AI","Keeping AI Honest"],"tags":[]},{"location":"home/keeping-ai-honest/#constitutionmd-hard-guardrails","level":2,"title":"CONSTITUTION.md: Hard Guardrails","text":"

    CONSTITUTION.md defines rules the AI must treat as inviolable. These are not suggestions or best practices - they are constraints that override task requirements.

    # Constitution\n\nThese rules are INVIOLABLE. If a task requires violating these,\nthe task is wrong.\n\n* [ ] Never commit secrets, tokens, API keys, or credentials\n* [ ] All public API changes require a decision record\n* [ ] Never delete context files without explicit user approval\n

    The AI reads these at session start, before anything else. A well-integrated agent will refuse a task that conflicts with a constitutional rule, citing the specific rule it would violate.

    ","path":["Home","Working with AI","Keeping AI Honest"],"tags":[]},{"location":"home/keeping-ai-honest/#the-agent-playbooks-anti-hallucination-rules","level":2,"title":"The Agent Playbook's Anti-Hallucination Rules","text":"

    The AGENT_PLAYBOOK.md file includes a section called \"How to Avoid Hallucinating Memory\" with five explicit rules:

    1. Never assume. If it is not in the context files, you do not know it.
    2. Never invent history. Do not claim \"we discussed\" something without a file reference.
    3. Verify before referencing. Search files before citing them.
    4. When uncertain, say so. \"I don't see a decision on this\" is always better than a fabricated one.
    5. Trust files over intuition. If the files say PostgreSQL but your training data suggests MySQL, the files win.

    These rules create a behavioral contract. The AI is not left to guess how confident it should be - it has explicit instructions to ground every claim in the context directory.

    ","path":["Home","Working with AI","Keeping AI Honest"],"tags":[]},{"location":"home/keeping-ai-honest/#drift-detection","level":2,"title":"Drift Detection","text":"

    Context files can go stale. You rename a package, delete a module, or finish a sprint, and suddenly ARCHITECTURE.md references paths that no longer exist. Stale context is almost as dangerous as no context: the AI treats outdated information as current truth.

    ctx drift detects this divergence:

    ctx drift\n

    It scans context files for references to files, paths, and symbols that no longer exist in the codebase. Stale references get flagged so you can update or remove them before they mislead the next session.

    Regular drift checks - weekly, or after major refactors - keep your context files honest the same way tests keep your code honest.

    ","path":["Home","Working with AI","Keeping AI Honest"],"tags":[]},{"location":"home/keeping-ai-honest/#the-verification-loop","level":2,"title":"The Verification Loop","text":"

    The /ctx-commit skill includes a built-in verification step: before staging, it maps claims to evidence and runs self-audit questions to surface gaps. This catches inconsistencies at the point where they matter most: right before code is committed.

    This closes the loop. You write context. The AI reads context. The verification step confirms that context still matches reality. When it does not, you fix it - and the next session starts from truth, not from drift.

    ","path":["Home","Working with AI","Keeping AI Honest"],"tags":[]},{"location":"home/keeping-ai-honest/#trust-through-structure","level":2,"title":"Trust through Structure","text":"

    The common thread across all of these mechanisms is structure over prose. Timestamps make claims verifiable. Constitutional rules make boundaries explicit. Drift detection makes staleness visible. The playbook makes behavioral expectations concrete.

    You do not need to trust the AI. You need to trust the system -- and verify when it matters.

    ","path":["Home","Working with AI","Keeping AI Honest"],"tags":[]},{"location":"home/keeping-ai-honest/#further-reading","level":2,"title":"Further Reading","text":"
    • Detecting and Fixing Drift: the full workflow for keeping context files accurate
    • Invariants: the properties that must hold for any valid ctx implementation
    • Agent Security: threat model and mitigations for AI agents operating with persistent context
    ","path":["Home","Working with AI","Keeping AI Honest"],"tags":[]},{"location":"home/prompting-guide/","level":1,"title":"Prompting Guide","text":"

    New to ctx?

    This guide references context files like TASKS.md, DECISIONS.md, and LEARNINGS.md:

    These are plain Markdown files that ctx maintains in your project's .context/ directory.

    If terms like \"context packet\" or \"session ceremony\" are unfamiliar,

    • start with the ctx Manifesto for the why,
    • About for the big picture,
    • then Getting Started to set up your first project.
    ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#literature-matters","level":2,"title":"Literature Matters","text":"

    This guide is about crafting effective prompts for working with AI assistants in ctx-enabled projects, but the guidelines given here apply to other AI systems, too.

    The right prompt triggers the right behavior.

    This guide documents prompts that reliably produce good results.

    ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#tldr","level":2,"title":"TL;DR","text":"Goal Prompt Load context \"Do you remember?\" Resume work \"What's the current state?\" What's next /ctx-next Debug \"Why doesn't X work?\" Validate \"Is this consistent with our decisions?\" Impact analysis \"What would break if we...\" Reflect /ctx-reflect Wrap up /ctx-wrap-up Persist \"Add this as a learning\" Explore \"How does X work in this codebase?\" Sanity check \"Is this the right approach?\" Completeness \"What am I missing?\" One more thing \"What's the single smartest addition?\" Set tone \"Push back if my assumptions are wrong.\" Constrain scope \"Only change files in X. Nothing else.\" Course correct \"Stop. That's not what I meant.\" Check health \"Run ctx drift\" Commit /ctx-commit","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#session-start","level":2,"title":"Session Start","text":"","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#do-you-remember","level":3,"title":"\"do you remember?\"","text":"

    Triggers the AI to silently read TASKS.md, DECISIONS.md, LEARNINGS.md, and check recent history via ctx journal before responding with a structured readback:

    1. Last session: most recent session topic and date
    2. Active work: pending or in-progress tasks
    3. Recent context: 1-2 recent decisions or learnings
    4. Next step: offer to continue or ask what to focus on

    Use this at the start of every important session.

    Do you remember what we were working on?\n

    This question implies prior context exists. The AI checks files rather than admitting ignorance. The expected response cites specific context (session names, task counts, decisions), not vague summaries.

    If the AI instead narrates its discovery process (\"Let me check if there are files...\"), it has not loaded CLAUDE.md or AGENT_PLAYBOOK.md properly.

    For a detailed case study on making agents actually follow this protocol (including the failure modes, the timing problem, and the hook design that solved it) see The Dog Ate My Homework.

    ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#whats-the-current-state","level":3,"title":"\"What's the Current State?\"","text":"

    Prompts reading of TASKS.md, recent sessions, and status overview.

    Use this when resuming work after a break.

    Variants:

    • \"Where did we leave off?\"
    • \"What's in progress?\"
    • \"Show me the open tasks.\"
    ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#during-work","level":2,"title":"During Work","text":"","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#why-doesnt-x-work","level":3,"title":"\"Why Doesn't X Work?\"","text":"

    This triggers root cause analysis rather than surface-level fixes.

    Use this when something fails unexpectedly.

    Framing as \"why\" encourages investigation before action. The AI will trace through code, check configurations, and identify the actual cause.

    Real Example

    \"Why can't I run /ctx-reflect?\" led to discovering missing permissions in settings.local.json bootstrapping.

    This was a fix that benefited all users of ctx.

    ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#is-this-consistent-with-our-decisions","level":3,"title":"\"Is This Consistent with Our Decisions?\"","text":"

    This prompts checking DECISIONS.md before implementing.

    Use this before making architectural choices.

    Variants:

    • \"Check if we've decided on this before\"
    • \"Does this align with our conventions?\"
    ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#what-would-break-if-we","level":3,"title":"\"What Would Break If We...\"","text":"

    This triggers defensive thinking and impact analysis.

    Use this before making significant changes.

    What would break if we change the Settings struct?\n
    ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#before-you-start-read-x","level":3,"title":"\"Before You Start, Read X\"","text":"

    This ensures specific context is loaded before work begins.

    Use this when you know the relevant context exists in a specific file.

    Before you start, check ctx journal source for the auth discussion session\n
    ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#scope-control","level":3,"title":"Scope Control","text":"

    Constrain the AI to prevent sprawl. These are some of the most useful prompts in day-to-day work.

    Only change files in internal/cli/add/. Nothing else.\n
    No new files. Modify the existing implementation.\n
    Keep the public API unchanged. Internal refactor only.\n

    Use these when the AI tends to \"helpfully\" modify adjacent code, add documentation you didn't ask for, or create new abstractions.

    ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#course-correction","level":3,"title":"Course Correction","text":"

    Steer the AI when it goes off-track: Don't wait for it to finish a wrong approach.

    Stop! That's not what I meant. Let me clarify.\n
    Let's step back. Explain what you're about to do before changing anything.\n
    Undo that last change and try a different approach.\n

    These work because they interrupt momentum.

    Without explicit course correction, the AI tends to commit harder to a wrong path rather than reconsidering.

    ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#failure-modes","level":3,"title":"Failure Modes","text":"

    When the AI misbehaves, match the symptom to the recovery prompt:

    Symptom Recovery prompt Hand-waves (\"should work now\") \"Show evidence: file/line refs, command output, or test name.\" Creates unnecessary files \"No new files. Modify the existing implementation.\" Expands scope unprompted \"Stop after the smallest working change. Ask before expanding scope.\" Narrates instead of acting \"Skip the explanation. Make the change and show the diff.\" Repeats a failed approach \"That didn't work last time. Try a different approach.\" Claims completion without proof \"Run the test. Show me the output.\"

    These are recovery handles, not rules to paste into CLAUDE.md.

    Use them in the moment when you see the behavior.

    ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#reflection-and-persistence","level":2,"title":"Reflection and Persistence","text":"","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#what-did-we-learn","level":3,"title":"\"What Did We Learn?\"","text":"

    This prompts reflection on the session and often triggers adding learnings to LEARNINGS.md.

    Use this after completing a task or debugging session.

    This is an explicit reflection prompt. The AI will summarize insights and often offer to persist them.

    ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#add-this-as-a-learningdecision","level":3,"title":"\"Add This as a Learning/decision\"","text":"

    This is an explicit persistence request.

    Use this when you have discovered something worth remembering.

    Add this as a learning: \"JSON marshal escapes angle brackets by default\"\n\n# or simply.\nAdd this as a learning.\n# and let the AI autonomously infer and summarize.\n
    ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#save-context-before-we-end","level":3,"title":"\"Save Context Before We End\"","text":"

    This triggers context persistence before the session closes.

    Use it at the end of the session or before switching topics.

    Variants:

    • \"Let's persist what we did\"
    • \"Update the context files\"
    • /ctx-wrap-up: the recommended end-of-session ceremony (see Session Ceremonies)
    • /ctx-reflect: mid-session reflection checkpoint
    ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#exploration-and-research","level":2,"title":"Exploration and Research","text":"","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#explore-the-codebase-for-x","level":3,"title":"\"Explore the Codebase for X\"","text":"

    This triggers thorough codebase search rather than guessing.

    Use this when you need to understand how something works.

    This works because \"Explore\" signals that investigation is needed, not immediate action.

    ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#how-does-x-work-in-this-codebase","level":3,"title":"\"How Does X Work in This Codebase?\"","text":"

    This prompts reading actual code rather than explaining general concepts.

    Use this to understand the existing implementation.

    How does session saving work in this codebase?\n
    ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#find-all-places-where-x","level":3,"title":"\"Find All Places Where X\"","text":"

    This triggers a comprehensive search across the codebase.

    Use this before refactoring or understanding the impact.

    ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#meta-and-process","level":2,"title":"Meta and Process","text":"","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#what-should-we-document-from-this","level":3,"title":"\"What Should We Document from This?\"","text":"

    This prompts identifying learnings, decisions, and conventions worth persisting.

    Use this after complex discussions or implementations.

    ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#is-this-the-right-approach","level":3,"title":"\"Is This the Right Approach?\"","text":"

    This invites the AI to challenge the current direction.

    Use this when you want a sanity check.

    This works because it allows AI to disagree.

    AIs often default to agreeing; this prompt signals you want an honest assessment.

    Stronger variant: \"Push back if my assumptions are wrong.\" This sets the tone for the entire session: The AI will flag questionable choices proactively instead of waiting to be asked.

    ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#what-am-i-missing","level":3,"title":"\"What Am I Missing?\"","text":"

    This prompts thinking about edge cases, overlooked requirements, or unconsidered approaches.

    Use this before finalizing a design or implementation.

    Forward-looking variant: \"What's the single smartest addition you could make to this at this point?\" Use this after you think you're done: It surfaces improvements you wouldn't have thought to ask for. The constraint to one thing prevents feature sprawl.

    ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#cli-commands-as-prompts","level":2,"title":"CLI Commands as Prompts","text":"

    Asking the AI to run ctx commands is itself a prompt. These load context or trigger specific behaviors:

    Command What it does \"Run ctx status\" Shows context summary, file presence, staleness \"Run ctx agent\" Loads token-budgeted context packet \"Run ctx drift\" Detects dead paths, stale files, missing context","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#ctx-skills","level":3,"title":"ctx Skills","text":"

    The SKILL.md Standard

    Skills are formalized prompts stored as SKILL.md files.

    The /slash-command syntax below is Claude Code specific.

    Other agents can use the same skill files, but invocation may differ.

    Use ctx skills by name:

    Skill When to use /ctx-status Quick context summary /ctx-agent Load full context packet /ctx-remember Recall project context and structured readback /ctx-wrap-up End-of-session context persistence /ctx-history Browse session history for past discussions /ctx-reflect Structured reflection checkpoint /ctx-next Suggest what to work on next /ctx-commit Commit with context persistence /ctx-drift Detect and fix context drift /ctx-implement Execute a plan step-by-step with verification /ctx-loop Generate autonomous loop script /ctx-pad Manage encrypted scratchpad /ctx-archive Archive completed tasks /check-links Audit docs for dead links

    Ceremony vs. Workflow Skills

    Most skills work conversationally: \"what should we work on?\" triggers /ctx-next, \"save that as a learning\" triggers /ctx-learning-add. Natural language is the recommended approach.

    Two skills are the exception: /ctx-remember and /ctx-wrap-up are ceremony skills for session boundaries. Invoke them as explicit slash commands; conversational triggers risk partial execution. See Session Ceremonies.

    Skills combine a prompt, tool permissions, and domain knowledge into a single invocation.

    Skills beyond Claude Code

    The /slash-command syntax above is Claude Code native, but the underlying SKILL.md files are a standard markdown format that any agent can consume. If you use a different coding agent, consult its documentation for how to load skill files as prompt templates.

    See Integrations for setup details.

    ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#anti-patterns","level":2,"title":"Anti-Patterns","text":"

    Based on our ctx development experience (i.e., \"sipping our own champagne\") so far, here are some prompts that tend to produce poor results:

    Prompt Problem Better Alternative \"Fix this\" Too vague, may patch symptoms \"Why is this failing?\" \"Make it work\" Encourages quick hacks \"What's the right way to solve this?\" \"Just do it\" Skips planning \"Plan this, then implement\" \"You should remember\" Confrontational \"Do you remember?\" \"Obviously...\" Discourages questions State the requirement directly \"Idiomatic X\" Triggers language priors \"Follow project conventions\" \"Implement everything\" No phasing, sprawl risk Break into tasks, implement one at a time \"You should know this\" Assumes context is loaded \"Before you start, read X\"","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#reliability-checklist","level":2,"title":"Reliability Checklist","text":"

    Before sending a non-trivial prompt, check these four elements. This is the guide's DNA in one screenful.

    1. Goal in one sentence: What does \"done\" look like?
    2. Files to read: What existing code or context should the AI review before acting?
    3. Verification command: How will you prove it worked? (test name, CLI command, expected output)
    4. Scope boundary: What should the AI not touch?

    A prompt that covers all four is almost always good enough.

    A prompt missing #3 is how you get \"should work now\" without evidence.

    ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#safety-invariants","level":2,"title":"Safety Invariants","text":"

    These Are Invariants: Not Suggestions

    A prompting guide earns its trust by being honest about risk.

    These four rules mentioned below don't change with model versions, agent frameworks, or project size.

    Build them into your workflow once and stop thinking about them.

    Tool-using agents can read files, run commands, and modify your codebase. That power makes them useful. It also creates a trust boundary you should be aware of.

    These invariants apply regardless of which agent or model you use.

    ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#treat-the-repository-text-as-untrusted-input","level":3,"title":"Treat the Repository Text as \"Untrusted Input\"","text":"

    Issue descriptions, PR comments, commit messages, documentation, and even code comments can contain text that looks like instructions. An agent that reads a GitHub issue and then runs a command found inside it is executing untrusted input.

    The rule: Before running any command the agent found in repo text (issues, docs, comments), restate the command explicitly and confirm it does what you expect. Don't let the agent copy-paste from untrusted sources into a shell.

    ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#ask-before-destructive-operations","level":3,"title":"Ask Before Destructive Operations","text":"

    git push --force, rm -rf, DROP TABLE, docker system prune: these are irreversible or hard to reverse. A good agent should pause before running them, but don't rely on that.

    The rule: For any operation that deletes data, overwrites history, or affects shared infrastructure, require explicit confirmation. If the agent runs something destructive without asking, that's a course-correction moment: \"Stop. Never run destructive commands without asking first.\"

    ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#scope-the-blast-radius","level":3,"title":"Scope the Blast Radius","text":"

    An agent told to \"fix the tests\" might modify test fixtures, change assertions, or delete tests that inconveniently fail. An agent told to \"deploy\" might push to production. Broad mandates create broad risk.

    The rule: Constrain scope before starting work. The Reliability Checklist's scope boundary (#4) is your primary safety lever. When in doubt, err on the side of a tighter boundary.

    ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#secrets-never-belong-in-context","level":3,"title":"Secrets Never Belong in Context","text":"

    LEARNINGS.md, DECISIONS.md, and session transcripts are plain-text files that may be committed to version control.

    Don't persist API keys, passwords, tokens, or credentials in context files.

    The rule: If the agent encounters a secret during work, it should use it transiently (environment variable, an alias to the secret instead of the actual secret, etc.) and never write it to a context file.

    Any Secret Seen IS Exposed

    If you see a secret in a context file, remove it immediately and rotate the credential.

    ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#explore-plan-implement","level":2,"title":"Explore → Plan → Implement","text":"

    For non-trivial work, name the phase you want:

    Explore src/auth and summarize the current flow.\nThen propose a plan. After I approve, implement with tests.\n

    This prevents the AI from jumping straight to code.

    The three phases map to different modes of thinking:

    • Explore: read, search, understand: no changes
    • Plan: propose approach, trade-offs, scope: no changes
    • Implement: write code, run tests, verify: changes

    Small fixes skip straight to implement. Complex or uncertain work benefits from all three.

    ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#prompts-by-task-type","level":2,"title":"Prompts by Task Type","text":"

    Different tasks need different prompt structures. The pattern: symptom + location + verification.

    ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#bugfix","level":3,"title":"Bugfix","text":"
    Users report search returns empty results for queries with hyphens.\nReproduce in src/search/. Write a failing test for \"foo-bar\",\nfix the root cause, run: go test ./internal/search/...\n
    ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#refactor","level":3,"title":"Refactor","text":"
    Inspect src/auth/ and list duplication hotspots.\nPropose a refactor plan scoped to one module.\nAfter approval, remove duplication without changing behavior.\nAdd a test if coverage is missing. Run: make audit\n
    ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#research","level":3,"title":"Research","text":"
    Explore the request flow around src/api/.\nSummarize likely bottlenecks with evidence.\nPropose 2-3 hypotheses. Do not implement yet.\n
    ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#docs","level":3,"title":"Docs","text":"
    Update docs/cli-reference.md to reflect the new --format flag.\nConfirm the flag exists in the code and the example works.\n

    Notice each prompt includes what to verify and how. Without that, you get a \"should work now\" instead of evidence.

    ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#writing-tasks-as-prompts","level":2,"title":"Writing Tasks as Prompts","text":"

    Tasks in TASKS.md are indirect prompts to the AI. How you write them shapes how the AI approaches the work.

    ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#state-the-motivation-not-just-the-goal","level":3,"title":"State the Motivation, Not Just the Goal","text":"

    Tell the AI why you are building something, not just what.

    Bad: \"Build a calendar view.\"

    Good: \"Build a calendar view. The motivation is that all notes and tasks we build later should be viewable here.\"

    The second version lets the AI anticipate downstream requirements:

    It will design the calendar's data model to be compatible with future features, without you having to spell out every integration point. Motivation turns a one-off task into a directional task.

    ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#state-the-deliverable-not-just-steps","level":3,"title":"State the Deliverable, Not Just Steps","text":"

    Bad task (implementation-focused):

    - [ ] T1.1.0: Parser system\n  - [ ] Define data structures\n  - [ ] Implement line parser\n  - [ ] Implement session grouper\n

    The AI may complete all subtasks but miss the actual goal. What does \"Parser system\" deliver to the user?

    Good task (deliverable-focused):

    - [ ] T1.1.0: Parser CLI command\n  **Deliverable**: `ctx journal source` command that shows parsed sessions\n  - [ ] Define data structures\n  - [ ] Implement line parser\n  - [ ] Implement session grouper\n

    Now the AI knows the subtasks serve a specific user-facing deliverable.

    ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#use-acceptance-criteria","level":3,"title":"Use Acceptance Criteria","text":"

    For complex tasks, add explicit \"done when\" criteria:

    - [ ] T2.0: Authentication system\n  **Done when**:\n  - [ ] User can register with email\n  - [ ] User can log in and get a token\n  - [ ] Protected routes reject unauthenticated requests\n

    This prevents a premature \"task complete\" when the implementation details are done but the feature doesn't actually work.

    ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#subtasks-parent-task","level":3,"title":"Subtasks ≠ Parent Task","text":"

    Completing all subtasks does not mean the parent task is complete.

    The parent task describes what the user gets.

    Subtasks describe how to build it.

    Always re-read the parent task description before marking it complete. Verify the stated deliverable exists and works.

    ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#why-do-these-approaches-work","level":2,"title":"Why Do These Approaches Work?","text":"

    The patterns in this guide aren't invented here: They are practitioner translations of well-established, peer-reviewed research, most of which predates the current AI (hype) wave.

    The underlying ideas come from decades of work in machine learning, cognitive science, and numerical optimization. For a concrete case study showing how these principles play out when an agent decides whether to follow instructions (attention competition, optimization toward least-resistance paths, and observable compliance as a design goal) see The Dog Ate My Homework.

    Phased work (\"Explore → Plan → Implement\") applies chain-of-thought reasoning: Decomposing a problem into sequential steps before acting. Forcing intermediate reasoning steps measurably improves output quality in language models, just as it does in human problem-solving. Wei et al., Chain-of-Thought Prompting Elicits Reasoning in Large Language Models (2022).

    Root-cause prompts (\"Why doesn't X work?\") use step-back abstraction: Retreating to a higher-level question before diving into specifics. This mirrors how experienced engineers debug: they ask \"what should happen?\" before asking \"what went wrong?\" Zheng et al., Take a Step Back: Evoking Reasoning via Abstraction in Large Language Models (2023).

    Exploring alternatives (\"Propose 2-3 approaches\") leverages self-consistency: Generating multiple independent reasoning paths and selecting the most coherent result. The idea traces back to ensemble methods in ML: A committee of diverse solutions outperforms any single one. Wang et al., Self-Consistency Improves Chain of Thought Reasoning in Language Models (2022).

    Impact analysis (\"What would break if we...\") is a form of tree-structured exploration: Branching into multiple consequence paths before committing. This is the same principle behind game-tree search (minimax, MCTS) that has powered decision-making systems since the 1950s. Yao et al., Tree of Thoughts: Deliberate Problem Solving with Large Language Models (2023).

    Motivation prompting (\"Build X because Y\") works through goal conditioning: Providing the objective function alongside the task. In optimization terms, you are giving the gradient direction, not just the loss. The model can make locally coherent decisions that serve the global objective because it knows what \"better\" means.

    Scope constraints (\"Only change files in X\") apply constrained optimization: Bounding the search space to prevent divergence. This is the same principle behind regularization in ML: Without boundaries, powerful optimizers find solutions that technically satisfy the objective but are practically useless.

    CLI commands as prompts (\"Run ctx status\") interleave reasoning with acting: The model thinks, acts on external tools, observes results, then thinks again. Grounding reasoning in real tool output reduces hallucination because the model can't ignore evidence it just retrieved. Yao et al., ReAct: Synergizing Reasoning and Acting in Language Models (2022).

    Task decomposition (\"Prompts by Task Type\") applies least-to-most prompting: Breaking a complex problem into subproblems and solving them sequentially, each building on the last. This is the research version of \"plan, then implement one slice.\" Zhou et al., Least-to-Most Prompting Enables Complex Reasoning in Large Language Models (2022).

    Explicit planning (\"Explore → Plan → Implement\") is directly supported by plan-and-solve prompting, which addresses missing-step failures in zero-shot reasoning by extracting a plan before executing. The phased structure prevents the model from jumping to code before understanding the problem. Wang et al., Plan-and-Solve Prompting: Improving Zero-Shot Chain-of-Thought Reasoning by Large Language Models (2023).

    Session reflection (\"What did we learn?\", /ctx-reflect) is a form of verbal reinforcement learning: Improving future performance by persisting linguistic feedback as memory rather than updating weights. This is exactly what LEARNINGS.md and DECISIONS.md provide: a durable feedback signal across sessions. Shinn et al., Reflexion: Language Agents with Verbal Reinforcement Learning (2023).

    These aren't prompting \"hacks\" that you will find in the \"1000 AI Prompts for the Curious\" listicles: They are applications of foundational principles:

    • Decomposition,
    • Abstraction,
    • Ensemble Reasoning,
    • Search,
    • and Constrained Optimization.

    They work because language models are, at their core, optimization systems navigating probabilistic landscapes.

    ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#further-reading","level":2,"title":"Further Reading","text":"
    • The Attention Budget: Why your AI forgets what you just told it, and how token budgets shape context strategy
    • The Dog Ate My Homework: A case study in making agents follow instructions: attention timing, delegation decay, and observable compliance as a design goal
    ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#contributing","level":2,"title":"Contributing","text":"

    Found a prompt that works well? Open an issue or PR with:

    1. The prompt text;
    2. What behavior it triggers;
    3. When to use it;
    4. Why it works (optional but helpful).

    Dive Deeper:

    • Recipes: targeted how-to guides for specific tasks
    • CLI Reference: all commands and flags
    • Integrations: setup for Claude Code, Cursor, Aider
    ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/repeated-mistakes/","level":1,"title":"My AI Keeps Making the Same Mistakes","text":"","path":["Home","Working with AI","My AI Keeps Making the Same Mistakes"],"tags":[]},{"location":"home/repeated-mistakes/#the-problem","level":2,"title":"The Problem","text":"

    You found a bug last Tuesday. You debugged it, understood the root cause, and moved on. Today, a new session hits the exact same bug. The AI rediscovers it from scratch, burning twenty minutes on something you already solved.

    Worse: you spent an hour last week evaluating two database migration strategies, picked one, documented why in a comment somewhere, and now the AI is cheerfully suggesting the approach you rejected. Again.

    This is not a model problem. It is a memory problem. Without persistent context, every session starts with amnesia.

    ","path":["Home","Working with AI","My AI Keeps Making the Same Mistakes"],"tags":[]},{"location":"home/repeated-mistakes/#how-ctx-stops-the-loop","level":2,"title":"How ctx Stops the Loop","text":"

    ctx gives your AI three files that directly prevent repeated mistakes, each targeting a different failure mode.

    ","path":["Home","Working with AI","My AI Keeps Making the Same Mistakes"],"tags":[]},{"location":"home/repeated-mistakes/#decisionsmd-stop-relitigating-settled-choices","level":3,"title":"DECISIONS.md: Stop Relitigating Settled Choices","text":"

    When you make an architectural decision, record it with rationale and rejected alternatives. The AI reads this at session start and treats it as settled.

    ## [2026-02-12] Use JWT for Authentication\n\n**Status**: Accepted\n\n**Context**: Need stateless auth for the API layer.\n\n**Decision**: JWT with short-lived access tokens and refresh rotation.\n\n**Rationale**: Stateless, scales horizontally, team has prior experience.\n\n**Alternatives Considered**:\n- Session-based auth: Rejected. Requires sticky sessions or shared store.\n- API keys only: Rejected. No user identity, no expiry rotation.\n

    Next session, when the AI considers auth, it reads this entry and builds on the decision instead of re-debating it. If someone asks \"why not sessions?\", the rationale is already there.

    ","path":["Home","Working with AI","My AI Keeps Making the Same Mistakes"],"tags":[]},{"location":"home/repeated-mistakes/#learningsmd-capture-gotchas-once","level":3,"title":"LEARNINGS.md: Capture Gotchas Once","text":"

    Learnings are the bugs, quirks, and non-obvious behaviors that cost you time the first time around. Write them down so they cost you zero time the second time.

    ## Build\n\n### CGO Required for SQLite on Alpine\n\n**Discovered**: 2026-01-20\n\n**Context**: Docker build failed silently with \"no such table\" at runtime.\n\n**Lesson**: The go-sqlite3 driver requires CGO_ENABLED=1 and gcc\ninstalled in the build stage. Alpine needs apk add build-base.\n\n**Application**: Always use the golang:alpine image with build-base\nfor SQLite builds. Never set CGO_ENABLED=0.\n

    Without this entry, the next session that touches the Dockerfile will hit the same wall. With it, the AI knows before it starts.

    ","path":["Home","Working with AI","My AI Keeps Making the Same Mistakes"],"tags":[]},{"location":"home/repeated-mistakes/#constitutionmd-draw-hard-lines","level":3,"title":"CONSTITUTION.md: Draw Hard Lines","text":"

    Some mistakes are not about forgetting - they are about boundaries the AI should never cross. CONSTITUTION.md sets inviolable rules.

    * [ ] Never commit secrets, tokens, API keys, or credentials\n* [ ] Never disable security linters without a documented exception\n* [ ] All database migrations must be reversible\n

    The AI reads these as absolute constraints. It does not weigh them against convenience. It refuses tasks that would violate them.

    ","path":["Home","Working with AI","My AI Keeps Making the Same Mistakes"],"tags":[]},{"location":"home/repeated-mistakes/#the-accumulation-effect","level":2,"title":"The Accumulation Effect","text":"

    Each of these files grows over time. Session one captures two decisions. Session five adds a tricky learning about timezone handling. Session twelve records a convention about error message formatting.

    By session twenty, your AI has a knowledge base that no single person carries in their head. New team members - human or AI - inherit it instantly.

    The key insight: you are not just coding. You are building a knowledge layer that makes every future session faster.

    ctx files version with your code in git. They survive branch switches, team changes, and model upgrades. The context outlives any single session.

    ","path":["Home","Working with AI","My AI Keeps Making the Same Mistakes"],"tags":[]},{"location":"home/repeated-mistakes/#getting-started","level":2,"title":"Getting Started","text":"

    Capture your first decision or learning right now:

    ctx add decision \"Use PostgreSQL\" \\\n  --context \"Need a relational database for the project\" \\\n  --rationale \"Team expertise, JSONB support, mature ecosystem\" \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n\nctx add learning \"Vitest mock hoisting\" \\\n  --context \"Tests failing intermittently\" \\\n  --lesson \"vi.mock() must be at file top level\" \\\n  --application \"Use vi.doMock() for dynamic mocks\" \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n
    ","path":["Home","Working with AI","My AI Keeps Making the Same Mistakes"],"tags":[]},{"location":"home/repeated-mistakes/#further-reading","level":2,"title":"Further Reading","text":"
    • Knowledge Capture: the full workflow for persisting decisions, learnings, and conventions
    • Context Files Reference: structure and format for every file in .context/
    • About ctx: the bigger picture - why persistent context changes how you work with AI
    ","path":["Home","Working with AI","My AI Keeps Making the Same Mistakes"],"tags":[]},{"location":"home/steering/","level":1,"title":"Steering Files","text":"","path":["Home","Customization","Steering Files"],"tags":[]},{"location":"home/steering/#steering-files","level":2,"title":"Steering Files","text":"

    ctx projects talk to AI assistants through several layers (context files, decisions, conventions, the agent context packet) but none of those can tell the assistant how to behave when a specific kind of prompt arrives. That's what steering files are for.

    A steering file is a small markdown document with YAML frontmatter that says: \"when the user asks about X, prepend these rules to the prompt.\" ctx manages those files in .context/steering/, decides which ones match each prompt, and syncs them out to each AI tool's native config (Claude Code, Cursor, Kiro, Cline) so the rules actually land in the prompt pipeline.

    ","path":["Home","Customization","Steering Files"],"tags":[]},{"location":"home/steering/#not-the-same-as-decisions-or-conventions","level":2,"title":"Not the Same as Decisions or Conventions","text":"

    The three look similar on disk but serve different purposes:

    Kind Purpose Decisions (DECISIONS.md) What was chosen and why Conventions (CONVENTIONS.md) How the codebase is written Steering (.context/steering/*.md) How the AI should behave on matching prompts

    If you find yourself writing \"the AI should always do X when asked about Y,\" that belongs in steering, not decisions.

    ","path":["Home","Customization","Steering Files"],"tags":[]},{"location":"home/steering/#your-first-steering-files","level":2,"title":"Your First Steering Files","text":"

    ctx init scaffolds four foundation steering files in .context/steering/ so you start with something to edit rather than an empty directory:

    File What to fill in product.md What the project is, who it's for, what's out of scope tech.md Languages, frameworks, runtime, hard constraints structure.md Directory layout, where new files go, naming rules workflow.md Branch strategy, commit conventions, pre-commit checks

    Each file starts with an inline HTML comment explaining the three inclusion modes, priority semantics, and tool scoping. The comment is invisible in rendered markdown but visible when you open the file to edit it; it's self-documenting scaffolding, not forever guidance. Delete the comment once you've customized the file.

    Default settings for foundation files:

    • inclusion: always: fires on every AI tool call
    • priority: 10: injected near the top of the prompt
    • tools: []: applies to every configured AI tool

    You should open each of these files and replace the placeholder content with your project's actual rules. Re-running ctx init is safe: existing files are left alone, so your edits survive. Use ctx init --no-steering-init to opt out of the scaffold entirely.

    ","path":["Home","Customization","Steering Files"],"tags":[]},{"location":"home/steering/#inclusion-modes","level":2,"title":"Inclusion Modes","text":"

    Each steering file declares an inclusion mode in its frontmatter:

    Mode When the file is included always Every prompt, unconditionally auto When the prompt keywords match the file's description manual Only when the user explicitly names the file

    Which mode to pick depends on the AI tool you use, because the two tool families consume steering very differently.

    Claude Code and Codex: prefer inclusion: always for rules that must fire reliably. These tools have two delivery channels:

    1. The plugin's PreToolUse hook runs ctx agent with an empty prompt, so only always files match and get injected automatically on every tool call.
    2. The ctx_steering_get MCP tool, registered automatically when the ctx plugin is installed. Claude can call this tool mid-task to fetch auto or manual files matching a specific prompt. Verify with claude mcp list; look for ctx: ✓ Connected.

    Use always for invariants and anything that must fire every session. Use auto for situational rules where \"Claude fetches this when the prompt is relevant\" is the right behavior; those still land, just on Claude's judgment. Use manual for reference libraries you'll name explicitly.

    Cursor, Cline, Kiro: auto is the natural default. These tools read .cursor/rules/, .clinerules/, or .kiro/steering/ natively and resolve the description match on their own, so auto files fire when the prompt matches. manual files load on explicit invocation. always still works but consumes context budget on every turn.

    Mixed setups: if a rule must fire on Claude Code, pick always, even if it's overkill for your Cursor setup. The context budget cost is small; the alternative (silently not firing) is worse.

    ","path":["Home","Customization","Steering Files"],"tags":[]},{"location":"home/steering/#two-families-of-ai-tools-two-delivery-paths","level":2,"title":"Two Families of AI Tools, Two Delivery Paths","text":"

    Not every AI tool consumes steering the same way. ctx handles two tool families differently, and it's worth knowing which family your editor is in before you wonder why a rule isn't firing.

    Native-rules tools (Cursor, Cline, Kiro) have a built-in rules primitive. They read a specific directory (.cursor/rules/, .clinerules/, .kiro/steering/) and apply the rules they find there. ctx handles these via ctx steering sync, which exports your files into the tool-native format. Run sync whenever you edit a steering file.

    Hook + MCP tools (Claude Code, Codex) have no native rules primitive, so ctx steering sync is a no-op for them. Instead, ctx delivers steering through two non-sync channels:

    1. Automatic injection via a PreToolUse hook. The ctx setup claude-code plugin wires a hook that runs ctx agent --budget 8000 before each tool call. ctx agent loads your steering files, filters them by the active prompt, and includes matching bodies in the context packet it prints. Claude Code feeds that output back into its context. Every tool call, automatically.
    2. On-demand via the ctx_steering_get MCP tool. The ctx MCP server exposes a tool Claude can call mid-task to fetch matching steering files for a specific prompt. Claude decides when to call it; it's not automatic.

    Both channels activate when you run ctx setup claude-code --write. After that, steering just works for Claude Code.

    Practical takeaway:

    • Using Cursor/Cline/Kiro only? Run ctx steering sync after edits.
    • Using Claude Code or Codex only? Never run sync; the hook+MCP pipeline handles it.
    • Using both? Run sync for the native-rules tools; the hook+MCP pipeline covers Claude Code automatically.
    ","path":["Home","Customization","Steering Files"],"tags":[]},{"location":"home/steering/#two-shapes-of-automation-rules-and-scripts","level":2,"title":"Two Shapes of Automation: Rules and Scripts","text":"

    Steering is one of two hook-like layers ctx provides for customizing AI behavior. They're complementary:

    • Steering: persistent rules that get prepended to prompts. Declarative, text-only, scored by match.
    • Triggers: executable shell scripts that fire at lifecycle events. Imperative, runs arbitrary code, gated by exit codes.

    Pick steering when you want \"always remind the AI of X.\" Pick triggers when you want \"do Y when event Z happens.\" They can coexist; many projects use both.

    ","path":["Home","Customization","Steering Files"],"tags":[]},{"location":"home/steering/#where-to-go-next","level":2,"title":"Where to Go Next","text":"
    • Writing Steering Files: a six-step walkthrough: scaffold, write the rule, preview matches, list, get-rules-in-front-of-the-AI (two paths depending on tool family), verify.
    • ctx steering reference: full command, flag, and frontmatter reference; includes the per-tool delivery-mechanism table and a dedicated section on how Claude Code and Codex consume steering.
    • ctx setup: configure which AI tools receive steering. For Cursor/Cline/Kiro this is about sync targets; for Claude Code/Codex it installs the plugin that wires the PreToolUse hook and MCP server.
    • Lifecycle Triggers: the imperative companion to steering files.
    ","path":["Home","Customization","Steering Files"],"tags":[]},{"location":"home/triggers/","level":1,"title":"Lifecycle Triggers","text":"","path":["Home","Customization","Lifecycle Triggers"],"tags":[]},{"location":"home/triggers/#lifecycle-triggers","level":2,"title":"Lifecycle Triggers","text":"

    Some things can't be expressed as a rule you want the AI to follow. Sometimes you want something to happen: block a dangerous tool call, inject today's standup notes into the next session, log every file save to a journal. That's what triggers are for.

    A trigger is an executable shell script that ctx runs at a specific lifecycle event: the start of a session, before a tool call, when a file is saved, and so on. Triggers read a JSON payload from stdin, do whatever they need, and write a JSON response on stdout. They can allow, block, or inject context into the pipeline depending on the event type.

    ","path":["Home","Customization","Lifecycle Triggers"],"tags":[]},{"location":"home/triggers/#trigger-types","level":2,"title":"Trigger Types","text":"Type Fires when Use case session-start A new AI session begins Inject rotating context, standup notes session-end An AI session ends Persist summaries, send notifications pre-tool-use Before a tool call executes Block, gate, or audit post-tool-use After a tool call completes Log, react, post-process file-save A file is saved Lint on save, update indices context-add A new entry is added to .context/ Cross-link, notify, enrich","path":["Home","Customization","Lifecycle Triggers"],"tags":[]},{"location":"home/triggers/#triggers-are-arbitrary-code-treat-them-like-pre-commit-hooks","level":2,"title":"Triggers Are Arbitrary Code: Treat Them like Pre-Commit Hooks","text":"

    Only Enable Scripts You've Read and Understand

    A trigger is a shell script with the executable bit set. It runs with the same privileges as your AI tool and receives JSON input on stdin. A malicious or buggy trigger can block tool calls, corrupt context files, or exfiltrate data.

    ctx trigger add intentionally creates new scripts disabled (no executable bit). You must ctx trigger enable <name> after reviewing the contents. That's not a suggestion; it's the security model.

    ","path":["Home","Customization","Lifecycle Triggers"],"tags":[]},{"location":"home/triggers/#three-hook-like-layers-in-ctx","level":2,"title":"Three Hook-like Layers in ctx","text":"

    Triggers are one of three distinct hook-like concepts in ctx. The names are similar but the owners and use cases are not:

    Layer Owned by Where they live When to use ctx trigger You .context/hooks/<type>/*.sh Project-specific automation, any AI tool ctx system hooks ctx itself built-in, wired into tool configs Built-in nudges (you don't author these) Claude Code hooks Claude Code .claude/settings.local.json Claude-Code-only tool-specific integration

    This page is about the first category. The other two run automatically and are invisible to you.

    ","path":["Home","Customization","Lifecycle Triggers"],"tags":[]},{"location":"home/triggers/#triggers-vs-steering-same-problem-different-shape","level":2,"title":"Triggers vs Steering: Same Problem, Different Shape","text":"

    Triggers are the imperative counterpart to steering files. Steering expresses persistent rules the AI reads before each prompt; triggers express side effects that run on lifecycle events. They're complementary, not competing:

    • Want the AI to remember something? → Steering.
    • Want a script to run when something happens? → Trigger.

    Most projects use both.

    ","path":["Home","Customization","Lifecycle Triggers"],"tags":[]},{"location":"home/triggers/#where-to-go-next","level":2,"title":"Where to Go Next","text":"
    • Authoring Lifecycle Triggers: walkthrough with security guidance: scaffold, test, enable, iterate.
    • ctx trigger reference: command reference, trigger type table, input/output contract.
    • Steering files: the declarative counterpart to triggers.
    ","path":["Home","Customization","Lifecycle Triggers"],"tags":[]},{"location":"operations/","level":1,"title":"Operations","text":"

    Guides for installing, upgrading, integrating, and running ctx. Split into three groups by audience.

    ","path":["Operations"],"tags":[]},{"location":"operations/#day-to-day","level":2,"title":"Day-to-Day","text":"

    Everyday operation guides for anyone running ctx in a project or adopting it in a team.

    ","path":["Operations"],"tags":[]},{"location":"operations/#integration","level":3,"title":"Integration","text":"

    Adopt ctx in an existing project: initialize context files, migrate from other tools, and onboard team members.

    ","path":["Operations"],"tags":[]},{"location":"operations/#upgrade","level":3,"title":"Upgrade","text":"

    Upgrade between versions with step-by-step migration notes and breaking-change guidance.

    ","path":["Operations"],"tags":[]},{"location":"operations/#ai-tools","level":3,"title":"AI Tools","text":"

    Configure ctx with Claude Code, Cursor, Aider, Copilot, Windsurf, and other AI coding tools.

    ","path":["Operations"],"tags":[]},{"location":"operations/#autonomous-loops","level":3,"title":"Autonomous Loops","text":"

    Run an unattended AI agent that works through tasks overnight, with ctx providing persistent memory between iterations.

    ","path":["Operations"],"tags":[]},{"location":"operations/#hub","level":2,"title":"Hub","text":"

    Operator guides for running a ctx Hub, the gRPC server that fans out structured entries across projects. If you're a client connecting to a Hub someone else runs, see ctx connect and the Hub recipes instead.

    ","path":["Operations"],"tags":[]},{"location":"operations/#hub-operations","level":3,"title":"Hub Operations","text":"

    Data directory layout, daemon management, systemd unit, backup and restore, log rotation, monitoring, and upgrades.

    ","path":["Operations"],"tags":[]},{"location":"operations/#hub-failure-modes","level":3,"title":"Hub Failure Modes","text":"

    What can go wrong in network, storage, cluster, auth, and clock layers, and what you should do about each one. Includes the short-list table oncall engineers will want bookmarked.

    ","path":["Operations"],"tags":[]},{"location":"operations/#maintainers","level":2,"title":"Maintainers","text":"

    Runbooks for people shipping ctx itself.

    ","path":["Operations"],"tags":[]},{"location":"operations/#cutting-a-release","level":3,"title":"Cutting a Release","text":"

    Step-by-step runbook for maintainers: bump version, generate release notes, run the release script, and verify the result.

    ","path":["Operations"],"tags":[]},{"location":"operations/#runbooks","level":2,"title":"Runbooks","text":"

    Step-by-step procedures you run with your agent. Each runbook includes a prompt to paste into a Claude Code session and guidance on triaging the results.

    Runbook Purpose When to run Release checklist Full pre-release sequence Before every release Plugin release Plugin-specific release steps Plugin changes ship Breaking migration Guide users across breaking changes Releases with renames Hub deployment Set up a ctx Hub end-to-end First-time hub setup New contributor Onboarding: clone to first session New contributors Codebase audit AST audits, magic strings, dead code, doc alignment Before release, quarterly Docs semantic audit Narrative gaps, weak pages, structural problems Before release, after adding pages Sanitize permissions Clean .claude/settings.local.json of over-broad grants After heavy permission granting Architecture exploration Systematic architecture docs across repos New codebase onboarding, reviews

    Recommended cadence:

    • Before every release: release checklist (which includes codebase audit + docs semantic audit)
    • Monthly: sanitize permissions
    • Quarterly: full sweep of all audit runbooks
    ","path":["Operations"],"tags":[]},{"location":"operations/autonomous-loop/","level":1,"title":"Autonomous Loops","text":"","path":["Operations","Day-to-Day","Autonomous Loops"],"tags":[]},{"location":"operations/autonomous-loop/#autonomous-ai-development","level":2,"title":"Autonomous AI Development","text":"

    Iterate until done.

    An autonomous loop is an iterative AI development workflow where an agent works on tasks until completion, without constant human intervention.

    ctx provides the memory that makes this possible:

    • ctx provides the memory: persistent context that survives across iterations
    • The loop provides the automation: continuous execution until done

    Together, they enable fully autonomous AI development where the agent remembers everything across iterations.

    Origin

    This pattern is inspired by Geoffrey Huntley's Ralph Wiggum technique.

    We use generic terminology here so the concepts remain clear regardless of trends.

    ","path":["Operations","Day-to-Day","Autonomous Loops"],"tags":[]},{"location":"operations/autonomous-loop/#how-it-works","level":2,"title":"How It Works","text":"
    graph TD\n    A[Start Loop] --> B[Load .context/loop.md]\n    B --> C[AI reads .context/]\n    C --> D[AI picks task from TASKS.md]\n    D --> E[AI completes task]\n    E --> F[AI updates context files]\n    F --> G[AI commits changes]\n    G --> H{Check signals}\n    H -->|SYSTEM_CONVERGED| I[Done - all tasks complete]\n    H -->|SYSTEM_BLOCKED| J[Done - needs human input]\n    H -->|Continue| B
    1. Loop reads .context/loop.md and invokes AI
    2. AI loads context from .context/
    3. AI picks one task and completes it
    4. AI updates context files (mark task done, add learnings)
    5. AI commits changes
    6. Loop checks for completion signals
    7. Repeat until converged or blocked
    ","path":["Operations","Day-to-Day","Autonomous Loops"],"tags":[]},{"location":"operations/autonomous-loop/#quick-start-shell-while-loop-recommended","level":2,"title":"Quick Start: Shell While Loop (Recommended)","text":"

    The best way to run an autonomous loop is a plain shell script that invokes your AI tool in a fresh process on each iteration. This is \"pure ralph\":

    The only state that carries between iterations is what lives in .context/ and the git history. No context window bleed, no accumulated tokens, no hidden state.

    Create a loop.sh:

    #!/bin/bash\n# loop.sh: an autonomous iteration loop\n\nPROMPT_FILE=\"${1:-.context/loop.md}\"\nMAX_ITERATIONS=\"${2:-10}\"\nOUTPUT_FILE=\"/tmp/loop_output.txt\"\n\nfor i in $(seq 1 $MAX_ITERATIONS); do\n  echo \"=== Iteration $i ===\"\n\n  # Invoke AI with prompt\n  cat \"$PROMPT_FILE\" | claude --print > \"$OUTPUT_FILE\" 2>&1\n\n  # Display output\n  cat \"$OUTPUT_FILE\"\n\n  # Check for completion signals\n  if grep -q \"SYSTEM_CONVERGED\" \"$OUTPUT_FILE\"; then\n    echo \"Loop complete: All tasks done\"\n    break\n  fi\n\n  if grep -q \"SYSTEM_BLOCKED\" \"$OUTPUT_FILE\"; then\n    echo \"Loop blocked: Needs human input\"\n    break\n  fi\n\n  sleep 2\ndone\n

    Make it executable and run:

    chmod +x loop.sh\n./loop.sh\n

    You can also generate this script with ctx loop (see CLI Reference).

    ","path":["Operations","Day-to-Day","Autonomous Loops"],"tags":[]},{"location":"operations/autonomous-loop/#why-do-we-use-a-shell-loop","level":3,"title":"Why Do We Use a Shell Loop?","text":"

    Each iteration starts a fresh AI process with zero context window history. The agent knows only what it reads from .context/ files: Exactly the information you chose to persist.

    This is the core loop principle: memory is explicit, not accidental.

    ","path":["Operations","Day-to-Day","Autonomous Loops"],"tags":[]},{"location":"operations/autonomous-loop/#alternative-claude-codes-built-in-loop","level":2,"title":"Alternative: Claude Code's Built-in Loop","text":"

    Claude Code has built-in loop support:

    # Start autonomous loop\n/loop\n\n# Cancel running loop\n/cancel-loop\n

    This is convenient for quick iterations, but be aware of important caveats:

    This Loop Is Not Pure

    Claude Code's /loop runs all iterations within the same session. This means:

    • State leaks between iterations: The context window accumulates output from every previous iteration. The agent \"remembers\" things it saw earlier (even if they were never persisted to .context/).
    • Token budget degrades: Each iteration adds to the context window, leaving less room for actual work in later iterations.
    • Not ergonomic for long runs: Users report that the built-in loop is less predictable for 10+ iteration runs compared to a shell loop.

    For short explorations (2-5 iterations) or interactive use, /loop works fine. For overnight unattended runs or anything where iteration independence matters, use the shell while loop instead.

    ","path":["Operations","Day-to-Day","Autonomous Loops"],"tags":[]},{"location":"operations/autonomous-loop/#the-contextloopmd-file","level":2,"title":"The .context/loop.md File","text":"

    The prompt file instructs the AI on how to work autonomously. Here's a template:

    # Autonomous Development Prompt\n\nYou are working on this project autonomously. Follow these steps:\n\n## 1. Load Context\n\nRead these files in order:\n\n1. `.context/CONSTITUTION.md`: NEVER violate these rules\n2. `.context/TASKS.md`: Find work to do\n3. `.context/CONVENTIONS.md`: Follow these patterns\n4. `.context/DECISIONS.md`: Understand past choices\n\n## 2. Pick One Task\n\nFrom `.context/TASKS.md`, select ONE task that is:\n\n- Not blocked\n- Highest priority available\n- Within your capabilities\n\n## 3. Complete the Task\n\n- Write code following conventions\n- Run tests if applicable\n- Keep changes focused and minimal\n\n## 4. Update Context\n\nAfter completing work:\n\n- Mark task complete in `TASKS.md`\n- Add any learnings to `LEARNINGS.md`\n- Add any decisions to `DECISIONS.md`\n\n## 5. Commit Changes\n\nCreate a focused commit with clear message.\n\n## 6. Signal Status\n\nEnd your response with exactly ONE of:\n\n- `SYSTEM_CONVERGED`: All tasks in TASKS.md are complete\n- `SYSTEM_BLOCKED`: Cannot proceed, need human input (explain why)\n- (no signal): More work remains, continue to next iteration\n\n## Rules\n\n- ONE task per iteration\n- NEVER skip tests\n- NEVER violate CONSTITUTION.md\n- Commit after each task\n
    ","path":["Operations","Day-to-Day","Autonomous Loops"],"tags":[]},{"location":"operations/autonomous-loop/#completion-signals","level":2,"title":"Completion Signals","text":"

    The loop watches for these signals in AI output:

    Signal Meaning When to Use SYSTEM_CONVERGED All tasks complete No pending tasks in TASKS.md SYSTEM_BLOCKED Cannot proceed Needs clarification, access, or decision BOOTSTRAP_COMPLETE Initial setup done Project scaffolding finished","path":["Operations","Day-to-Day","Autonomous Loops"],"tags":[]},{"location":"operations/autonomous-loop/#example-usage","level":3,"title":"Example Usage","text":"

    converged state

    I've completed all tasks in TASKS.md:\n- [x] Set up project structure\n- [x] Implement core API\n- [x] Add authentication\n- [x] Write tests\n\nNo pending tasks remain.\n\nSYSTEM_CONVERGED\n

    blocked state

    I cannot proceed with the \"Deploy to production\" task because:\n- Missing AWS credentials\n- Need confirmation on region selection\n\nPlease provide credentials and confirm deployment region.\n\nSYSTEM_BLOCKED\n
    ","path":["Operations","Day-to-Day","Autonomous Loops"],"tags":[]},{"location":"operations/autonomous-loop/#why-ctx-and-loops-work-well-together","level":2,"title":"Why ctx and Loops Work Well Together","text":"Without ctx With ctx Each iteration starts fresh Each iteration has full history Decisions get re-made Decisions persist in DECISIONS.md Learnings are lost Learnings accumulate in LEARNINGS.md Tasks can be forgotten Tasks tracked in TASKS.md","path":["Operations","Day-to-Day","Autonomous Loops"],"tags":[]},{"location":"operations/autonomous-loop/#automatic-context-updates","level":3,"title":"Automatic Context Updates","text":"

    During the loop, the AI should update context files:

    Mark task complete:

    ctx task complete \"implement user auth\"\n

    Or emit an update command (parsed by ctx watch):

    <context-update type=\"complete\">user auth</context-update>\n

    Add learning:

    ctx add learning \"Rate limiting requires Redis connection\" \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n

    Or via update command:

    <context-update type=\"learning\"\n  context=\"Implementing rate limiter\"\n  lesson=\"Rate limiting requires Redis connection\"\n  application=\"Ensure Redis is provisioned before enabling rate limits\"\n>Rate Limiting Redis Dependency</context-update>\n

    Record decision:

    ctx add decision \"Use JWT tokens for API authentication\" \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n

    ","path":["Operations","Day-to-Day","Autonomous Loops"],"tags":[]},{"location":"operations/autonomous-loop/#advanced-watch-mode","level":2,"title":"Advanced: Watch Mode","text":"

    Run ctx watch alongside the loop to automatically process context updates:

    # Terminal 1: Run the loop\n./loop.sh 2>&1 | tee /tmp/loop.log\n\n# Terminal 2: Watch for context updates\nctx watch --log /tmp/loop.log\n

    The watch command processes context updates from the loop output in real time.

    ","path":["Operations","Day-to-Day","Autonomous Loops"],"tags":[]},{"location":"operations/autonomous-loop/#project-setup","level":2,"title":"Project Setup","text":"

    Initialize a project for autonomous loop operation:

    ctx init\n

    The loop prompt template is deployed to .context/loop.md during initialization. It instructs the agent to:

    • Work autonomously without asking clarifying questions;
    • Follow one-task-per-iteration discipline;
    • Use SYSTEM_CONVERGED / SYSTEM_BLOCKED signals;
    ","path":["Operations","Day-to-Day","Autonomous Loops"],"tags":[]},{"location":"operations/autonomous-loop/#example-project-structure","level":2,"title":"Example Project Structure","text":"
    my-project/\n├── .context/\n│   ├── CONSTITUTION.md\n│   ├── TASKS.md          # Work items for the loop\n│   ├── DECISIONS.md\n│   ├── LEARNINGS.md\n│   ├── CONVENTIONS.md\n│   └── sessions/         # Loop iteration history\n├── loop.sh               # Loop script (if not using Claude Code)\n└── src/                  # Your code\n
    ","path":["Operations","Day-to-Day","Autonomous Loops"],"tags":[]},{"location":"operations/autonomous-loop/#sample-tasksmd-for-autonomous-loops","level":3,"title":"Sample TASKS.md for Autonomous Loops","text":"
    # Tasks\n\n## Phase 1: Setup\n\n- [x] Initialize project structure\n- [x] Set up testing framework\n\n## Phase 2: Core Features\n\n- [ ] Implement user registration `#priority:high`\n- [ ] Add email verification `#priority:high`\n- [ ] Create password reset flow `#priority:medium`\n\n## Phase 3: Polish\n\n- [ ] Add rate limiting `#priority:medium`\n- [ ] Improve error messages `#priority:low`\n

    The loop will work through these systematically, marking each complete.

    ","path":["Operations","Day-to-Day","Autonomous Loops"],"tags":[]},{"location":"operations/autonomous-loop/#troubleshooting","level":2,"title":"Troubleshooting","text":"","path":["Operations","Day-to-Day","Autonomous Loops"],"tags":[]},{"location":"operations/autonomous-loop/#loop-runs-forever","level":3,"title":"Loop Runs Forever","text":"

    Cause: AI not emitting completion signals

    Fix: Ensure .context/loop.md explicitly instructs signaling:

    End EVERY response with one of:\n- SYSTEM_CONVERGED (if all tasks done)\n- SYSTEM_BLOCKED (if stuck)\n

    ","path":["Operations","Day-to-Day","Autonomous Loops"],"tags":[]},{"location":"operations/autonomous-loop/#context-not-persisting","level":3,"title":"Context Not Persisting","text":"

    Cause: AI not updating context files

    Fix: Add explicit instructions to .context/loop.md:

    After completing a task, you MUST:\n1. Run: ctx task complete \"<task>\"\n2. Add learnings: ctx add learning \"...\" --session-id abc12345 --branch main --commit 68fbc00a\n

    ","path":["Operations","Day-to-Day","Autonomous Loops"],"tags":[]},{"location":"operations/autonomous-loop/#tasks-getting-repeated","level":3,"title":"Tasks Getting Repeated","text":"

    Cause: Task not marked complete before next iteration

    Fix: Ensure commit happens after context update:

    Order of operations:\n1. Complete coding work\n2. Update context files (*`ctx task complete`, `ctx add`*)\n3. Commit **ALL** changes including `.context/`\n4. Then signal status\n
    ","path":["Operations","Day-to-Day","Autonomous Loops"],"tags":[]},{"location":"operations/autonomous-loop/#ai-violating-constitution","level":3,"title":"AI Violating Constitution","text":"

    Cause: Constitution not read first

    Fix: Make constitution check explicit in .context/loop.md:

    BEFORE any work:\n1. Read .context/CONSTITUTION.md\n2. If task would violate ANY rule, emit SYSTEM_BLOCKED\n3. Explain which rule prevents the work\n
    ","path":["Operations","Day-to-Day","Autonomous Loops"],"tags":[]},{"location":"operations/autonomous-loop/#further-reading","level":2,"title":"Further Reading","text":"
    • Building ctx Using ctx: The dogfooding story: how autonomous loops built the tool that powers them
    ","path":["Operations","Day-to-Day","Autonomous Loops"],"tags":[]},{"location":"operations/autonomous-loop/#resources","level":2,"title":"Resources","text":"
    • Geoffrey Huntley's Ralph Wiggum Technique: The original inspiration
    • Context CLI: Command reference
    • Integrations: Tool-specific setup
    ","path":["Operations","Day-to-Day","Autonomous Loops"],"tags":[]},{"location":"operations/hub-failure-modes/","level":1,"title":"Hub Failure Modes","text":"","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub-failure-modes/#ctx-hub-failure-modes","level":1,"title":"ctx Hub: Failure Modes","text":"

    What can go wrong, what the system does about it, and what you should do. Complementary to ctx Hub Operations.

    Design Posture

    The hub is best-effort knowledge sharing, not a durable ledger. Local .context/ files are the source of truth for each project; the hub is a fan-out channel. This framing informs every failure-mode decision below.

    ","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub-failure-modes/#network","level":2,"title":"Network","text":"","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub-failure-modes/#client-loses-connection-mid-stream","level":3,"title":"Client Loses Connection Mid-Stream","text":"

    What happens: ctx connection listen detects the EOF, waits with exponential backoff, and reconnects. On reconnect it passes its last-seen sequence; the hub replays everything newer.

    What you should do: nothing. If reconnects are looping, check firewall state on the hub and ctx hub status output.

    ","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub-failure-modes/#partition-majority-side-reachable","level":3,"title":"Partition: Majority Side Reachable","text":"

    What happens: clients routed to the majority side continue to publish and listen. The minority nodes step down to followers that cannot accept writes (Raft quorum lost).

    What you should do: let it heal. When the partition closes, followers catch up via sequence-based sync automatically.

    ","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub-failure-modes/#partition-split-brain-no-quorum","level":3,"title":"Partition: Split Brain (No Quorum)","text":"

    What happens: no node holds a majority, so no leader is elected. All nodes become read-only. ctx connection publish and ctx add --share fail with a \"no leader\" error; local writes still succeed.

    What you should do: fix the network. If the partition is permanent (e.g., a data center is gone), bootstrap a new cluster from the survivors with ctx hub peer remove for the dead nodes.

    ","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub-failure-modes/#hub-unreachable-during-ctx-add-share","level":3,"title":"Hub Unreachable during ctx add --share","text":"

    What happens: the local write succeeds; the share step prints a warning and exits non-zero on the share leg only. --share is best-effort; it never blocks local context updates.

    What you should do: run ctx connection publish later to backfill, or rely on another --share for the same entry ID. The hub deduplicates by entry ID.

    ","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub-failure-modes/#storage","level":2,"title":"Storage","text":"","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub-failure-modes/#disk-full-on-the-leader","level":3,"title":"Disk Full on the Leader","text":"

    What happens: entries.jsonl append fails. The hub rejects writes with an error and stays up for read traffic. Clients retry; followers keep their in-sync status using whatever the leader already wrote.

    What you should do: free disk or grow the volume, then nothing else; the hub resumes accepting writes on the next append attempt.

    ","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub-failure-modes/#corrupt-entriesjsonl","level":3,"title":"Corrupt entries.jsonl","text":"

    What happens: if the last line is a partial JSON write from a crash, the hub truncates it on startup and logs a warning. If any earlier line is malformed, the hub refuses to start.

    What you should do: inspect with jq -c . <data-dir>/entries.jsonl > /dev/null to find the bad line. Move the bad region to a .quarantine file, then start. Nothing is ever silently dropped.

    ","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub-failure-modes/#metajson-entriesjsonl-sequence-mismatch","level":3,"title":"meta.json / entries.jsonl Sequence Mismatch","text":"

    What happens: the hub refuses to start. This usually means someone copied one file without the other.

    What you should do: restore both files from the same backup, or accept the higher sequence by regenerating meta.json from entries.jsonl (manual for now; file a bug).

    ","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub-failure-modes/#cluster","level":2,"title":"Cluster","text":"","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub-failure-modes/#leader-crash-clean-shutdown","level":3,"title":"Leader Crash, Clean Shutdown","text":"

    What happens: ctx hub stop triggers stepdown first, so a new leader is elected before the old one exits. In-flight writes drain. Clients reconnect to the new leader transparently.

    ","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub-failure-modes/#leader-crash-hard-fail-kill-9-power-loss","level":3,"title":"Leader Crash, Hard Fail (Kill -9, Power Loss)","text":"

    What happens: Raft detects the missing heartbeat and elects a new leader within a few seconds. Writes the old leader accepted but had not yet replicated can be lost. See the Raft-lite warning in the cluster recipe.

    What you should do: if you need stronger durability, run ctx connection listen on a dedicated \"collector\" project that persists entries locally as a write-ahead backup.

    ","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub-failure-modes/#split-brain-after-rejoin","level":3,"title":"Split-Brain After Rejoin","text":"

    What happens: Raft reconciles: the minority side's uncommitted writes are discarded, and the majority's log is authoritative.

    What you should do: nothing automatic. If you know the minority had important writes, grep for them in <data-dir>/entries.jsonl.rejected (written by the reconciliation pass) and replay them with ctx connection publish.

    ","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub-failure-modes/#auth-and-tokens","level":2,"title":"Auth and Tokens","text":"","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub-failure-modes/#lost-admin-token","level":3,"title":"Lost Admin Token","text":"

    What happens: you cannot register new projects.

    What you should do: retrieve it from <data-dir>/admin.token. If that file is also gone, stop the hub and regenerate. Note that all existing client tokens keep working; only new registrations need the admin token.

    ","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub-failure-modes/#compromised-admin-token","level":3,"title":"Compromised Admin Token","text":"

    What happens: anyone with the token can register new projects and publish. They cannot read existing entries without a client token for a project that subscribes.

    What you should do: rotate the admin token (regenerate <data-dir>/admin.token and restart), revoke suspicious client registrations via clients.json, and audit entries.jsonl for unexpected origins.

    ","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub-failure-modes/#compromised-client-token","level":3,"title":"Compromised Client Token","text":"

    What happens: the attacker can publish as that project and read anything that project is subscribed to. Because Origin is self-asserted on publish, the attacker can also publish entries tagged with any other project's name, so attribution in entries.jsonl cannot be trusted after a token compromise.

    What you should do: remove the client's entry from clients.json, restart the hub, and re-register the legitimate project with a fresh token. Audit entries.jsonl for entries published after the compromise timestamp and quarantine any that look suspicious; remember that Origin on those entries proves nothing.

    ","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub-failure-modes/#compromised-hub-host","level":3,"title":"Compromised Hub Host","text":"

    What happens: <data-dir>/clients.json stores client tokens verbatim (not hashed). Anyone with read access to that file has every client token in hand and can impersonate any registered project until each one is rotated.

    What you should do: treat it as a total hub compromise. Stop the hub, wipe <data-dir> (keep a forensic copy first), regenerate the admin token, and have every client re-register. See Security model for the mitigations that reduce the blast radius while the hashing follow-up is pending.

    ","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub-failure-modes/#clock-skew","level":2,"title":"Clock Skew","text":"

    Hub entries carry a timestamp assigned by the publishing client. The hub does not rewrite timestamps. Clients with significant clock skew will publish entries that look out of order in the shared feed.

    What you should do: run NTP on all client machines. If you see entries dated in the future or far past, the publisher's clock is the culprit.

    ","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub-failure-modes/#the-short-list","level":2,"title":"The Short List","text":"Symptom First thing to check Client can't reach hub Firewall, then ctx hub status \"No leader\" errors Cluster quorum; run ctx hub status on each peer Hub won't start after crash Last line of entries.jsonl Entries missing after restore Check clients.json sequence vs local .sync-state.json Duplicate entries in shared feed Client replayed after restore, safe (dedup by ID) Followers lagging Disk or network on the follower, not the leader","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub-failure-modes/#see-also","level":2,"title":"See Also","text":"
    • ctx Hub Operations
    • ctx Hub security model
    • HA cluster recipe
    ","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub/","level":1,"title":"Hub Operations","text":"","path":["Operations","Hub","Hub Operations"],"tags":[]},{"location":"operations/hub/#ctx-hub-operations","level":1,"title":"ctx Hub: Operations","text":"

    Running the ctx Hub in production. This page is for operators: people running a hub for themselves or a team, not people writing to a hub someone else is running.

    If you have not read it yet, start with the ctx Hub overview. It explains what the hub is, the two user stories it supports (personal cross-project brain vs small trusted team), and what it does not do. A client-side tour is in Getting Started.

    Operator Cheat Sheet

    • The hub fans out four entry types only: decision, learning, convention, task. Journals, scratchpad, and other local state are out of scope.
    • Identity is per-project, not per-user. Attribution is limited to Origin, which is self-asserted by the publishing client.
    • The data model is an append-only JSONL log plus two small JSON sidecar files. Nothing is rewritten in place.
    ","path":["Operations","Hub","Hub Operations"],"tags":[]},{"location":"operations/hub/#data-directory-layout","level":2,"title":"Data Directory Layout","text":"

    The hub stores everything under a single data directory (default ~/.ctx/hub-data/, override with --data-dir).

    <data-dir>/\n  admin.token        # Initial admin token (chmod 600)\n  clients.json       # Registered client tokens and project names\n  meta.json          # Sequence counter, version, cluster metadata\n  entries.jsonl      # Append-only log (single source of truth)\n  hub.pid            # Daemon PID file (daemon mode only)\n  raft/              # Raft state (cluster mode only)\n    log.db\n    stable.db\n    snapshots/\n

    Invariants:

    • entries.jsonl is append-only. Every line is a valid JSON object. Corrupt lines are fatal at startup: fix or truncate before restart.
    • meta.json is authoritative for the next sequence number. On restart, the hub reads the last valid line of entries.jsonl and refuses to start if the sequences disagree.
    • clients.json holds client tokens (currently stored verbatim, not hashed; see the security model); losing it invalidates all client registrations.
    ","path":["Operations","Hub","Hub Operations"],"tags":[]},{"location":"operations/hub/#starting-and-stopping","level":2,"title":"Starting and Stopping","text":"ForegroundDaemon
    ctx hub start                    # Ctrl-C to stop\nctx hub start --port 8080        # Custom port\nctx hub start --data-dir /srv/ctx-hub\n
    ctx hub start --daemon           # Fork to background\nctx hub stop                      # Graceful shutdown\n

    ctx hub stop sends SIGTERM to the PID in hub.pid, waits for in-flight RPCs to drain, then exits. If the daemon is wedged, remove hub.pid and send SIGKILL manually. entries.jsonl is crash-safe, so you will not lose accepted writes.

    ","path":["Operations","Hub","Hub Operations"],"tags":[]},{"location":"operations/hub/#systemd-unit","level":2,"title":"Systemd Unit","text":"

    For production single-node deployments, run the hub as a systemd service instead of --daemon:

    # /etc/systemd/system/ctx-hub.service\n[Unit]\nDescription=ctx Hub\nAfter=network-online.target\nWants=network-online.target\n\n[Service]\nType=simple\nUser=ctx\nGroup=ctx\nExecStart=/usr/local/bin/ctx hub start --port 9900 \\\n    --data-dir /var/lib/ctx-hub\nRestart=on-failure\nRestartSec=5\nNoNewPrivileges=true\nProtectSystem=strict\nProtectHome=true\nReadWritePaths=/var/lib/ctx-hub\nPrivateTmp=true\n\n[Install]\nWantedBy=multi-user.target\n
    sudo systemctl enable --now ctx-hub\nsudo journalctl -u ctx-hub -f\n
    ","path":["Operations","Hub","Hub Operations"],"tags":[]},{"location":"operations/hub/#backup-and-restore","level":2,"title":"Backup and Restore","text":"

    Because entries.jsonl is append-only, backups are trivial:

    # Hot backup, safe while the hub is running.\ncp <data-dir>/entries.jsonl backups/entries-$(date +%F).jsonl\ncp <data-dir>/meta.json      backups/meta-$(date +%F).json\ncp <data-dir>/clients.json   backups/clients-$(date +%F).json\n

    For a consistent snapshot across all three files, stop the hub, copy, then start again, or use a filesystem-level snapshot (LVM, ZFS, Btrfs).

    Restore:

    ctx hub stop                           # Stop the hub\ncp backups/entries-2026-04-10.jsonl <data-dir>/entries.jsonl\ncp backups/meta-2026-04-10.json      <data-dir>/meta.json\ncp backups/clients-2026-04-10.json   <data-dir>/clients.json\nctx hub start --daemon\n

    Clients that pushed sequences above the restored watermark will re-publish on the next listen reconnect, because the hub now reports a lower sequence than what clients have on disk. This is safe; the store deduplicates by entry ID.

    ","path":["Operations","Hub","Hub Operations"],"tags":[]},{"location":"operations/hub/#log-rotation","level":2,"title":"Log Rotation","text":"

    entries.jsonl grows unbounded. For long-lived hubs, rotate it offline:

    ctx hub stop\nmv <data-dir>/entries.jsonl <data-dir>/entries-$(date +%F).jsonl.old\n# Replay the last N days into a fresh entries.jsonl if you want a\n# trimmed active log, or leave the old file in place as history.\nctx hub start --daemon\n

    Do not truncate entries.jsonl while the hub is running. The hub holds an open file handle; an in-place truncation confuses the sequence counter and loses writes.

    ","path":["Operations","Hub","Hub Operations"],"tags":[]},{"location":"operations/hub/#monitoring","level":2,"title":"Monitoring","text":"

    Liveness probe:

    ctx hub status --exit-code\n

    Exit code 0 means the node is healthy (leader or in-sync follower); non-zero means degraded. Wire this into your monitoring of choice.

    For cluster deployments, watch for:

    • Role flaps: the leader changing more than once per hour suggests network instability or disk contention.
    • Replication lag: ctx hub status shows per-peer sequence offsets. Sustained lag > 100 sequences on a follower is worth investigating.
    • entries.jsonl growth rate: sudden spikes often indicate a misbehaving ctx connection listen reconnect loop.
    ","path":["Operations","Hub","Hub Operations"],"tags":[]},{"location":"operations/hub/#upgrading","level":2,"title":"Upgrading","text":"

    The JSONL format is versioned in meta.json. ctx refuses to start against a newer store version than it understands; older store versions are upgraded in place at first start after an upgrade.

    Always back up <data-dir>/ before upgrading.

    ","path":["Operations","Hub","Hub Operations"],"tags":[]},{"location":"operations/hub/#see-also","level":2,"title":"See Also","text":"
    • ctx Hub failure modes
    • ctx Hub security model
    • ctx serve reference
    • ctx hub reference
    ","path":["Operations","Hub","Hub Operations"],"tags":[]},{"location":"operations/integrations/","level":1,"title":"AI Tools","text":"","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#ai-tools","level":2,"title":"AI Tools","text":"

    Context works with any AI tool that can read files. This guide covers setup for popular AI coding assistants.

    ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#claude-code-full-integration","level":2,"title":"Claude Code (Full Integration)","text":"

    Claude Code has the deepest integration via the ctx plugin.

    ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#setup","level":3,"title":"Setup","text":"

    First, install ctx and initialize your project:

    ctx init\n

    Then, install the ctx plugin in Claude Code:

    # From the ctx repository\nclaude /plugin install ./internal/assets/claude\n\n# Or from the marketplace\nclaude /plugin marketplace add ActiveMemory/ctx\nclaude /plugin install ctx@activememory-ctx\n

    Ensure the Plugin Is Enabled

    Installing a plugin registers it, but local installs may not auto-enable it globally. Verify ~/.claude/settings.json contains:

    { \"enabledPlugins\": { \"ctx@activememory-ctx\": true } }\n

    Without this, the plugin's hooks and skills won't appear in other projects. Running ctx init auto-enables the plugin; use --no-plugin-enable to skip this step.

    This gives you:

    Component Purpose .context/ All context files CLAUDE.md Bootstrap instructions Plugin hooks Lifecycle automation Plugin skills Agent Skills","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#how-it-works","level":3,"title":"How It Works","text":"
    graph TD\n    A[Session Start] --> B[Claude reads CLAUDE.md]\n    B --> C[PreToolUse hook runs]\n    C --> D[ctx agent loads context]\n    D --> E[Work happens]\n    E --> F[Session End]
    1. Session start: Claude reads CLAUDE.md, which tells it to check .context/
    2. First tool use: PreToolUse hook runs ctx agent and emits the context packet (subsequent invocations within the cooldown window are silent)
    3. Next session: Claude reads context files and continues with context
    ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#plugin-hooks","level":3,"title":"Plugin Hooks","text":"

    The ctx plugin provides lifecycle hooks implemented as Go subcommands (ctx system *):

    Hook Event Purpose ctx system context-load-gate PreToolUse (.*) Auto-inject context on first tool use ctx system block-non-path-ctx PreToolUse (Bash) Block ./ctx or go run: force $PATH install ctx system qa-reminder PreToolUse (Bash) Remind agent to lint/test before committing ctx system specs-nudge PreToolUse (EnterPlanMode) Nudge agent to use project specs when planning ctx system check-context-size UserPromptSubmit Nudge context assessment as sessions grow ctx system check-ceremonies UserPromptSubmit Nudge /ctx-remember and /ctx-wrap-up adoption ctx system check-persistence UserPromptSubmit Remind to persist learnings/decisions ctx system check-journal UserPromptSubmit Remind to export/enrich journal entries ctx system check-reminders UserPromptSubmit Relay pending reminders at session start ctx system check-version UserPromptSubmit Warn when binary/plugin versions diverge ctx system check-resources UserPromptSubmit Warn when memory/swap/disk/load hit DANGER level ctx system check-knowledge UserPromptSubmit Nudge when knowledge files grow large ctx system check-map-staleness UserPromptSubmit Nudge when ARCHITECTURE.md is stale ctx system heartbeat UserPromptSubmit Session-alive signal with prompt count metadata ctx system post-commit PostToolUse (Bash) Nudge context capture and QA after git commits

    A catch-all PreToolUse hook also runs ctx agent on every tool use (with cooldown) to autoload context.

    ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#hook-configuration","level":3,"title":"Hook Configuration","text":"

    The plugin's hooks.json wires everything automatically: no manual configuration in settings.local.json needed:

    {\n  \"hooks\": {\n    \"PreToolUse\": [\n      {\n        \"matcher\": \".*\",\n        \"hooks\": [\n          { \"type\": \"command\", \"command\": \"ctx system context-load-gate\" }\n        ]\n      },\n      {\n        \"matcher\": \"Bash\",\n        \"hooks\": [\n          { \"type\": \"command\", \"command\": \"ctx system block-non-path-ctx\" }\n        ]\n      },\n      {\n        \"matcher\": \"Bash\",\n        \"hooks\": [\n          { \"type\": \"command\", \"command\": \"ctx system qa-reminder\" }\n        ]\n      },\n      {\n        \"matcher\": \"EnterPlanMode\",\n        \"hooks\": [\n          { \"type\": \"command\", \"command\": \"ctx system specs-nudge\" }\n        ]\n      },\n      {\n        \"matcher\": \".*\",\n        \"hooks\": [\n          { \"type\": \"command\", \"command\": \"ctx agent --budget 4000 2>/dev/null || true\" }\n        ]\n      }\n    ],\n    \"PostToolUse\": [\n      {\n        \"matcher\": \"Bash\",\n        \"hooks\": [\n          { \"type\": \"command\", \"command\": \"ctx system post-commit\" }\n        ]\n      }\n    ],\n    \"UserPromptSubmit\": [\n      {\n        \"hooks\": [\n          { \"type\": \"command\", \"command\": \"ctx system check-context-size\" },\n          { \"type\": \"command\", \"command\": \"ctx system check-ceremonies\" },\n          { \"type\": \"command\", \"command\": \"ctx system check-persistence\" },\n          { \"type\": \"command\", \"command\": \"ctx system check-journal\" },\n          { \"type\": \"command\", \"command\": \"ctx system check-reminders\" },\n          { \"type\": \"command\", \"command\": \"ctx system check-version\" },\n          { \"type\": \"command\", \"command\": \"ctx system check-resources\" },\n          { \"type\": \"command\", \"command\": \"ctx system check-knowledge\" },\n          { \"type\": \"command\", \"command\": \"ctx system check-map-staleness\" },\n          { \"type\": \"command\", \"command\": \"ctx system heartbeat\" }\n        ]\n     
 }\n    ]\n  }\n}\n
    ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#customizing-token-budget-and-cooldown","level":3,"title":"Customizing Token Budget and Cooldown","text":"

    Edit the PreToolUse command to change the token budget or cooldown:

    \"command\": \"ctx agent --budget 8000 --session $PPID >/dev/null || true\"\n\"command\": \"ctx agent --budget 4000 --cooldown 5m --session $PPID >/dev/null || true\"\n

    The --session $PPID flag isolates the cooldown per session: $PPID resolves to the Claude Code process PID, so concurrent sessions don't interfere. The default cooldown is 10 minutes; use --cooldown 0 to disable it.

    ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#verifying-setup","level":3,"title":"Verifying Setup","text":"
    1. Start a new Claude Code session;
    2. Ask: \"Do you remember?\"
    3. Claude should cite specific context:
      • Current tasks from .context/TASKS.md;
      • Recent decisions or learnings;
      • Recent session history from ctx journal.
    ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#local-plugin-development","level":3,"title":"Local Plugin Development","text":"

    When developing ctx locally (adding skills, hooks, or changing plugin behavior), Claude Code caches the plugin by version. You must bump the version in both files and update the marketplace for changes to take effect:

    1. Bump version in both:
    2. internal/assets/claude/.claude-plugin/plugin.json (plugin manifest), .claude-plugin/marketplace.json (marketplace listing);

    3. Update the marketplace in Claude Code:

    4. Open the Plugins UI (/plugins or Esc menu),
    5. Go to Marketplaces tab,
    6. Select the activememory-ctx Marketplace,
    7. Choose Update marketplace;

    8. Start a new Claude Code session: skill changes aren't reflected in existing sessions.

    Both Version Files Must Match

    If you only bump plugin.json but not marketplace.json (or vice versa), Claude Code may not detect the update. Always bump both together.

    ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#troubleshooting","level":3,"title":"Troubleshooting","text":"Issue Solution Context not loading Check ctx is in PATH: which ctx Hook errors Verify plugin is installed: claude /plugin list New skill not visible Bump version in both plugin.json files, update marketplace","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#manual-context-load","level":3,"title":"Manual Context Load","text":"

    If hooks aren't working, manually load context:

    # Get context packet\nctx agent --budget 4000\n\n# Or paste into conversation\ncat .context/TASKS.md\n
    ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#agent-skills","level":3,"title":"Agent Skills","text":"

    The ctx plugin ships Agent Skills following the agentskills.io specification.

    These are invoked in Claude Code with /skill-name.

    ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#session-lifecycle-skills","level":4,"title":"Session Lifecycle Skills","text":"Skill Description /ctx-remember Recall project context at session start (ceremony) /ctx-wrap-up End-of-session context persistence (ceremony) /ctx-status Show context summary (tasks, decisions, learnings) /ctx-agent Get AI-optimized context packet /ctx-next Suggest 1-3 concrete next actions from context /ctx-commit Commit with integrated context capture /ctx-reflect Review session and suggest what to persist /ctx-remind Manage session-scoped reminders /ctx-pause Pause context hooks for this session /ctx-resume Resume context hooks after a pause","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#context-persistence-skills","level":4,"title":"Context Persistence Skills","text":"Skill Description /ctx-task-add Add a task to TASKS.md /ctx-learning-add Add a learning to LEARNINGS.md /ctx-decision-add Add a decision with context/rationale/consequence /ctx-convention-add Add a coding convention to CONVENTIONS.md /ctx-archive Archive completed tasks","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#scratchpad-skills","level":4,"title":"Scratchpad Skills","text":"Skill Description /ctx-pad Manage encrypted scratchpad entries","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#session-history-skills","level":4,"title":"Session History Skills","text":"Skill Description /ctx-history Browse AI session history /ctx-journal-enrich Enrich a journal entry with frontmatter/tags /ctx-journal-enrich-all Full journal pipeline: export if needed, then batch-enrich","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#blogging-skills","level":4,"title":"Blogging Skills","text":"

    Blogging Is a Better Way of Creating Release Notes

    The blogging workflow can also double as generating release notes:

    AI reads your git commit history and creates a \"narrative\", which is essentially what a release note is for.

    Skill Description /ctx-blog Generate blog post from recent activity /ctx-blog-changelog Generate blog post from commit range with theme","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#auditing-health-skills","level":4,"title":"Auditing & Health Skills","text":"Skill Description /ctx-doctor Troubleshoot ctx behavior with structural health checks /ctx-drift Detect and fix context drift (structural + semantic) /ctx-consolidate Merge redundant learnings or decisions into denser entries /ctx-alignment-audit Audit doc claims against playbook instructions /ctx-prompt-audit Analyze session logs for vague prompts /check-links Audit docs for dead internal and external links","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#planning-execution-skills","level":4,"title":"Planning & Execution Skills","text":"Skill Description /ctx-loop Generate a Ralph Loop iteration script /ctx-implement Execute a plan step-by-step with checks /ctx-plan-import Import Claude Code plan files into project specs /ctx-worktree Manage git worktrees for parallel agents /ctx-architecture Build and maintain architecture maps","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#usage-examples","level":4,"title":"Usage Examples","text":"
    /ctx-status\n/ctx-learning-add \"Token refresh requires explicit cache invalidation\"\n/ctx-journal-enrich twinkly-stirring-kettle\n

    Skills support partial matching where applicable (e.g., session slugs).

    ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#cursor-ide","level":2,"title":"Cursor IDE","text":"

    Cursor can use context files through its system prompt or by reading files directly.

    ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#setup_1","level":3,"title":"Setup","text":"
    # Generate Cursor configuration\nctx setup cursor\n\n# Initialize context\nctx init --minimal\n
    ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#configuration","level":3,"title":"Configuration","text":"

    Add to Cursor settings (.cursor/settings.json):

    // split to multiple lines for readability\n{\n  \"ai.systemPrompt\": \"Read .context/TASKS.md and \n  .context/CONVENTIONS.md before responding. \n  Follow rules in .context/CONSTITUTION.md.\",\n}\n
    ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#usage","level":3,"title":"Usage","text":"
    1. Open your project in Cursor
    2. Context files are available in the file tree
    3. Reference them in prompts: \"Check .context/DECISIONS.md for our approach to...\"
    ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#manual-context-injection","level":3,"title":"Manual Context Injection","text":"

    For more control, paste context directly:

    # Get AI-ready packet\nctx agent --budget 4000 | pbcopy  # macOS\nctx agent --budget 4000 | xclip  # Linux\n

    Paste into Cursor's chat.

    ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#aider","level":2,"title":"Aider","text":"

    Aider works well with context files through its --read flag.

    ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#setup_2","level":3,"title":"Setup","text":"
    # Generate Aider configuration\nctx setup aider\n\n# Initialize context\nctx init\n
    ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#configuration_1","level":3,"title":"Configuration","text":"

    Create .aider.conf.yml:

    read:\n  - .context/CONSTITUTION.md\n  - .context/TASKS.md\n  - .context/CONVENTIONS.md\n  - .context/DECISIONS.md\n
    ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#usage_1","level":3,"title":"Usage","text":"
    # Start Aider (reads context files automatically)\naider\n\n# Or specify files explicitly\naider --read .context/TASKS.md --read .context/CONVENTIONS.md\n
    ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#with-watch-mode","level":3,"title":"With Watch Mode","text":"

    Run ctx watch alongside Aider to capture context updates:

    # Terminal 1: Run Aider\naider 2>&1 | tee /tmp/aider.log\n\n# Terminal 2: Watch for context updates\nctx watch --log /tmp/aider.log\n
    ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#github-copilot","level":2,"title":"GitHub Copilot","text":"

    GitHub Copilot integrates with ctx at three levels: an automated instructions file, a VS Code Chat extension, and manual patterns.

    ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#setup_3","level":3,"title":"Setup","text":"
    # Initialize context\nctx init\n\n# Generate .github/copilot-instructions.md\nctx setup copilot --write\n

    The --write flag creates .github/copilot-instructions.md, which Copilot reads automatically at the start of every session. This file contains your project's constitution rules, current tasks, conventions, and architecture: giving Copilot persistent context without manual copy-paste.

    Re-run ctx setup copilot --write after updating your .context/ files to regenerate the instructions.

    ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#vs-code-chat-extension-ctx","level":3,"title":"VS Code Chat Extension (@ctx)","text":"

    The ctx VS Code extension adds a @ctx chat participant to GitHub Copilot Chat, giving you direct access to all context commands from within the editor.

    ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#installation","level":4,"title":"Installation","text":"
    1. Build from source (requires Node.js 18+):
    cd editors/vscode\nnpm install\nnpm run build\nnpx @vscode/vsce package\n
    2. Install the .vsix file:
    code --install-extension ctx-context-0.8.1.vsix\n
    3. Reload VS Code. Type @ctx in Copilot Chat to verify.
    ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#slash-commands","level":4,"title":"Slash Commands","text":"Command Description @ctx /init Initialize .context/ directory with template files @ctx /status Show context summary with token estimate @ctx /agent Print AI-ready context packet @ctx /drift Detect stale or invalid context @ctx /journal Browse and search AI session history @ctx /hook Generate AI tool integration configs @ctx /add Add a task, decision, or learning @ctx /load Output assembled context Markdown @ctx /compact Archive completed tasks and clean up @ctx /sync Reconcile context with codebase","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#usage-examples_1","level":4,"title":"Usage Examples","text":"
    @ctx /init\n@ctx /status\n@ctx /add task Implement user authentication\n@ctx /drift\n@ctx /hook copilot\n@ctx /journal\n

    Typing @ctx without a command shows help with all available commands. The extension also supports natural language: asking @ctx about \"status\" or \"drift\" routes to the correct command automatically.

    ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#configuration_2","level":4,"title":"Configuration","text":"Setting Default Description ctx.executablePathctx Path to the ctx binary. Set this if ctx is not in your PATH.","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#follow-up-suggestions","level":4,"title":"Follow-Up Suggestions","text":"

    After each command, the extension suggests relevant next steps. For example, after /init it suggests /status and /hook; after /drift it suggests /sync.

    ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#session-persistence","level":3,"title":"Session Persistence","text":"

    ctx init creates a .context/sessions/ directory for storing session data from non-Claude tools. The Markdown session parser scans this directory during ctx journal, enabling session history for Copilot and other tools.

    ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#manual-patterns","level":3,"title":"Manual Patterns","text":"

    These patterns work without the extension, using Copilot's built-in file awareness:

    Pattern 1: Keep context files open

    Open .context/CONVENTIONS.md in a split pane. Copilot will reference it.

    Pattern 2: Reference in comments

    // See .context/CONVENTIONS.md for naming patterns\n// Following decision in .context/DECISIONS.md: Use PostgreSQL\n\nfunction getUserById(id: string) {\n  // Copilot now has context\n}\n

    Pattern 3: Paste context into Copilot Chat

    ctx agent --budget 2000\n

    Paste output into Copilot Chat for context-aware responses.

    ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#windsurf-ide","level":2,"title":"Windsurf IDE","text":"

    Windsurf supports custom instructions and file-based context.

    ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#setup_4","level":3,"title":"Setup","text":"
    # Generate Windsurf configuration\nctx setup windsurf\n\n# Initialize context\nctx init\n
    ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#configuration_3","level":3,"title":"Configuration","text":"

    Add to Windsurf settings:

    // Split to multiple lines for readability\n{\n  \"ai.customInstructions\": \"Always read .context/CONSTITUTION.md first. \n  Check .context/TASKS.md for current work. \n  Follow patterns in .context/CONVENTIONS.md.\"\n}\n
    ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#usage_2","level":3,"title":"Usage","text":"

    Context files appear in the file tree. Reference them when chatting:

    • \"What's in our task list?\" → AI reads .context/TASKS.md
    • \"What convention do we use for naming?\" → AI reads .context/CONVENTIONS.md
    ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#generic-integration","level":2,"title":"Generic Integration","text":"

    For any AI tool that can read files, use these patterns:

    ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#manual-context-loading","level":3,"title":"Manual Context Loading","text":"
    # Get full context\nctx load\n\n# Get AI-optimized packet\nctx agent --budget 8000\n\n# Get specific file\ncat .context/TASKS.md\n
    ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#system-prompt-template","level":3,"title":"System Prompt Template","text":"
    You are working on a project with persistent context in .context/\n\nBefore responding:\n1. Read .context/CONSTITUTION.md - NEVER violate these rules\n2. Check .context/TASKS.md for current work\n3. Follow .context/CONVENTIONS.md patterns\n4. Reference .context/DECISIONS.md for architectural choices\n\nWhen you learn something new, note it for .context/LEARNINGS.md\nWhen you make a decision, document it for .context/DECISIONS.md\n
    ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#automated-updates","level":3,"title":"Automated Updates","text":"

    If your AI tool outputs to a log, use ctx watch:

    # Watch log file for context-update commands\nyour-ai-tool 2>&1 | tee /tmp/ai.log &\nctx watch --log /tmp/ai.log\n

    The AI can emit updates like:

    <context-update type=\"complete\">implement caching</context-update>\n<context-update type=\"learning\"\n  context=\"Implementing caching layer\"\n  lesson=\"Important thing learned today\"\n  application=\"Apply this insight going forward\"\n>Caching Insight</context-update>\n
    ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#context-update-commands","level":2,"title":"Context Update Commands","text":"

    The ctx watch command parses update commands from AI output. Use this format:

    <context-update type=\"TYPE\" [attributes]>Content</context-update>\n
    ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#supported-types","level":3,"title":"Supported Types","text":"Type Target File Required Attributes task TASKS.md None decision DECISIONS.md context, rationale, consequencelearning LEARNINGS.md context, lesson, applicationconvention CONVENTIONS.md None complete TASKS.md None","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#simple-format-tasks-conventions-complete","level":3,"title":"Simple Format (Tasks, Conventions, Complete)","text":"
    <context-update type=\"task\">Implement rate limiting</context-update>\n<context-update type=\"convention\">Use kebab-case for files</context-update>\n<context-update type=\"complete\">rate limiting</context-update>\n
    ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#structured-format-learnings-decisions","level":3,"title":"Structured Format (Learnings, Decisions)","text":"

    Learnings and decisions support structured attributes for better documentation:

    Learning with full structure:

    <context-update type=\"learning\"\n  context=\"Debugging Claude Code hooks\"\n  lesson=\"Hooks receive JSON via stdin, not environment variables\"\n  application=\"Parse JSON stdin with the host language (Go, Python, etc.): no jq needed\"\n>Hook Input Format</context-update>\n

    Decision with full structure:

    <context-update type=\"decision\"\n  context=\"Need a caching layer for API responses\"\n  rationale=\"Redis is fast, well-supported, and team has experience\"\n  consequence=\"Must provision Redis infrastructure; team training on Redis patterns\"\n>Use Redis for caching</context-update>\n

    Learnings require: context, lesson, application attributes. Decisions require: context, rationale, consequence attributes. Updates missing required attributes are rejected with an error.

    ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#further-reading","level":2,"title":"Further Reading","text":"
    • Skills That Fight the Platform: Common pitfalls in skill design that work against the host tool
    • The Anatomy of a Skill That Works: What makes a skill reliable: the E/A/R framework and quality gates
    ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/migration/","level":1,"title":"Integration","text":"","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/migration/#adopting-ctx-in-existing-projects","level":2,"title":"Adopting ctx in Existing Projects","text":"

    Claude Code User?

    You probably want the plugin instead of this page.

    Install ctx from the marketplace: (/plugin → search \"ctx\" → Install) and you're done: hooks, skills, and updates are handled for you.

    See Getting Started for the full walkthrough.

    This guide covers adopting ctx in existing projects regardless of which tools your team uses.

    ","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/migration/#quick-paths","level":2,"title":"Quick Paths","text":"You have... Command What happens Nothing (greenfield) ctx init Creates .context/, CLAUDE.md, permissions Existing CLAUDE.mdctx init --merge Backs up your file, inserts ctx block after the H1 Existing CLAUDE.md + ctx markers ctx init --force Replaces the ctx block, leaves your content intact .cursorrules / .aider.conf.ymlctx initctx ignores those files: they coexist cleanly Team repo, first adopter ctx init --merge && git add .context/ CLAUDE.md Initialize and commit for the team","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/migration/#existing-claudemd","level":2,"title":"Existing CLAUDE.md","text":"

    This is the most common scenario:

    You have a CLAUDE.md with project-specific instructions and don't want to lose them.

    You Own CLAUDE.md

    After initialization, CLAUDE.md is yours: edit it freely.

    Add project instructions, remove sections you don't need, reorganize as you see fit.

    The only part ctx manages is the block between the <!-- ctx:context --> and <!-- ctx:end --> markers; everything outside those markers is yours to change at any time.

    If you remove the markers, nothing breaks: ctx simply treats the file as having no ctx content and will offer to merge again on the next ctx init.

    ","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/migration/#what-ctx-init-does","level":3,"title":"What ctx init Does","text":"

    When ctx init detects an existing CLAUDE.md, it checks for ctx markers (<!-- ctx:context --> ... <!-- ctx:end -->):

    State Default behavior With --merge With --force No CLAUDE.md Creates from template Creates from template Creates from template Exists, no ctx markers Prompts to merge Auto-merges (no prompt) Auto-merges (no prompt) Exists, has ctx markers Skips (already set up) Skips Replaces the ctx block only","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/migration/#the-merge-flag","level":3,"title":"The --merge Flag","text":"

    --merge auto-merges without prompting. The merge process:

    1. Backs up your existing CLAUDE.md to CLAUDE.md.<timestamp>.bak;
    2. Finds the H1 heading (e.g., # My Project) in your file;
    3. Inserts the ctx block immediately after it;
    4. Preserves everything else untouched.

    Your content before and after the ctx block remains exactly as it was.

    ","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/migration/#before-after-example","level":3,"title":"Before / After Example","text":"

    Before: your existing CLAUDE.md:

    # My Project\n\n## Build Commands\n\n- `npm run build`: production build\n- `npm test`: run tests\n\n## Code Style\n\n- Use TypeScript strict mode\n- Prefer named exports\n

    After ctx init --merge:

    # My Project\n\n<!-- ctx:context -->\n<!-- DO NOT REMOVE: This marker indicates ctx-managed content -->\n\n## IMPORTANT: You Have Persistent Memory\n\nThis project uses Context (`ctx`) for context persistence across sessions.\n...\n\n<!-- ctx:end -->\n\n## Build Commands\n\n- `npm run build`: production build\n- `npm test`: run tests\n\n## Code Style\n\n- Use TypeScript strict mode\n- Prefer named exports\n

    Your build commands and code style sections are untouched. The ctx block sits between markers and can be updated independently.

    ","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/migration/#the-force-flag","level":3,"title":"The --force Flag","text":"

    If your CLAUDE.md already has ctx markers (from a previous ctx init), the default behavior is to skip it. Use --force to replace the ctx block with the latest template: This is useful after upgrading ctx:

    ctx init --force\n

    This only replaces content between <!-- ctx:context --> and <!-- ctx:end -->. Your own content outside the markers is preserved. A timestamped backup is created before any changes.

    ","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/migration/#undoing-a-merge","level":3,"title":"Undoing a Merge","text":"

    Every merge creates a backup:

    $ ls CLAUDE.md*.bak\nCLAUDE.md.1738000000.bak\n

    To restore:

    cp CLAUDE.md.1738000000.bak CLAUDE.md\n

    Or if you are using git, simply:

    git checkout CLAUDE.md\n
    ","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/migration/#existing-cursorrules-aider-copilot","level":2,"title":"Existing .cursorrules / Aider / Copilot","text":"

    ctx doesn't touch tool-specific config files. It creates its own files (.context/, CLAUDE.md) and coexists with whatever you already have.

    ","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/migration/#what-does-ctx-create","level":3,"title":"What Does ctx Create?","text":"ctx creates ctx does NOT touch .context/ directory .cursorrulesCLAUDE.md (or merges into) .aider.conf.yml.claude/settings.local.json (seeded by ctx init; the plugin manages hooks and skills) .github/copilot-instructions.md.windsurfrules Any other tool-specific config

    Claude Code hooks and skills are provided by the ctx plugin, installed from the Claude Code marketplace (/plugin → search \"ctx\" → Install).

    ","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/migration/#running-ctx-alongside-other-tools","level":3,"title":"Running ctx Alongside Other Tools","text":"

    The .context/ directory is the source of truth. Tool-specific configs point to it:

    • Cursor: Reference .context/ files in your system prompt (see Cursor setup)
    • Aider: Add .context/ files to the read: list in .aider.conf.yml (see Aider setup)
    • Copilot: Keep .context/ files open or reference them in comments (see Copilot setup)

    You can generate a tool-specific configuration with:

    ctx setup cursor    # Generate Cursor config snippet\nctx setup aider     # Generate .aider.conf.yml\nctx setup copilot   # Generate Copilot tips\nctx setup windsurf  # Generate Windsurf config\n
    ","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/migration/#migrating-content-into-context","level":3,"title":"Migrating Content into .context/","text":"

    If you have project knowledge scattered across .cursorrules or custom prompt files, consider migrating it:

    1. Rules / invariants → .context/CONSTITUTION.md
    2. Code patterns → .context/CONVENTIONS.md
    3. Architecture notes → .context/ARCHITECTURE.md
    4. Known issues / tips → .context/LEARNINGS.md

    You don't need to delete the originals: ctx and tool-specific files can coexist. But centralizing in .context/ means every tool gets the same context.

    ","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/migration/#team-adoption","level":2,"title":"Team Adoption","text":"","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/migration/#context-is-designed-to-be-committed","level":3,"title":".context/ Is Designed to Be Committed","text":"

    The context files (tasks, decisions, learnings, conventions, architecture) are meant to live in version control. However, some subdirectories are personal or sensitive and should not be committed.

    ctx init automatically adds these .gitignore entries:

    # Journals contain full session transcripts: personal, potentially large\n.context/journal/\n.context/journal-site/\n.context/journal-obsidian/\n\n# Legacy encryption key path (copy to ~/.ctx/.ctx.key if needed)\n.context/.ctx.key\n\n# Runtime state and logs (ephemeral, machine-specific):\n.context/state/\n.context/logs/\n\n# Claude Code local settings (machine-specific)\n.claude/settings.local.json\n

    With those in place, committing is straightforward:

    # One person initializes\nctx init --merge\n\n# Commit context files (journals and keys are already gitignored)\ngit add .context/ CLAUDE.md\ngit commit -m \"Add ctx context management\"\ngit push\n

    Teammates pull and immediately have context. No per-developer setup needed.

    ","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/migration/#what-about-claude","level":3,"title":"What about .claude/?","text":"

    The .claude/ directory contains permissions that ctx init seeds. Hooks and skills are provided by the ctx plugin (not per-project files).

    File Commit? Why .claude/settings.local.json No Machine-specific, accumulates session permissions .claude/settings.golden.json Yes Curated permission snapshot (via ctx permission snapshot)","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/migration/#merge-conflicts-in-context-files","level":3,"title":"Merge Conflicts in Context Files","text":"

    Context files are plain Markdown. Resolve conflicts the same way you would for any other documentation file:

    # After a conflicting pull\ngit diff .context/TASKS.md    # See both sides\n# Edit to keep both sets of tasks, then:\ngit add .context/TASKS.md\ngit commit\n

    Common conflict scenarios:

    • TASKS.md: Two people added tasks: Keep both.
    • DECISIONS.md: Same decision recorded differently: Unify the entry.
    • LEARNINGS.md: Parallel discoveries: Keep both, remove duplicates.
    ","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/migration/#gradual-adoption","level":3,"title":"Gradual Adoption","text":"

    You don't need the whole team to switch at once:

    1. One person runs ctx init --merge and commits;
    2. CLAUDE.md instructions work immediately for Claude Code users;
    3. Other tool users can adopt at their own pace using ctx setup <tool>;
    4. Context files benefit everyone who reads them, even without tool integration.
    ","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/migration/#verifying-it-worked","level":2,"title":"Verifying It Worked","text":"","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/migration/#check-status","level":3,"title":"Check Status","text":"
    ctx status\n

    You should see your context files listed with token counts and no warnings.

    ","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/migration/#test-memory","level":3,"title":"Test Memory","text":"

    Start a new AI session and ask: \"Do you remember?\"

    The AI should cite specific context:

    • Current tasks from .context/TASKS.md;
    • Recent decisions or learnings;
    • Session history (if you've had prior sessions);

    If it responds with a generic \"I don't have memory\", check that ctx is in your PATH (which ctx) and that hooks are configured (see Troubleshooting).

    ","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/migration/#verify-the-merge","level":3,"title":"Verify the Merge","text":"

    If you used --merge, check that your original content is intact:

    # Your original content should still be there\ncat CLAUDE.md\n\n# The ctx block should be between markers\ngrep -c \"ctx:context\" CLAUDE.md  # Should print 1\ngrep -c \"ctx:end\" CLAUDE.md      # Should print 1\n
    ","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/migration/#further-reading","level":2,"title":"Further Reading","text":"
    • Getting Started: Full setup walkthrough
    • Context Files: What each .context/ file does
    • Integrations: Per-tool setup (Claude Code, Cursor, Aider, Copilot)
    • CLI Reference: All ctx commands and flags
    ","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/release/","level":1,"title":"Cutting a Release","text":"

    Full Release Checklist

    This page covers the mechanics of cutting a release (bump, tag, push). For the complete pre-release ceremony (audits, tests, verification, and post-release steps), see the Release Checklist runbook.

    ","path":["Operations","Maintainers","Cutting a Release"],"tags":[]},{"location":"operations/release/#prerequisites","level":2,"title":"Prerequisites","text":"

    Before you can cut a release you need:

    • Push access to origin (GitHub)
    • GPG signing configured (make gpg-test)
    • Go installed (version in go.mod)
    • Zensical installed (make site-setup)
    • A clean working tree (git status shows nothing to commit)
    ","path":["Operations","Maintainers","Cutting a Release"],"tags":[]},{"location":"operations/release/#step-by-step","level":2,"title":"Step-by-Step","text":"","path":["Operations","Maintainers","Cutting a Release"],"tags":[]},{"location":"operations/release/#1-update-the-version-file","level":3,"title":"1. Update the VERSION File","text":"
    echo \"0.9.0\" > VERSION\ngit add VERSION\ngit commit -m \"chore: bump version to 0.9.0\"\n

    The VERSION file uses bare semver (0.9.0), no v prefix. The release script adds the v prefix for git tags.

    ","path":["Operations","Maintainers","Cutting a Release"],"tags":[]},{"location":"operations/release/#2-generate-release-notes","level":3,"title":"2. Generate Release Notes","text":"

    In Claude Code:

    /_ctx-release-notes\n

    This analyzes commits since the last tag and writes dist/RELEASE_NOTES.md. The release script refuses to proceed without this file.

    ","path":["Operations","Maintainers","Cutting a Release"],"tags":[]},{"location":"operations/release/#3-verify-docs-and-commit-any-remaining-changes","level":3,"title":"3. Verify Docs and Commit Any Remaining Changes","text":"
    /ctx-link-check    # audit docs for dead links\nmake audit          # full check: fmt, vet, lint, style, test\ngit status          # must be clean\n
    ","path":["Operations","Maintainers","Cutting a Release"],"tags":[]},{"location":"operations/release/#4-run-the-release","level":3,"title":"4. Run the Release","text":"
    make release\n

    Or, if you are in a Claude Code session:

    /_ctx-release\n

    The release script does everything in order:

    Step What happens 1 Reads VERSION, verifies release notes exist 2 Verifies working tree is clean 3 Updates version in 4 config files (plugin.json, marketplace.json, VS Code package.json + lock) 4 Updates download URLs in 3 doc files (index.md, getting-started.md, integrations.md) 5 Adds new row to versions.md 6 Rebuilds the documentation site (make site) 7 Commits all version and docs updates 8 Runs make test and make smoke 9 Builds binaries for all 6 platforms via hack/build-all.sh 10 Creates a signed git tag (v0.9.0) 11 Pushes the tag to origin 12 Updates and pushes the latest tag","path":["Operations","Maintainers","Cutting a Release"],"tags":[]},{"location":"operations/release/#5-github-ci-takes-over","level":3,"title":"5. GitHub CI Takes Over","text":"

    Pushing a v* tag triggers .github/workflows/release.yml:

    1. Checks out the tagged commit
    2. Runs the full test suite
    3. Builds binaries for all platforms
    4. Creates a GitHub Release with auto-generated notes
    5. Uploads binaries and SHA256 checksums
    ","path":["Operations","Maintainers","Cutting a Release"],"tags":[]},{"location":"operations/release/#6-verify","level":3,"title":"6. Verify","text":"
    • GitHub Releases shows the new version
    • All 6 binaries are attached (linux/darwin x amd64/arm64, windows x amd64)
    • SHA256 files are attached
    • Release notes look correct
    ","path":["Operations","Maintainers","Cutting a Release"],"tags":[]},{"location":"operations/release/#what-gets-updated-automatically","level":2,"title":"What Gets Updated Automatically","text":"

    The release script updates 8 files so you do not have to:

    File What changes internal/assets/claude/.claude-plugin/plugin.json Plugin version .claude-plugin/marketplace.json Marketplace version (2 fields) editors/vscode/package.json VS Code extension version editors/vscode/package-lock.json VS Code lock version (2 fields) docs/index.md Download URLs docs/home/getting-started.md Download URLs docs/operations/integrations.md VSIX filename version docs/reference/versions.md New version row + latest pointer

    The Go binary version is injected at build time via -ldflags from the VERSION file. No source file needs editing.

    ","path":["Operations","Maintainers","Cutting a Release"],"tags":[]},{"location":"operations/release/#build-targets-reference","level":2,"title":"Build Targets Reference","text":"Target What it does make release Full release (script + tag + push) make build Build binary for current platform make build-all Build all 6 platform binaries make test Unit tests make smoke Integration smoke tests make audit Full check (fmt + vet + lint + drift + docs + test) make site Rebuild documentation site","path":["Operations","Maintainers","Cutting a Release"],"tags":[]},{"location":"operations/release/#troubleshooting","level":2,"title":"Troubleshooting","text":"","path":["Operations","Maintainers","Cutting a Release"],"tags":[]},{"location":"operations/release/#release-notes-not-found","level":3,"title":"\"Release Notes Not Found\"","text":"
    ERROR: dist/RELEASE_NOTES.md not found.\n

    Run /_ctx-release-notes in Claude Code first, or write dist/RELEASE_NOTES.md manually.

    ","path":["Operations","Maintainers","Cutting a Release"],"tags":[]},{"location":"operations/release/#working-tree-is-not-clean","level":3,"title":"\"Working Tree Is Not Clean\"","text":"
    ERROR: Working tree is not clean.\n

    Commit or stash all changes before running make release.

    ","path":["Operations","Maintainers","Cutting a Release"],"tags":[]},{"location":"operations/release/#tag-already-exists","level":3,"title":"\"Tag Already Exists\"","text":"
    ERROR: Tag v0.9.0 already exists.\n

    You cannot release the same version twice. Either bump VERSION to a new version, or delete the old tag if the release was incomplete:

    git tag -d v0.9.0\ngit push origin :refs/tags/v0.9.0\n
    ","path":["Operations","Maintainers","Cutting a Release"],"tags":[]},{"location":"operations/release/#ci-build-fails-after-tag-push","level":3,"title":"CI Build Fails After Tag Push","text":"

    The tag is already published. Fix the issue, bump to a patch version (e.g. 0.9.1), and release again. Do not force-push tags that others may have already fetched.

    ","path":["Operations","Maintainers","Cutting a Release"],"tags":[]},{"location":"operations/upgrading/","level":1,"title":"Upgrade","text":"","path":["Operations","Day-to-Day","Upgrade"],"tags":[]},{"location":"operations/upgrading/#upgrade","level":2,"title":"Upgrade","text":"

    New versions of ctx may ship updated permissions, CLAUDE.md directives, or plugin hooks and skills.

    Claude Code User?

    The marketplace can update skills, hooks, and prompts independently: /plugin → select ctx → Update now (or enable auto-update).

    The ctx binary is separate: rebuild from source or download a new release when one is available, then run ctx init --force --merge. Knowledge files are preserved automatically.

    ","path":["Operations","Day-to-Day","Upgrade"],"tags":[]},{"location":"operations/upgrading/#tldr","level":2,"title":"TL;DR","text":"
    # Plugin users (Claude Code)\n# /plugin → select ctx → Update now\n# Then update the binary and reinitialize:\nctx init --force --merge\n\n# From-source / manual users\n# install new ctx binary, then:\nctx init --force --merge\n# /plugin → select ctx → Update now   (if using Claude Code)\n
    ","path":["Operations","Day-to-Day","Upgrade"],"tags":[]},{"location":"operations/upgrading/#what-changes-between-versions","level":2,"title":"What Changes between Versions","text":"

    ctx init generates two categories of files:

    Category Examples Changes between versions? Infrastructure .claude/settings.local.json (permissions), ctx-managed sections in CLAUDE.md, ctx plugin (hooks + skills) Yes Knowledge .context/TASKS.md, DECISIONS.md, LEARNINGS.md, CONVENTIONS.md, ARCHITECTURE.md, GLOSSARY.md, CONSTITUTION.md, AGENT_PLAYBOOK.md No: this is your data

    Infrastructure is regenerated by ctx init and plugin updates. Knowledge files are yours and should never be overwritten.

    ","path":["Operations","Day-to-Day","Upgrade"],"tags":[]},{"location":"operations/upgrading/#upgrade-steps","level":2,"title":"Upgrade Steps","text":"","path":["Operations","Day-to-Day","Upgrade"],"tags":[]},{"location":"operations/upgrading/#1-install-the-new-version","level":3,"title":"1. Install the New Version","text":"

    Build from source or download the binary:

    cd /path/to/ctx-source\ngit pull\nmake build\nsudo make install\nctx --version   # verify\n
    ","path":["Operations","Day-to-Day","Upgrade"],"tags":[]},{"location":"operations/upgrading/#2-reinitialize","level":3,"title":"2. Reinitialize","text":"
    ctx init --force --merge\n
    • --force regenerates infrastructure files (permissions, ctx-managed sections in CLAUDE.md).
    • --merge preserves your content outside ctx markers.

    Knowledge files (.context/TASKS.md, DECISIONS.md, etc.) are preserved automatically: ctx init only overwrites infrastructure, never your data.

    Encryption key: The encryption key lives at ~/.ctx/.ctx.key (outside the project). Reinit does not affect it. If you have a legacy key at .context/.ctx.key or ~/.local/ctx/keys/, copy it manually (see Syncing Scratchpad Notes).

    ","path":["Operations","Day-to-Day","Upgrade"],"tags":[]},{"location":"operations/upgrading/#3-update-the-ctx-plugin","level":3,"title":"3. Update the ctx Plugin","text":"

    If you use Claude Code, update the plugin to get new hooks and skills:

    1. Open /plugin in Claude Code.
    2. Select ctx.
    3. Click Update now.

    Or enable auto-update so the plugin stays current without manual steps.

    ","path":["Operations","Day-to-Day","Upgrade"],"tags":[]},{"location":"operations/upgrading/#4-review-custom-settings","level":3,"title":"4. Review Custom Settings","text":"

    If you added custom permissions to .claude/settings.local.json beyond what ctx init provides, diff and merge:

    diff .claude.bak/settings.local.json .claude/settings.local.json\n

    Manually add back any custom entries that the new init dropped.

    ","path":["Operations","Day-to-Day","Upgrade"],"tags":[]},{"location":"operations/upgrading/#5-verify","level":3,"title":"5. Verify","text":"
    ctx status          # context files intact\nctx drift           # no broken references\n
    ","path":["Operations","Day-to-Day","Upgrade"],"tags":[]},{"location":"operations/upgrading/#6-clean-up","level":3,"title":"6. Clean Up","text":"

    If you made manual backups, remove them once satisfied:

    rm -rf .context.bak .claude.bak CLAUDE.md.bak\n
    ","path":["Operations","Day-to-Day","Upgrade"],"tags":[]},{"location":"operations/upgrading/#what-if-i-skip-the-upgrade","level":2,"title":"What If I Skip the Upgrade?","text":"

    The old binary still works with your existing .context/ files. But you may miss:

    • New plugin hooks that enforce better practices or catch mistakes;
    • Updated skill prompts that produce better results;
    • New .gitignore entries for directories added in newer versions;
    • Bug fixes in the CLI itself.

    The plugin and the binary can be updated independently. You can update the plugin (for new hooks/skills) even if you stay on an older binary, and vice versa.

    Context files are plain Markdown: They never break between versions.

    The surrounding infrastructure is what evolves.

    ","path":["Operations","Day-to-Day","Upgrade"],"tags":[]},{"location":"operations/runbooks/architecture-exploration/","level":1,"title":"Architecture Exploration","text":"","path":["Operations","Runbooks","Architecture Exploration"],"tags":[]},{"location":"operations/runbooks/architecture-exploration/#architecture-exploration","level":1,"title":"Architecture Exploration","text":"

    Systematically build architecture documentation across one or more repositories using ctx skills. Each invocation does one unit of work; a simple loop drives the agent through all phases.

    When to use: When onboarding to a new codebase, performing architecture reviews, or building up .context/ documentation across a workspace of repos.

    Prerequisites: ctx installed, repos cloned under a shared workspace directory (e.g., ~/WORKSPACE/).

    Companion skills:

    • /ctx-architecture: structural baseline and principal analysis
    • /ctx-architecture-enrich: code intelligence enrichment via GitNexus
    • /ctx-architecture-failure-analysis: adversarial failure analysis
    ","path":["Operations","Runbooks","Architecture Exploration"],"tags":[]},{"location":"operations/runbooks/architecture-exploration/#overview","level":2,"title":"Overview","text":"

    The agent progresses through phases per repo, depth-first:

    Phase Skill What it does bootstrap ctx init + /ctx-architecture Initialize context and build structural baseline principal /ctx-architecture principal Deep analysis: vision, bottlenecks, alternatives enriched /ctx-architecture-enrich Quantify with code intelligence (blast radius, flows) frontier-N /ctx-architecture (re-run) Explore unexplored areas found in convergence report lens-* /ctx-architecture with lens Focused exploration through conceptual lenses

    Exploration stops when convergence >= 0.85, frontier runs plateau, or all lenses are exhausted.

    ","path":["Operations","Runbooks","Architecture Exploration"],"tags":[]},{"location":"operations/runbooks/architecture-exploration/#setup","level":2,"title":"Setup","text":"

    Create a tracking directory in your workspace root:

    cd ~/WORKSPACE\nmkdir -p .arch-explorer\n

    Create .arch-explorer/manifest.json listing your repos:

    {\n  \"repos\": [\"ctx\", \"portal\", \"infra\"],\n  \"current_repo_index\": 0,\n  \"progress\": {}\n}\n

    Create .arch-explorer/run-log.md (empty, the agent appends to it).

    ","path":["Operations","Runbooks","Architecture Exploration"],"tags":[]},{"location":"operations/runbooks/architecture-exploration/#prompt","level":2,"title":"Prompt","text":"

    Save this as .arch-explorer/PROMPT.md and invoke with your agent. The prompt is self-contained: the agent reads the manifest, picks the next unit of work, executes it, updates tracking, and stops.

    You are an autonomous architecture exploration agent. Your job is to\nsystematically build and evolve architecture documentation across all\nrepositories in this workspace using ctx skills.\n\n## Execution Protocol\n\n### Step 1: Read State\n\nRead `.arch-explorer/manifest.json`. This tells you:\n- Which repos exist and their order\n- What has been done per repo (`progress` object)\n- Which repo to work on next (`current_repo_index`)\n\n### Step 2: Pick the Next Unit of Work\n\n**Strategy: depth-first, sequential.**\n\nFind the current repo (by `current_repo_index`). Determine its next\nphase from the progression below. If all phases are exhausted for this\nrepo (convergence score >= 0.85 or 3+ frontier runs with no new\nfindings), advance `current_repo_index` and pick the next repo.\n\n### Phase Progression (per repo)\n\nEach repo progresses through these phases in order:\n\n| Phase | Skill | Prerequisite |\n|-------|-------|-------------|\n| `bootstrap` | `ctx init` + `/ctx-architecture` | None |\n| `principal` | `/ctx-architecture principal` | bootstrap done |\n| `enriched` | `/ctx-architecture-enrich` | principal done, GitNexus indexed |\n| `frontier-N` | `/ctx-architecture` (re-run) | enriched done |\n\n**`bootstrap` is a single composite unit:** `ctx init` followed by\nstructural analysis. This is the ONLY phase that combines two actions.\nNo other phase may chain actions.\n\n**Frontier runs** are numbered: `frontier-1`, `frontier-2`, etc.\nEach frontier run reads CONVERGENCE-REPORT.md and picks unexplored\nareas. 
The skill handles this automatically.\n\nAfter the third frontier run OR when convergence >= 0.85, apply\n**conceptual lenses** (one per run):\n\n| Lens | Focus Areas |\n|------|-------------|\n| `security` | Auth flows, input validation, secrets, attack surfaces, trust boundaries |\n| `performance` | Hot paths, caching, concurrency, resource lifecycle, allocation patterns |\n| `stability` | Error handling, retries, graceful degradation, circuit breakers, timeouts |\n| `observability` | Logging, metrics, tracing, alerting, debugging affordances |\n| `data-integrity` | Storage, serialization, migrations, consistency, backup, recovery |\n\nFor lens runs, prepend the lens context as an explicit instruction to\nthe skill invocation:\n\n> \"Focus exploration on security: auth flows, input validation, secrets,\n> attack surfaces, trust boundaries.\"\n\nDo NOT wait for the skill to ask what to explore. Provide the lens\nfocus as input upfront.\n\n### Step 3: Do the Work\n\n1. `cd` into the repo directory (`~/WORKSPACE/<repo-name>`)\n2. If phase is `bootstrap`:\n    - Run `ctx init`, confirm `.context/` exists\n    - Then run `/ctx-architecture` (structural baseline)\n3. If phase is `principal` or `frontier-*`:\n    - Run `/ctx-architecture` (add `principal` argument for principal phase)\n    - The skill will read existing artifacts and build on them\n4. If phase is `enriched`:\n    - Verify GitNexus is connected: call `mcp__gitnexus__list_repos`\n    - Success = non-empty list returned with no error\n    - If GitNexus unavailable, log as `enriched-skipped` and advance\n      to `frontier-1`\n    - Run `/ctx-architecture-enrich`\n5. 
If phase is a lens run (`lens-security`, etc.):\n    - Run `/ctx-architecture` with lens focus prepended as instruction\n      (see lens table above for exact wording)\n\n### Step 4: Extract Results\n\nAfter the skill completes, gather:\n\n- **Convergence score**: from `map-tracking.json`, computed as:\n  average of all module `confidence` values (0.0-1.0). If\n  `map-tracking.json` is missing or has no confidence values,\n  record `null` and log a warning.\n- **Frontier count**: from CONVERGENCE-REPORT.md, count the number\n  of listed unexplored areas. If CONVERGENCE-REPORT.md is missing,\n  record `frontier_count: null` and log a warning. Treat missing\n  as \"exploration should continue\" (do not stall).\n- **Key findings**: 2-3 bullet points of what was discovered or\n  changed in this run (new modules mapped, danger zones found, etc.)\n- **New artifacts**: list any new files created in `.context/`\n\n### Step 5: Update Tracking\n\nUpdate `.arch-explorer/manifest.json`:\n\n```json\n{\n  \"progress\": {\n    \"ctx\": {\n      \"phases_completed\": [\"bootstrap\", \"principal\"],\n      \"current_phase\": \"enriched\",\n      \"lenses_explored\": [],\n      \"last_run\": \"2026-04-07T14:00:00Z\",\n      \"convergence_score\": 0.72,\n      \"frontier_count\": 3,\n      \"total_runs\": 2,\n      \"findings_summary\": \"14 modules mapped, 3 danger zones, 2 extension points\"\n    }\n  }\n}\n```\n\nAppend to `.arch-explorer/run-log.md`:\n\n```markdown\n## 2026-04-07T14:00:00Z / ctx / principal\n\n**Phase:** principal\n**Convergence:** 0.45 -> 0.72\n**Frontiers remaining:** 3\n**Key findings:**\n- Identified CLI dispatch as primary bottleneck (fan-out to 12 subsystems)\n- Security: context files readable by any process (no access control)\n- Strategic recommendation: extract context engine into library package\n\n**Artifacts updated:** ARCHITECTURE-PRINCIPAL.md, DANGER-ZONES.md, map-tracking.json\n```\n\n### Step 6: Report and Stop\n\nPrint this exact format as the 
FINAL output of the invocation:\n\n```\n[arch-explorer] DONE\n  repo: ctx\n  phase: principal\n  convergence: 0.72\n  frontiers: 3\n  runs_on_repo: 3\n  next: ctx / enriched\n```\n\nThe `[arch-explorer] DONE` line is the terminal marker. After printing\nit, produce no further output. Execution is complete.\n\n## Rules\n\n1. **One unit per invocation.** The only composite unit is `bootstrap`\n   (init + structural). All other phases are exactly one skill run.\n2. **Additive only.** Never delete or overwrite existing artifacts.\n   The skills already handle incremental updates.\n3. **No duplicated work.** Read manifest before acting. If a phase is\n   already recorded as completed, skip it.\n4. **Log everything.** Every run gets a run-log entry, even failures\n   and skips.\n5. **Fail gracefully.** If a skill fails (missing GitNexus, broken repo,\n   etc.), log the failure with reason and advance to the next phase or\n   repo. Don't retry in the same invocation.\n6. **Respect ctx conventions.** Each repo gets its own `.context/`\n   directory. Never write architecture artifacts outside `.context/`.\n\n## Stopping Logic\n\nA repo is considered \"explored\" when ANY of these is true:\n- Convergence score >= 0.85 (from map-tracking.json)\n- 3+ frontier runs produced no new findings (frontier_count unchanged\n  across consecutive runs)\n- All 5 lenses have been applied\n- Convergence score is `null` after 3 attempts (artifacts aren't being\n  generated properly; log warning and move on)\n\nWhen a repo is explored, advance `current_repo_index` in the manifest.\n\n## When All Repos Are Done\n\nWhen every repo has reached its stopping condition, print:\n\n```\n[arch-explorer] ALL DONE\n  - ctx: 0.92 convergence, 8 runs, 5 lenses\n  - portal: 0.87 convergence, 6 runs, 3 lenses\n  ...\n```\n
    ","path":["Operations","Runbooks","Architecture Exploration"],"tags":[]},{"location":"operations/runbooks/architecture-exploration/#invocation","level":2,"title":"Invocation","text":"

    Single run (safest for quota):

    cd ~/WORKSPACE\nclaude --print \"Follow .arch-explorer/PROMPT.md\" --allowedTools '*'\n

    Batch of N runs:

    cd ~/WORKSPACE\nfor i in $(seq 1 5); do\n  claude --print \"Follow .arch-explorer/PROMPT.md\" --allowedTools '*'\n  echo \"--- Run $i complete ---\"\ndone\n

    Resume after interruption:

    Just run again. The manifest tracks state; the agent picks up where it left off.

    ","path":["Operations","Runbooks","Architecture Exploration"],"tags":[]},{"location":"operations/runbooks/architecture-exploration/#tips","level":2,"title":"Tips","text":"
    • Start small: list 1-2 repos in the manifest first. Add more once you're confident in the output quality.
    • GitNexus is optional: the enrichment phase is skipped gracefully if GitNexus isn't connected. You still get structural and principal analysis.
    • Review between batches: check the run-log and generated artifacts between batch runs. The agent is additive-only, but early course correction saves wasted runs.
    • Lens runs are the payoff: the first three phases build the map; lens runs find the interesting things (security gaps, performance cliffs, stability risks).
    ","path":["Operations","Runbooks","Architecture Exploration"],"tags":[]},{"location":"operations/runbooks/architecture-exploration/#history","level":2,"title":"History","text":"
    • 2026-04-07: Original prompt created as hack/agents/architecture-explorer.md.
    • 2026-04-16: Moved to docs as a runbook for discoverability.
    ","path":["Operations","Runbooks","Architecture Exploration"],"tags":[]},{"location":"operations/runbooks/breaking-migration/","level":1,"title":"Breaking Migration","text":"","path":["Operations","Runbooks","Breaking Migration"],"tags":[]},{"location":"operations/runbooks/breaking-migration/#breaking-migration-guide","level":1,"title":"Breaking Migration Guide","text":"

    Template for upgrading across breaking CLI renames or behavior changes. Use this as a starting point when writing migration notes for a specific release, or hand it to your agent as context for generating release-specific guidance.

    When to use: When a release includes breaking changes (command renames, removed flags, changed defaults) that require user action.

    Companion: Upgrade guide covers the general upgrade flow. This runbook covers the breaking-change specifics.

    ","path":["Operations","Runbooks","Breaking Migration"],"tags":[]},{"location":"operations/runbooks/breaking-migration/#step-1-identify-what-changed","level":2,"title":"Step 1: Identify What Changed","text":"

    Ask your agent to diff the CLI surface between the old and new version:

    Compare the CLI command surface between the previous release tag\nand HEAD. For each change, categorize as: renamed, removed,\nnew, or changed-behavior. Include old and new command signatures.\n

    Or use the /_ctx-command-audit skill after the rename.

    ","path":["Operations","Runbooks","Breaking Migration"],"tags":[]},{"location":"operations/runbooks/breaking-migration/#step-2-regenerate-infrastructure","level":2,"title":"Step 2: Regenerate Infrastructure","text":"
    # Install the new binary\nmake build && sudo make install\n\n# Regenerate CLAUDE.md and permissions\nctx init --force --merge\n

    --merge preserves your knowledge files (TASKS.md, DECISIONS.md, etc.) while regenerating infrastructure (permissions, CLAUDE.md managed sections).

    ","path":["Operations","Runbooks","Breaking Migration"],"tags":[]},{"location":"operations/runbooks/breaking-migration/#step-3-update-the-plugin","level":2,"title":"Step 3: Update the Plugin","text":"
    /plugin -> select ctx -> Update now\n

    Or, if using a local clone:

    make plugin-reload\n# restart Claude Code\n
    ","path":["Operations","Runbooks","Breaking Migration"],"tags":[]},{"location":"operations/runbooks/breaking-migration/#step-4-update-personal-scripts","level":2,"title":"Step 4: Update Personal Scripts","text":"

    Search your scripts and aliases for old command names:

    # Example: find references to old command names\ngrep -r \"ctx old-command\" ~/scripts/ ~/.zshrc ~/.bashrc\n

    Replace with the new names per the changelog.

    ","path":["Operations","Runbooks","Breaking Migration"],"tags":[]},{"location":"operations/runbooks/breaking-migration/#step-5-update-hook-configs","level":2,"title":"Step 5: Update Hook Configs","text":"

    If you have custom hooks in .claude/settings.local.json that reference ctx commands, update them:

    jq '.hooks' .claude/settings.local.json | grep \"ctx \"\n
    ","path":["Operations","Runbooks","Breaking Migration"],"tags":[]},{"location":"operations/runbooks/breaking-migration/#step-6-verify","level":2,"title":"Step 6: Verify","text":"
    ctx status          # context files intact\nctx drift           # no broken references\nmake test           # if you're a contributor\n
    ","path":["Operations","Runbooks","Breaking Migration"],"tags":[]},{"location":"operations/runbooks/breaking-migration/#writing-release-specific-migration-notes","level":2,"title":"Writing Release-Specific Migration Notes","text":"

    When preparing a release with breaking changes, create a section in the release notes using this template:

    ## Breaking Changes\n\n### `old-command` renamed to `new-command`\n\n**What changed**: `ctx old-command` is now `ctx new-command`.\nThe old name is removed (no deprecation alias).\n\n**Action required**:\n1. Run `ctx init --force --merge` to update CLAUDE.md\n2. Update any scripts referencing `ctx old-command`\n3. Update hook configs if applicable\n\n**Why**: [brief rationale for the rename]\n

    Repeat for each breaking change. Users should be able to follow the notes mechanically without needing to understand the codebase.

    ","path":["Operations","Runbooks","Breaking Migration"],"tags":[]},{"location":"operations/runbooks/codebase-audit/","level":1,"title":"Codebase Audit","text":"","path":["Operations","Runbooks","Codebase Audit"],"tags":[]},{"location":"operations/runbooks/codebase-audit/#codebase-audit","level":1,"title":"Codebase Audit","text":"

    A structured audit of the codebase: dead code, magic strings, documentation drift, security surface, and roadmap opportunities.

    When to run: Before a release, after a long YOLO sprint, quarterly, or when planning the next phase of work.

    Time: ~15-30 minutes with a team of agents.

    ","path":["Operations","Runbooks","Codebase Audit"],"tags":[]},{"location":"operations/runbooks/codebase-audit/#how-to-use-this-runbook","level":2,"title":"How to Use This Runbook","text":"

    Start a Claude Code session with a clean git state (git stash or commit first). Paste or adapt the prompt below. The agent does the analysis; you triage the findings.

    ","path":["Operations","Runbooks","Codebase Audit"],"tags":[]},{"location":"operations/runbooks/codebase-audit/#prompt","level":2,"title":"Prompt","text":"
    I want you to create an agent team to audit this codebase. Save each report as\na separate markdown file under `./ideas/` (or another directory if you prefer).\n\nUse read-only agents (subagent_type: Explore) for all analyses. No code changes.\n\nFor each report, use this structure:\n- Executive Summary (2-3 sentences + severity table)\n- Findings (grouped, with file:line references)\n- Ranked Recommendations (high/medium/low priority)\n- Methodology (what was examined, how)\n\nKeep reports actionable: every finding should suggest a concrete fix or next step.\n\n## Analyses to Run\n\n### 1. Extractable Patterns (session mining)\nSearch session JSONL files, journal entries, and task archives for repetitive\nmulti-step workflows. Count frequency of bash command sequences, slash command\nusage, and recurring user prompts. Identify patterns that could become skills\nor scripts. Cross-reference with existing skills to find coverage gaps.\nOutput: ranked list of automation opportunities with frequency data.\n\n### 2. Documentation Drift (godoc + inline)\nCompare every doc.go against its package's actual exports and behavior. Check\ninline godoc comments on exported functions against their implementations.\nScan for stale TODO/FIXME/HACK comments. Check package-level comments match\npackage names. Output: drift items ranked by severity with exact file:line refs.\n\n### 3. Maintainability\nLook for: functions >80 lines that have logical split points; switch blocks\nwith >5 cases that could be table-driven or extracted; inline comments that\nsay \"step 1\", \"step 2\" or similar (sign the block wants to be a function);\nfiles with >400 lines; packages with flat structure that could benefit from\nsub-packages; functions that seem misplaced in their file. Do NOT flag\nthings that are fine as-is just because they could theoretically be different.\nOutput: concrete refactoring suggestions, not style nitpicks.\n\n### 4. 
Security Review\nThis is a CLI app: focus on CLI-relevant attack surface, not web OWASP:\nfile path traversal (does user input flow into file paths unsanitized?),\ncommand injection (does user input flow into exec calls?), symlink following\n(does the tool follow symlinks when writing to .context/?), permission\nhandling (are file permissions set correctly?), sensitive data in outputs\n(do any commands leak secrets or session content?). Output: findings with\nseverity ratings and exploit scenarios.\n\n### 5. Blog Theme Discovery\nRead existing blog posts for style and narrative voice. Analyze git log,\nrecent session discussions, and DECISIONS.md for story arcs worth writing\nabout. Suggest 3-5 blog post themes with: title, angle, target audience,\nkey commits/sessions to reference, and a 2-sentence pitch. Prioritize\nthemes that build a coherent narrative across posts.\n\n### 6. Roadmap & Value Opportunities\nBased on current features, recent momentum, and gaps found in other analyses:\nwhat are the highest-value improvements? Consider: user-facing features,\ndeveloper experience, integration opportunities, and low-hanging fruit.\nOutput: prioritized list with effort/impact estimates (not time estimates).\n\n### 7. User-Facing Documentation\nEvaluate README, help text, and any user docs. Suggest improvements\nstructured as use-case pages: the problem, how ctx solves it, typical\nworkflow, gotchas. Identify gaps where a user would get stuck without\nreading source code. Output: list of documentation gaps and suggested\npage outlines.\n\n### 8. Agent Team Strategies\nBased on the codebase structure, suggest 2-3 agent team configurations for\nupcoming work sessions. For each: team composition (roles, agent types),\ntask distribution strategy, coordination approach, and which types of work\nit suits. Ground suggestions in actual project patterns, not generic advice.\n
    ","path":["Operations","Runbooks","Codebase Audit"],"tags":[]},{"location":"operations/runbooks/codebase-audit/#tips","level":2,"title":"Tips","text":"
    • Clean state matters: the prompt says \"no code changes\" but accidents happen. Start from a clean git state so you can git checkout . if needed.

    • Adjust scope: drop analyses you don't need. Analyses 1-4 are the most actionable. Analyses 5-8 are planning/creative and can be skipped if you just want a technical audit.

    • Reports feed TASKS.md: after the audit, read each report and create tasks in the appropriate Phase section. The reports are input, not output.

    • ideas/ is gitignored: reports saved there won't be committed. Move specific findings to TASKS.md, DECISIONS.md, or LEARNINGS.md to persist them.

    ","path":["Operations","Runbooks","Codebase Audit"],"tags":[]},{"location":"operations/runbooks/codebase-audit/#history","level":2,"title":"History","text":"
    • 2026-02-08: Original prompt created after a codebase audit sprint.
    • 2026-02-17: Improved with read-only agents, report structure template, CLI-scoped security review, and maintainability thresholds.
    • 2026-04-16: Moved from hack/runbooks/ to docs/operations/runbooks/.
    ","path":["Operations","Runbooks","Codebase Audit"],"tags":[]},{"location":"operations/runbooks/docs-semantic-audit/","level":1,"title":"Docs Semantic Audit","text":"","path":["Operations","Runbooks","Docs Semantic Audit"],"tags":[]},{"location":"operations/runbooks/docs-semantic-audit/#documentation-semantic-audit","level":1,"title":"Documentation Semantic Audit","text":"

    Find structural problems that linters and link checkers cannot: weak pages that should be merged, heavy pages that should be split, missing cross-links, and narrative arcs that don't land.

    When to run: Before a release, after adding several new pages, when the site feels sprawling, or when you suspect narrative gaps.

    Time: ~20-40 minutes with an agent session.

    ","path":["Operations","Runbooks","Docs Semantic Audit"],"tags":[]},{"location":"operations/runbooks/docs-semantic-audit/#why-this-is-a-runbook","level":2,"title":"Why This Is a Runbook","text":"

    These judgments are inherently subjective and context-dependent. A page is \"weak\" relative to its neighbors; a narrative arc only matters if the docs intend to tell a story. Deterministic tools (broken-link checkers, word counters) can't do this. An LLM reading the full doc set can.

    ","path":["Operations","Runbooks","Docs Semantic Audit"],"tags":[]},{"location":"operations/runbooks/docs-semantic-audit/#prompt","level":2,"title":"Prompt","text":"

    Paste or adapt the following into a Claude Code session. The agent needs read access to docs/ and the site nav structure.

    Read every file under docs/ (including docs/blog/ and docs/recipes/).\nFor each file, note: title, word count, outbound links, inbound links\n(how many other pages link to it), and a one-line summary of its purpose.\n\nThen produce a report with these sections:\n\n## 1. Weak Dangling Pages\n\nPages that are thin, isolated, or redundant. Signs:\n- Under ~300 words with no unique content (just restates what another page says)\n- Zero or one inbound links (orphaned in the nav)\n- Content that would be stronger merged into an adjacent page\n- \"Try it in 5 minutes\" sections that assume installation already happened\n- Pages whose title doesn't work as a nav entry (too long, too vague)\n\nFor each: identify the page, explain why it's weak, and recommend\nmerge target or deletion.\n\n## 2. Overly Heavy Pages\n\nPages doing too much. Signs:\n- Over ~1500 words with multiple distinct topics\n- More than 4 H2 sections that could stand alone\n- Reader has to scroll past irrelevant content to find what they need\n- Mixed audience (beginner setup + advanced config on same page)\n\nFor each: identify the page, list the distinct topics, and suggest\nsplit points.\n\n## 3. Missing Cross-Links\n\nPlaces where a reader would naturally want to jump to related content\nbut no link exists. Look for:\n- Concepts mentioned but not linked (e.g., \"scratchpad\" without linking\n  to the scratchpad page)\n- Blog posts that describe features without linking to the reference docs\n- Recipes that reference workflows without linking to the relevant\n  getting-started section\n- Pages that end without a \"Next Up\" or \"See Also\" pointer\n\nFor each: source page, anchor text, suggested link target.\n\n## 4. Narrative Gaps\n\nThe docs should tell a coherent story: problem -> install -> first session\n-> daily workflow -> advanced patterns -> contributing. 
Look for:\n- Gaps in the progression (e.g., no bridge from \"first session\" to\n  \"daily habits\")\n- Blog posts that introduce concepts the reference docs don't cover\n- Recipes that assume knowledge no other page teaches\n- Features documented in CLI reference but missing from workflows/recipes\n\nFor each: describe the gap and suggest what page or section would fill it.\n\n## 5. Blog Cross-Linking Opportunities\n\nBlog posts are often written in isolation. Look for:\n- Posts that cover the same theme but don't reference each other\n- Posts that describe the evolution of a feature (natural \"part 1 / part 2\")\n- Posts that would benefit from a \"Related posts\" footer\n- Thematic clusters that could be linked from a recipe or reference page\n\nFor each: list the posts, the shared theme, and the suggested links.\n\n## Output Format\n\nFor every finding, include:\n- File path (docs/whatever.md)\n- Severity: high (actively confusing), medium (missed opportunity),\n  low (nice to have)\n- Concrete recommendation (merge into X, split at H2 Y, add link to Z)\n\nEnd with a prioritized action list: what to fix first.\n
    ","path":["Operations","Runbooks","Docs Semantic Audit"],"tags":[]},{"location":"operations/runbooks/docs-semantic-audit/#after-the-audit","level":2,"title":"After the Audit","text":"
    1. Triage findings: not everything needs fixing. Focus on high severity.
    2. Merge weak pages first: fewer pages is almost always better.
    3. Add cross-links: cheapest improvement, highest reader impact.
    4. File split decisions in DECISIONS.md: page splits are architectural.
    5. Regenerate the site and spot-check nav after structural changes.
    ","path":["Operations","Runbooks","Docs Semantic Audit"],"tags":[]},{"location":"operations/runbooks/docs-semantic-audit/#history","level":2,"title":"History","text":"
    • 2026-02-17: Created after merging docs/re-explaining.md into docs/about.md, which surfaced the pattern of weak standalone pages that dilute rather than add.
    • 2026-04-16: Moved from hack/runbooks/ to docs/operations/runbooks/.
    ","path":["Operations","Runbooks","Docs Semantic Audit"],"tags":[]},{"location":"operations/runbooks/hub-deployment/","level":1,"title":"Hub Deployment","text":"","path":["Operations","Runbooks","Hub Deployment"],"tags":[]},{"location":"operations/runbooks/hub-deployment/#hub-deployment","level":1,"title":"Hub Deployment","text":"

    Linear runbook for setting up a ctx Hub for yourself or a team. Consolidates pieces currently scattered across hub recipes and operations docs.

    When to use: First-time hub setup, or when onboarding a new team onto an existing hub.

    Prerequisites: ctx binary installed, network connectivity between hub and clients.

    Companion docs:

    • Hub overview: what the hub is and is not
    • Hub operations: data directory, systemd, backup, monitoring
    • Hub failure modes: what can go wrong
    ","path":["Operations","Runbooks","Hub Deployment"],"tags":[]},{"location":"operations/runbooks/hub-deployment/#step-1-start-the-hub","level":2,"title":"Step 1: Start the Hub","text":"Quick Start (foreground)Production (systemd)
    ctx hub start\n

    See Hub Operations: Systemd Unit for the full unit file.

    sudo systemctl enable --now ctx-hub\n

    The hub creates admin.token on first start. Save this token; it is the only way to register clients.

    ","path":["Operations","Runbooks","Hub Deployment"],"tags":[]},{"location":"operations/runbooks/hub-deployment/#step-2-generate-the-admin-token","level":2,"title":"Step 2: Generate the Admin Token","text":"

    On first start, the hub writes admin.token to the data directory (default ~/.ctx/hub-data/):

    cat ~/.ctx/hub-data/admin.token\n

    This token has full admin privileges. Keep it secret.

    ","path":["Operations","Runbooks","Hub Deployment"],"tags":[]},{"location":"operations/runbooks/hub-deployment/#step-3-register-clients","level":2,"title":"Step 3: Register Clients","text":"

    For each client (person or machine) that will connect:

    # On the hub machine\nctx hub register --name \"volkan-laptop\" --admin-token <admin-token>\n

    This returns a client token. Distribute it securely to the client.

    ","path":["Operations","Runbooks","Hub Deployment"],"tags":[]},{"location":"operations/runbooks/hub-deployment/#step-4-connect-clients","level":2,"title":"Step 4: Connect Clients","text":"

    On each client machine:

    ctx connect <hub-address> --token <client-token>\n

    Verify the connection:

    ctx connection status\n
    ","path":["Operations","Runbooks","Hub Deployment"],"tags":[]},{"location":"operations/runbooks/hub-deployment/#step-5-verify-sync","level":2,"title":"Step 5: Verify Sync","text":"

    Push a test entry from one client and verify it arrives:

    # Client A\nctx add learning \"Hub sync test\" --context \"Verifying hub setup\"\n\n# Client B (after a moment)\nctx status   # should show the new learning\n
    ","path":["Operations","Runbooks","Hub Deployment"],"tags":[]},{"location":"operations/runbooks/hub-deployment/#step-6-configure-backup","level":2,"title":"Step 6: Configure Backup","text":"

    Set up regular backups of the hub data directory. See Hub Operations: Backup and Restore.

    Minimum:

    # Add to cron\n0 */6 * * * cp ~/.ctx/hub-data/entries.jsonl ~/backups/entries-$(date +\\%F).jsonl\n
    ","path":["Operations","Runbooks","Hub Deployment"],"tags":[]},{"location":"operations/runbooks/hub-deployment/#step-7-configure-tls-when-available","level":2,"title":"Step 7: Configure TLS (When Available)","text":"

    Coming Soon

    TLS support is planned (H-01/H-02). Until then, run the hub on a trusted network or behind a reverse proxy with TLS termination.

    ","path":["Operations","Runbooks","Hub Deployment"],"tags":[]},{"location":"operations/runbooks/hub-deployment/#team-onboarding-checklist","level":2,"title":"Team Onboarding Checklist","text":"

    When adding a new team member to an existing hub:

    • Generate a client token (ctx hub register --name \"<name>\")
    • Share the token and hub address securely
    • Have them run ctx connect <hub-address> --token <token>
    • Verify with ctx connection status
    • Point them to the Hub Getting Started recipe
    ","path":["Operations","Runbooks","Hub Deployment"],"tags":[]},{"location":"operations/runbooks/hub-deployment/#troubleshooting","level":2,"title":"Troubleshooting","text":"","path":["Operations","Runbooks","Hub Deployment"],"tags":[]},{"location":"operations/runbooks/hub-deployment/#connection-refused","level":3,"title":"\"Connection Refused\"","text":"

    The hub isn't running or the port is wrong. Check:

    ctx hub status          # on the hub machine\nss -tlnp | grep 9900   # default port\n
    ","path":["Operations","Runbooks","Hub Deployment"],"tags":[]},{"location":"operations/runbooks/hub-deployment/#authentication-failed","level":3,"title":"\"Authentication Failed\"","text":"

    The client token is wrong or was never registered. Re-register:

    ctx hub register --name \"<name>\" --admin-token <admin-token>\n
    ","path":["Operations","Runbooks","Hub Deployment"],"tags":[]},{"location":"operations/runbooks/hub-deployment/#entries-not-syncing","level":3,"title":"Entries Not Syncing","text":"

    Check that the client is listening:

    ctx connection status\n

    If connected but not syncing, check the hub logs for sequence mismatch errors. See Hub Failure Modes for details.

    ","path":["Operations","Runbooks","Hub Deployment"],"tags":[]},{"location":"operations/runbooks/new-contributor/","level":1,"title":"New Contributor","text":"","path":["Operations","Runbooks","New Contributor"],"tags":[]},{"location":"operations/runbooks/new-contributor/#new-contributor-onboarding","level":1,"title":"New Contributor Onboarding","text":"

    Step-by-step onboarding sequence for new contributors. Consolidates setup instructions currently scattered across the README, contributing guide, and setup docs.

    When to use: First-time contributor setup, or when verifying your development environment after a major upgrade.

    ","path":["Operations","Runbooks","New Contributor"],"tags":[]},{"location":"operations/runbooks/new-contributor/#step-1-clone-the-repository","level":2,"title":"Step 1: Clone the Repository","text":"
    git clone https://github.com/ActiveMemory/ctx.git\ncd ctx\n

    Or fork first on GitHub, then clone your fork.

    ","path":["Operations","Runbooks","New Contributor"],"tags":[]},{"location":"operations/runbooks/new-contributor/#step-2-initialize-context","level":2,"title":"Step 2: Initialize Context","text":"
    ctx init\n

    This creates the .context/ directory with knowledge files and the .claude/ directory with agent configuration. If ctx is not yet installed, proceed to Step 3 first, then come back.

    ","path":["Operations","Runbooks","New Contributor"],"tags":[]},{"location":"operations/runbooks/new-contributor/#step-3-build-and-install","level":2,"title":"Step 3: Build and Install","text":"
    make build\nsudo make install\n

    Verify:

    ctx --version\n
    ","path":["Operations","Runbooks","New Contributor"],"tags":[]},{"location":"operations/runbooks/new-contributor/#step-4-install-the-plugin-claude-code-users","level":2,"title":"Step 4: Install the Plugin (Claude Code Users)","text":"

    If you use Claude Code, install the plugin from your local clone so skills and hooks reflect your working tree:

    1. Launch claude
    2. Type /plugin and press Enter
    3. Select Marketplaces -> Add Marketplace
    4. Enter the absolute path to your clone (e.g., ~/WORKSPACE/ctx)
    5. Back in /plugin, select Install and choose ctx

    Verify:

    claude /plugin list   # should show ctx\n

    See Contributing: Install the Plugin for details on cache clearing.

    ","path":["Operations","Runbooks","New Contributor"],"tags":[]},{"location":"operations/runbooks/new-contributor/#step-5-switch-to-dev-profile","level":2,"title":"Step 5: Switch to Dev Profile","text":"
    ctx config switch dev\n

    This enables verbose logging and notify events (useful during development).

    ","path":["Operations","Runbooks","New Contributor"],"tags":[]},{"location":"operations/runbooks/new-contributor/#step-6-verify-hooks","level":2,"title":"Step 6: Verify Hooks","text":"

    Start a Claude Code session and check that hooks fire:

    claude\n

    You should see ctx session hooks (ceremonies reminder, context loading) on session start. If not, check that the plugin is installed correctly (Step 4).

    ","path":["Operations","Runbooks","New Contributor"],"tags":[]},{"location":"operations/runbooks/new-contributor/#step-7-run-your-first-session","level":2,"title":"Step 7: Run Your First Session","text":"

    In Claude Code:

    /ctx-status\n

    This should show context file health, active tasks, and recent decisions. If it works, your setup is complete.

    ","path":["Operations","Runbooks","New Contributor"],"tags":[]},{"location":"operations/runbooks/new-contributor/#step-8-verify-context-persistence","level":2,"title":"Step 8: Verify Context Persistence","text":"

    End the session and start a new one:

    /ctx-remember\n

    The agent should recall what happened in the previous session. This confirms that context persistence is working end-to-end.

    ","path":["Operations","Runbooks","New Contributor"],"tags":[]},{"location":"operations/runbooks/new-contributor/#step-9-run-tests","level":2,"title":"Step 9: Run Tests","text":"
    make test     # unit tests\nmake audit    # full check: fmt + vet + lint + drift + docs + test\n

    All tests should pass with a clean clone.

    ","path":["Operations","Runbooks","New Contributor"],"tags":[]},{"location":"operations/runbooks/new-contributor/#quick-reference","level":2,"title":"Quick Reference","text":"Task Command Build make build Install sudo make install Test make test Full audit make audit Rebuild docs site make site Serve docs locally make site-serve Clear plugin cache make plugin-reload Switch config profile ctx config switch dev","path":["Operations","Runbooks","New Contributor"],"tags":[]},{"location":"operations/runbooks/new-contributor/#next-steps","level":2,"title":"Next Steps","text":"
    • Read the contributing guide for project layout, code style, and PR process
    • Check TASKS.md for open work items
    • Ask /ctx-next for suggested work
    ","path":["Operations","Runbooks","New Contributor"],"tags":[]},{"location":"operations/runbooks/plugin-release/","level":1,"title":"Plugin Release","text":"","path":["Operations","Runbooks","Plugin Release"],"tags":[]},{"location":"operations/runbooks/plugin-release/#plugin-release","level":1,"title":"Plugin Release","text":"

    Plugin-specific release procedure. The general release checklist covers the full ctx release; this runbook covers the plugin-specific steps that are not part of that flow.

    When to use: When releasing plugin changes (new skills, hook updates, permission changes) independently of a ctx binary release, or as a sub-procedure within the full release.

    ","path":["Operations","Runbooks","Plugin Release"],"tags":[]},{"location":"operations/runbooks/plugin-release/#what-ships-in-the-plugin","level":2,"title":"What Ships in the Plugin","text":"

    The plugin lives at internal/assets/claude/ and includes:

    Component Path What it does Skills internal/assets/claude/skills/ User-facing /ctx-* slash commands Hooks internal/assets/claude/hooks/ Pre/post tool-use hooks Plugin manifest internal/assets/claude/.claude-plugin/plugin.json Declares skills, hooks, version Marketplace .claude-plugin/marketplace.json Points Claude Code to the plugin","path":["Operations","Runbooks","Plugin Release"],"tags":[]},{"location":"operations/runbooks/plugin-release/#step-1-update-hooksjson-if-hooks-changed","level":2,"title":"Step 1: Update hooks.json (If Hooks Changed)","text":"

    If you added, removed, or modified hooks:

    # Verify hook definitions match implementations\nmake audit\n

    Check that plugin.json lists all hooks correctly. Missing hooks silently fail to fire.

    ","path":["Operations","Runbooks","Plugin Release"],"tags":[]},{"location":"operations/runbooks/plugin-release/#step-2-bump-version","level":2,"title":"Step 2: Bump Version","text":"

    Update the version in three places:

    • internal/assets/claude/.claude-plugin/plugin.json
    • .claude-plugin/marketplace.json (two fields)
    • editors/vscode/package.json + package-lock.json (if VS Code extension is affected)

    The Release Script Does This

    If you're running make release, the script bumps these automatically from VERSION. Only bump manually if you're releasing the plugin independently.

    ","path":["Operations","Runbooks","Plugin Release"],"tags":[]},{"location":"operations/runbooks/plugin-release/#step-3-test-against-a-fresh-install","level":2,"title":"Step 3: Test Against a Fresh Install","text":"
    # Clear cached plugin\nmake plugin-reload\n\n# Restart Claude Code, then:\nclaude /plugin list    # verify version\n

    Test the critical paths:

    • /ctx-status works
    • Session hooks fire (ceremonies, context loading)
    • At least one user-facing skill works end-to-end
    • Pre-tool-use hooks block when they should
    ","path":["Operations","Runbooks","Plugin Release"],"tags":[]},{"location":"operations/runbooks/plugin-release/#step-4-test-against-a-clean-project","level":2,"title":"Step 4: Test Against a Clean Project","text":"

    Create a temporary project to verify the plugin works outside the ctx repo:

    mkdir /tmp/test-ctx-plugin && cd /tmp/test-ctx-plugin\ngit init\nctx init\nclaude   # start a session, verify hooks fire\n
    ","path":["Operations","Runbooks","Plugin Release"],"tags":[]},{"location":"operations/runbooks/plugin-release/#step-5-verify-skill-count","level":2,"title":"Step 5: Verify Skill Count","text":"

    The plugin manifest declares all user-invocable skills. Verify the count matches:

    # Count skills in plugin.json\njq '.skills | length' internal/assets/claude/.claude-plugin/plugin.json\n\n# Count skill directories\nls -d internal/assets/claude/skills/ctx-*/ | wc -l\n

    These numbers should be close but may not match exactly: some skills are not user-invocable and won't appear in both counts. Investigate any larger discrepancy.

    ","path":["Operations","Runbooks","Plugin Release"],"tags":[]},{"location":"operations/runbooks/plugin-release/#step-6-commit-and-tag","level":2,"title":"Step 6: Commit and Tag","text":"

    If releasing independently of a binary release:

    git add internal/assets/claude/ .claude-plugin/\ngit commit -m \"chore: release plugin v0.X.Y\"\ngit tag plugin-v0.X.Y\ngit push origin main --tags\n

    If part of a full release, the release checklist handles this.

    ","path":["Operations","Runbooks","Plugin Release"],"tags":[]},{"location":"operations/runbooks/plugin-release/#troubleshooting","level":2,"title":"Troubleshooting","text":"","path":["Operations","Runbooks","Plugin Release"],"tags":[]},{"location":"operations/runbooks/plugin-release/#skills-dont-appear-after-update","level":3,"title":"Skills Don't Appear After Update","text":"

    Claude Code caches plugin files aggressively:

    make plugin-reload    # clears cache\n# restart Claude Code\n
    ","path":["Operations","Runbooks","Plugin Release"],"tags":[]},{"location":"operations/runbooks/plugin-release/#hooks-dont-fire","level":3,"title":"Hooks Don't Fire","text":"

    Check that the hook is registered in plugin.json and that the command it calls exists:

    jq '.hooks' internal/assets/claude/.claude-plugin/plugin.json\n
    ","path":["Operations","Runbooks","Plugin Release"],"tags":[]},{"location":"operations/runbooks/plugin-release/#version-mismatch","level":3,"title":"Version Mismatch","text":"

    If claude /plugin list shows an old version after updating:

    make plugin-reload\n# restart Claude Code\nclaude /plugin list   # should show new version\n
    ","path":["Operations","Runbooks","Plugin Release"],"tags":[]},{"location":"operations/runbooks/release-checklist/","level":1,"title":"Release Checklist","text":"","path":["Operations","Runbooks","Release Checklist"],"tags":[]},{"location":"operations/runbooks/release-checklist/#release-checklist","level":1,"title":"Release Checklist","text":"

    The canonical pre-release sequence. This runbook ties together the audits, tests, and release steps that are otherwise scattered across docs and the operator's head.

    When to run: Before every release. No exceptions.

    Companion: The /_ctx-release skill automates the tag-and-push portion; this checklist covers everything before and after that automation.

    ","path":["Operations","Runbooks","Release Checklist"],"tags":[]},{"location":"operations/runbooks/release-checklist/#pre-release","level":2,"title":"Pre-Release","text":"","path":["Operations","Runbooks","Release Checklist"],"tags":[]},{"location":"operations/runbooks/release-checklist/#1-run-the-codebase-audit","level":3,"title":"1. Run the Codebase Audit","text":"

    Use the codebase audit runbook prompt with your agent. Focus on analyses 1-4 (extractable patterns, documentation drift, maintainability, security). Triage findings into TASKS.md; anything blocking ships before the release.

    ","path":["Operations","Runbooks","Release Checklist"],"tags":[]},{"location":"operations/runbooks/release-checklist/#2-run-the-docs-semantic-audit","level":3,"title":"2. Run the Docs Semantic Audit","text":"

    Use the docs semantic audit runbook prompt. Fix high-severity findings (weak pages, broken narrative arcs). Medium-severity items can be deferred.

    ","path":["Operations","Runbooks","Release Checklist"],"tags":[]},{"location":"operations/runbooks/release-checklist/#3-sanitize-permissions","level":3,"title":"3. Sanitize Permissions","text":"

    Follow the sanitize permissions runbook. Clean up .claude/settings.local.json before it gets committed as part of the release.

    ","path":["Operations","Runbooks","Release Checklist"],"tags":[]},{"location":"operations/runbooks/release-checklist/#4-run-the-full-test-suite","level":3,"title":"4. Run the Full Test Suite","text":"
    make audit    # fmt + vet + lint + drift + docs + test\nmake smoke    # integration smoke tests\n

    All tests must pass. No exceptions.

    ","path":["Operations","Runbooks","Release Checklist"],"tags":[]},{"location":"operations/runbooks/release-checklist/#5-check-context-health","level":3,"title":"5. Check Context Health","text":"
    ctx drift          # broken references, stale patterns\nctx status         # context file health\n/ctx-link-check    # dead links in docs\n

    Fix anything flagged.

    ","path":["Operations","Runbooks","Release Checklist"],"tags":[]},{"location":"operations/runbooks/release-checklist/#6-review-tasksmd","level":3,"title":"6. Review TASKS.md","text":"

    Scan for incomplete tasks tagged as release-blocking. Either finish them or explicitly defer with a reason in the task note.

    ","path":["Operations","Runbooks","Release Checklist"],"tags":[]},{"location":"operations/runbooks/release-checklist/#release","level":2,"title":"Release","text":"","path":["Operations","Runbooks","Release Checklist"],"tags":[]},{"location":"operations/runbooks/release-checklist/#7-bump-version","level":3,"title":"7. Bump Version","text":"
    echo \"0.X.0\" > VERSION\ngit add VERSION\ngit commit -m \"chore: bump version to 0.X.0\"\n
    ","path":["Operations","Runbooks","Release Checklist"],"tags":[]},{"location":"operations/runbooks/release-checklist/#8-generate-release-notes","level":3,"title":"8. Generate Release Notes","text":"

    In Claude Code:

    /_ctx-release-notes\n

    Review dist/RELEASE_NOTES.md. Ensure it captures all user-visible changes.

    ","path":["Operations","Runbooks","Release Checklist"],"tags":[]},{"location":"operations/runbooks/release-checklist/#9-cut-the-release","level":3,"title":"9. Cut the Release","text":"
    make release\n

    Or in Claude Code: /_ctx-release. See Cutting a Release for the full step-by-step.

    ","path":["Operations","Runbooks","Release Checklist"],"tags":[]},{"location":"operations/runbooks/release-checklist/#post-release","level":2,"title":"Post-Release","text":"","path":["Operations","Runbooks","Release Checklist"],"tags":[]},{"location":"operations/runbooks/release-checklist/#10-verify-the-github-release","level":3,"title":"10. Verify the GitHub Release","text":"
    • GitHub Releases shows the new version
    • All 6 binaries are attached
    • SHA256 checksums are attached
    • Release notes render correctly
    ","path":["Operations","Runbooks","Release Checklist"],"tags":[]},{"location":"operations/runbooks/release-checklist/#11-update-the-plugin-marketplace","level":3,"title":"11. Update the Plugin Marketplace","text":"

    If the plugin version changed, verify the marketplace entry:

    claude /plugin list   # shows updated version\n
    ","path":["Operations","Runbooks","Release Checklist"],"tags":[]},{"location":"operations/runbooks/release-checklist/#12-announce","level":3,"title":"12. Announce","text":"

    Post in the project's communication channels. Reference the release notes.

    ","path":["Operations","Runbooks","Release Checklist"],"tags":[]},{"location":"operations/runbooks/release-checklist/#13-clean-up","level":3,"title":"13. Clean Up","text":"
    rm dist/RELEASE_NOTES.md   # consumed by the release script\ngit stash pop              # if you stashed earlier\n
    ","path":["Operations","Runbooks","Release Checklist"],"tags":[]},{"location":"operations/runbooks/sanitize-permissions/","level":1,"title":"Sanitize Permissions","text":"","path":["Operations","Runbooks","Sanitize Permissions"],"tags":[]},{"location":"operations/runbooks/sanitize-permissions/#sanitize-permissions","level":1,"title":"Sanitize Permissions","text":"

    Manual procedure for cleaning up .claude/settings.local.json. The agent may analyze and recommend, but you make every edit.

    ","path":["Operations","Runbooks","Sanitize Permissions"],"tags":[]},{"location":"operations/runbooks/sanitize-permissions/#why-manual-not-automated","level":2,"title":"Why Manual, Not Automated","text":"

    settings.local.json controls what the agent can do without asking. An agent that can edit its own permission file is a self-escalation vector, especially if the skill is auto-accepted. Keep this manual.

    When to run: After busy sessions where you clicked \"Allow\" many times, weekly hygiene (pair with ctx drift), or before committing .claude/settings.local.json.

    ","path":["Operations","Runbooks","Sanitize Permissions"],"tags":[]},{"location":"operations/runbooks/sanitize-permissions/#step-1-snapshot","level":2,"title":"Step 1: Snapshot","text":"
    cp .claude/settings.local.json /tmp/settings-backup-$(date +%Y%m%d).json\n
    ","path":["Operations","Runbooks","Sanitize Permissions"],"tags":[]},{"location":"operations/runbooks/sanitize-permissions/#step-2-extract-the-allow-list","level":2,"title":"Step 2: Extract the Allow List","text":"
    jq '.permissions.allow[]' .claude/settings.local.json | sort\n

    Eyeball it. You're looking for four categories:

    ","path":["Operations","Runbooks","Sanitize Permissions"],"tags":[]},{"location":"operations/runbooks/sanitize-permissions/#step-3-identify-problems","level":2,"title":"Step 3: Identify Problems","text":"","path":["Operations","Runbooks","Sanitize Permissions"],"tags":[]},{"location":"operations/runbooks/sanitize-permissions/#a-garbage-nonsense","level":3,"title":"A. Garbage / Nonsense","text":"

    Entries that are clearly broken or meaningless:

    Bash(done)\nBash(__NEW_LINE_aa838494a90279c4__ echo \"\")\n

    Action: Delete.

    ","path":["Operations","Runbooks","Sanitize Permissions"],"tags":[]},{"location":"operations/runbooks/sanitize-permissions/#b-one-off-commands-session-debris","level":3,"title":"B. One-Off Commands (Session Debris)","text":"

    Entries with hardcoded paths, literal arguments, or exact commands that were accepted during a specific debugging session:

    Bash(git -C /home/jose/WORKSPACE/ctx log --oneline --all -20)\nBash(/home/jose/WORKSPACE/ctx/ctx add decision \"Use PostgreSQL\" --context ...)\n

    Signs of a one-off:

    • Full absolute paths to specific files
    • Literal string arguments (not wildcards)
    • Very specific flag combinations
    • Commands that look like they came from a single task

    Action: Delete unless you want to promote to a wildcard pattern.

    ","path":["Operations","Runbooks","Sanitize Permissions"],"tags":[]},{"location":"operations/runbooks/sanitize-permissions/#c-subsumed-entries-redundant","level":3,"title":"C. Subsumed Entries (Redundant)","text":"

    A narrow entry that's already covered by a broader one:

    # Narrow (redundant):\nBash(ctx journal source)\nBash(git -C /home/jose/WORKSPACE/ctx log --oneline -5)\n\n# Broad (already covers the above):\nBash(ctx journal source:*)\nBash(git -C:*)\n

    To find these, look for entries where removing the specific args would match an existing wildcard entry.

    Action: Delete the narrow entry.

    ","path":["Operations","Runbooks","Sanitize Permissions"],"tags":[]},{"location":"operations/runbooks/sanitize-permissions/#d-duplicate-intent-different-spelling","level":3,"title":"D. Duplicate Intent, Different Spelling","text":"

    Same command with env vars in different order, or slight variations:

    Bash(CGO_ENABLED=0 CTX_SKIP_PATH_CHECK=1 go test:*)\nBash(CTX_SKIP_PATH_CHECK=1 CGO_ENABLED=0 go test:*)\n

    Action: Keep one, delete the other.

    ","path":["Operations","Runbooks","Sanitize Permissions"],"tags":[]},{"location":"operations/runbooks/sanitize-permissions/#step-4-check-for-security-concerns","level":2,"title":"Step 4: Check for Security Concerns","text":"

    While you're in here, also flag:

    Pattern Risk Bash(git push:*) Bypasses block-git-push.sh hook Bash(rm -rf:*) Recursive delete, no confirmation Bash(sudo:*) Privilege escalation Bash(echo:*), Bash(cat:*) Can compose into writes to sensitive files Bash(curl:*), Bash(wget:*) Arbitrary network access Any write to .claude/ paths Agent self-modification

    See the /ctx-permission-sanitize skill for the full threat matrix.

    ","path":["Operations","Runbooks","Sanitize Permissions"],"tags":[]},{"location":"operations/runbooks/sanitize-permissions/#step-5-edit","level":2,"title":"Step 5: Edit","text":"

    Edit .claude/settings.local.json directly in your editor. Remove flagged entries. Keep the JSON valid.

    # Validate JSON after editing\njq . .claude/settings.local.json > /dev/null && echo \"valid\" || echo \"BROKEN\"\n
    ","path":["Operations","Runbooks","Sanitize Permissions"],"tags":[]},{"location":"operations/runbooks/sanitize-permissions/#step-6-verify","level":2,"title":"Step 6: Verify","text":"
    # Compare before/after\ndiff /tmp/settings-backup-$(date +%Y%m%d).json .claude/settings.local.json\n
    ","path":["Operations","Runbooks","Sanitize Permissions"],"tags":[]},{"location":"operations/runbooks/sanitize-permissions/#step-7-optionally-commit","level":2,"title":"Step 7: Optionally Commit","text":"
    git add .claude/settings.local.json\ngit commit -m \"chore: sanitize agent permissions\"\n
    ","path":["Operations","Runbooks","Sanitize Permissions"],"tags":[]},{"location":"operations/runbooks/sanitize-permissions/#asking-the-agent-for-help","level":2,"title":"Asking the Agent for Help","text":"

    You can safely ask the agent to analyze the file:

    \"Look at my settings.local.json and tell me which permissions look like one-offs or are redundant.\"

    The agent can read and report. You do the edits.

    Do not add these to your allow list:

    • Skill(ctx-permission-sanitize)
    • Edit(.claude/settings.local.json)
    • Any Bash(...) pattern that writes to .claude/
    ","path":["Operations","Runbooks","Sanitize Permissions"],"tags":[]},{"location":"operations/runbooks/sanitize-permissions/#history","level":2,"title":"History","text":"
    • 2026-02-15: Created as manual-only procedure after deciding against a self-modifying skill.
    • 2026-04-16: Moved from hack/runbooks/ to docs/operations/runbooks/.
    ","path":["Operations","Runbooks","Sanitize Permissions"],"tags":[]},{"location":"recipes/","level":1,"title":"Recipes","text":"

    Workflow recipes combining ctx commands and skills to solve specific problems.

    ","path":["Recipes"],"tags":[]},{"location":"recipes/#getting-started","level":2,"title":"Getting Started","text":"","path":["Recipes"],"tags":[]},{"location":"recipes/#guide-your-agent","level":3,"title":"Guide Your Agent","text":"

    How commands, skills, and conversational patterns work together. Train your agent to be proactive through ask, guide, reinforce.

    ","path":["Recipes"],"tags":[]},{"location":"recipes/#setup-across-ai-tools","level":3,"title":"Setup across AI Tools","text":"

    Initialize ctx and configure hooks for Claude Code, Cursor, Aider, Copilot, or Windsurf. Includes shell completion, watch mode for non-native tools, and verification.

    Uses: ctx init, ctx setup, ctx agent, ctx completion, ctx watch

    ","path":["Recipes"],"tags":[]},{"location":"recipes/#multilingual-session-parsing","level":3,"title":"Multilingual Session Parsing","text":"

    Parse session journal entries written in other languages. Configure recognized session-header prefixes so the journal pipeline works for Turkish, Japanese, and any other locale.

    Uses: ctx journal source, ctx journal import, session_prefixes in .ctxrc

    ","path":["Recipes"],"tags":[]},{"location":"recipes/#keeping-context-in-a-separate-repo","level":3,"title":"Keeping Context in a Separate Repo","text":"

    Store context files outside the project tree: in a private repo, shared directory, or anywhere else. Useful for open source projects with private context or multi-repo setups.

    Uses: ctx init, --context-dir, --allow-outside-cwd, .ctxrc, /ctx-status

    ","path":["Recipes"],"tags":[]},{"location":"recipes/#sessions","level":2,"title":"Sessions","text":"","path":["Recipes"],"tags":[]},{"location":"recipes/#the-complete-session","level":3,"title":"The Complete Session","text":"

    Walk through a full ctx session from start to finish:

    • Loading context,
    • Picking what to work on,
    • Committing with context,
    • Capturing, reflecting, and saving a snapshot.

    Uses: ctx status, ctx agent, /ctx-remember, /ctx-next, /ctx-commit, /ctx-reflect

    ","path":["Recipes"],"tags":[]},{"location":"recipes/#session-ceremonies","level":3,"title":"Session Ceremonies","text":"

    The two bookend rituals for every session: /ctx-remember at the start to load and confirm context, /ctx-wrap-up at the end to review the session and persist learnings, decisions, and tasks.

    Uses: /ctx-remember, /ctx-wrap-up, /ctx-commit, ctx agent, ctx add

    ","path":["Recipes"],"tags":[]},{"location":"recipes/#browsing-and-enriching-past-sessions","level":3,"title":"Browsing and Enriching Past Sessions","text":"

    Export your AI session history to a browsable journal site. Enrich entries with metadata and search across months of work.

    Uses: ctx journal source/import, ctx journal site, ctx journal obsidian, ctx serve, /ctx-history, /ctx-journal-enrich, /ctx-journal-enrich-all

    ","path":["Recipes"],"tags":[]},{"location":"recipes/#session-reminders","level":3,"title":"Session Reminders","text":"

    Leave a message for your next session. Reminders surface automatically at session start and repeat until dismissed. Date-gate reminders to surface only after a specific date.

    Uses: ctx remind, ctx remind list, ctx remind dismiss, ctx system check-reminders

    ","path":["Recipes"],"tags":[]},{"location":"recipes/#reviewing-session-changes","level":3,"title":"Reviewing Session Changes","text":"

    See what moved since your last session: context file edits, code commits, directories touched. Auto-detects session boundaries from state markers.

    Uses: ctx change, ctx agent, ctx status

    ","path":["Recipes"],"tags":[]},{"location":"recipes/#pausing-context-hooks","level":3,"title":"Pausing Context Hooks","text":"

    Silence all nudge hooks for a quick task that doesn't need ceremony overhead. Session-scoped: Other sessions are unaffected. Security hooks still fire.

    Uses: ctx hook pause, ctx hook resume, /ctx-pause, /ctx-resume

    ","path":["Recipes"],"tags":[]},{"location":"recipes/#knowledge-and-tasks","level":2,"title":"Knowledge and Tasks","text":"","path":["Recipes"],"tags":[]},{"location":"recipes/#persisting-decisions-learnings-and-conventions","level":3,"title":"Persisting Decisions, Learnings, and Conventions","text":"

    Record architectural decisions with rationale, capture gotchas and lessons learned, and codify conventions so they survive across sessions and team members.

    Uses: ctx add decision, ctx add learning, ctx add convention, ctx decision reindex, ctx learning reindex, /ctx-decision-add, /ctx-learning-add, /ctx-convention-add, /ctx-reflect

    ","path":["Recipes"],"tags":[]},{"location":"recipes/#tracking-work-across-sessions","level":3,"title":"Tracking Work across Sessions","text":"

    Add, prioritize, complete, snapshot, and archive tasks. Keep TASKS.md focused as your project evolves across dozens of sessions.

    Uses: ctx add task, ctx task complete, ctx task archive, ctx task snapshot, /ctx-task-add, /ctx-archive, /ctx-next

    ","path":["Recipes"],"tags":[]},{"location":"recipes/#using-the-scratchpad","level":3,"title":"Using the Scratchpad","text":"

    Use the encrypted scratchpad for quick notes, working memory, and sensitive values during AI sessions. Natural language in, encrypted storage out.

    Uses: ctx pad, /ctx-pad, ctx pad show, ctx pad edit

    ","path":["Recipes"],"tags":[]},{"location":"recipes/#syncing-scratchpad-notes-across-machines","level":3,"title":"Syncing Scratchpad Notes across Machines","text":"

    Distribute your scratchpad encryption key, push and pull encrypted notes via git, and resolve merge conflicts when two machines edit simultaneously.

    Uses: ctx init, ctx pad, ctx pad resolve, scp

    ","path":["Recipes"],"tags":[]},{"location":"recipes/#bridging-claude-code-auto-memory","level":3,"title":"Bridging Claude Code Auto Memory","text":"

    Mirror Claude Code's auto memory (MEMORY.md) into .context/ for version control, portability, and drift detection. Import entries into structured context files with heuristic classification.

    Uses: ctx memory sync, ctx memory status, ctx memory diff, ctx memory import, ctx memory publish, ctx system check-memory-drift

    ","path":["Recipes"],"tags":[]},{"location":"recipes/#hooks-and-notifications","level":2,"title":"Hooks and Notifications","text":"","path":["Recipes"],"tags":[]},{"location":"recipes/#hook-output-patterns","level":3,"title":"Hook Output Patterns","text":"

    Choose the right output pattern for your Claude Code hooks: VERBATIM relay for user-facing reminders, hard gates for invariants, agent directives for nudges, and five more patterns across the spectrum.

    Uses: ctx plugin hooks, settings.local.json

    ","path":["Recipes"],"tags":[]},{"location":"recipes/#customizing-hook-messages","level":3,"title":"Customizing Hook Messages","text":"

    Customize what hooks say without changing what they do. Override the QA gate for Python (pytest instead of make lint), silence noisy ceremony nudges, or tailor post-commit instructions for your stack.

    Uses: ctx hook message list, ctx hook message show, ctx hook message edit, ctx hook message reset

    ","path":["Recipes"],"tags":[]},{"location":"recipes/#hook-sequence-diagrams","level":3,"title":"Hook Sequence Diagrams","text":"

    Mermaid sequence diagrams for every system hook: entry conditions, state reads, output, throttling, and exit points. Includes throttling summary table and state file reference.

    Uses: All ctx system hooks

    ","path":["Recipes"],"tags":[]},{"location":"recipes/#auditing-system-hooks","level":3,"title":"Auditing System Hooks","text":"

    The 12 system hooks that run invisibly during every session: what each one does, why it exists, and how to verify they're actually firing. Covers webhook-based audit trails, log inspection, and detecting silent hook failures.

    Uses: ctx system, ctx hook notify, .context/logs/, .ctxrc notify.events

    ","path":["Recipes"],"tags":[]},{"location":"recipes/#webhook-notifications","level":3,"title":"Webhook Notifications","text":"

    Get push notifications when loops complete, hooks fire, or agents hit milestones. Webhook URL is encrypted: never stored in plaintext. Works with IFTTT, Slack, Discord, ntfy.sh, or any HTTP endpoint.

    Uses: ctx hook notify setup, ctx hook notify test, ctx hook notify --event, .ctxrc notify.events

    ","path":["Recipes"],"tags":[]},{"location":"recipes/#configuration-profiles","level":3,"title":"Configuration Profiles","text":"

    Switch between dev and base runtime configurations without editing .ctxrc by hand. Verbose logging and webhooks for debugging, clean defaults for normal sessions.

    Uses: ctx config switch, ctx config status, /ctx-config

    ","path":["Recipes"],"tags":[]},{"location":"recipes/#maintenance","level":2,"title":"Maintenance","text":"","path":["Recipes"],"tags":[]},{"location":"recipes/#detecting-and-fixing-drift","level":3,"title":"Detecting and Fixing Drift","text":"

    Keep context files accurate by detecting structural drift (stale paths, missing files, stale file ages) and task staleness.

    Uses: ctx drift, ctx sync, ctx compact, ctx status, /ctx-drift, /ctx-status, /ctx-prompt-audit

    ","path":["Recipes"],"tags":[]},{"location":"recipes/#state-directory-maintenance","level":3,"title":"State Directory Maintenance","text":"

    Clean up session tombstones from .context/state/. Prune old per-session files, identify stale global markers, and keep the state directory lean.

    Uses: ctx prune

    ","path":["Recipes"],"tags":[]},{"location":"recipes/#troubleshooting","level":3,"title":"Troubleshooting","text":"

    Diagnose hook failures, noisy nudges, stale context, and configuration issues. Start with ctx doctor for a structural health check, then use /ctx-doctor for agent-driven analysis of event patterns.

    Uses: ctx doctor, ctx hook event, /ctx-doctor

    ","path":["Recipes"],"tags":[]},{"location":"recipes/#claude-code-permission-hygiene","level":3,"title":"Claude Code Permission Hygiene","text":"

    Keep .claude/settings.local.json clean: recommended safe defaults, what to never pre-approve, and a maintenance workflow for cleaning up session debris.

    Uses: ctx init, /ctx-drift, /ctx-permission-sanitize, ctx permission snapshot, ctx permission restore

    ","path":["Recipes"],"tags":[]},{"location":"recipes/#permission-snapshots","level":3,"title":"Permission Snapshots","text":"

    Capture a known-good permission baseline as a golden image, then restore at session start to automatically drop session-accumulated permissions.

    Uses: ctx permission snapshot, ctx permission restore, /ctx-permission-sanitize

    ","path":["Recipes"],"tags":[]},{"location":"recipes/#turning-activity-into-content","level":3,"title":"Turning Activity into Content","text":"

    Generate blog posts from project activity, write changelog posts from commit ranges, and publish a browsable journal site from your session history.

    The output is generic Markdown, but the skills are tuned for the ctx-style blog artifacts you see on this website.

    Uses: ctx journal site, ctx journal obsidian, ctx serve, ctx journal import, /ctx-blog, /ctx-blog-changelog, /ctx-journal-enrich

    ","path":["Recipes"],"tags":[]},{"location":"recipes/#importing-claude-code-plans","level":3,"title":"Importing Claude Code Plans","text":"

    Import Claude Code plan files (~/.claude/plans/*.md) into specs/ as permanent project specs. Filter by date, select interactively, and optionally create tasks referencing each imported spec.

    Uses: /ctx-plan-import, /ctx-task-add

    ","path":["Recipes"],"tags":[]},{"location":"recipes/#design-before-coding","level":3,"title":"Design Before Coding","text":"

    Front-load design with a four-skill chain: brainstorm the approach, spec the design, task the work, implement step-by-step. Each step produces an artifact that feeds the next.

    Uses: /ctx-brainstorm, /ctx-spec, /ctx-task-add, /ctx-implement, /ctx-decision-add

    ","path":["Recipes"],"tags":[]},{"location":"recipes/#agents-and-automation","level":2,"title":"Agents and Automation","text":"","path":["Recipes"],"tags":[]},{"location":"recipes/#building-project-skills","level":3,"title":"Building Project Skills","text":"

    Encode repeating workflows into reusable skills the agent loads automatically. Covers the full cycle: identify a pattern, create the skill, test with realistic prompts, and iterate until it triggers correctly.

    Uses: /ctx-skill-create, ctx init

    ","path":["Recipes"],"tags":[]},{"location":"recipes/#running-an-unattended-ai-agent","level":3,"title":"Running an Unattended AI Agent","text":"

    Set up a loop where an AI agent works through tasks overnight without you at the keyboard, using ctx for persistent memory between iterations.

    This recipe shows how ctx supports long-running agent loops without losing context or intent.

    Uses: ctx init, ctx loop, ctx watch, ctx load, /ctx-loop, /ctx-implement

    ","path":["Recipes"],"tags":[]},{"location":"recipes/#when-to-use-a-team-of-agents","level":3,"title":"When to Use a Team of Agents","text":"

    Decision framework for choosing between a single agent, parallel worktrees, and a full agent team.

    This recipe covers the file overlap test, when teams make things worse, and what ctx provides at each level.

    Uses: /ctx-worktree, /ctx-next, ctx status

    ","path":["Recipes"],"tags":[]},{"location":"recipes/#parallel-agent-development-with-git-worktrees","level":3,"title":"Parallel Agent Development with Git Worktrees","text":"

    Split a large backlog across 3-4 agents using git worktrees, each on its own branch and working directory. Group tasks by file overlap, work in parallel, merge back.

    Uses: /ctx-worktree, /ctx-next, git worktree, git merge

    ","path":["Recipes"],"tags":[]},{"location":"recipes/#architecture-deep-dive","level":3,"title":"Architecture Deep Dive","text":"

    Three-pass pipeline for understanding a codebase: map what exists, enrich with code intelligence, then hunt for where it will silently fail. Produces architecture docs, quantified dependency data, and ranked failure hypotheses.

    Uses: /ctx-architecture, /ctx-architecture-enrich, /ctx-architecture-failure-analysis

    ","path":["Recipes"],"tags":[]},{"location":"recipes/#writing-steering-files","level":3,"title":"Writing Steering Files","text":"

    Tell your AI assistant how to behave with rule-based prompt injection that fires automatically when prompts match a description. Walks through scaffolding a steering file, previewing matches, and syncing to each AI tool's native format.

    Uses: ctx steering add, ctx steering preview, ctx steering list, ctx steering sync

    ","path":["Recipes"],"tags":[]},{"location":"recipes/#authoring-lifecycle-triggers","level":3,"title":"Authoring Lifecycle Triggers","text":"

    Run executable shell scripts at session-start, pre-tool-use, file-save, and other lifecycle events. Script-based automation (complementary to steering's rule-based prompts), with a security-first workflow: scaffold disabled, test with mock input, enable only after review.

    Uses: ctx trigger add, ctx trigger test, ctx trigger enable, ctx trigger disable, ctx trigger list

    ","path":["Recipes"],"tags":[]},{"location":"recipes/#hub","level":2,"title":"Hub","text":"","path":["Recipes"],"tags":[]},{"location":"recipes/#hub-overview","level":3,"title":"Hub Overview","text":"

    Mental model and three user stories for the ctx Hub. What flows, what doesn't, and when not to use it. Read this before any of the other Hub recipes.

    Uses: ctx hub, ctx connection, ctx add --share

    ","path":["Recipes"],"tags":[]},{"location":"recipes/#ctx-hub-getting-started","level":3,"title":"ctx Hub: Getting Started","text":"

    Stand up a single-node hub on localhost, register two projects, publish a decision from one, and watch it appear in the other. End-to-end in under five minutes.

    Uses: ctx hub start, ctx connection register, ctx connection subscribe, ctx connection sync, ctx connection listen, ctx add --share, ctx agent --include-hub

    ","path":["Recipes"],"tags":[]},{"location":"recipes/#personal-cross-project-brain","level":3,"title":"Personal Cross-Project Brain","text":"

    Story 1 day-to-day workflow: one developer, many projects, one hub on localhost. Records a learning in project A, watches it show up automatically in project B. Walks through a realistic day of using the hub as passive infrastructure (no manual sync, no git push, no ceremony).

    Uses: ctx add --share, ctx connection subscribe, ctx agent --include-hub

    ","path":["Recipes"],"tags":[]},{"location":"recipes/#team-knowledge-bus","level":3,"title":"Team Knowledge Bus","text":"

    Story 2 day-to-day workflow: a small trusted team sharing decisions, learnings, and conventions via a hub on an internal server. Covers the team publishing culture, what belongs on the hub vs. local, token management, and the social rules that make a shared knowledge stream stay signal-rich.

    Uses: ctx add --share, ctx connection status, ctx connection subscribe, ctx hub status

    ","path":["Recipes"],"tags":[]},{"location":"recipes/#ctx-hub-multi-machine","level":3,"title":"ctx Hub: Multi-Machine","text":"

    Run the hub on a LAN host as a daemon and connect from project directories on other workstations. Firewall guidance, TLS via a reverse proxy, and safe daemon restart semantics.

    Uses: ctx hub start --daemon, ctx hub stop, ctx connection register, ctx connection status

    ","path":["Recipes"],"tags":[]},{"location":"recipes/#ctx-hub-ha-cluster","level":3,"title":"ctx Hub: HA Cluster","text":"

    Raft-based leader election across three or more nodes for redundancy. Covers bootstrap, runtime peer management, graceful stepdown, and the Raft-lite durability caveat.

    Uses: ctx hub start --peers, ctx hub status, ctx hub peer add/remove, ctx hub stepdown

    ","path":["Recipes"],"tags":[]},{"location":"recipes/architecture-deep-dive/","level":1,"title":"Architecture Deep Dive","text":"","path":["Architecture Deep Dive"],"tags":[]},{"location":"recipes/architecture-deep-dive/#the-problem","level":2,"title":"The Problem","text":"

    Understanding a codebase at the surface level is easy. Understanding where it will break under real-world conditions takes three passes: mapping what exists, quantifying how it connects, and hunting for where it silently fails. Most teams stop at the first pass.

    ","path":["Architecture Deep Dive"],"tags":[]},{"location":"recipes/architecture-deep-dive/#tldr","level":2,"title":"TL;DR","text":"
    # Pass 1: Map the system\n/ctx-architecture\n\n# Pass 2: Enrich with code intelligence\n/ctx-architecture-enrich\n\n# Pass 3: Hunt for failure modes\n/ctx-architecture-failure-analysis\n

    Each pass builds on the previous one. Run them in order. The output accumulates in .context/; each pass reads the prior artifacts and extends them.

    ","path":["Architecture Deep Dive"],"tags":[]},{"location":"recipes/architecture-deep-dive/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Tool Type Purpose /ctx-architecture Skill Map modules, dependencies, data flow, patterns /ctx-architecture-enrich Skill Verify blast radius and flows with code intel /ctx-architecture-failure-analysis Skill Generate falsifiable incident hypotheses ctx drift CLI Detect stale paths and broken references ctx status CLI Quick structural overview","path":["Architecture Deep Dive"],"tags":[]},{"location":"recipes/architecture-deep-dive/#the-workflow","level":2,"title":"The Workflow","text":"","path":["Architecture Deep Dive"],"tags":[]},{"location":"recipes/architecture-deep-dive/#pass-1-map-what-exists","level":3,"title":"Pass 1: Map What Exists","text":"
    /ctx-architecture\n

    Produces:

    • ARCHITECTURE.md: succinct project map (< 4000 tokens), loaded at every session start
    • DETAILED_DESIGN*.md: deep per-module reference with exported API, data flow, danger zones, extension points
    • CHEAT-SHEETS.md: lifecycle flow diagrams
    • map-tracking.json: coverage state with confidence scores

    This pass forces deep code reading. No shortcuts, no code intelligence tools; the agent reads every module it analyzes. That forced reading is what makes the subsequent passes useful.

    When to run: First time on a codebase, or after significant structural changes (new packages, moved files, changed dependencies).

    Principal mode: Add principal to get strategic analysis (ARCHITECTURE-PRINCIPAL.md, DANGER-ZONES.md from P4):

    /ctx-architecture principal\n
    ","path":["Architecture Deep Dive"],"tags":[]},{"location":"recipes/architecture-deep-dive/#pass-2-enrich-with-code-intelligence","level":3,"title":"Pass 2: Enrich with Code Intelligence","text":"
    /ctx-architecture-enrich\n

    Takes the Pass 1 artifacts as baseline and layers on verified, graph-backed data from GitNexus:

    • Blast radius numbers for key functions
    • Execution flow traces through hot paths
    • Domain clustering validation
    • Registration site discovery

    This pass does not replace reading; it quantifies what reading found. If Pass 1 says \"module X depends on module Y,\" Pass 2 says \"module X has 47 callers in module Y, and changing function Z would affect 12 downstream consumers.\"

    When to run: After Pass 1, when you need quantified confidence for refactoring decisions or risk assessment.

    Requires: GitNexus MCP server connected.

    ","path":["Architecture Deep Dive"],"tags":[]},{"location":"recipes/architecture-deep-dive/#pass-3-hunt-for-failure-modes","level":3,"title":"Pass 3: Hunt for Failure Modes","text":"
    /ctx-architecture-failure-analysis\n

    The adversarial pass. Reads all prior artifacts, then systematically hunts for correctness bugs across 9 failure categories:

    1. Concurrency (races, deadlocks, goroutine leaks)
    2. Ordering assumptions (init, registration, shutdown)
    3. Cache staleness (TTL-less, read-your-writes, cross-process)
    4. Fan-out amplification (N+1, retry storms)
    5. Ownership and lifecycle (orphans, double-close)
    6. Error handling (silent swallowing, partial failure)
    7. Scaling cliffs (quadratic, unbounded, global locks)
    8. Idempotency failures (duplicate processing, retry mutations)
    9. State machine drift (illegal states, unvalidated transitions)

    Every finding must meet an evidence standard: code path, trigger, failure path, silence reason, and code evidence. A mandatory challenge phase attempts to disprove each finding before it is accepted. Findings carry a confidence level (High/Medium/Low) and explicit risk score.

    Produces DANGER-ZONES.md, a ranked inventory of findings split into Critical and Elevated tiers.

    When to run: Before releases, after major refactors, when investigating incident categories, or when onboarding.

    ","path":["Architecture Deep Dive"],"tags":[]},{"location":"recipes/architecture-deep-dive/#what-you-get","level":2,"title":"What You Get","text":"

    After all three passes, .context/ contains:

    File From Purpose ARCHITECTURE.md Pass 1 System map (session-start context) DETAILED_DESIGN*.md Pass 1 Module-level deep reference CHEAT-SHEETS.md Pass 1 Lifecycle flow diagrams map-tracking.json Pass 1 Coverage and confidence data CONVERGENCE-REPORT.md Pass 1 What's covered, what's not DANGER-ZONES.md Pass 3 Ranked failure hypotheses

    Pass 2 enriches Pass 1 artifacts in-place rather than creating new files.

    ","path":["Architecture Deep Dive"],"tags":[]},{"location":"recipes/architecture-deep-dive/#tips","level":2,"title":"Tips","text":"
    • Run Pass 1 with focus areas if the codebase is large. The skill asks what to go deep on, so name the modules you're about to change.
    • You don't need all three passes every time. Pass 1 is the foundation. Pass 2 and 3 are for when you need quantified confidence or adversarial rigor.
    • Re-run Pass 1 incrementally. It tracks coverage in map-tracking.json and only re-analyzes stale modules.
    • Pass 3 is most valuable before releases. The ranked DANGER-ZONES.md is a pre-release checklist.
    • The trilogy maps to a question progression: How does it work? How well does it connect? Where will it break?
    ","path":["Architecture Deep Dive"],"tags":[]},{"location":"recipes/architecture-deep-dive/#see-also","level":2,"title":"See Also","text":"

    See also: Detecting and Fixing Context Drift to keep architecture artifacts fresh between deep-dive sessions.

    See also: Detecting and Fixing Context Drift for structural checks that complement architecture analysis.

    ","path":["Architecture Deep Dive"],"tags":[]},{"location":"recipes/autonomous-loops/","level":1,"title":"Running an Unattended AI Agent","text":"","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#the-problem","level":2,"title":"The Problem","text":"

    You have a project with a clear list of tasks, and you want an AI agent to work through them autonomously: overnight, unattended, without you sitting at the keyboard.

    Each iteration needs to remember what the previous one did, mark tasks as completed, and know when to stop.

    Without persistent memory, every iteration starts fresh and the loop collapses. With ctx, each iteration can pick up where the last one left off, but only if the agent persists its context as part of the work.

    Unattended operation works because the agent treats context persistence as a first-class deliverable, not an afterthought.

    ","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#tldr","level":2,"title":"TL;DR","text":"
    ctx init                                    # 1. init context\n# Edit TASKS.md with phased work items\nctx loop --tool claude --max-iterations 10  # 2. generate loop.sh\n./loop.sh 2>&1 | tee /tmp/loop.log &        # 3. run the loop\nctx watch --log /tmp/loop.log               # 4. process context updates\n# Next morning:\nctx status && ctx load                      # 5. review the results\n

    Read on for permissions, isolation, and completion signals.

    ","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Tool Type Purpose ctx init Command Initialize project context and prompt templates ctx loop Command Generate the loop shell script ctx watch Command Monitor AI output and persist context updates ctx load Command Display assembled context (for debugging) /ctx-loop Skill Generate loop script from inside Claude Code /ctx-implement Skill Execute a plan step-by-step with verification","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#the-workflow","level":2,"title":"The Workflow","text":"","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#step-1-initialize-for-unattended-operation","level":3,"title":"Step 1: Initialize for Unattended Operation","text":"

    Start by creating a .context/ directory configured so the agent can work without human input.

    ctx init\n

    This creates .context/ with the template files (including a loop prompt at .context/loop.md), and seeds Claude Code permissions in .claude/settings.local.json. Install the ctx plugin for hooks and skills.

    ","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#step-2-populate-tasksmd-with-phased-work","level":3,"title":"Step 2: Populate TASKS.md with Phased Work","text":"

    Open .context/TASKS.md and organize your work into phases. The agent works through these systematically, top to bottom, using priority tags to break ties.

    # Tasks\n\n## Phase 1: Foundation\n\n- [ ] Set up project structure and build system `#priority:high`\n- [ ] Configure testing framework `#priority:high`\n- [ ] Create CI pipeline `#priority:medium`\n\n## Phase 2: Core Features\n\n- [ ] Implement user registration `#priority:high`\n- [ ] Add email verification `#priority:high`\n- [ ] Create password reset flow `#priority:medium`\n\n## Phase 3: Hardening\n\n- [ ] Add rate limiting to API endpoints `#priority:medium`\n- [ ] Improve error messages `#priority:low`\n- [ ] Write integration tests `#priority:medium`\n

    Phased organization matters because it gives the agent natural boundaries. Phase 1 tasks should be completable without Phase 2 code existing yet.

    ","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#step-3-configure-the-loop-prompt","level":3,"title":"Step 3: Configure the Loop Prompt","text":"

    The loop prompt at .context/loop.md instructs the agent to operate autonomously:

    1. Read .context/CONSTITUTION.md first (hard rules, never violated)
    2. Load context from .context/ files
    3. Pick one task per iteration
    4. Complete the task and update context files
    5. Commit changes (including .context/)
    6. Signal status with a completion signal

    You can customize .context/loop.md for your project. The critical parts are the one-task-per-iteration discipline, proactive context persistence, and completion signals at the end:

    ## Signal Status\n\nEnd your response with exactly ONE of:\n\n* `SYSTEM_CONVERGED`: All tasks in `TASKS.md` are complete (*this is the\n  signal the loop script detects by default*)\n* `SYSTEM_BLOCKED`: Cannot proceed, need human input (explain why)\n* (*no signal*): More work remains, continue to the next iteration\n\nNote: the loop script only checks for `SYSTEM_CONVERGED` by default.\n`SYSTEM_BLOCKED` is a convention for the human reviewing the log.\n
    ","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#step-4-configure-permissions","level":3,"title":"Step 4: Configure Permissions","text":"

    An unattended agent needs permission to use tools without prompting. By default, Claude Code asks for confirmation on file writes, bash commands, and other operations, which stops the loop and waits for a human who is not there.

    There are two approaches.

    ","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#option-a-explicit-allowlist-recommended","level":4,"title":"Option A: Explicit Allowlist (Recommended)","text":"

    Grant only the permissions the agent needs. In .claude/settings.local.json:

    {\n  \"permissions\": {\n    \"allow\": [\n      \"Bash(make:*)\",\n      \"Bash(go:*)\",\n      \"Bash(git:*)\",\n      \"Bash(ctx:*)\",\n      \"Read\",\n      \"Write\",\n      \"Edit\"\n    ]\n  }\n}\n

    Adjust the Bash patterns for your project's toolchain. The agent can run make, go, git, and ctx commands but cannot run arbitrary shell commands.

    This is recommended even in sandboxed environments because it limits blast radius.

    ","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#option-b-skip-all-permission-checks","level":4,"title":"Option B: Skip All Permission Checks","text":"

    Claude Code supports a --dangerously-skip-permissions flag that disables all permission prompts:

    claude --dangerously-skip-permissions -p \"$(cat .context/loop.md)\"\n

    This Flag Means What It Says

    With --dangerously-skip-permissions, the agent can execute any shell command, write to any file, and make network requests without confirmation.

    Only use this on a sandboxed machine: ideally a virtual machine with no access to host credentials, no SSH keys, and no access to production systems.

    If you would not give an untrusted intern sudo on this machine, do not use this flag.

    ","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#enforce-isolation-at-the-os-level","level":4,"title":"Enforce Isolation at the OS Level","text":"

    The only controls an agent cannot override are the ones enforced by the operating system, the container runtime, or the hypervisor.

    Do Not Skip This Section

    This is not optional hardening:

    An unattended agent with unrestricted OS access is an unattended shell with unrestricted OS access.

    The allowlist above is a strong first layer, but do not rely on a single runtime boundary.

    For unattended runs, enforce isolation at the infrastructure level:

    Layer What to enforce User account Run the agent as a dedicated unprivileged user with no sudo access and no membership in privileged groups (docker, wheel, adm). Filesystem Restrict the project directory via POSIX permissions or ACLs. The agent should have no access to other users' files or system directories. Container Run inside a Docker/Podman sandbox. Mount only the project directory. Drop capabilities (--cap-drop=ALL). Disable network if not needed (--network=none). Never mount the Docker socket and do not run privileged containers. Prefer rootless containers. Virtual machine Prefer a dedicated VM with no shared folders, no host passthrough, and no keys to other machines. Network If the agent does not need the internet, disable outbound access entirely. If it does, restrict to specific domains via firewall rules. Resource limits Apply CPU, memory, and disk limits (cgroups/container limits). A runaway loop should not fill disk or consume all RAM. Self-modification Make instruction files read-only. CLAUDE.md, .claude/settings.local.json, and .context/CONSTITUTION.md should not be writable by the agent user. If using project-local hooks, protect those too.

    A minimal Docker setup for overnight runs:

    docker run --rm \\\n  --network=none \\\n  --cap-drop=ALL \\\n  --memory=4g \\\n  --cpus=2 \\\n  -v /path/to/project:/workspace \\\n  -w /workspace \\\n  your-dev-image \\\n  ./loop.sh 2>&1 | tee /tmp/loop.log\n

    Defense in Depth

    Use multiple layers together: OS-level isolation (the boundary the agent cannot cross), a permission allowlist (what Claude Code will do within that boundary), and CONSTITUTION.md (a soft nudge for the common case).

    ","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#step-5-generate-the-loop-script","level":3,"title":"Step 5: Generate the Loop Script","text":"

    Use ctx loop to generate a loop.sh tailored to your AI tool:

    # Generate for Claude Code with a 10-iteration cap\nctx loop --tool claude --max-iterations 10\n\n# Generate for Aider\nctx loop --tool aider --max-iterations 10\n\n# Custom prompt file and output filename\nctx loop --tool claude --prompt my-prompt.md --output my-loop.sh\n

    The generated script reads .context/loop.md, runs the tool, checks for completion signals, and loops until done or the cap is reached.

    You can also use the /ctx-loop skill from inside Claude Code.

    A Shell Loop Is the Best Practice

    The shell loop approach spawns a fresh AI process each iteration, so the only state that carries between iterations is what lives in .context/ and git.

    Claude Code's built-in /loop runs iterations within the same session, which can allow context window state to leak between iterations. This can be convenient for short runs, but it is less reliable for unattended loops.

    See Shell Loop vs Built-in Loop for details.

    ","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#step-6-run-with-watch-mode","level":3,"title":"Step 6: Run with Watch Mode","text":"

    Open two terminals. In the first, run the loop. In the second, run ctx watch to process context updates from the AI output.

    # Terminal 1: Run the loop\n./loop.sh 2>&1 | tee /tmp/loop.log\n\n# Terminal 2: Watch for context updates\nctx watch --log /tmp/loop.log\n

    The watch command parses XML context-update commands from the AI output and applies them:

    <context-update type=\"complete\">user registration</context-update>\n<context-update type=\"learning\"\n  context=\"Setting up user registration\"\n  lesson=\"Email verification needs SMTP configured\"\n  application=\"Add SMTP setup to deployment checklist\"\n>SMTP Requirement</context-update>\n
    ","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#step-7-completion-signals-end-the-loop","level":3,"title":"Step 7: Completion Signals End the Loop","text":"

    The generated script checks for one completion signal per run. By default this is SYSTEM_CONVERGED. You can change it with the --completion flag:

    ctx loop --tool claude --completion BOOTSTRAP_COMPLETE --max-iterations 5\n

    The following signals are conventions used in .context/loop.md:

    Signal Convention How the script handles it SYSTEM_CONVERGED All tasks in TASKS.md are done Detected by default (--completion default value) SYSTEM_BLOCKED Agent cannot proceed Only detected if you set --completion to this BOOTSTRAP_COMPLETE Initial scaffolding done Only detected if you set --completion to this

    The script uses grep -q on the agent's output, so any string works as a signal. If you need to detect multiple signals in one run, edit the generated loop.sh to add additional grep checks.

    When you return in the morning, check the log and the context files:

    tail -100 /tmp/loop.log\nctx status\nctx load\n
    ","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#step-8-use-ctx-implement-for-plan-execution","level":3,"title":"Step 8: Use /ctx-implement for Plan Execution","text":"

    Within each iteration, the agent can use /ctx-implement to execute multi-step plans with verification between steps. This is useful for complex tasks that touch multiple files.

    The skill breaks a plan into atomic, verifiable steps:

    Step 1/6: Create user model .................. OK\nStep 2/6: Add database migration ............. OK\nStep 3/6: Implement registration handler ..... OK\nStep 4/6: Write unit tests ................... OK\nStep 5/6: Run test suite ..................... FAIL\n  -> Fixed: missing test dependency\n  -> Re-verify ............................... OK\nStep 6/6: Update TASKS.md .................... OK\n

    Each step is verified (build, test, syntax check) before moving to the next.

    ","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#putting-it-all-together","level":2,"title":"Putting It All Together","text":"

    A typical overnight run:

    ctx init\n# Edit TASKS.md and .context/loop.md\n\nctx loop --tool claude --max-iterations 20\n\n./loop.sh 2>&1 | tee /tmp/loop.log &\nctx watch --log /tmp/loop.log\n\n# Next morning:\nctx status\nctx load\n
    ","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#why-autonomous-loops-work-proactive-context-persistence","level":2,"title":"Why Autonomous Loops Work: Proactive Context Persistence","text":"

    The autonomous loop pattern works because the agent persists context as part of the job.

    Without proactive persistence, the loop degrades into disconnected iterations that repeat work, contradict decisions, and lose track of progress. The agent cannot rely on a human to prompt it. It must treat context updates as part of every task, not as a separate step.

    ","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#the-agent-playbook-self-check-drives-autonomy","level":3,"title":"The Agent Playbook Self-Check Drives Autonomy","text":"

    The Agent Playbook defines a self-check prompt the agent applies continuously:

    \"If this session ended right now, would the next session know what happened?\"\n

    In interactive sessions, a human might remind the agent to save context. When the agent runs unattended, nobody is there to remind it.

    The self-check becomes the trigger: after meaningful work, the agent evaluates whether the context files reflect reality and updates them immediately if they do not.

    ","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#what-the-agent-does-proactively-between-iterations","level":3,"title":"What the Agent Does Proactively between Iterations","text":"

    At milestones within an iteration, the agent persists without waiting for instructions:

    Milestone What the agent does proactively Completes a task Marks it [x] in TASKS.md Discovers a gotcha Adds it to LEARNINGS.md Makes a design choice Records it in DECISIONS.md with rationale Identifies follow-up work Creates new tasks in TASKS.md with #added timestamp Hits an unexpected failure Documents the root cause before moving on","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#example-what-proactive-persistence-looks-like","level":3,"title":"Example: What Proactive Persistence Looks Like","text":"

    Here is what the agent does during a single iteration beyond writing code:

    Iteration 4:\n  1. Read TASKS.md -> pick \"Add email verification\" (#priority:high)\n  2. Add #started:2026-01-25-030012 to the task\n  3. Implement the feature (code, tests, docs if needed)\n  4. Tests pass -> mark task [x]\n  5. Add learning: \"SMTP config must be set before verification handler registers. Order matters in init().\"\n  6. Add decision: \"Use token-based verification links (not codes) because links work better in automated tests.\"\n  7. Create follow-up task: \"Add rate limiting to verification endpoint\" #added:...\n  8. Commit all changes including `.context/`\n  9. No signal emitted -> loop continues to iteration 5\n

    Steps 2, 4, 5, 6, and 7 are proactive context persistence:

    The agent was not asked to do any of them.

    ","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#context-persistence-at-milestones","level":3,"title":"Context Persistence at Milestones","text":"

    For long autonomous runs, the agent persists context at natural boundaries, often at phase transitions or after completing a cluster of related tasks. It updates TASKS.md, DECISIONS.md, and LEARNINGS.md as it goes.

    If the loop crashes at 4 AM, the context files tell you exactly where to resume. You can also use ctx journal source to review the session transcripts.

    ","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#the-persistence-contract","level":3,"title":"The Persistence Contract","text":"

    The autonomous loop has an implicit contract:

    1. Every iteration reads context: TASKS.md, DECISIONS.md, LEARNINGS.md
    2. Every iteration writes context: task updates, new learnings, decisions
    3. Every commit includes .context/ so the next iteration sees changes
    4. Context stays current: if the loop stopped right now, nothing important is lost

    Break any part of this contract and the loop degrades.

    ","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#tips","level":2,"title":"Tips","text":"

    Markdown Is Not Enforcement

    Your real guardrails are permissions and isolation, not Markdown. CONSTITUTION.md can nudge the agent, but it is probabilistic.

    The permission allowlist and OS isolation are deterministic:

    For unattended runs, trust the sandbox and the allowlist, not the prose.

    • Start with a small iteration cap. Use --max-iterations 5 on your first run.
    • Keep tasks atomic. Each task should be completable in a single iteration.
    • Check signal discipline. If the loop runs forever, the agent is not emitting SYSTEM_CONVERGED or SYSTEM_BLOCKED. Make the signal requirement explicit in .context/loop.md.
    • Commit after context updates. Finish code, update .context/, commit including .context/, then signal.
    • Set up webhook notifications to get notified when the loop completes, hits max iterations, or when hooks fire nudges. The generated loop script includes ctx hook notify calls automatically.
    ","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#next-up","level":2,"title":"Next Up","text":"

    When to Use a Team of Agents →: Decision framework for choosing between a single agent, parallel worktrees, and a full agent team.

    ","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#see-also","level":2,"title":"See Also","text":"
    • Autonomous Loops: loop pattern, prompt templates, troubleshooting
    • CLI Reference: ctx loop: flags and options
    • CLI Reference: ctx watch: watch mode details
    • CLI Reference: ctx init: init flags
    • The Complete Session: interactive workflow
    • Tracking Work Across Sessions: structuring TASKS.md
    ","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/building-skills/","level":1,"title":"Building Project Skills","text":"","path":["Recipes","Agents and Automation","Building Project Skills"],"tags":[]},{"location":"recipes/building-skills/#the-problem","level":2,"title":"The Problem","text":"

    You have workflows your agent needs to repeat across sessions: a deploy checklist, a review protocol, a release process. Each time, you re-explain the steps. The agent gets it mostly right but forgets edge cases you corrected last time.

    Skills solve this by encoding domain knowledge into a reusable document the agent loads automatically when triggered. A skill is not code - it is a structured prompt that captures what took you sessions to learn.

    ","path":["Recipes","Agents and Automation","Building Project Skills"],"tags":[]},{"location":"recipes/building-skills/#tldr","level":2,"title":"TL;DR","text":"
    /ctx-skill-create\n

    The skill-creator walks you through: identify a repeating workflow, draft a skill, test with realistic prompts, iterate until it triggers correctly and produces good output.

    ","path":["Recipes","Agents and Automation","Building Project Skills"],"tags":[]},{"location":"recipes/building-skills/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Tool Type Purpose /ctx-skill-create Skill Interactive skill creation and improvement workflow ctx init Command Deploys template skills to .claude/skills/ on first setup","path":["Recipes","Agents and Automation","Building Project Skills"],"tags":[]},{"location":"recipes/building-skills/#the-workflow","level":2,"title":"The Workflow","text":"","path":["Recipes","Agents and Automation","Building Project Skills"],"tags":[]},{"location":"recipes/building-skills/#step-1-identify-a-repeating-pattern","level":3,"title":"Step 1: Identify a Repeating Pattern","text":"

    Good skill candidates:

    • Checklists you repeat: deploy steps, release prep, code review
    • Decisions the agent gets wrong: if you keep correcting the same behavior, encode the correction
    • Multi-step workflows: anything with a sequence of commands and conditional branches
    • Domain knowledge: project-specific terminology, architecture constraints, or conventions the agent cannot infer from code alone

    Not good candidates: one-off instructions, things the platform already handles (file editing, git operations), or tasks too narrow to reuse.

    ","path":["Recipes","Agents and Automation","Building Project Skills"],"tags":[]},{"location":"recipes/building-skills/#step-2-create-the-skill","level":3,"title":"Step 2: Create the Skill","text":"

    Invoke the skill-creator:

    You: \"I want a skill for our deploy process\"\n\nAgent: [Asks about the workflow: what steps, what tools,\n        what edge cases, what the output should look like]\n

    Or capture a workflow you just did:

    You: \"Turn what we just did into a skill\"\n\nAgent: [Extracts the steps from conversation history,\n        confirms understanding, drafts the skill]\n

    The skill-creator produces a SKILL.md file in .claude/skills/your-skill/.

    ","path":["Recipes","Agents and Automation","Building Project Skills"],"tags":[]},{"location":"recipes/building-skills/#step-3-test-with-realistic-prompts","level":3,"title":"Step 3: Test with Realistic Prompts","text":"

    The skill-creator proposes 2-3 test prompts - the kind of thing a real user would say. It runs each one and shows the result alongside a baseline (same prompt without the skill) so you can compare.

    Agent: \"Here are test prompts I'd try:\n        1. 'Deploy to staging'\n        2. 'Ship the hotfix'\n        3. 'Run the release checklist'\n        Want to adjust these?\"\n
    ","path":["Recipes","Agents and Automation","Building Project Skills"],"tags":[]},{"location":"recipes/building-skills/#step-4-iterate-on-the-description","level":3,"title":"Step 4: Iterate on the Description","text":"

    The description field in frontmatter determines when a skill triggers. Claude tends to undertrigger - descriptions need to be specific and slightly \"pushy\":

    # Weak - too vague, will undertrigger\ndescription: \"Use for deployments\"\n\n# Strong - covers situations and synonyms\ndescription: >-\n  Use when deploying to staging or production, running the release\n  checklist, or when the user says 'ship it', 'deploy this', or\n  'push to prod'. Also use after merging to main when a deploy\n  is expected.\n

    The skill-creator helps you tune this iteratively.

    ","path":["Recipes","Agents and Automation","Building Project Skills"],"tags":[]},{"location":"recipes/building-skills/#step-5-deploy-as-template-optional","level":3,"title":"Step 5: Deploy as Template (Optional)","text":"

    If the skill should be available to all projects (not just this one), place it in internal/assets/claude/skills/ so ctx init deploys it to new projects automatically.

    Most project-specific skills stay in .claude/skills/ and travel with the repo.

    ","path":["Recipes","Agents and Automation","Building Project Skills"],"tags":[]},{"location":"recipes/building-skills/#skill-anatomy","level":2,"title":"Skill Anatomy","text":"
    my-skill/\n  SKILL.md         # Required: frontmatter + instructions (<500 lines)\n  scripts/         # Optional: deterministic code the skill can execute\n  references/      # Optional: detail loaded on demand (not always)\n  assets/          # Optional: output templates, not loaded into context\n

    Key sections in SKILL.md:

    Section Purpose Required? Frontmatter Name, description (trigger) Yes When to Use Positive triggers Yes When NOT to Use Prevents false activations Yes Process Steps and commands Yes Examples Good/bad output pairs Recommended Quality Checklist Verify before reporting completion For complex skills","path":["Recipes","Agents and Automation","Building Project Skills"],"tags":[]},{"location":"recipes/building-skills/#tips","level":2,"title":"Tips","text":"
    • Description is everything. A great skill with a vague description never fires. Spend time on trigger coverage - synonyms, concrete situations, edge cases.
    • Stay under 500 lines. If your skill is growing past this, move detail into references/ files and point to them from SKILL.md.
    • Do not duplicate the platform. If the agent already knows how to do something (edit files, run git commands), do not restate it. Tag paragraphs as Expert/Activation/Redundant and delete Redundant ones.
    • Explain why, not just what. \"Sort by date because users want recent results first\" beats \"ALWAYS sort by date.\" The agent generalizes from reasoning better than from rigid rules.
    • Test negative triggers. Make sure the skill does not fire on unrelated prompts. A skill that activates too broadly becomes noise.
    ","path":["Recipes","Agents and Automation","Building Project Skills"],"tags":[]},{"location":"recipes/building-skills/#next-up","level":2,"title":"Next Up","text":"

    Parallel Agent Development with Git Worktrees →: Split work across multiple agents using git worktrees.

    ","path":["Recipes","Agents and Automation","Building Project Skills"],"tags":[]},{"location":"recipes/building-skills/#see-also","level":2,"title":"See Also","text":"
    • Skills Reference: full listing of all bundled and project-local skills
    • Guide Your Agent: how commands, skills, and conversational patterns work together
    • Design Before Coding: the four-skill chain for front-loading design work
    ","path":["Recipes","Agents and Automation","Building Project Skills"],"tags":[]},{"location":"recipes/claude-code-permissions/","level":1,"title":"Claude Code Permission Hygiene","text":"","path":["Recipes","Maintenance","Claude Code Permission Hygiene"],"tags":[]},{"location":"recipes/claude-code-permissions/#the-problem","level":2,"title":"The Problem","text":"

    Claude Code's .claude/settings.local.json controls what the agent can do without asking. Over time, this file accumulates one-off permissions from individual sessions: Exact commands with hardcoded paths, duplicate entries, and stale skill references.

    A noisy \"allowlist\" makes it harder to spot dangerous permissions and increases the surface area for unintended behavior.

    Since settings.local.json is .gitignored, it drifts independently of your codebase. There is no PR review, no CI check: just whatever you clicked \"Allow\" on.

    This recipe shows what a well-maintained permission file looks like and how to keep it clean.

    ","path":["Recipes","Maintenance","Claude Code Permission Hygiene"],"tags":[]},{"location":"recipes/claude-code-permissions/#tldr","level":2,"title":"TL;DR","text":"
    ctx init                            # seeds safe defaults\n/ctx-drift                          # detects missing/stale permissions\n/ctx-permission-sanitize            # audits for dangerous patterns\n

    See Recommended Defaults for the full list.

    ","path":["Recipes","Maintenance","Claude Code Permission Hygiene"],"tags":[]},{"location":"recipes/claude-code-permissions/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Command/Skill Role in this workflow ctx init Populates default ctx permissions /ctx-drift Detects missing or stale permission entries /ctx-permission-sanitize Audits for dangerous patterns (security-focused)","path":["Recipes","Maintenance","Claude Code Permission Hygiene"],"tags":[]},{"location":"recipes/claude-code-permissions/#recommended-defaults","level":2,"title":"Recommended Defaults","text":"

    After running ctx init, your settings.local.json will have the ctx defaults pre-populated. Here is an opinionated safe starting point for a Go project using ctx:

    {\n  \"permissions\": {\n    \"allow\": [\n      \"Bash(/tmp/ctx-*:*)\",\n      \"Bash(CGO_ENABLED=0 go build:*)\",\n      \"Bash(CGO_ENABLED=0 go test:*)\",\n      \"Bash(ctx:*)\",\n      \"Bash(git add:*)\",\n      \"Bash(git branch:*)\",\n      \"Bash(git check-ignore:*)\",\n      \"Bash(git checkout:*)\",\n      \"Bash(git commit:*)\",\n      \"Bash(git diff:*)\",\n      \"Bash(git log:*)\",\n      \"Bash(git remote:*)\",\n      \"Bash(git restore:*)\",\n      \"Bash(git show:*)\",\n      \"Bash(git stash:*)\",\n      \"Bash(git status:*)\",\n      \"Bash(git tag:*)\",\n      \"Bash(go build:*)\",\n      \"Bash(go fmt:*)\",\n      \"Bash(go test:*)\",\n      \"Bash(go vet:*)\",\n      \"Bash(golangci-lint run:*)\",\n      \"Bash(grep:*)\",\n      \"Bash(ls:*)\",\n      \"Bash(make:*)\",\n      \"Skill(ctx-convention-add)\",\n      \"Skill(ctx-decision-add)\",\n      \"Skill(ctx-learning-add)\",\n      \"Skill(ctx-task-add)\",\n      \"Skill(ctx-agent)\",\n      \"Skill(ctx-archive)\",\n      \"Skill(ctx-blog)\",\n      \"Skill(ctx-blog-changelog)\",\n      \"Skill(absorb)\",\n      \"Skill(ctx-commit)\",\n      \"Skill(ctx-drift)\",\n      \"Skill(ctx-implement)\",\n      \"Skill(ctx-journal-enrich)\",\n      \"Skill(ctx-journal-enrich-all)\",\n      \"Skill(ctx-loop)\",\n      \"Skill(ctx-next)\",\n      \"Skill(ctx-pad)\",\n      \"Skill(ctx-prompt-audit)\",\n      \"Skill(ctx-history)\",\n      \"Skill(ctx-reflect)\",\n      \"Skill(ctx-remember)\",\n      \"Skill(ctx-status)\",\n      \"Skill(ctx-worktree)\",\n      \"WebSearch\"\n    ],\n    \"deny\": [\n      \"Bash(sudo *)\",\n      \"Bash(git push *)\",\n      \"Bash(git push)\",\n      \"Bash(rm -rf /*)\",\n      \"Bash(rm -rf ~*)\",\n      \"Bash(curl *)\",\n      \"Bash(wget *)\",\n      \"Bash(chmod 777 *)\",\n      \"Read(**/.env)\",\n      \"Read(**/.env.*)\",\n      \"Read(**/*credentials*)\",\n      \"Read(**/*secret*)\",\n      \"Read(**/*.pem)\",\n      \"Read(**/*.key)\",\n      
\"Edit(**/.env)\",\n      \"Edit(**/.env.*)\"\n    ]\n  }\n}\n

    This Is a Starting Point, Not a Mandate

    Your project may need more or fewer entries.

    The goal is intentional permissions: Every entry should be there because you decided it belongs, not because you clicked \"Allow\" once during debugging.

    ","path":["Recipes","Maintenance","Claude Code Permission Hygiene"],"tags":[]},{"location":"recipes/claude-code-permissions/#design-principles","level":3,"title":"Design Principles","text":"

    Use wildcards for trusted binaries: If you trust the binary (your own project's CLI, make, go), a single wildcard like Bash(ctx:*) beats twenty subcommand entries. It reduces noise and means new subcommands work without re-prompting.

    Keep git commands granular: Unlike ctx or make, git has both safe commands (git log, git status) and destructive ones (git reset --hard, git clean -f). Listing safe commands individually prevents accidentally pre-approving dangerous ones.

    Pre-approve all ctx- skills: Skills shipped with ctx (Skill(ctx-*)) are safe to pre-approve. They are part of your project and you control their content. This prevents the agent from prompting on every skill invocation.

    ","path":["Recipes","Maintenance","Claude Code Permission Hygiene"],"tags":[]},{"location":"recipes/claude-code-permissions/#default-deny-rules","level":3,"title":"Default Deny Rules","text":"

    ctx init automatically populates permissions.deny with rules that block dangerous operations. Deny rules are evaluated before allow rules: A denied pattern always prompts the user, even if it also matches an allow entry.

    The defaults block:

    Pattern Why Bash(sudo *) Cannot enter password; will hang Bash(git push *) Must be explicit user action Bash(rm -rf /*) etc. Recursive delete of system/home directories Bash(curl *) / wget Arbitrary network requests Bash(chmod 777 *) World-writable permissions Read/Edit(**/.env*) Secrets and credentials Read(**/*.pem, *.key) Private keys

    Read/Edit Deny Rules

    Read() and Edit() deny rules have known upstream enforcement issues (claude-code#6631, #24846).

    They are included as defense-in-depth and intent documentation.

    Blocked by default deny rules: no action needed, ctx init handles these:

    Pattern Risk Bash(git push:*) Must be explicit user action Bash(sudo:*) Privilege escalation Bash(rm -rf:*) Recursive delete with no confirmation Bash(curl:*) / Bash(wget:*) Arbitrary network requests

    Requires manual discipline: Never add these to allow:

    Pattern Risk Bash(git reset:*) Can discard uncommitted work Bash(git clean:*) Deletes untracked files Skill(ctx-permission-sanitize) Edits this file: self-modification vector Skill(release) Runs the release pipeline: high impact","path":["Recipes","Maintenance","Claude Code Permission Hygiene"],"tags":[]},{"location":"recipes/claude-code-permissions/#hooks-regex-safety-net","level":2,"title":"Hooks: Regex Safety Net","text":"

    Deny rules handle prefix-based blocking natively. Hooks complement them by catching patterns that require regex matching: Things deny rules can't express.

    The ctx plugin ships these blocking hooks:

    Hook What it blocks ctx system block-non-path-ctx Running ctx from wrong path

    Project-local hooks (not part of the plugin) catch regex edge cases:

    Hook What it blocks block-dangerous-commands.sh Mid-command sudo/git push (after &&), copies to bin dirs, absolute-path ctx

    Pre-Approved + Hook-Blocked = Silent Block

    If you pre-approve a command that a hook blocks, the user never sees the confirmation dialog. The agent gets a block response and must handle it, which is confusing.

    It's better not to pre-approve commands that hooks are designed to intercept.

    ","path":["Recipes","Maintenance","Claude Code Permission Hygiene"],"tags":[]},{"location":"recipes/claude-code-permissions/#the-maintenance-workflow","level":2,"title":"The Maintenance Workflow","text":"","path":["Recipes","Maintenance","Claude Code Permission Hygiene"],"tags":[]},{"location":"recipes/claude-code-permissions/#after-busy-sessions","level":3,"title":"After Busy Sessions","text":"

    Permissions accumulate fastest during debugging and exploration sessions. After a session where you clicked \"Allow\" many times:

    1. Open .claude/settings.local.json in your editor;
    2. Look for entries at the bottom of the allowlist (new entries append there);
    3. Delete anything that looks session-specific:
      • Exact commands with hardcoded paths,
      • Commands with literal string arguments,
      • Entries that duplicate an existing wildcard.

    See the Sanitize Permissions runbook for a step-by-step procedure.

    ","path":["Recipes","Maintenance","Claude Code Permission Hygiene"],"tags":[]},{"location":"recipes/claude-code-permissions/#periodically","level":3,"title":"Periodically","text":"

    Run /ctx-drift to catch permission drift:

    • Missing Bash(ctx:*) wildcard;
    • Missing Skill(ctx-*) entries for installed skills;
    • Stale Skill(ctx-*) entries for removed skills;
    • Granular Bash(ctx <subcommand>:*) entries that should be consolidated.

    Run /ctx-permission-sanitize to catch security issues:

    • Hook bypass patterns
    • Destructive commands
    • Overly broad permissions
    • Injection vectors
    ","path":["Recipes","Maintenance","Claude Code Permission Hygiene"],"tags":[]},{"location":"recipes/claude-code-permissions/#when-adding-new-skills","level":3,"title":"When Adding New Skills","text":"

    If you create a custom ctx-* skill, add its Skill() entry to the allowlist manually.

    ctx init only populates the default permissions: It won't pick up custom skills.

    ","path":["Recipes","Maintenance","Claude Code Permission Hygiene"],"tags":[]},{"location":"recipes/claude-code-permissions/#golden-image-snapshots","level":3,"title":"Golden Image Snapshots","text":"

    If manual cleanup is too tedious, use a golden image to automate it:

    Snapshot a curated permission set, then restore at session start to automatically drop session-accumulated permissions. See the Permission Snapshots recipe for the full workflow.

    ","path":["Recipes","Maintenance","Claude Code Permission Hygiene"],"tags":[]},{"location":"recipes/claude-code-permissions/#adapting-for-other-languages","level":2,"title":"Adapting for Other Languages","text":"

    The recommended defaults above are Go-specific. For other stacks, swap the build/test tooling:

    Node.js / TypeScript:

    \"Bash(npm run:*)\",\n\"Bash(npm test:*)\",\n\"Bash(npx:*)\",\n\"Bash(node:*)\"\n

    Python:

    \"Bash(pytest:*)\",\n\"Bash(python:*)\",\n\"Bash(pip show:*)\",\n\"Bash(ruff:*)\"\n

    Rust:

    \"Bash(cargo build:*)\",\n\"Bash(cargo test:*)\",\n\"Bash(cargo clippy:*)\",\n\"Bash(cargo fmt:*)\"\n

    The ctx, git, and skill entries remain the same across all stacks.

    ","path":["Recipes","Maintenance","Claude Code Permission Hygiene"],"tags":[]},{"location":"recipes/claude-code-permissions/#next-up","level":2,"title":"Next Up","text":"

    Permission Snapshots →: Save and restore permission baselines for reproducible setups.

    ","path":["Recipes","Maintenance","Claude Code Permission Hygiene"],"tags":[]},{"location":"recipes/claude-code-permissions/#see-also","level":2,"title":"See Also","text":"
    • Setting Up ctx Across AI Tools: full setup recipe including settings.local.json creation
    • Context Health: keeping .context/ files accurate
    • Sanitize Permissions runbook: manual cleanup procedure
    ","path":["Recipes","Maintenance","Claude Code Permission Hygiene"],"tags":[]},{"location":"recipes/configuration-profiles/","level":1,"title":"Configuration Profiles","text":"","path":["Recipes","Maintenance","Configuration Profiles"],"tags":[]},{"location":"recipes/configuration-profiles/#configuration-profiles","level":1,"title":"Configuration Profiles","text":"

    Switch between dev and base runtime configurations without editing .ctxrc by hand. Useful when you want verbose logging and webhook notifications during development, then clean defaults for normal sessions.

    Uses: ctx config switch, ctx config status, /ctx-config

    ","path":["Recipes","Maintenance","Configuration Profiles"],"tags":[]},{"location":"recipes/configuration-profiles/#how-it-works","level":2,"title":"How It Works","text":"

    The ctx repo ships two source profiles committed to git:

    File Profile Description .ctxrc.base base All defaults, notifications off .ctxrc.dev dev Verbose logging, webhook notifications on

    The working copy (.ctxrc) is gitignored. Switching profiles copies the source file over .ctxrc, so your runtime configuration is always a clean snapshot of one of the two sources.

    ","path":["Recipes","Maintenance","Configuration Profiles"],"tags":[]},{"location":"recipes/configuration-profiles/#switching-profiles","level":2,"title":"Switching Profiles","text":"
    # Switch to dev (verbose logging, notifications)\nctx config switch dev\n\n# Switch to base (defaults)\nctx config switch base\n\n# Toggle to the opposite profile\nctx config switch\n\n# \"prod\" is an alias for \"base\"\nctx config switch prod\n

    The detection heuristic checks for an uncommented notify: line in .ctxrc: present means dev, absent means base.

    ","path":["Recipes","Maintenance","Configuration Profiles"],"tags":[]},{"location":"recipes/configuration-profiles/#checking-the-active-profile","level":2,"title":"Checking the Active Profile","text":"
    ctx config status\n

    Output examples:

    active: dev (verbose logging enabled)\nactive: base (defaults)\nactive: none (.ctxrc does not exist)\n
    ","path":["Recipes","Maintenance","Configuration Profiles"],"tags":[]},{"location":"recipes/configuration-profiles/#typical-workflow","level":2,"title":"Typical Workflow","text":"
    1. Start of a debugging session: switch to dev for verbose logging and webhook notifications so you can trace hook activity and get push alerts.
    ctx config switch dev\n
    1. Work through the issue: hooks log verbosely, webhooks fire on key events (commits, ceremony nudges, drift warnings).

    2. Done debugging: switch back to base to silence the noise.

    ctx config switch base\n
    ","path":["Recipes","Maintenance","Configuration Profiles"],"tags":[]},{"location":"recipes/configuration-profiles/#customizing-profiles","level":2,"title":"Customizing Profiles","text":"

    Edit the source files directly:

    • .ctxrc.dev: add any .ctxrc keys you want active during development (e.g., log_level: debug, notify.events, notify.webhook_url).
    • .ctxrc.base: keep this minimal. It represents your \"production\" defaults.

    After editing a source file, re-run ctx config switch <profile> to apply the changes to the working copy.

    Commit Your Profiles

    Both .ctxrc.base and .ctxrc.dev should be committed to git so team members share the same profile definitions. The working copy .ctxrc stays gitignored.

    ","path":["Recipes","Maintenance","Configuration Profiles"],"tags":[]},{"location":"recipes/configuration-profiles/#using-the-skill","level":2,"title":"Using the Skill","text":"

    In a Claude Code session, say any of:

    • \"switch to dev mode\"
    • \"switch to base\"
    • \"what profile am I on?\"
    • \"toggle verbose logging\"

    The /ctx-config skill handles the rest.

    See also: ctx config reference, Configuration

    ","path":["Recipes","Maintenance","Configuration Profiles"],"tags":[]},{"location":"recipes/context-health/","level":1,"title":"Detecting and Fixing Drift","text":"","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/context-health/#the-problem","level":2,"title":"The Problem","text":"

    ctx files drift: you rename a package, delete a module, or finish a sprint, and suddenly ARCHITECTURE.md references paths that no longer exist, TASKS.md is 80 percent completed checkboxes, and CONVENTIONS.md describes patterns you stopped using two months ago.

    Stale context is worse than no context:

    An AI tool that trusts outdated references will hallucinate confidently.

    This recipe shows how to detect drift, fix it, and keep your .context/ directory lean and accurate.

    ","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/context-health/#tldr","level":2,"title":"TL;DR","text":"
    ctx drift                      # detect problems\nctx drift --fix                # auto-fix the easy ones\nctx sync --dry-run && ctx sync # reconcile after refactors\nctx compact --archive          # archive old completed tasks\nctx fmt                        # normalize line widths\nctx status                     # verify\n

    Or just ask your agent: \"Is our context clean?\"

    ","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/context-health/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Tool Type Purpose ctx drift Command Detect stale paths, missing files, violations ctx drift --fix Command Auto-fix simple issues ctx sync Command Reconcile context with codebase structure ctx compact Command Archive completed tasks, clean up empty sections ctx fmt Command Normalize context files to 80-char line width ctx status Command Quick health overview /ctx-drift Skill Structural plus semantic drift detection /ctx-architecture Skill Refresh ARCHITECTURE.md from actual codebase /ctx-status Skill In-session context summary /ctx-prompt-audit Skill Audit prompt quality and token efficiency","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/context-health/#the-workflow","level":2,"title":"The Workflow","text":"

    The best way to maintain context health is conversational: Ask your agent, guide it, and let it detect problems, explain them, and fix them with your approval. CLI commands exist for CI pipelines, scripting, and fine-grained control.

    For day-to-day maintenance, talk to your agent.

    Your Questions Reinforce the Pattern

    Asking \"is our context clean?\" does two things:

    • It triggers a drift check right now
    • It reinforces the habit

    This is reinforcement, not enforcement.

    Do not wait for the agent to be proactive on its own:

    Guide your agent, especially in early sessions.

    Over time, you will ask less and the agent will start offering more.

    ","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/context-health/#step-1-ask-your-agent","level":3,"title":"Step 1: Ask Your Agent","text":"

    The simplest way to check context health:

    Is our context clean?\nAnything stale?\nHow healthy are our context files?\n

    Or invoke the skill directly:

    /ctx-drift\n

    The agent performs two layers of analysis:

    Layer 1, structural checks (via ctx drift): Dead paths, missing files, completed task counts, constitution violations. Fast and programmatic.

    Layer 2, semantic analysis (agent-driven): Does CONVENTIONS.md describe patterns the code no longer follows? Does DECISIONS.md contain entries whose rationale no longer applies? Are there learnings about bugs that are now fixed? This is where the agent adds value the CLI cannot: It reads both context files and source code and compares them.

    The agent reports both layers together, explains each finding in plain language, and offers to fix what it can.

    ","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/context-health/#step-2-maintenance-at-session-start","level":3,"title":"Step 2: Maintenance at Session Start","text":"

    You do not need to ask explicitly.

    Using Claude Code

    ctx ships with Claude Code hooks that remind the agent at the right time to take initiative.

    Checking context health at the session start, offering to persist learnings before you quit, and flagging drift when it matters. The agent stays proactive without you having to prompt it:

    Agent: Good morning. I've loaded the context files. A few things\n       before we start:\n\n       - ARCHITECTURE.md references `pkg/auth/` which is now empty\n       - DECISIONS.md hasn't been updated in 40 days\n       - There are 18 completed tasks ready for archival\n\n       Want me to run a quick maintenance pass, or should we jump\n       straight into today's work?\n

    ☝️ This is what persistent, initiative-driven sessions feel like when context is treated as a system instead of a prompt.

    If the agent does not offer this on its own, a gentle nudge is enough:

    Anything stale before we start?\nHow's the context looking?\n

    This turns maintenance from a scheduled chore into a conversation that happens when it matters.

    ","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/context-health/#step-3-real-time-detection-during-work","level":3,"title":"Step 3: Real-Time Detection during Work","text":"

    Agents can notice drift while working: When a mismatch is directly in the path of their current task. If an agent reads ARCHITECTURE.md to find where to add a handler and internal/handlers/ doesn't exist, it will notice because the stale reference blocks its work:

    Agent: ARCHITECTURE.md references `internal/handlers/` but that directory\n       doesn't exist. I'll look at the actual source tree to find where\n       handlers live now.\n

    This happens reliably when the drift intersects the task. What is less reliable is the agent generalizing from one mismatch to \"there might be more stale references; let me run drift detection.\" That leap requires the agent to know /ctx-drift exists and to decide the current task should pause for maintenance.

    If you want that behavior, reinforce it:

    Good catch. Yes, run /ctx-drift and clean up any other stale references.\n

    Over time, agents that have seen this pattern will start offering proactively. But do not expect it from a cold start.

    ","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/context-health/#step-4-archival-and-cleanup","level":3,"title":"Step 4: Archival and Cleanup","text":"

    ctx drift detects when TASKS.md has more than 10 completed items and flags it as a staleness warning. Running ctx drift --fix archives completed tasks automatically.

    You can also run /ctx-archive to compact on demand.

    ","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/context-health/#knowledge-health-flow","level":3,"title":"Knowledge Health Flow","text":"

    Over time, LEARNINGS.md and DECISIONS.md accumulate entries that overlap or partially repeat each other. The check-persistence hook detects when entry counts exceed a configurable threshold and surfaces a nudge:

    \"LEARNINGS.md has 25+ entries. Consider running /ctx-consolidate to merge overlapping items.\"

    The consolidation workflow:

    1. Review: /ctx-consolidate groups entries by keyword similarity and presents candidate merges for your approval.
    2. Merge: Approved groups are combined into single entries that preserve the key information from each original.
    3. Archive: Originals move to .context/archive/ rather than being deleted -- the full history is preserved in git and the archive directory.
    4. Verify: Run ctx drift after consolidation to confirm no cross-references were broken by the merge.

    This replaces ad-hoc cleanup with a repeatable, nudge-driven cycle: detect accumulation, review candidates, merge with approval, archive originals.

    See also: Knowledge Capture for the recording workflow that feeds into this maintenance cycle.

    ","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/context-health/#ctx-doctor-the-superset-check","level":2,"title":"ctx doctor: The Superset Check","text":"

    ctx doctor combines drift detection with hook auditing, configuration checks, event logging status, and token size reporting in a single command. If you want one command that covers structural health, hooks, and state:

    ctx doctor          # everything in one pass\nctx doctor --json   # machine-readable for scripting\n

    Use /ctx-doctor Too

    For agent-driven diagnosis that adds semantic analysis on top of the structural checks, use /ctx-doctor.

    See the Troubleshooting recipe for the full workflow.

    ","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/context-health/#cli-reference","level":2,"title":"CLI Reference","text":"

    The conversational approach above uses CLI commands under the hood. When you need direct control, use the commands directly.

    ","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/context-health/#ctx-drift","level":3,"title":"ctx drift","text":"

    Scan context files for structural problems:

    ctx drift\n

    Sample output:

    Drift Report\n============\n\nWarnings (3):\n  ARCHITECTURE.md:14  path \"internal/api/router.go\" does not exist\n  ARCHITECTURE.md:28  path \"pkg/auth/\" directory is empty\n  CONVENTIONS.md:9    path \"internal/handlers/\" not found\n\nViolations (1):\n  TASKS.md            31 completed tasks (recommend archival)\n\nStaleness:\n  DECISIONS.md        last modified 45 days ago\n  LEARNINGS.md        last modified 32 days ago\n\nExit code: 1 (warnings found)\n
    Level Meaning Action Warning Stale path references, missing files Fix or remove Violation Constitution rule heuristic failures, heavy clutter Fix soon Staleness Files not updated recently Review content

    Exit codes: 0 equals clean, 1 equals warnings, 3 equals violations.

    For CI integration:

    ctx drift --json | jq '.warnings | length'\n
    ","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/context-health/#ctx-drift-fix","level":3,"title":"ctx drift --fix","text":"

    Auto-fix mechanical issues:

    ctx drift --fix\n

    This handles removing dead path references, updating unambiguous renames, clearing empty sections. Issues requiring judgment are flagged but left for you.

    Run ctx drift again afterward to confirm what remains.

    ","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/context-health/#ctx-sync","level":3,"title":"ctx sync","text":"

    After a refactor, reconcile context with the actual codebase structure:

    ctx sync --dry-run   # preview first\nctx sync             # apply\n

    ctx sync scans for structural changes, compares with ARCHITECTURE.md, checks for new dependencies worth documenting, and identifies context referring to code that no longer exists.

    ","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/context-health/#ctx-compact","level":3,"title":"ctx compact","text":"

    Consolidate completed tasks and clean up empty sections:

    ctx compact            # move completed tasks to Completed section,\n                       # remove empty sections\nctx compact --archive  # also archive old tasks to .context/archive/\n
    • Tasks: moves completed items (with all subtasks done) into the Completed section of TASKS.md
    • All files: removes empty sections left behind
    • With --archive: writes tasks older than 7 days to .context/archive/tasks-YYYY-MM-DD.md

    Without --archive, nothing is deleted: Tasks are reorganized in place.

    ","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/context-health/#ctx-fmt","level":3,"title":"ctx fmt","text":"

    Normalize context file line widths:

    ctx fmt              # wrap long lines to 80 chars\nctx fmt --check      # CI: exit 1 if files need formatting\n

    Long task descriptions, decision rationale, and learning entries accumulate as single-line entries. ctx fmt wraps them at word boundaries with 2-space continuation indent for list items. Headings, tables, and comments are preserved.

    Idempotent: safe to run repeatedly.

    ","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/context-health/#ctx-status","level":3,"title":"ctx status","text":"

    Quick health overview:

    ctx status --verbose\n

    Shows file counts, token estimates, modification times, and drift warnings in a single glance.

    ","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/context-health/#ctx-prompt-audit","level":3,"title":"/ctx-prompt-audit","text":"

    Checks whether your context files are readable, compact, and token-efficient for the model.

    /ctx-prompt-audit\n
    ","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/context-health/#putting-it-all-together","level":2,"title":"Putting It All Together","text":"

    Conversational approach (recommended):

    Is our context clean?  -> agent runs structural plus semantic checks\nFix what you can       -> agent auto-fixes and proposes edits\nArchive the done tasks -> agent runs ctx compact --archive\nHow's token usage?     -> agent checks ctx status\n

    CLI approach (for CI, scripts, or direct control):

    ctx drift                      # 1. Detect problems\nctx drift --fix                # 2. Auto-fix the easy ones\nctx sync --dry-run && ctx sync # 3. Reconcile after refactors\nctx compact --archive          # 4. Archive old completed tasks\nctx fmt                        # 5. Normalize line widths\nctx status                     # 6. Verify\n
    ","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/context-health/#tips","level":2,"title":"Tips","text":"

    Agents cross-reference context files with source code during normal work. When drift intersects their current task, they will notice: a renamed package, a deleted directory, a path that doesn't resolve. But they rarely generalize from one mismatch to a full audit on their own. Reinforce the pattern: when an agent mentions a stale reference, ask it to run /ctx-drift. Over time, it starts offering.

    When an agent says \"this reference looks stale,\" it is usually right.

    Semantic drift is more damaging than structural drift: ctx drift catches dead paths. But CONVENTIONS.md describing a pattern your code stopped following three weeks ago is worse. When you ask \"is our context clean?\", the agent can do both checks.

    Use ctx status as a quick check: It shows file counts, token estimates, and drift warnings in a single glance. Good for a fast \"is everything ok?\" before diving into work.

    Drift detection in CI: add ctx drift --json to your CI pipeline and fail on exit code 3 (violations). This catches constitution-level problems before they reach upstream.

    Do not over-compact: Completed tasks have historical value. The --archive flag preserves them in .context/archive/ so you can search past work without cluttering active context.

    Sync is cautious by default: Use --dry-run after large refactors, then apply.

    ","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/context-health/#next-up","level":2,"title":"Next Up","text":"

    Claude Code Permission Hygiene →: Recommended permission defaults and maintenance workflow for Claude Code.

    ","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/context-health/#see-also","level":2,"title":"See Also","text":"
    • Troubleshooting: full diagnostic workflow using ctx doctor, event logs, and /ctx-doctor
    • Tracking Work Across Sessions: task lifecycle and archival
    • Persisting Decisions, Learnings, and Conventions: keeping knowledge files current
    • The Complete Session: where maintenance fits in the daily workflow
    • CLI Reference: full flag documentation for all commands
    • Context Files: structure and purpose of each .context/ file
    ","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/customizing-hook-messages/","level":1,"title":"Customizing Hook Messages","text":"","path":["Recipes","Hooks and Notifications","Customizing Hook Messages"],"tags":[]},{"location":"recipes/customizing-hook-messages/#the-problem","level":2,"title":"The Problem","text":"

    ctx hooks speak ctx's language, not your project's. The QA gate says \"lint the ENTIRE project\" and \"make build,\" but your Python project uses pytest and ruff. The post-commit nudge suggests running lints, but your project uses npm test. You could remove the hook entirely, but then you lose the logic (counting, state tracking, adaptive frequency) just to change the words.

    How do you customize what hooks say without removing what they do?

    ","path":["Recipes","Hooks and Notifications","Customizing Hook Messages"],"tags":[]},{"location":"recipes/customizing-hook-messages/#tldr","level":2,"title":"TL;DR","text":"
    ctx hook message list                     # see all hooks and their messages\nctx hook message show qa-reminder gate    # view the current template\nctx hook message edit qa-reminder gate    # copy default to .context/ for editing\nctx hook message reset qa-reminder gate   # revert to embedded default\n
    ","path":["Recipes","Hooks and Notifications","Customizing Hook Messages"],"tags":[]},{"location":"recipes/customizing-hook-messages/#commands-used","level":2,"title":"Commands Used","text":"Tool Type Purpose ctx hook message list CLI command Show all hook messages with category and override status ctx hook message show CLI command Print the effective message template ctx hook message edit CLI command Copy embedded default to .context/ for editing ctx hook message reset CLI command Delete user override, revert to default","path":["Recipes","Hooks and Notifications","Customizing Hook Messages"],"tags":[]},{"location":"recipes/customizing-hook-messages/#how-it-works","level":2,"title":"How It Works","text":"

    Hook messages use a 3-tier fallback:

    1. User override: .context/hooks/messages/{hook}/{variant}.txt
    2. Embedded default: compiled into the ctx binary
    3. Hardcoded fallback: belt-and-suspenders safety net

    The hook logic (when to fire, counting, state tracking, cooldowns) is unchanged. Only the content (what text gets emitted) comes from the template. You customize what the hook says without touching how it decides to speak.

    ","path":["Recipes","Hooks and Notifications","Customizing Hook Messages"],"tags":[]},{"location":"recipes/customizing-hook-messages/#finding-the-original-templates","level":3,"title":"Finding the Original Templates","text":"

    The default templates live in the ctx source tree at:

    internal/assets/hooks/messages/{hook}/{variant}.txt\n

    You can also browse them on GitHub: internal/assets/hooks/messages/

    Or use ctx hook message show to print any template without digging through source code:

    ctx hook message show qa-reminder gate        # QA gate instructions\nctx hook message show check-persistence nudge  # persistence nudge\nctx hook message show post-commit nudge        # post-commit reminder\n

    The show output includes the template source and available variables -- everything you need to write a replacement.

    ","path":["Recipes","Hooks and Notifications","Customizing Hook Messages"],"tags":[]},{"location":"recipes/customizing-hook-messages/#template-variables","level":3,"title":"Template Variables","text":"

    Some messages use Go text/template variables for dynamic content:

    No context files updated in {{.PromptsSinceNudge}}+ prompts.\nHave you discovered learnings, made decisions,\nestablished conventions, or completed tasks\nworth persisting?\n

    The show and edit commands list available variables for each message. When writing a replacement, keep the same {{.VariableName}} placeholders to preserve dynamic content. Variables that you omit render as <no value>: no error, but the output may look odd.

    ","path":["Recipes","Hooks and Notifications","Customizing Hook Messages"],"tags":[]},{"location":"recipes/customizing-hook-messages/#intentional-silence","level":3,"title":"Intentional Silence","text":"

    An empty template file (0 bytes or whitespace-only) means \"don't emit a message\". The hook still runs its logic but produces no output. This lets you silence specific messages without removing the hook from hooks.json.

    ","path":["Recipes","Hooks and Notifications","Customizing Hook Messages"],"tags":[]},{"location":"recipes/customizing-hook-messages/#example-python-project-qa-gate","level":2,"title":"Example: Python Project QA Gate","text":"

    The default QA gate says \"lint the ENTIRE project\" and references make lint. For a Python project, you want pytest and ruff:

    # See the current default\nctx hook message show qa-reminder gate\n\n# Copy it to .context/ for editing\nctx hook message edit qa-reminder gate\n\n# Edit the override\n

    Replace the content in .context/hooks/messages/qa-reminder/gate.txt:

    HARD GATE! DO NOT COMMIT without completing ALL of these steps first:\n(1) Run the full test suite: pytest -x\n(2) Run the linter: ruff check .\n(3) Verify a clean working tree\nRun tests and linter BEFORE every git commit, no exceptions.\n

    The hook still fires on every Edit call. The logic is identical. Only the instructions changed.

    ","path":["Recipes","Hooks and Notifications","Customizing Hook Messages"],"tags":[]},{"location":"recipes/customizing-hook-messages/#example-silencing-ceremony-nudges","level":2,"title":"Example: Silencing Ceremony Nudges","text":"

    The ceremony check nudges you to use /ctx-remember and /ctx-wrap-up. If your team has a different workflow and finds these noisy:

    ctx hook message edit check-ceremonies both\nctx hook message edit check-ceremonies remember\nctx hook message edit check-ceremonies wrapup\n

    Then empty each file:

    echo -n \"\" > .context/hooks/messages/check-ceremonies/both.txt\necho -n \"\" > .context/hooks/messages/check-ceremonies/remember.txt\necho -n \"\" > .context/hooks/messages/check-ceremonies/wrapup.txt\n

    The hooks still track ceremony usage internally, but they no longer emit any visible output.

    ","path":["Recipes","Hooks and Notifications","Customizing Hook Messages"],"tags":[]},{"location":"recipes/customizing-hook-messages/#example-javascript-project-post-commit","level":2,"title":"Example: JavaScript Project Post-Commit","text":"

    The default post-commit nudge mentions generic \"lints and tests.\" For a JavaScript project:

    ctx hook message edit post-commit nudge\n

    Replace with:

    Commit succeeded. 1. Offer context capture to the user: Decision (design\nchoice?), Learning (gotcha?), or Neither. 2. Ask the user: \"Want me to\nrun npm test and eslint before you push?\" Do NOT push. The user pushes\nmanually.\n
    ","path":["Recipes","Hooks and Notifications","Customizing Hook Messages"],"tags":[]},{"location":"recipes/customizing-hook-messages/#the-two-categories","level":2,"title":"The Two Categories","text":"

    Not all messages are equal. The list command shows each message's category:

    ","path":["Recipes","Hooks and Notifications","Customizing Hook Messages"],"tags":[]},{"location":"recipes/customizing-hook-messages/#customizable-17-messages","level":3,"title":"Customizable (17 Messages)","text":"

    Messages that are opinions: project-specific wording that benefits from customization. These are the primary targets for override.

    Hook Variant Description check-backup-age warning Backup staleness warning check-freshness stale Technology constant freshness warning check-ceremonies both Both ceremonies missing check-ceremonies remember Start-of-session ceremony check-ceremonies wrapup End-of-session ceremony check-context-size checkpoint Context capacity warning check-context-size oversize Injection oversize nudge check-context-size window Context window usage warning (>80%) check-journal both Unimported sessions + unenriched entries check-journal unenriched Unenriched journal entries check-journal unimported Unimported sessions check-knowledge warning Knowledge file growth check-map-staleness stale Architecture map staleness check-persistence nudge Context persistence nudge post-commit nudge Post-commit context capture qa-reminder gate Pre-commit QA gate","path":["Recipes","Hooks and Notifications","Customizing Hook Messages"],"tags":[]},{"location":"recipes/customizing-hook-messages/#ctx-specific-10-messages","level":3,"title":"ctx-Specific (10 Messages)","text":"

    Messages specific to ctx's own development workflow. You can customize them, but edit will warn you first.

    Hook Variant Description block-dangerous-commands cp-to-bin Block copy to bin dirs block-dangerous-commands install-to-local-bin Block copy to ~/.local/bin block-dangerous-commands mid-git-push Block git push block-dangerous-commands mid-sudo Block sudo block-non-path-ctx absolute-path Block absolute path invocation block-non-path-ctx dot-slash Block ./ctx invocation block-non-path-ctx go-run Block go run invocation check-reminders reminders Pending reminders relay check-resources alert Resource pressure alert check-version key-rotation Key rotation nudge check-version mismatch Version mismatch","path":["Recipes","Hooks and Notifications","Customizing Hook Messages"],"tags":[]},{"location":"recipes/customizing-hook-messages/#template-variables-reference","level":2,"title":"Template Variables Reference","text":"Hook Variant Variables check-backup-age warning {{.Warnings}} check-freshness stale {{.StaleFiles}} check-context-size checkpoint (none) check-context-size oversize {{.TokenCount}} check-context-size window {{.TokenCount}}, {{.Percentage}} check-ceremonies both, remember, wrapup (none) check-journal both {{.UnimportedCount}}, {{.UnenrichedCount}} check-journal unenriched {{.UnenrichedCount}} check-journal unimported {{.UnimportedCount}} check-knowledge warning {{.FileWarnings}} check-map-staleness stale {{.LastRefreshDate}}, {{.ModuleCount}} check-persistence nudge {{.PromptsSinceNudge}} check-reminders reminders {{.ReminderList}} check-resources alert {{.AlertMessages}} check-version key-rotation {{.KeyAgeDays}} check-version mismatch {{.BinaryVersion}}, {{.PluginVersion}} post-commit nudge (none) qa-reminder gate (none) block-dangerous-commands all variants (none) block-non-path-ctx all variants (none)

    Templates that reference undefined variables render <no value>: no error, graceful degradation.

    ","path":["Recipes","Hooks and Notifications","Customizing Hook Messages"],"tags":[]},{"location":"recipes/customizing-hook-messages/#tips","level":2,"title":"Tips","text":"
    • Override files are version-controlled: they live in .context/ alongside your other context files. Team members get the same customized messages.
    • Start with show: always check the current default before editing. The embedded template is the baseline your override replaces.
    • Use reset to undo: if a customization causes confusion, reset reverts to the embedded default instantly.
    • Empty file = silence: you don't need to delete the hook. An empty override file silences the message while preserving the hook's logic.
    • JSON output for scripting: ctx hook message list --json returns structured data for automation.
    ","path":["Recipes","Hooks and Notifications","Customizing Hook Messages"],"tags":[]},{"location":"recipes/customizing-hook-messages/#see-also","level":2,"title":"See Also","text":"
    • Hook Output Patterns: understanding VERBATIM relays, agent directives, and hard gates
    • Auditing System Hooks: verifying hooks are running and auditing their output
    • Configuration: project-level settings via .ctxrc
    ","path":["Recipes","Hooks and Notifications","Customizing Hook Messages"],"tags":[]},{"location":"recipes/design-before-coding/","level":1,"title":"Design Before Coding","text":"","path":["Recipes","Knowledge and Tasks","Design Before Coding"],"tags":[]},{"location":"recipes/design-before-coding/#the-problem","level":2,"title":"The Problem","text":"

    You start coding a feature. Halfway through, you realize the approach doesn't handle a key edge case. You refactor. Then you discover the CLI interface doesn't fit the existing patterns. More refactoring.

    The design work happened during implementation, mixed in with debugging and trial-and-error. The result works, but the spec was never written down, the trade-offs were never recorded, and the next session has no idea why things are shaped this way.

    How do you front-load design so the implementation is straightforward?

    ","path":["Recipes","Knowledge and Tasks","Design Before Coding"],"tags":[]},{"location":"recipes/design-before-coding/#tldr","level":2,"title":"TL;DR","text":"
    /ctx-brainstorm          # explore the design space\n/ctx-spec                # write the spec document\n/ctx-task-add            # break it into tasks\n/ctx-implement           # execute step-by-step\n

    Four skills, used in sequence. Each produces an artifact that feeds the next.

    ","path":["Recipes","Knowledge and Tasks","Design Before Coding"],"tags":[]},{"location":"recipes/design-before-coding/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Tool Type Purpose /ctx-brainstorm Skill Structured design dialogue: explore approaches /ctx-spec Skill Scaffold and fill out a spec from the template /ctx-task-add Skill Add implementation tasks to TASKS.md /ctx-implement Skill Execute a plan step-by-step with verification /ctx-decision-add Skill Record design choices made during brainstorming","path":["Recipes","Knowledge and Tasks","Design Before Coding"],"tags":[]},{"location":"recipes/design-before-coding/#the-workflow","level":2,"title":"The Workflow","text":"","path":["Recipes","Knowledge and Tasks","Design Before Coding"],"tags":[]},{"location":"recipes/design-before-coding/#step-1-brainstorm-the-design","level":3,"title":"Step 1: Brainstorm the Design","text":"

    Start with /ctx-brainstorm when you have a rough idea but haven't committed to an approach. The skill follows a gated process:

    1. Understand context: what exists today, what's the gap
    2. Clarify the idea: one question at a time, not a wall of questions
    3. Surface non-functional requirements: performance, security, compatibility
    4. Lock understanding: confirm before exploring solutions
    5. Explore 2-3 approaches: with trade-offs for each
    6. Stress-test: poke holes in the chosen approach
    7. Present the design: detailed enough to implement from
    You: \"I want to add RSS feed generation to the blog.\"\n\nAgent: \"Before we design, let me understand: is this for the journal\n  site, the docs site, or both?\"\n\nYou: \"Journal site only.\"\n\nAgent: [asks about update frequency, content scope, existing infra...]\n       [presents 3 approaches with trade-offs]\n\nYou: \"Option 2 - generate at build time.\"\n\nAgent: \"Let me stress-test that. What happens when...\"\n       [probes edge cases]\n       \"Design looks solid. Want me to record the approach as a\n       decision and write a spec?\"\n

    The brainstorm produces a validated design in conversation. It doesn't produce a file - that's what the spec is for.

    ","path":["Recipes","Knowledge and Tasks","Design Before Coding"],"tags":[]},{"location":"recipes/design-before-coding/#step-2-write-the-spec","level":3,"title":"Step 2: Write the Spec","text":"

    Once the design is clear, use /ctx-spec to produce the written artifact. The skill reads the project's spec template (specs/tpl/spec-template.md) and walks through each section:

    You: /ctx-spec (rss feed generation)\n\nAgent: \"Creating specs/rss-feed.md from template.\"\n\n       \"Problem: What user-visible problem does this solve? Why now?\"\n\nYou: \"Users can't subscribe to blog updates. They have to check\n      the site manually.\"\n\nAgent: [writes the Problem section, moves to Approach]\n\n       \"Approach: High-level, how does this work?\"\n\nYou: \"Generate feed.xml during ctx journal site. Atom format.\"\n\nAgent: [writes Approach, moves to Happy Path, then Edge Cases...]\n

    The skill spends extra time on Edge Cases and Error Handling: These are where specs earn their value. The skill pushes for at least three edge cases and won't accept \"none\" without a challenge.

    Sections that don't apply can be skipped. The result is a complete spec at specs/{feature-name}.md.

    ","path":["Recipes","Knowledge and Tasks","Design Before Coding"],"tags":[]},{"location":"recipes/design-before-coding/#step-3-break-into-tasks","level":3,"title":"Step 3: Break into Tasks","text":"

    After the spec is written, the skill offers to create tasks:

    Agent: \"Spec written to specs/rss-feed.md. Want me to break this\n  into tasks in TASKS.md?\"\n\nYou: \"Yes.\"\n\nAgent: [creates a Phase section referencing the spec, adds tasks]\n       \"Added Phase 0.8: RSS/Atom Feed Generation with 4 tasks:\n         - [ ] Implement feed.xml generation in ctx journal site\n         - [ ] Add --feed flag and .ctxrc feed config\n         - [ ] Write tests for feed generation\n         - [ ] Add recipe and update docs\"\n

    Each task is independently completable and references the spec via Spec: specs/rss-feed.md in the Phase header.

    You can also invoke /ctx-task-add directly at any point to add individual tasks.

    ","path":["Recipes","Knowledge and Tasks","Design Before Coding"],"tags":[]},{"location":"recipes/design-before-coding/#step-4-implement-step-by-step","level":3,"title":"Step 4: Implement Step-by-Step","text":"

    Pick up tasks with /ctx-implement. The skill loads the spec, breaks work into atomic steps, and checkpoints after every 3-5 steps with a build and test verification:

    You: /ctx-implement (specs/rss-feed.md)\n\nAgent: \"Loaded spec. Breaking into steps:\n         1. Add FeedEntry type to journal package\n         2. Implement feed.xml template\n         3. Wire into site generation pipeline\n         [builds and tests after step 3]\n         4. Add --feed flag to ctx journal site\n         5. Add .ctxrc feed configuration\n         [builds and tests after step 5]\n         ...\"\n

    If a build or test fails, the agent stops, diagnoses, and fixes before continuing.

    ","path":["Recipes","Knowledge and Tasks","Design Before Coding"],"tags":[]},{"location":"recipes/design-before-coding/#when-to-skip-steps","level":2,"title":"When to Skip Steps","text":"

    Not every feature needs all four steps. Use your judgment:

    Situation Start at Vague idea, multiple valid approaches Step 1: Brainstorm Clear approach, need to document it Step 2: Spec Spec already exists, need to plan work Step 3: Tasks Tasks exist, ready to code Step 4: Implement

    A brainstorm without a spec is fine for small decisions. A spec without a brainstorm is fine when the design is obvious. The full chain is for features complex enough to warrant front-loaded design.

    ","path":["Recipes","Knowledge and Tasks","Design Before Coding"],"tags":[]},{"location":"recipes/design-before-coding/#conversational-approach","level":2,"title":"Conversational Approach","text":"

    You don't need skill names. Natural language works:

    You say What happens \"Let's think through this feature\" /ctx-brainstorm \"Spec this out\" /ctx-spec \"Write a design doc for...\" /ctx-spec \"Break this into tasks\" /ctx-task-add \"Implement the spec\" /ctx-implement \"Let's design before we build\" Starts at brainstorm","path":["Recipes","Knowledge and Tasks","Design Before Coding"],"tags":[]},{"location":"recipes/design-before-coding/#tips","level":2,"title":"Tips","text":"
    • Brainstorm first when uncertain. If you can articulate the approach in two sentences, skip to spec. If you can't, brainstorm.
    • Specs prevent scope creep. The Non-Goals section is as important as the approach. Writing down what you won't do keeps implementation focused.
    • Edge cases are the point. A spec that only describes the happy path isn't a spec - it's a wish. The /ctx-spec skill pushes for at least 3 edge cases because that's where designs break.
    • Record decisions during brainstorming. When you choose between approaches, the agent offers to persist the trade-off via /ctx-decision-add. Accept - future sessions need to know why, not just what.
    • Specs are living documents. Update them when implementation reveals new constraints. A spec that diverges from reality is worse than no spec.
    • The spec template is customizable. Edit specs/tpl/spec-template.md to match your project's needs. The /ctx-spec skill reads whatever template it finds there.
    ","path":["Recipes","Knowledge and Tasks","Design Before Coding"],"tags":[]},{"location":"recipes/design-before-coding/#see-also","level":2,"title":"See Also","text":"
    • Skills Reference: /ctx-brainstorm: structured design dialogue
    • Skills Reference: /ctx-spec: spec scaffolding from template
    • Skills Reference: /ctx-implement: step-by-step execution with verification
    • Tracking Work Across Sessions: task lifecycle and archival
    • Importing Claude Code Plans: turning ephemeral plans into permanent specs
    • Persisting Decisions, Learnings, and Conventions: capturing design trade-offs
    ","path":["Recipes","Knowledge and Tasks","Design Before Coding"],"tags":[]},{"location":"recipes/external-context/","level":1,"title":"Keeping Context in a Separate Repo","text":"","path":["Recipes","Getting Started","Keeping Context in a Separate Repo"],"tags":[]},{"location":"recipes/external-context/#the-problem","level":2,"title":"The Problem","text":"

    ctx files contain project-specific decisions, learnings, conventions, and tasks. By default, they live in .context/ inside the project tree, and that works well when the context can be public.

    But sometimes you need the context outside the project:

    • Open-source projects with private context: Your architectural notes, internal task lists, and scratchpad entries shouldn't ship with the public repo.
    • Compliance or IP concerns: Context files reference sensitive design rationale that belongs in a separate access-controlled repository.
    • Personal preference: You want a single context repo that covers multiple projects, or you just prefer keeping notes separate from code.

    ctx supports this through three configuration methods. This recipe shows how to set them up and how to tell your AI assistant where to find the context.

    ","path":["Recipes","Getting Started","Keeping Context in a Separate Repo"],"tags":[]},{"location":"recipes/external-context/#tldr","level":2,"title":"TL;DR","text":"

    First --allow-outside-cwd in your project:

    mkdir ~/repos/myproject-context && cd ~/repos/myproject-context && git init\ncd ~/repos/myproject\nctx --context-dir ~/repos/myproject-context --allow-outside-cwd init\n

    Then, create a .ctxrc in your project root to specify the new .context folder location:

    context_dir: ~/repos/myproject-context\nallow_outside_cwd: true\n

    All ctx commands now use the external directory automatically.

    ","path":["Recipes","Getting Started","Keeping Context in a Separate Repo"],"tags":[]},{"location":"recipes/external-context/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Tool Type Purpose ctx init CLI command Initialize context directory --context-dir Global flag Point ctx at a non-default directory --allow-outside-cwd Global flag Permit context outside the project root .ctxrc Config file Persist the context directory setting CTX_DIR Env variable Override context directory per-session /ctx-status Skill Verify context is loading correctly","path":["Recipes","Getting Started","Keeping Context in a Separate Repo"],"tags":[]},{"location":"recipes/external-context/#the-workflow","level":2,"title":"The Workflow","text":"","path":["Recipes","Getting Started","Keeping Context in a Separate Repo"],"tags":[]},{"location":"recipes/external-context/#step-1-create-the-private-context-repo","level":3,"title":"Step 1: Create the Private Context Repo","text":"

    Create a separate repository for your context files. This can live anywhere: a private GitHub repo, a shared drive, a sibling directory:

    # Create the context repo\nmkdir ~/repos/myproject-context\ncd ~/repos/myproject-context\ngit init\n
    ","path":["Recipes","Getting Started","Keeping Context in a Separate Repo"],"tags":[]},{"location":"recipes/external-context/#step-2-initialize-ctx-pointing-at-it","level":3,"title":"Step 2: Initialize ctx Pointing at It","text":"

    From your project root, initialize ctx with --context-dir pointing to the external location. Because the directory is outside your project tree, you also need --allow-outside-cwd:

    cd ~/repos/myproject\nctx --context-dir ~/repos/myproject-context \\\n    --allow-outside-cwd \\\n    init\n

    This creates the full .context/-style file set inside ~/repos/myproject-context/ instead of ~/repos/myproject/.context/.

    Boundary Validation

    ctx validates that the .context directory is within the current working directory.

    If your external directory is truly outside the project root:

    • Either every ctx command needs --allow-outside-cwd,
    • or you can persist the setting in .ctxrc (next step).
    ","path":["Recipes","Getting Started","Keeping Context in a Separate Repo"],"tags":[]},{"location":"recipes/external-context/#step-3-make-it-stick","level":3,"title":"Step 3: Make It Stick","text":"

    Typing --context-dir and --allow-outside-cwd on every command is tedious. Pick one of these methods to make the configuration permanent.

    ","path":["Recipes","Getting Started","Keeping Context in a Separate Repo"],"tags":[]},{"location":"recipes/external-context/#option-a-ctxrc-recommended","level":4,"title":"Option A: .ctxrc (Recommended)","text":"

    Create a .ctxrc file in your project root:

    # .ctxrc: committed to the project repo\ncontext_dir: ~/repos/myproject-context\nallow_outside_cwd: true\n

    ctx reads .ctxrc automatically. Every command now uses the external directory without extra flags:

    ctx status          # reads from ~/repos/myproject-context\nctx add learning \"Redis MULTI doesn't roll back on error\" \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n

    Commit .ctxrc

    .ctxrc belongs in the project repo. It contains no secrets: It's just a path and a boundary override.

    .ctxrc lets teammates share the same configuration.

    ","path":["Recipes","Getting Started","Keeping Context in a Separate Repo"],"tags":[]},{"location":"recipes/external-context/#option-b-ctx_dir-environment-variable","level":4,"title":"Option B: CTX_DIR Environment Variable","text":"

    Good for CI pipelines, temporary overrides, or when you don't want to commit a .ctxrc:

    # In your shell profile (~/.bashrc, ~/.zshrc)\nexport CTX_DIR=~/repos/myproject-context\n

    Or for a single session:

    CTX_DIR=~/repos/myproject-context ctx status\n
    ","path":["Recipes","Getting Started","Keeping Context in a Separate Repo"],"tags":[]},{"location":"recipes/external-context/#option-c-shell-alias","level":4,"title":"Option C: Shell Alias","text":"

    If you prefer a shell alias over .ctxrc:

    # ~/.bashrc or ~/.zshrc\nalias ctx='ctx --context-dir ~/repos/myproject-context --allow-outside-cwd'\n
    ","path":["Recipes","Getting Started","Keeping Context in a Separate Repo"],"tags":[]},{"location":"recipes/external-context/#priority-order","level":4,"title":"Priority Order","text":"

    When multiple methods are set, ctx resolves the context directory in this order (highest priority first):

    1. --context-dir flag
    2. CTX_DIR environment variable
    3. context_dir in .ctxrc
    4. Default: .context/
    ","path":["Recipes","Getting Started","Keeping Context in a Separate Repo"],"tags":[]},{"location":"recipes/external-context/#step-4-agent-auto-discovery-via-bootstrap","level":3,"title":"Step 4: Agent Auto-Discovery via Bootstrap","text":"

    When context lives outside the project tree, your AI assistant needs to know where to find it. The ctx system bootstrap command resolves the configured context directory and communicates it to the agent automatically:

    $ ctx system bootstrap\nctx system bootstrap\n====================\n\ncontext_dir: /home/user/repos/myproject-context\n\nFiles:\n  CONSTITUTION.md, TASKS.md, DECISIONS.md, ...\n

    The CLAUDE.md template generated by ctx init already instructs the agent to run ctx system bootstrap at session start. Because .ctxrc is in the project root, your agent inherits the external path automatically via the ctx system bootstrap call instruction.

    Here is the relevant section from CLAUDE.md for reference:

    <!-- CLAUDE.md -->\n1. **Run `ctx system bootstrap`**: CRITICAL, not optional.\n   This tells you where the context directory is. If it fails or returns\n   no context_dir, STOP and warn the user.\n

    Moreover, every nudge (context checkpoint, persistence reminder, etc.) also includes a Context: /home/user/repos/myproject-context footer, so the agent remains anchored to the correct directory even in long sessions.

    If you use CTX_DIR instead of .ctxrc, export it in your shell profile so the hook process inherits it:

    export CTX_DIR=~/repos/myproject-context\n
    ","path":["Recipes","Getting Started","Keeping Context in a Separate Repo"],"tags":[]},{"location":"recipes/external-context/#step-5-share-with-teammates","level":3,"title":"Step 5: Share with Teammates","text":"

    Teammates clone both repos and set up .ctxrc:

    # Clone the project\ngit clone git@github.com:org/myproject.git\ncd myproject\n\n# Clone the private context repo\ngit clone git@github.com:org/myproject-context.git ~/repos/myproject-context\n

    If .ctxrc is already committed to the project, they're done: ctx commands will find the external context automatically.

    If teammates use different paths, each developer sets their own CTX_DIR:

    export CTX_DIR=~/my-own-path/myproject-context\n

    For encryption key distribution across the team, see the Syncing Scratchpad Notes recipe.

    ","path":["Recipes","Getting Started","Keeping Context in a Separate Repo"],"tags":[]},{"location":"recipes/external-context/#step-6-day-to-day-sync","level":3,"title":"Step 6: Day-to-Day Sync","text":"

    The external context repo has its own git history. Treat it like any other repo: Commit and push after sessions:

    cd ~/repos/myproject-context\n\n# After a session\ngit add -A\ngit commit -m \"Session: refactored auth module, added rate-limit learning\"\ngit push\n

    Your AI assistant can do this too. When ending a session:

    You: \"Save what we learned and push the context repo.\"\n\nAgent: [runs ctx add learning, then commits and pushes the context repo]\n

    You can also set up a post-session habit: project code gets committed to the project repo, context gets committed to the context repo.

    ","path":["Recipes","Getting Started","Keeping Context in a Separate Repo"],"tags":[]},{"location":"recipes/external-context/#conversational-approach","level":2,"title":"Conversational Approach","text":"

    You don't need to remember the flags; simply ask your assistant:

    ","path":["Recipes","Getting Started","Keeping Context in a Separate Repo"],"tags":[]},{"location":"recipes/external-context/#set-up-your-system-using-natural-language","level":3,"title":"Set Up Your System Using Natural Language","text":"
    You: \"Set up ctx to use ~/repos/myproject-context as the context directory.\"\n\nAgent: \"I'll create a .ctxrc in the project root pointing to that path.\n       I'll also update CLAUDE.md so future sessions know where to find\n       context. Want me to initialize the context files there too?\"\n
    ","path":["Recipes","Getting Started","Keeping Context in a Separate Repo"],"tags":[]},{"location":"recipes/external-context/#configure-separate-repo-for-context-folder-using-natural-language","level":3,"title":"Configure Separate Repo for .context Folder Using Natural Language","text":"
    You: \"My context is in a separate repo. Can you load it?\"\n\nAgent: [reads .ctxrc, finds the path, loads context from the external dir]\n       \"Loaded. You have 3 pending tasks, last session was about the auth\n       refactor.\"\n
    ","path":["Recipes","Getting Started","Keeping Context in a Separate Repo"],"tags":[]},{"location":"recipes/external-context/#tips","level":2,"title":"Tips","text":"
    • Start simple. If you don't need external context yet, don't set it up. The default .context/ in-tree is the easiest path. Move to an external repo when you have a concrete reason.
    • One context repo per project. Sharing a single context directory across multiple projects creates confusion. Keep the mapping 1:1.
    • Use .ctxrc over env vars when the path is stable. It's committed, documented, and works for the whole team without per-developer shell setup.
    • Don't forget the boundary flag. The most common error is Error: context directory is outside the project root. Set allow_outside_cwd: true in .ctxrc or pass --allow-outside-cwd.
    • Commit both repos at session boundaries. Context without code history (or code without context history) loses half the value.
    ","path":["Recipes","Getting Started","Keeping Context in a Separate Repo"],"tags":[]},{"location":"recipes/external-context/#next-up","level":2,"title":"Next Up","text":"

    The Complete Session →: Walk through a full ctx session from start to finish.

    ","path":["Recipes","Getting Started","Keeping Context in a Separate Repo"],"tags":[]},{"location":"recipes/external-context/#see-also","level":2,"title":"See Also","text":"
    • Setting Up ctx Across AI Tools: initial setup recipe
    • Syncing Scratchpad Notes Across Machines: distribute encryption keys when context is shared
    • CLI Reference: all global flags including --context-dir and --allow-outside-cwd
    ","path":["Recipes","Getting Started","Keeping Context in a Separate Repo"],"tags":[]},{"location":"recipes/guide-your-agent/","level":1,"title":"Guide Your Agent","text":"

    Commands vs. Skills

    Commands (ctx status, ctx add task) run in your terminal.

    Skills (/ctx-reflect, /ctx-next) run inside your AI coding assistant.

    Recipes combine both.

    Think of commands as structure and skills as behavior.

    ","path":["Recipes","Getting Started","Guide Your Agent"],"tags":[]},{"location":"recipes/guide-your-agent/#proactive-behavior","level":2,"title":"Proactive Behavior","text":"

    These recipes show explicit commands and skills, but agents trained on the ctx playbook are proactive: They offer to save learnings after debugging, record decisions after trade-offs, create follow-up tasks after completing work, and suggest what to work on next.

    Your questions train the agent. Asking \"what have we learned?\" or \"is our context clean?\" does two things:

    • It triggers the workflow right now,
    • and it reinforces the pattern.

    The more you guide, the more the agent habituates the behavior and begins offering on its own.

    Each recipe includes a Conversational Approach section showing these natural-language patterns.

    Tip

    Don't wait passively for proactive behavior: especially in early sessions.

    Ask, guide, reinforce. Over time, you ask less and the agent offers more.

    ","path":["Recipes","Getting Started","Guide Your Agent"],"tags":[]},{"location":"recipes/guide-your-agent/#next-up","level":2,"title":"Next Up","text":"

    Setup Across AI Tools →: Initialize ctx and configure hooks for Claude Code, Cursor, Aider, Copilot, or Windsurf.

    ","path":["Recipes","Getting Started","Guide Your Agent"],"tags":[]},{"location":"recipes/guide-your-agent/#see-also","level":2,"title":"See Also","text":"
    • The Complete Session: full session lifecycle from start to finish
    • Prompting Guide: general tips for working effectively with AI coding assistants
    ","path":["Recipes","Getting Started","Guide Your Agent"],"tags":[]},{"location":"recipes/hook-output-patterns/","level":1,"title":"Hook Output Patterns","text":"","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-output-patterns/#the-problem","level":2,"title":"The Problem","text":"

    Claude Code hooks can output text, JSON, or nothing at all. But the format of that output determines who sees it and who acts on it.

    Choose the wrong pattern, and your carefully crafted warning gets silently absorbed by the agent, or your agent-directed nudge gets dumped on the user as noise.

    This recipe catalogs the known hook output patterns and explains when to use each one.

    ","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-output-patterns/#tldr","level":2,"title":"TL;DR","text":"

    Eight patterns from full control to full invisibility:

    • hard gate (exit 2),
    • VERBATIM relay (agent MUST show),
    • agent directive (context injection),
    • and silent side-effect (background work).

    Most hooks belong in the middle.

    ","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-output-patterns/#the-spectrum","level":2,"title":"The Spectrum","text":"

    These patterns form a spectrum based on who decides what the user sees:

    Pattern Who decides? Hard gate Hook decides (agent can't proceed) VERBATIM relay Hook decides (agent must show) Escalating severity Hook suggests, agent judges urgency Conditional relay Hook sets criteria, agent evaluates Suggested action Hook proposes, agent + user decide Agent directive Agent decides entirely Silent injection Nobody: invisible background context Silent side-effect Nobody: invisible background work

    The spectrum runs from full hook control (hard gate) to full invisibility (silent side-effect).

    Most hooks belong somewhere in the middle.

    ","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-output-patterns/#pattern-1-hard-gate","level":2,"title":"Pattern 1: Hard Gate","text":"

    Block the tool call entirely. The agent cannot proceed: it must find another approach or tell the user.

    echo '{\"decision\": \"block\", \"reason\": \"Use ctx from PATH, not ./ctx\"}'\n

    When to use: Enforcing invariants that must never be violated: Constitution rules, security boundaries, destructive command prevention.

    Hook type: PreToolUse only (Claude Code first-class mechanism).

    Examples in ctx:

    • ctx system block-non-path-ctx: Enforces the PATH invocation rule
    • block-git-push.sh: Requires explicit user approval for pushes (project-local)
    • block-dangerous-commands.sh: Prevents sudo, copies to ~/.local/bin (project-local)

    Trade-off: The agent gets a block response with a reason. Good reasons help the agent recover (\"use X instead\"); bad reasons leave it stuck.

    ","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-output-patterns/#pattern-2-verbatim-relay","level":2,"title":"Pattern 2: VERBATIM Relay","text":"

    Force the agent to show this to the user as-is. The explicit instruction overcomes the agent's tendency to silently absorb context.

    echo \"IMPORTANT: Relay this warning to the user VERBATIM before answering their question.\"\necho \"\"\necho \"┌─ Journal Reminder ─────────────────────────────\"\necho \"│ You have 12 sessions not yet exported.\"\necho \"└────────────────────────────────────────────────\"\n

    When to use: Actionable reminders the user needs to see regardless of what they asked: Stale backups, unimported sessions, resource warnings.

    Hook type: UserPromptSubmit (runs before the agent sees the prompt).

    Examples in ctx:

    • ctx system check-journal: Unexported sessions and unenriched entries
    • ctx system check-context-size: Context capacity warning
    • ctx system check-resources: Resource pressure (memory, swap, disk, load): DANGER only
    • ctx system check-freshness: Technology constant staleness warning
    • check-backup-age.sh: Stale backup warning (project-local)

    Trade-off: Noisy if overused. Every VERBATIM relay adds a preamble before the agent's actual answer. Throttle with once-per-day markers or adaptive frequency.

    Key detail: The phrase IMPORTANT: Relay this ... VERBATIM is what makes this work. Without it, agents tend to process the information internally and never surface it. The explicit instruction is the pattern: the box-drawing is just fancy formatting.

    ","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-output-patterns/#pattern-3-agent-directive","level":2,"title":"Pattern 3: Agent Directive","text":"

    Tell the agent to do something, not the user. The agent decides whether and how to involve the user.

    echo \"┌─ Persistence Checkpoint (prompt #25) ───────────\"\necho \"│ No context files updated in 15+ prompts.\"\necho \"│ Have you discovered learnings, decisions,\"\necho \"│ or completed tasks worth persisting?\"\necho \"└──────────────────────────────────────────────────\"\n

    When to use: Behavioral nudges. The hook detects a condition and asks the agent to consider an action. The user may never need to know.

    Hook type: UserPromptSubmit.

    Examples in ctx:

    • ctx system check-persistence: Nudges the agent to persist context

    Trade-off: No guarantee the agent acts. The nudge is one signal among many in the context window. Strong phrasing helps (\"Have you...?\" is better than \"Consider...\"), but ultimately the agent decides.

    ","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-output-patterns/#pattern-4-silent-context-injection","level":2,"title":"Pattern 4: Silent Context Injection","text":"

    Load context with no visible output. The agent gets enriched without either party noticing.

    ctx agent --budget 4000 >/dev/null || true\n

    When to use: Background context loading that should be invisible. The agent benefits from the information, but neither it nor the user needs to know it happened.

    Hook type: PreToolUse with .* matcher (runs on every tool call).

    Examples in ctx:

    • The ctx agent PreToolUse hook: injects project context silently

    Trade-off: Adds latency to every tool call. Keep the injected content small and fast to generate.

    ","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-output-patterns/#pattern-5-silent-side-effect","level":2,"title":"Pattern 5: Silent Side-Effect","text":"

    Do work, produce no output: Housekeeping that needs no acknowledgment.

    find \"$CTX_TMPDIR\" -type f -mtime +15 -delete\n

    When to use: Cleanup, log rotation, temp file management. Anything where the action is the point and nobody needs to know it happened.

    Hook type: Any hook where output is irrelevant.

    Examples in ctx:

    • Log rotation, marker file cleanup, state directory maintenance

    Trade-off: None, if the action is truly invisible. If it can fail in a way that matters, consider logging.

    ","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-output-patterns/#pattern-6-conditional-relay","level":3,"title":"Pattern 6: Conditional Relay","text":"

    Tell the agent to relay only if a condition holds in context.

    echo \"If the user's question involves modifying .context/ files,\"\necho \"relay this warning VERBATIM:\"\necho \"\"\necho \"┌─ Context Integrity ─────────────────────────────\"\necho \"│ CONSTITUTION.md has not been verified in 7 days.\"\necho \"└────────────────────────────────────────────────\"\necho \"\"\necho \"Otherwise, proceed normally.\"\n

    When to use: Warnings that only matter in certain contexts. Avoids noise when the user is doing unrelated work.

    Trade-off: Depends on the agent's judgment about when the condition holds. More fragile than VERBATIM relay, but less noisy.

    ","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-output-patterns/#pattern-7-suggested-action","level":3,"title":"Pattern 7: Suggested Action","text":"

    Give the agent a specific command to propose to the user.

    echo \"┌─ Stale Dependencies ──────────────────────────\"\necho \"│ go.sum is 30+ days newer than go.mod.\"\necho \"│ Suggested: run \\`go mod tidy\\`\"\necho \"│ Ask the user before proceeding.\"\necho \"└───────────────────────────────────────────────\"\n

    When to use: The hook detects a fixable condition and knows the fix. Goes beyond a nudge: Gives the agent a concrete next step. The agent still asks for permission but knows exactly what to propose.

    Trade-off: The suggestion might be wrong or outdated. The \"ask the user before proceeding\" part is critical.

    ","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-output-patterns/#pattern-8-escalating-severity","level":3,"title":"Pattern 8: Escalating Severity","text":"

    Different urgency tiers with different relay expectations.

    # INFO: agent processes silently, mentions if relevant\necho \"INFO: Last test run was 3 days ago.\"\n\n# WARN: agent should mention to user at next natural pause\necho \"WARN: 12 uncommitted changes across 3 branches.\"\n\n# CRITICAL: agent must relay immediately, before any other work\necho \"CRITICAL: Relay VERBATIM before answering. Disk usage at 95%.\"\n

    When to use: When you have multiple hooks producing output and need to avoid overwhelming the user. INFO gets absorbed, WARN gets mentioned, CRITICAL interrupts.

    Examples in ctx:

    • ctx system check-resources: Uses two tiers (WARNING/DANGER) internally but only fires the VERBATIM relay at DANGER level: WARNING is silent. See ctx system for the user-facing command that shows both tiers.

    Trade-off: Requires agent training or convention to recognize the tiers. Without a shared protocol, the prefixes are just text.

    ","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-output-patterns/#choosing-a-pattern","level":2,"title":"Choosing a Pattern","text":"
    Is the agent about to do something forbidden?\n  └─ Yes → Hard gate\n\nDoes the user need to see this regardless of what they asked?\n  └─ Yes → VERBATIM relay\n  └─ Sometimes → Conditional relay\n\nShould the agent consider an action?\n  └─ Yes, with a specific fix → Suggested action\n  └─ Yes, open-ended → Agent directive\n\nIs this background context the agent should have?\n  └─ Yes → Silent injection\n\nIs this housekeeping?\n  └─ Yes → Silent side-effect\n
    ","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-output-patterns/#design-tips","level":2,"title":"Design Tips","text":"

    Throttle aggressively: VERBATIM relays that fire every prompt will be ignored or resented. Use once-per-day markers (touch $REMINDED), adaptive frequency (every Nth prompt), or staleness checks (only fire if condition persists).

    Include actionable commands: \"You have 12 unimported sessions\" is less useful than \"You have 12 unimported sessions. Run: ctx journal import --all.\" Give the user (or agent) the exact next step.

    Use box-drawing for visual structure: The ┌─ ─┐ │ └─ ─┘ pattern makes hook output visually distinct from agent prose. It also signals \"this is machine-generated, not agent opinion.\"

    Test the silence path: Most hook runs should produce no output (the condition isn't met). Make sure the common case is fast and silent.

    ","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-output-patterns/#common-pitfalls","level":2,"title":"Common Pitfalls","text":"

    Lessons from 19 days of hook debugging in ctx. Every one of these was encountered, debugged, and fixed in production.

    ","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-output-patterns/#silent-misfire-wrong-key-name","level":3,"title":"Silent Misfire: Wrong Key Name","text":"
    { \"PreToolUseHooks\": [ ... ] }\n

    The key is PreToolUse, not PreToolUseHooks. Claude Code does not validate key names: A misspelled key means the hook is silently ignored with no error. Always test with a debug echo first to confirm the hook fires before adding real logic.

    ","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-output-patterns/#json-escaping-breaks-shell-commands","level":3,"title":"JSON Escaping Breaks Shell Commands","text":"

    Go's json.Marshal escapes >, <, and & as Unicode sequences (\\u003e) by default. This breaks shell commands in generated config:

    \"command\": \"ctx agent 2\\u003e/dev/null\"\n

    Fix: use json.Encoder with SetEscapeHTML(false) when generating hook configuration.

    ","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-output-patterns/#stdin-not-environment-variables","level":3,"title":"stdin, Not Environment Variables","text":"

    Hook input arrives as JSON via stdin, not environment variables:

    # Wrong:\nCOMMAND=\"$CLAUDE_TOOL_INPUT\"\n\n# Right:\nHOOK_INPUT=$(cat)\nCOMMAND=$(echo \"$HOOK_INPUT\" | jq -r '.tool_input.command // empty')\n
    ","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-output-patterns/#regex-overfitting","level":3,"title":"Regex Overfitting","text":"

    A regex meant to catch ctx as a binary will also match ctx as a directory component:

    # Too broad: blocks: git -C /home/jose/WORKSPACE/ctx status\n(/home/|/tmp/|/var/)[^ ]*ctx[^ ]*\n\n# Narrow to binary only:\n(/home/|/tmp/|/var/)[^ ]*/ctx( |$)\n

    Test hook regexes against paths that contain the target string as a substring, not just as the final component.

    ","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-output-patterns/#repetition-fatigue","level":3,"title":"Repetition Fatigue","text":"

    Injecting context on every tool call sounds safe. In practice, after seeing the same context injection fifteen times, the agent treats it as background noise: Conventions stated in the injected context get violated because salience has been destroyed by repetition.

    Fix: cooldowns. ctx agent --session $PPID --cooldown 10m injects at most once per ten minutes per session using a tombstone file in /tmp/. This is not an optimization; it is a correction for a design flaw. Every injection consumes attention budget: 50 tool calls at 4,000 tokens each means 200,000 tokens of repeated context, most of it wasted.

    ","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-output-patterns/#hardcoded-paths","level":3,"title":"Hardcoded Paths","text":"

    A username rename (parallels to jose) broke every hook at once. Use $CLAUDE_PROJECT_DIR instead of absolute paths:

    \"command\": \"\\\"$CLAUDE_PROJECT_DIR\\\"/.claude/hooks/block-git-push.sh\"\n

    If the platform provides a runtime variable for paths, always use it.

    ","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-output-patterns/#next-up","level":2,"title":"Next Up","text":"

    Webhook Notifications →: Get push notifications when loops complete, hooks fire, or agents hit milestones.

    ","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-output-patterns/#see-also","level":2,"title":"See Also","text":"
    • Customizing Hook Messages: override what hooks say without changing what they do
    • Claude Code Permission Hygiene: how permissions and hooks work together
    • Defense in Depth: why hooks matter for agent security
    ","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/","level":1,"title":"Hook Sequence Diagrams","text":"","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#hook-lifecycle","level":2,"title":"Hook Lifecycle","text":"

    This page documents the ctx system hooks: the built-in ctx system * subcommands that Claude Code invokes via .claude/hooks.json at lifecycle events. These are owned by ctx itself, not authored by users.

    Not to Be Confused with ctx trigger

    ctx has three distinct hook-like layers:

    • ctx system hooks (this page): built-in, owned by ctx, wired into Claude Code via internal/assets/claude/hooks/hooks.json.
    • ctx trigger: user-authored shell scripts in .context/hooks/<type>/*.sh. See ctx trigger reference and the trigger authoring recipe.
    • Claude Code hooks configured directly in .claude/settings.local.json, tool-specific, not portable across AI tools.

    This page is only about the first category.

    Every ctx system hook is a Go binary invoked by Claude Code at one of three lifecycle events: PreToolUse (before a tool runs, can block), PostToolUse (after a tool completes), or UserPromptSubmit (on every user prompt, before any tools run). Hooks receive JSON on stdin and emit JSON or plain text on stdout.

    ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#pretooluse-hooks","level":2,"title":"PreToolUse Hooks","text":"

    These fire before a tool executes. They can block, gate, or inject context.

    ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#context-load-gate","level":3,"title":"Context-Load-Gate","text":"

    Matcher: .* (all tools)

    Injects the full context packet on first tool use of a session. One-shot per session.

    sequenceDiagram\n    participant CC as Claude Code\n    participant Hook as context-load-gate\n    participant State as .context/state/\n    participant Ctx as .context/ files\n    participant Git as git log\n\n    CC->>Hook: stdin {command, session_id}\n    Hook->>Hook: Check initialized\n    alt not initialized\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Hook: Check paused\n    alt paused\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>State: Check ctx-loaded-{session} marker\n    alt marker exists\n        Hook-->>CC: (silent exit, already fired)\n    end\n    Hook->>State: Create marker (one-shot guard)\n    Hook->>State: Prune stale session files\n    loop Each file in ReadOrder\n        alt GLOSSARY or TASK\n            Note over Hook: Skip (Task mentioned in footer only)\n        else DECISION or LEARNING\n            Hook->>Ctx: Extract index table only\n        else other files\n            Hook->>Ctx: Read full content\n        end\n        Hook->>Hook: Estimate tokens per file\n    end\n    Hook->>Git: Detect changes since last session\n    Hook->>Hook: Build injection (files + changes + token counts)\n    Hook-->>CC: JSON {additionalContext: injection}\n    Hook->>Hook: Send webhook (metadata only)\n    Hook->>State: Write oversize flag if tokens > threshold
    ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#block-non-path-ctx","level":3,"title":"Block-Non-Path-ctx","text":"

    Matcher: Bash

    Blocks ./ctx, go run ./cmd/ctx, or absolute-path ctx invocations. Constitutionally enforced.

    sequenceDiagram\n    participant CC as Claude Code\n    participant Hook as block-non-path-ctx\n    participant Tpl as Message Template\n\n    CC->>Hook: stdin {command, session_id}\n    Hook->>Hook: Extract command\n    alt command empty\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Hook: Test regex: relative-path, go-run, absolute-path\n    alt no match\n        Hook-->>CC: (silent exit)\n    end\n    alt absolute-path + test exception\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Tpl: LoadMessage(hook, variant, fallback)\n    Hook-->>CC: JSON {decision: BLOCK, reason + constitution suffix}\n    Hook->>Hook: NudgeAndRelay(message)
    ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#qa-reminder","level":3,"title":"Qa-Reminder","text":"

    Matcher: Bash

    Gate nudge before any git command. Reminds agent to lint/test.

    sequenceDiagram\n    participant CC as Claude Code\n    participant Hook as qa-reminder\n    participant Tpl as Message Template\n\n    CC->>Hook: stdin {command, session_id}\n    Hook->>Hook: Check initialized + HookPreamble\n    alt not initialized or paused\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Hook: Check command contains \"git\"\n    alt no git command\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Tpl: LoadMessage(hook, gate, fallback)\n    Hook->>Hook: AppendDir(message)\n    Hook-->>CC: JSON {additionalContext: QA gate}\n    Hook->>Hook: Relay(message)
    ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#specs-nudge","level":3,"title":"Specs-Nudge","text":"

    Matcher: EnterPlanMode

    Nudges agent to save plans/specs when new implementation detected.

    sequenceDiagram\n    participant CC as Claude Code\n    participant Hook as specs-nudge\n    participant Tpl as Message Template\n\n    CC->>Hook: stdin {command, session_id}\n    Hook->>Hook: Check initialized + HookPreamble\n    alt not initialized or paused\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Tpl: LoadMessage(hook, nudge, fallback)\n    Hook->>Hook: AppendDir(message)\n    Hook-->>CC: JSON {additionalContext: specs nudge}\n    Hook->>Hook: Relay(message)
    ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#posttooluse-hooks","level":2,"title":"PostToolUse Hooks","text":"

    These fire after a tool completes. They observe, nudge, and track state.

    ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#post-commit","level":3,"title":"Post-Commit","text":"

    Matcher: Bash

    Fires after git commit (not amend). Nudges for context capture and checks version drift.

    sequenceDiagram\n    participant CC as Claude Code\n    participant Hook as post-commit\n    participant Tpl as Message Template\n\n    CC->>Hook: stdin {command, session_id}\n    Hook->>Hook: Check initialized + HookPreamble\n    alt not initialized or paused\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Hook: Regex: command contains \"git commit\"?\n    alt not a git commit\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Hook: Regex: command contains \"--amend\"?\n    alt is amend\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Tpl: LoadMessage(hook, nudge, fallback)\n    Hook->>Hook: AppendDir(message)\n    Hook-->>CC: JSON {additionalContext: post-commit nudge}\n    Hook->>Hook: Relay(message)\n    Hook->>Hook: CheckVersionDrift()
    ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#check-task-completion","level":3,"title":"Check-Task-Completion","text":"

    Matcher: Edit, Write

    Configurable-interval nudge after edits. Per-session counter resets after firing.

    sequenceDiagram\n    participant CC as Claude Code\n    participant Hook as check-task-completion\n    participant State as .context/state/\n    participant RC as .ctxrc\n    participant Tpl as Message Template\n\n    CC->>Hook: stdin {session_id}\n    Hook->>Hook: Check initialized + HookPreamble\n    alt not initialized or paused\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>RC: Read task nudge interval\n    alt interval <= 0 (disabled)\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>State: Read per-session counter\n    Hook->>Hook: Increment counter\n    alt counter < interval\n        Hook->>State: Write counter\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>State: Reset counter to 0\n    Hook->>Tpl: LoadMessage(hook, nudge, fallback)\n    Hook-->>CC: JSON {additionalContext: task nudge}\n    Hook->>Hook: Relay(message)
    ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#userpromptsubmit-hooks","level":2,"title":"UserPromptSubmit Hooks","text":"

    These fire on every user prompt, before any tools run. They perform health checks, track state, and nudge for housekeeping.

    ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#check-context-size","level":3,"title":"Check-Context-Size","text":"

    Adaptive context window monitoring. Fires checkpoints, window warnings, and billing alerts based on prompt count and token usage.

    sequenceDiagram\n    participant CC as Claude Code\n    participant Hook as check-context-size\n    participant State as .context/state/\n    participant Session as Session JSONL\n    participant Tpl as Message Template\n\n    CC->>Hook: stdin {session_id}\n    Hook->>Hook: Check initialized\n    Hook->>Hook: Read input, resolve session ID\n    Hook->>Hook: Check paused\n    alt paused\n        Hook-->>CC: Pause acknowledgment message\n    end\n    Hook->>State: Increment session prompt counter\n    Hook->>Session: Read token info (tokens, model, window)\n\n    rect rgb(255, 240, 240)\n        Note over Hook: Billing check (independent, never suppressed)\n        alt tokens >= billing threshold (one-shot)\n            Hook->>Tpl: LoadMessage(hook, billing, vars)\n            Hook-->>CC: Billing warning nudge box\n            Hook->>Hook: NudgeAndRelay(billing message)\n        end\n    end\n\n    Hook->>State: Check wrap-up marker\n    alt wrapped up recently (< 2h)\n        Hook->>State: Write stats (event: suppressed)\n        Hook-->>CC: (silent exit)\n    end\n\n    rect rgb(240, 248, 255)\n        Note over Hook: Adaptive frequency check\n        alt count > 30 and count % 3 == 0\n            Note over Hook: High frequency trigger\n        else count > 15 and count % 5 == 0\n            Note over Hook: Medium frequency trigger\n        else\n            Hook->>State: Write stats (event: silent)\n            Hook-->>CC: (silent exit)\n        end\n    end\n\n    alt context window >= 80%\n        Hook->>Tpl: LoadMessage(hook, window, vars)\n        Hook-->>CC: Window warning nudge box\n        Hook->>Hook: NudgeAndRelay(window message)\n    else checkpoint trigger\n        Hook->>Tpl: LoadMessage(hook, checkpoint)\n        Hook-->>CC: Checkpoint nudge box\n        Hook->>Hook: NudgeAndRelay(checkpoint message)\n    end\n    Hook->>State: Write session stats
    ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#check-ceremonies","level":3,"title":"Check-Ceremonies","text":"

    Daily check for /ctx-remember and /ctx-wrap-up usage in recent journal entries.

    sequenceDiagram\n    participant CC as Claude Code\n    participant Hook as check-ceremonies\n    participant State as .context/state/\n    participant Journal as Journal files\n    participant Tpl as Message Template\n\n    CC->>Hook: stdin {session_id}\n    Hook->>Hook: Check initialized + HookPreamble\n    alt not initialized or paused\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>State: Check daily throttle marker\n    alt throttled\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Journal: Read recent files (lookback window)\n    alt no journal files\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Journal: Scan for /ctx-remember and /ctx-wrap-up\n    alt both ceremonies present\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Tpl: LoadMessage(hook, variant, fallback)\n    Note over Hook: variant: both | remember | wrapup\n    Hook-->>CC: Nudge box (missing ceremonies)\n    Hook->>Hook: NudgeAndRelay(message)\n    Hook->>State: Touch throttle marker
    ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#check-freshness","level":3,"title":"Check-Freshness","text":"

    Daily check for technology-dependent constants that may need review.

    sequenceDiagram\n    participant CC as Claude Code\n    participant Hook as check-freshness\n    participant State as .context/state/\n    participant FS as Filesystem\n    participant Tpl as Message Template\n\n    CC->>Hook: stdin {session_id}\n    Hook->>Hook: Check initialized + HookPreamble\n    alt not initialized or paused\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>State: Check daily throttle marker\n    alt throttled\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>FS: Stat tracked files (5 source files)\n    alt all files modified within 6 months\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Tpl: LoadMessage(hook, stale, {StaleFiles})\n    Hook-->>CC: Nudge box (stale file list + review URL)\n    Hook->>Hook: NudgeAndRelay(message)\n    Hook->>State: Touch throttle marker
    ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#check-journal","level":3,"title":"Check-Journal","text":"

    Daily check for unimported sessions and unenriched journal entries.

    sequenceDiagram\n    participant CC as Claude Code\n    participant Hook as check-journal\n    participant State as .context/state/\n    participant Journal as Journal dir\n    participant Claude as Claude projects dir\n    participant Tpl as Message Template\n\n    CC->>Hook: stdin {session_id}\n    Hook->>Hook: Check initialized + HookPreamble\n    alt not initialized or paused\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>State: Check daily throttle marker\n    alt throttled\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Journal: Check dir exists\n    Hook->>Claude: Check dir exists\n    alt either dir missing\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Journal: Get newest entry mtime\n    Hook->>Claude: Count .jsonl files newer than journal\n    Hook->>Journal: Count unenriched entries\n    alt unimported == 0 and unenriched == 0\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Tpl: LoadMessage(hook, variant, {counts})\n    Note over Hook: variant: both | unimported | unenriched\n    Hook-->>CC: Nudge box (counts)\n    Hook->>Hook: NudgeAndRelay(message)\n    Hook->>State: Touch throttle marker
    ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#check-knowledge","level":3,"title":"Check-Knowledge","text":"

    Daily check for knowledge file entry/line counts exceeding configured thresholds.

    sequenceDiagram\n    participant CC as Claude Code\n    participant Hook as check-knowledge\n    participant State as .context/state/\n    participant Ctx as .context/ files\n    participant RC as .ctxrc\n    participant Tpl as Message Template\n\n    CC->>Hook: stdin {session_id}\n    Hook->>Hook: Check initialized + HookPreamble\n    alt not initialized or paused\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>State: Check daily throttle marker\n    alt throttled\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>RC: Read thresholds (decisions, learnings, conventions)\n    alt all thresholds disabled (0)\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Ctx: Parse DECISIONS.md entry count\n    Hook->>Ctx: Parse LEARNINGS.md entry count\n    Hook->>Ctx: Count CONVENTIONS.md lines\n    Hook->>Hook: Compare against thresholds\n    alt all within limits\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Tpl: LoadMessage(hook, warning, {FileWarnings})\n    Hook-->>CC: Nudge box (file warnings)\n    Hook->>Hook: NudgeAndRelay(message)\n    Hook->>State: Touch throttle marker
    ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#check-map-staleness","level":3,"title":"Check-Map-Staleness","text":"

    Daily check for architecture map age and relevant code changes.

    sequenceDiagram\n    participant CC as Claude Code\n    participant Hook as check-map-staleness\n    participant State as .context/state/\n    participant Tracking as map-tracking.json\n    participant Git as git log\n    participant Tpl as Message Template\n\n    CC->>Hook: stdin {session_id}\n    Hook->>Hook: Check initialized + HookPreamble\n    alt not initialized or paused\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>State: Check daily throttle marker\n    alt throttled\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Tracking: Read map-tracking.json\n    alt missing, invalid, or opted out\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Hook: Parse LastRun date\n    alt map not stale (< N days)\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Git: Count commits touching internal/ since LastRun\n    alt no relevant commits\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Tpl: LoadMessage(hook, stale, {date, count})\n    Hook-->>CC: Nudge box (last refresh + commit count)\n    Hook->>Hook: NudgeAndRelay(message)\n    Hook->>State: Touch throttle marker
    ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#check-memory-drift","level":3,"title":"Check-Memory-Drift","text":"

    Per-session check for MEMORY.md changes since last sync.

    sequenceDiagram\n    participant CC as Claude Code\n    participant Hook as check-memory-drift\n    participant State as .context/state/\n    participant Mem as memory.Discover\n    participant Tpl as Message Template\n\n    CC->>Hook: stdin {session_id}\n    Hook->>Hook: Check initialized + HookPreamble\n    alt not initialized or paused\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>State: Check session tombstone\n    alt already nudged this session\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Mem: DiscoverMemoryPath(projectRoot)\n    alt auto memory not active\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Mem: HasDrift(contextDir, sourcePath)\n    alt no drift\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Tpl: LoadMessage(hook, nudge, fallback)\n    Hook-->>CC: Nudge box (drift reminder)\n    Hook->>Hook: NudgeAndRelay(message)\n    Hook->>State: Touch session tombstone
    ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#check-persistence","level":3,"title":"Check-Persistence","text":"

    Tracks context file modification and nudges when edits happen without persisting context. Adaptive threshold based on prompt count.

    sequenceDiagram\n    participant CC as Claude Code\n    participant Hook as check-persistence\n    participant State as .context/state/\n    participant Ctx as .context/ files\n    participant Tpl as Message Template\n\n    CC->>Hook: stdin {session_id}\n    Hook->>Hook: Check initialized + HookPreamble\n    alt not initialized or paused\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>State: Read persistence state {Count, LastNudge, LastMtime}\n    alt first prompt (no state)\n        Hook->>State: Initialize state {Count:1, LastNudge:0, LastMtime:now}\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Hook: Increment Count\n    Hook->>Ctx: Get current context mtime\n    alt context modified since LastMtime\n        Hook->>State: Reset LastNudge = Count, update LastMtime\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Hook: sinceNudge = Count - LastNudge\n    Hook->>Hook: PersistenceNudgeNeeded(Count, sinceNudge)?\n    alt threshold not reached\n        Hook->>State: Write state\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Tpl: LoadMessage(hook, nudge, vars)\n    Hook-->>CC: Nudge box (prompt count, time since last persist)\n    Hook->>Hook: NudgeAndRelay(message)\n    Hook->>State: Update LastNudge = Count, write state
    ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#check-reminders","level":3,"title":"Check-Reminders","text":"

    Per-prompt check for due reminders. No throttle.

    sequenceDiagram\n    participant CC as Claude Code\n    participant Hook as check-reminders\n    participant Store as Reminders store\n    participant Tpl as Message Template\n\n    CC->>Hook: stdin {session_id}\n    Hook->>Hook: Check initialized + HookPreamble\n    alt not initialized or paused\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Store: ReadReminders()\n    alt load error\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Hook: Filter by due date (After <= today)\n    alt no due reminders\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Tpl: LoadMessage(hook, reminders, {list})\n    Hook-->>CC: Nudge box (reminder list + dismiss hints)\n    Hook->>Hook: NudgeAndRelay(message)
    ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#check-resources","level":3,"title":"Check-Resources","text":"

    Checks system resources (memory, swap, disk, load). Fires on every prompt. No initialization required.

    sequenceDiagram\n    participant CC as Claude Code\n    participant Hook as check-resources\n    participant Sys as sysinfo\n    participant Tpl as Message Template\n\n    CC->>Hook: stdin {command, session_id}\n    Hook->>Hook: HookPreamble (parse input, check pause)\n    alt paused\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Sys: Collect snapshot (memory, swap, disk, load)\n    Hook->>Sys: Evaluate thresholds per metric\n    alt max severity < Danger\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Hook: Filter alerts to Danger level only\n    Hook->>Hook: Build alertMessages from danger alerts\n    Hook->>Tpl: LoadMessage(hook, alert, {alertMessages}, fallback)\n    Hook-->>CC: Nudge box (danger alerts)\n    Hook->>Hook: NudgeAndRelay(message)
    ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#check-version","level":3,"title":"Check-Version","text":"

    Daily binary-vs-plugin version comparison with piggybacked key rotation check.

    sequenceDiagram\n    participant CC as Claude Code\n    participant Hook as check-version\n    participant State as .context/state/\n    participant Config as Binary + Plugin version\n    participant Tpl as Message Template\n\n    CC->>Hook: stdin {session_id}\n    Hook->>Hook: Check initialized + HookPreamble\n    alt not initialized or paused\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>State: Check daily throttle marker\n    alt throttled\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Config: Read binary version\n    alt dev build\n        Hook->>State: Touch throttle\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Config: Read plugin version\n    alt plugin version not found or parse error\n        Hook->>State: Touch throttle\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Hook: Compare major.minor\n    alt versions match\n        Hook->>State: Touch throttle\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Tpl: LoadMessage(hook, mismatch, {versions})\n    Hook-->>CC: Nudge box (version mismatch)\n    Hook->>Hook: NudgeAndRelay(message)\n    Hook->>State: Touch throttle\n    Hook->>Hook: CheckKeyAge() (piggybacked)
    ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#heartbeat","level":3,"title":"Heartbeat","text":"

    Silent per-prompt pulse. Tracks prompt count, context modification, and token usage. The agent never sees this hook's output.

    sequenceDiagram\n    participant CC as Claude Code\n    participant Hook as heartbeat\n    participant State as .context/state/\n    participant Ctx as .context/ files\n    participant Notify as Webhook + EventLog\n\n    CC->>Hook: stdin {session_id}\n    Hook->>Hook: Check initialized + HookPreamble\n    alt not initialized or paused\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>State: Increment heartbeat counter\n    Hook->>Ctx: Get latest context file mtime\n    Hook->>State: Compare with last recorded mtime\n    Hook->>State: Update mtime record\n    Hook->>State: Read session token info\n    Hook->>Notify: Send heartbeat notification\n    Hook->>Notify: Append to event log\n    Hook->>State: Write heartbeat log entry\n    Note over Hook: No stdout - agent never sees this
    ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#project-local-hooks","level":2,"title":"Project-Local Hooks","text":"

    These hooks are configured in settings.local.json and are not shipped with ctx. They are specific to individual developer setups.

    ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#block-dangerous-commands","level":3,"title":"Block-Dangerous-Commands","text":"

    Lifecycle: PreToolUse. Matcher: Bash

    Blocks dangerous shell patterns (sudo, git push, cp to bin). No initialization or pause checks: always active.

    sequenceDiagram\n    participant CC as Claude Code\n    participant Hook as block-dangerous-commands\n    participant Tpl as Message Template\n\n    CC->>Hook: stdin {command, session_id}\n    Hook->>Hook: Extract command\n    alt command empty\n        Hook-->>CC: (silent exit)\n    end\n    Note over Hook: Cascade: first matching regex wins\n    Hook->>Hook: Test MidSudo regex\n    alt match\n        Hook->>Hook: variant = sudo\n    end\n    Hook->>Hook: Test MidGitPush regex (if no variant)\n    alt match\n        Hook->>Hook: variant = git-push\n    end\n    Hook->>Hook: Test CpMvToBin regex (if no variant)\n    alt match\n        Hook->>Hook: variant = cp-to-bin\n    end\n    Hook->>Hook: Test InstallToLocalBin regex (if no variant)\n    alt match\n        Hook->>Hook: variant = install-to-bin\n    end\n    alt no variant matched\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Tpl: LoadMessage(hook, variant, fallback)\n    Hook-->>CC: JSON {decision: BLOCK, reason}\n    Hook->>Hook: NudgeAndRelay(message)
    ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#check-backup-age","level":3,"title":"Check-Backup-Age","text":"

    Lifecycle: UserPromptSubmit.

    Daily check for SMB mount and backup freshness.

    sequenceDiagram\n    participant CC as Claude Code\n    participant Hook as check-backup-age\n    participant State as .context/state/\n    participant FS as Filesystem\n    participant Tpl as Message Template\n\n    CC->>Hook: stdin {session_id}\n    Hook->>Hook: Check initialized + HookPreamble\n    alt not initialized or paused\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>State: Check daily throttle marker\n    alt throttled\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>FS: Check SMB mount (if env var set)\n    Hook->>FS: Check backup marker file age\n    alt no warnings\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Tpl: LoadMessage(hook, warning, {Warnings})\n    Hook-->>CC: Nudge box (warnings)\n    Hook->>Hook: NudgeAndRelay(message)\n    Hook->>State: Touch throttle marker
    ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#throttling-summary","level":2,"title":"Throttling Summary","text":"Hook Lifecycle Throttle Type Scope context-load-gate PreToolUse One-shot marker Per session block-non-path-ctx PreToolUse None Every match qa-reminder PreToolUse None Every git command specs-nudge PreToolUse None Every prompt post-commit PostToolUse None Every git commit check-task-completion PostToolUse Configurable interval Per session check-context-size UserPromptSubmit Adaptive counter Per session check-ceremonies UserPromptSubmit Daily marker Once per day check-freshness UserPromptSubmit Daily marker Once per day check-journal UserPromptSubmit Daily marker Once per day check-knowledge UserPromptSubmit Daily marker Once per day check-map-staleness UserPromptSubmit Daily marker Once per day check-memory-drift UserPromptSubmit Session tombstone Once per session check-persistence UserPromptSubmit Adaptive counter Per session check-reminders UserPromptSubmit None Every prompt check-resources UserPromptSubmit None Every prompt check-version UserPromptSubmit Daily marker Once per day heartbeat UserPromptSubmit None Every prompt block-dangerous-commands PreToolUse * None Every match check-backup-age UserPromptSubmit * Daily marker Once per day

    * Project-local hook (settings.local.json), not shipped with ctx.

    ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#state-file-reference","level":2,"title":"State File Reference","text":"

    All state files live in .context/state/.

    File Pattern Hook Purpose ctx-loaded-{session} context-load-gate One-shot injection marker ctx-paused-{session} (all) Session pause marker ctx-wrapped-up check-context-size Suppress nudges after wrap-up (2h expiry) freshness-checked check-freshness Daily throttle backup-reminded check-backup-age Daily throttle ceremony-reminded check-ceremonies Daily throttle journal-reminded check-journal Daily throttle knowledge-reminded check-knowledge Daily throttle map-staleness-reminded check-map-staleness Daily throttle version-checked check-version Daily throttle memory-drift-nudged-{session} check-memory-drift Per-session tombstone ctx-context-count-{session} check-context-size Prompt counter stats-{session}.jsonl check-context-size Session stats log persist-{session} check-persistence Counter + mtime state ctx-task-count-{session} check-task-completion Prompt counter heartbeat-count-{session} heartbeat Prompt counter heartbeat-mtime-{session} heartbeat Last context mtime","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hub-cluster/","level":1,"title":"HA Cluster","text":"","path":["Recipes","Hub","HA Cluster"],"tags":[]},{"location":"recipes/hub-cluster/#ctx-hub-high-availability-cluster","level":1,"title":"ctx Hub: High-Availability Cluster","text":"

    Run multiple hub nodes with Raft-based leader election for redundancy. Any follower can take over if the leader dies.

    This recipe assumes you've read the ctx Hub overview and the Multi-machine setup. HA only makes sense in the \"small trusted team\" story; a personal cross-project brain on one workstation does not need three Raft peers.

    Raft-Lite

    ctx uses Raft only for leader election, not for data consensus. Entry replication happens via sequence-based gRPC sync on the append-only JSONL store. This is simpler than full Raft log replication and is possible because the store is append-only and clients are idempotent. The implication: a write accepted by the leader is durable on the leader immediately; followers catch up asynchronously. If the leader crashes between accepting a write and replicating it, that write can be lost. Do not use the hub as a bank ledger.

    ","path":["Recipes","Hub","HA Cluster"],"tags":[]},{"location":"recipes/hub-cluster/#topology","level":2,"title":"Topology","text":"

    A minimum HA cluster is three nodes. Two is worse than one: it doubles failure probability without providing quorum.

             +-------------+\n         |  client(s)  |\n         +------+------+\n                |\n    +-----------+-----------+\n    |           |           |\n+---v---+   +---v---+   +---v---+\n| hub A |   | hub B |   | hub C |\n| :9900 |   | :9900 |   | :9900 |\n+-------+   +-------+   +-------+\n    ^           ^           ^\n    +-----------+-----------+\n        Raft (leader election)\n        gRPC (data sync)\n
    ","path":["Recipes","Hub","HA Cluster"],"tags":[]},{"location":"recipes/hub-cluster/#step-1-bootstrap-the-first-node","level":2,"title":"Step 1: Bootstrap the First Node","text":"
    ctx hub start --daemon \\\n  --port 9900 \\\n  --peers hub-b.lan:9900,hub-c.lan:9900\n

    The node starts a Raft election as soon as it sees its peers.

    ","path":["Recipes","Hub","HA Cluster"],"tags":[]},{"location":"recipes/hub-cluster/#step-2-start-the-other-nodes","level":2,"title":"Step 2: Start the Other Nodes","text":"

    On hub-b.lan:

    ctx hub start --daemon \\\n  --port 9900 \\\n  --peers hub-a.lan:9900,hub-c.lan:9900\n

    On hub-c.lan:

    ctx hub start --daemon \\\n  --port 9900 \\\n  --peers hub-a.lan:9900,hub-b.lan:9900\n

    After a few seconds, one node wins the election and becomes the leader. The other two are followers.

    ","path":["Recipes","Hub","HA Cluster"],"tags":[]},{"location":"recipes/hub-cluster/#step-3-verify-cluster-state","level":2,"title":"Step 3: Verify Cluster State","text":"

    From any node:

    ctx hub status\n

    Expected output:

    role:       leader\npeers:      hub-a.lan:9900 (leader)\n            hub-b.lan:9900 (follower, in-sync)\n            hub-c.lan:9900 (follower, in-sync)\nentries:    1248\nuptime:     3h42m\n
    ","path":["Recipes","Hub","HA Cluster"],"tags":[]},{"location":"recipes/hub-cluster/#step-4-register-clients-with-failover-peers","level":2,"title":"Step 4: Register Clients with Failover Peers","text":"

    When registering a client, give it the full peer list:

    ctx connection register hub-a.lan:9900 \\\n  --token ctx_adm_... \\\n  --peers hub-b.lan:9900,hub-c.lan:9900\n

    If the leader becomes unreachable, the client reconnects to the next peer. Followers redirect to the current leader, so writes always land on the right node.

    ","path":["Recipes","Hub","HA Cluster"],"tags":[]},{"location":"recipes/hub-cluster/#runtime-membership-changes","level":2,"title":"Runtime Membership Changes","text":"

    Add a new peer without downtime:

    ctx hub peer add hub-d.lan:9900\n

    Remove a decommissioned peer:

    ctx hub peer remove hub-c.lan:9900\n
    ","path":["Recipes","Hub","HA Cluster"],"tags":[]},{"location":"recipes/hub-cluster/#planned-maintenance","level":2,"title":"Planned Maintenance","text":"

    Before taking a leader offline, hand off leadership:

    ssh hub-a.lan 'ctx hub stepdown'\n

    stepdown triggers a new election among the remaining followers before the leader goes offline. In-flight clients briefly pause, then reconnect to the new leader.

    ","path":["Recipes","Hub","HA Cluster"],"tags":[]},{"location":"recipes/hub-cluster/#failure-modes-at-a-glance","level":2,"title":"Failure Modes at a Glance","text":"Event What happens Leader crashes New election; clients reconnect to new leader Follower crashes No write impact; catches up on restart Network partition (majority) Majority side keeps serving; minority read-only Network partition (split) No quorum; all nodes read-only Disk full on leader Writes rejected; read traffic continues

    For the full list, see Hub failure modes.

    ","path":["Recipes","Hub","HA Cluster"],"tags":[]},{"location":"recipes/hub-cluster/#see-also","level":2,"title":"See Also","text":"
    • Multi-machine recipe: single-node deployment
    • Hub operations: backup and maintenance
    • Hub security model: TLS, tokens
    ","path":["Recipes","Hub","HA Cluster"],"tags":[]},{"location":"recipes/hub-getting-started/","level":1,"title":"Getting Started","text":"","path":["Recipes","Hub","Getting Started"],"tags":[]},{"location":"recipes/hub-getting-started/#ctx-hub-getting-started","level":1,"title":"ctx Hub: Getting Started","text":"

    Stand up a single-node ctx Hub on localhost, register two projects, publish a decision from one, and see it appear in the other, all in under five minutes.

    Read This First

    If you haven't already, skim the ctx Hub overview. It explains the mental model, names the two user stories (personal vs small team), and (importantly) lists what the hub does not do. This recipe assumes you already know you want the feature.

    ","path":["Recipes","Hub","Getting Started"],"tags":[]},{"location":"recipes/hub-getting-started/#what-youll-get-out-of-this-recipe","level":2,"title":"What You'll Get out of This Recipe","text":"

    By the end, you will have:

    1. A local hub process running on port 9900.
    2. Two project directories both registered with the ctx Hub.
    3. A decision published from project alpha that appears automatically in project beta's .context/hub/ and in ctx agent --include-hub output.

    Concretely, the payoff this unlocks: a lesson you record in one project becomes visible to your agent the next time you open another project, without touching local files in the second project or opening another editor window.

    ","path":["Recipes","Hub","Getting Started"],"tags":[]},{"location":"recipes/hub-getting-started/#what-this-recipe-does-not-cover","level":2,"title":"What This Recipe Does Not Cover","text":"
    • Sharing .context/journal/, .context/pad, or any other local state. The hub only fans out decision, learning, convention, and task entries. Everything else stays local.
    • Multi-user attribution. The hub identifies projects, not people.
    • Running over a LAN; see Multi-machine setup.
    • Redundancy; see HA cluster.
    ","path":["Recipes","Hub","Getting Started"],"tags":[]},{"location":"recipes/hub-getting-started/#prerequisites","level":2,"title":"Prerequisites","text":"
    • ctx installed and on PATH
    • Two project directories, each already initialized with ctx init
    ","path":["Recipes","Hub","Getting Started"],"tags":[]},{"location":"recipes/hub-getting-started/#step-1-start-the-hub","level":2,"title":"Step 1: Start the Hub","text":"

    In a dedicated terminal:

    ctx hub start\n

    On first run, the hub generates an admin token and prints it to stdout. Copy it; you'll need it for each project registration:

    ctx hub listening on :9900\nadmin token: ctx_adm_7f3a1c2d...\ndata dir: ~/.ctx/hub-data/\n

    The admin token is written to ~/.ctx/hub-data/admin.token so you can recover it later. Treat it like a password.

    ","path":["Recipes","Hub","Getting Started"],"tags":[]},{"location":"recipes/hub-getting-started/#step-2-register-the-first-project","level":2,"title":"Step 2: Register the First Project","text":"
    cd ~/projects/alpha\nctx connection register localhost:9900 --token ctx_adm_7f3a1c2d...\n

    This stores an encrypted connection config in .context/.connect.enc. The admin token is exchanged for a per-project client token; the admin token itself is never persisted in the project.

    ","path":["Recipes","Hub","Getting Started"],"tags":[]},{"location":"recipes/hub-getting-started/#step-3-choose-what-to-receive","level":2,"title":"Step 3: Choose What to Receive","text":"
    ctx connection subscribe decision learning convention\n

    Only the entry types you subscribe to will be delivered by sync and listen.

    ","path":["Recipes","Hub","Getting Started"],"tags":[]},{"location":"recipes/hub-getting-started/#step-4-publish-a-decision","level":2,"title":"Step 4: Publish a Decision","text":"

    Either use ctx add --share to write locally and push to the ctx Hub:

    ctx add decision \"Use UTC timestamps everywhere\" --share \\\n  --context \"We had timezone drift between the API and journal\" \\\n  --rationale \"Single source of truth avoids conversion bugs\" \\\n  --consequence \"The UI does conversion at render time\"\n

    Or publish an existing entry directly:

    ctx connection publish decision \"Use UTC timestamps everywhere\"\n
    ","path":["Recipes","Hub","Getting Started"],"tags":[]},{"location":"recipes/hub-getting-started/#step-5-register-a-second-project-and-sync","level":2,"title":"Step 5: Register a Second Project and Sync","text":"
    cd ~/projects/beta\nctx connection register localhost:9900 --token ctx_adm_7f3a1c2d...\nctx connection subscribe decision learning convention\nctx connection sync\n

    The decision from alpha now appears in ~/projects/beta/.context/hub/decisions.md with an origin tag and timestamp.

    ","path":["Recipes","Hub","Getting Started"],"tags":[]},{"location":"recipes/hub-getting-started/#step-6-watch-entries-arrive-live","level":2,"title":"Step 6: Watch Entries Arrive Live","text":"

    Instead of re-running sync, stream new entries as they land:

    ctx connection listen\n

    Leave this running in a terminal; every --share publish from any registered project will appear in .context/hub/ immediately.

    ","path":["Recipes","Hub","Getting Started"],"tags":[]},{"location":"recipes/hub-getting-started/#step-7-feed-shared-knowledge-into-the-agent","level":2,"title":"Step 7: Feed Shared Knowledge into the Agent","text":"

    Once entries exist in .context/hub/, include them in the agent context packet:

    ctx agent --include-hub\n

    Shared entries are added as a dedicated tier in the budget-aware assembly, scored by recency and type relevance.

    ","path":["Recipes","Hub","Getting Started"],"tags":[]},{"location":"recipes/hub-getting-started/#auto-sync-on-session-start","level":2,"title":"Auto-Sync on Session Start","text":"

    After register, the check-hub-sync hook pulls new entries at the start of each session (daily throttled). Most users never need to call ctx connection sync manually.

    ","path":["Recipes","Hub","Getting Started"],"tags":[]},{"location":"recipes/hub-getting-started/#where-to-go-next","level":2,"title":"Where to Go Next","text":"
    • Multi-machine hub: run the hub on a LAN host and connect from other workstations.
    • HA cluster: Raft-based leader election for high availability.
    • Hub operations: daemon mode, backup, log rotation, JSONL store layout.
    • Hub security model: token lifecycle, encryption at rest, threat model.
    • ctx connect reference and ctx hub start reference.
    ","path":["Recipes","Hub","Getting Started"],"tags":[]},{"location":"recipes/hub-multi-machine/","level":1,"title":"Multi-Machine","text":"","path":["Recipes","Hub","Multi-Machine"],"tags":[]},{"location":"recipes/hub-multi-machine/#ctx-hub-multi-machine","level":1,"title":"ctx Hub: Multi-Machine","text":"

    Run the hub on a LAN host and connect from project directories on other workstations. This recipe is the Story 2 (\"small trusted team\") shape described in the ctx Hub overview; read that first if you haven't, especially the trust-model warnings.

    This recipe assumes you've already walked through Getting Started and understand what flows through the hub (decisions, learnings, conventions, tasks, not journals, scratchpad, or raw context files).

    ","path":["Recipes","Hub","Multi-Machine"],"tags":[]},{"location":"recipes/hub-multi-machine/#topology","level":2,"title":"Topology","text":"
    +------------------+        +------------------+\n| workstation A    |        | workstation B    |\n|  ~/projects/x    |        |  ~/projects/y    |\n|  ctx connection  |        |  ctx connection  |\n+---------+--------+        +---------+--------+\n          |                           |\n          +-----------+   +-----------+\n                      v   v\n              +-------------------+\n              | LAN host \"nexus\"  |\n              | ctx hub start     |\n              | --daemon          |\n              | :9900             |\n              +-------------------+\n
    ","path":["Recipes","Hub","Multi-Machine"],"tags":[]},{"location":"recipes/hub-multi-machine/#step-1-start-the-daemon-on-the-lan-host","level":2,"title":"Step 1: Start the Daemon on the LAN Host","text":"

    On the machine that will hold the hub (call it nexus):

    ctx hub start --daemon --port 9900\n

    The daemon writes a PID file to ~/.ctx/hub-data/hub.pid. Stop it later with:

    ctx hub stop\n
    ","path":["Recipes","Hub","Multi-Machine"],"tags":[]},{"location":"recipes/hub-multi-machine/#step-2-firewall-and-port","level":2,"title":"Step 2: Firewall and Port","text":"

    Open port 9900/tcp on nexus to the LAN only. Never expose the hub to the public internet without a reverse proxy and TLS in front of it (see Hub security model).

    Typical LAN allowlist rules:

    firewalldufwnftables
    sudo firewall-cmd --zone=internal \\\n  --add-port=9900/tcp --permanent\nsudo firewall-cmd --reload\n
    sudo ufw allow from 192.168.1.0/24 to any port 9900 proto tcp\n
    sudo nft add rule inet filter input ip saddr 192.168.1.0/24 \\\n  tcp dport 9900 accept\n
    ","path":["Recipes","Hub","Multi-Machine"],"tags":[]},{"location":"recipes/hub-multi-machine/#step-3-retrieve-the-admin-token","level":2,"title":"Step 3: Retrieve the Admin Token","text":"

    The daemon prints the admin token to stdout on first run. When the hub runs as a daemon, that output goes to the log instead:

    cat ~/.ctx/hub-data/admin.token\n

    Copy the token over a trusted channel (SSH, password manager, or an encrypted note). Do not email it or put it in chat.

    ","path":["Recipes","Hub","Multi-Machine"],"tags":[]},{"location":"recipes/hub-multi-machine/#step-4-register-projects-from-each-workstation","level":2,"title":"Step 4: Register Projects from Each Workstation","text":"

    On workstation A:

    cd ~/projects/x\nctx connection register nexus.local:9900 --token ctx_adm_...\nctx connection subscribe decision learning convention\n

    On workstation B:

    cd ~/projects/y\nctx connection register nexus.local:9900 --token ctx_adm_...\nctx connection subscribe decision learning convention\n

    Each registration exchanges the admin token for a per-project client token. Only the client token is persisted in .context/.connect.enc, encrypted with the same AES-256-GCM scheme ctx uses for notification credentials.

    ","path":["Recipes","Hub","Multi-Machine"],"tags":[]},{"location":"recipes/hub-multi-machine/#step-5-verify","level":2,"title":"Step 5: Verify","text":"

    From either workstation:

    ctx connection status\n

    You should see the ctx Hub address, role (leader for single-node), subscription filters, and the sequence number you're synced to.

    ","path":["Recipes","Hub","Multi-Machine"],"tags":[]},{"location":"recipes/hub-multi-machine/#tls-recommended","level":2,"title":"TLS (Recommended)","text":"

    For anything beyond a trusted home LAN, terminate TLS in front of the hub. The hub speaks gRPC, so the reverse proxy must speak HTTP/2:

    server {\n    listen 443 ssl http2;\n    server_name nexus.example.com;\n\n    ssl_certificate     /etc/letsencrypt/live/nexus.example.com/fullchain.pem;\n    ssl_certificate_key /etc/letsencrypt/live/nexus.example.com/privkey.pem;\n\n    location / {\n        grpc_pass grpc://127.0.0.1:9900;\n    }\n}\n

    Point ctx connection register at the public hostname and port 443.

    ","path":["Recipes","Hub","Multi-Machine"],"tags":[]},{"location":"recipes/hub-multi-machine/#handling-daemon-restarts","level":2,"title":"Handling Daemon Restarts","text":"

    The hub is append-only JSONL, so restarts are safe. Clients keep their last-seen sequence in .context/hub/.sync-state.json and pick up exactly where they left off on the next sync or listen reconnect.

    ","path":["Recipes","Hub","Multi-Machine"],"tags":[]},{"location":"recipes/hub-multi-machine/#see-also","level":2,"title":"See Also","text":"
    • HA cluster recipe: for redundancy
    • Hub operations: backup, rotation
    • Hub failure modes
    • Hub security model
    ","path":["Recipes","Hub","Multi-Machine"],"tags":[]},{"location":"recipes/hub-overview/","level":1,"title":"Overview","text":"","path":["Recipes","Hub","Overview"],"tags":[]},{"location":"recipes/hub-overview/#ctx-hub-overview","level":1,"title":"ctx Hub: Overview","text":"

    Start here before the other hub recipes. This page answers what the hub is, who it's for, why you'd run one, and, equally important, what it is not.

    ","path":["Recipes","Hub","Overview"],"tags":[]},{"location":"recipes/hub-overview/#mental-model-in-one-paragraph","level":2,"title":"Mental Model in One Paragraph","text":"

    The hub is a fan-out channel for structured knowledge entries across projects. When you publish a decision, learning, convention, or task with --share, the hub stores it in an append-only log and delivers it to every other project subscribed to that type. The next time your agent loads context in any of those projects, shared entries can be included in the context packet alongside local ones.

    That's the whole feature. It is a project-to-project knowledge bus for a small, curated set of entry types. It is not a shared memory, a shared journal, or a multi-user database.

    ","path":["Recipes","Hub","Overview"],"tags":[]},{"location":"recipes/hub-overview/#what-flows-through-the-hub","level":2,"title":"What Flows through the Hub","text":"

    Only four entry types:

    Type What it is decision Architectural decisions with rationale learning Gotchas, lessons, surprising behaviors convention Coding patterns and standards task Work items worth sharing across projects

    Each entry is an immutable record with a content blob, the publishing project's name as Origin, a timestamp, and a hub-assigned sequence number. Once published, entries are never rewritten.

    ","path":["Recipes","Hub","Overview"],"tags":[]},{"location":"recipes/hub-overview/#what-does-not-flow-through-the-hub","level":2,"title":"What Does Not Flow through the Hub","text":"

    This is the part new users get wrong most often:

    • Session journals (~/.claude/ logs, .context/journal/) stay local. The hub does not sync your AI session history.
    • Scratchpad (.context/pad) stays local. Encrypted notes never leave the machine they were written on.
    • Local context files as a whole (TASKS.md, DECISIONS.md, LEARNINGS.md, CONVENTIONS.md) are not mirrored wholesale. Only entries you explicitly --share, or publish later with ctx connection publish, cross the boundary.
    • Anything under .context/ that isn't one of the four entry types above. Configuration, state, logs, memory, journal metadata: all local.

    If you were expecting \"now my agent in project B can see everything my agent did in project A,\" that's not this feature. Local session density still lives on the local machine.

    ","path":["Recipes","Hub","Overview"],"tags":[]},{"location":"recipes/hub-overview/#two-user-stories","level":2,"title":"Two User Stories","text":"

    The hub makes sense in two different shapes. Pick the one that matches your situation; the mechanics are identical but the trust model and threat surface are very different.

    ","path":["Recipes","Hub","Overview"],"tags":[]},{"location":"recipes/hub-overview/#story-1-personal-cross-project-brain","level":3,"title":"Story 1: Personal Cross-Project Brain","text":"

    One developer, many projects, one hub, usually on localhost.

    You're working across several projects on the same machine (or a handful of machines you own). You want a lesson learned debugging project A to show up when you open project B a week later, without re-discovering it. You want a convention you codified in one project to be visible as-you-type in another.

    Concrete payoff:

    • ctx add learning --share \"...\" in project A → ctx agent --include-hub in project B shows that learning in the next context packet.
    • A decision recorded in your personal \"dotfiles\" project is instantly visible to every other project on your workstation.
    • Cross-project conventions (e.g., \"use UTC timestamps everywhere\") live in one place and propagate.

    Trust model: high, because you trust every participant since every participant is you. Run the hub on localhost or on your own LAN, use the default single-node setup, don't worry about TLS.

    Start here: Getting Started for the one-time setup, then Personal cross-project brain for the day-to-day workflow.

    ","path":["Recipes","Hub","Overview"],"tags":[]},{"location":"recipes/hub-overview/#story-2-small-trusted-team","level":3,"title":"Story 2: Small Trusted Team","text":"

    A few teammates, projects they each own, one hub on a LAN host they all trust.

    Your team has a handful of services and you want a shared \"things we've learned the hard way\" stream. Someone on the platform team records a convention about timestamp handling; everyone else's agents see it the next session. An on-call engineer records a learning from a 3 AM incident; the rest of the team inherits the lesson without needing to read the postmortem.

    Concrete payoff:

    • Team conventions propagate without needing a wiki or chat.
    • Lessons from one team member become available to everyone else's agent context packets automatically.
    • Cross-project decisions (shared libraries, deployment patterns, naming rules) live in a single log the whole team reads.

    Trust model: the hub assumes everyone holding a client token is friendly. There is no per-user attribution you can rely on, Origin is self-asserted by the publishing client, and there is no read ACL beyond the subscription filter. Treat the hub like a team wiki: useful because everyone can write to it, not because it can prove who wrote what.

    Operational shape: run the hub on a LAN host (or a three-node HA cluster for redundancy), put TLS in front of it for anything beyond a home LAN, distribute client tokens over a trusted channel.

    Start here: Multi-machine setup for the deployment, Team knowledge bus for the day-to-day team workflow, then HA cluster if you need redundancy.

    ","path":["Recipes","Hub","Overview"],"tags":[]},{"location":"recipes/hub-overview/#identity-projects-not-users","level":2,"title":"Identity: Projects, Not Users","text":"

    The hub has no concept of users. Its unit of identity is the project. ctx connection register binds a hub token to a project directory, not to a person. Two developers working on the same project share either:

    • The same .connect.enc, copied between machines over a trusted channel, or
    • Different project names (alpha@laptop-a, alpha@laptop-b), because the hub rejects duplicate registrations of the same project name.

    Either works; neither gives you per-human attribution. If you need \"who wrote this,\" the hub is the wrong tool.

    ","path":["Recipes","Hub","Overview"],"tags":[]},{"location":"recipes/hub-overview/#when-not-to-use-it","level":2,"title":"When Not to Use It","text":"
    • Solo, single-project work. Local .context/ files are enough. The hub adds operational surface for no payoff.
    • Untrusted participants. The hub assumes everyone with a client token is friendly. It is not hardened against hostile insiders or compromised tokens.
    • Compliance-sensitive environments. There is no audit trail that can prove who published what, only which project published what, and Origin is self-asserted.
    • Secrets or PII. Entry content is stored plaintext on the hub and fanned out to every subscribed client. Don't publish anything you wouldn't paste in a team chat.
    • Wholesale journal sharing. See \"what does not flow\" above. If that's what you want, this feature won't provide it. Talk to us in the issue tracker about what would.
    ","path":["Recipes","Hub","Overview"],"tags":[]},{"location":"recipes/hub-overview/#how-entries-reach-your-agent","level":2,"title":"How Entries Reach Your Agent","text":"

    Once a project is registered and subscribed, entries arrive by three mechanisms:

    1. ctx connection sync: an on-demand pull, replays everything new since the last sequence you saw.
    2. ctx connection listen: a long-lived gRPC stream that writes new entries to .context/hub/ as they arrive.
    3. check-hub-sync hook: runs at session start, daily throttled, so most users never call sync manually.

    Once entries exist in .context/hub/, ctx agent --include-hub adds a dedicated tier to the budget-aware context packet, scored by recency and type relevance. That's the end of the pipeline.

    ","path":["Recipes","Hub","Overview"],"tags":[]},{"location":"recipes/hub-overview/#where-to-go-next","level":2,"title":"Where to Go Next","text":"If you're… Read Trying it for yourself on one machine Getting Started A solo developer using the hub day-to-day Personal cross-project brain Setting up for a small team on a LAN Multi-machine setup A small team using the hub day-to-day Team knowledge bus Running redundant nodes HA cluster Operating a hub in production Operations Assessing the security posture Security model Debugging a hub in trouble Failure modes Just reading the commands ctx connect, ctx serve, ctx hub","path":["Recipes","Hub","Overview"],"tags":[]},{"location":"recipes/hub-personal/","level":1,"title":"Personal Cross-Project Brain","text":"","path":["Recipes","Hub","Personal Cross-Project Brain"],"tags":[]},{"location":"recipes/hub-personal/#personal-cross-project-brain","level":1,"title":"Personal Cross-Project Brain","text":"

    This recipe shows how one developer uses a ctx Hub across their own projects day-to-day, the \"Story 1\" shape from the Hub overview. You're not setting up infrastructure for a team; you're making a lesson you learned last Tuesday in project A automatically surface when you open project B next Thursday.

    Prerequisites: a working ctx Hub on localhost (see Getting Started for the roughly five-minute setup). This recipe assumes the hub is already running and you've registered at least two projects.

    ","path":["Recipes","Hub","Personal Cross-Project Brain"],"tags":[]},{"location":"recipes/hub-personal/#the-core-loop","level":2,"title":"The Core Loop","text":"

    Every day, the same three verbs matter:

    1. Record: notice a decision, learning, or convention and capture it with ctx add --share.
    2. Subscribe: every project you care about is subscribed to the types you want delivered (set once with ctx connection subscribe).
    3. Load: your agent picks up shared entries on next session start via the auto-sync hook, or explicitly via ctx agent --include-hub.

    That's the whole workflow. The rest of this recipe fills in the concrete moments where each verb matters.

    ","path":["Recipes","Hub","Personal Cross-Project Brain"],"tags":[]},{"location":"recipes/hub-personal/#a-realistic-day","level":2,"title":"A Realistic Day","text":"

    You have three projects on your workstation:

    • ~/projects/api, a Go service you're actively developing
    • ~/projects/cli, a companion CLI that consumes the API
    • ~/projects/dotfiles, your personal conventions and cross-project learnings

    All three are registered with a single hub running on localhost:9900 (started once at boot, or via a systemd user unit; see Hub operations). All three subscribe to decision, learning, and convention.

    ","path":["Recipes","Hub","Personal Cross-Project Brain"],"tags":[]},{"location":"recipes/hub-personal/#0900-start-work-on-api","level":3,"title":"09:00 - Start Work on api","text":"

    You cd ~/projects/api and start a Claude Code session. Behind the scenes, the plugin's PreToolUse hook calls ctx agent --budget 8000 --include-hub before the first tool call. Agent loads:

    • Local .context/ (TASKS, DECISIONS, LEARNINGS, etc.)
    • Foundation steering files (always-inclusion)
    • Everything you've shared from the other two projects

    So the \"use UTC timestamps everywhere\" decision you recorded in dotfiles last week is already in Claude's context for this session, without any manual sync.

    ","path":["Recipes","Hub","Personal Cross-Project Brain"],"tags":[]},{"location":"recipes/hub-personal/#1030-you-discover-a-gotcha","level":3,"title":"10:30 - You Discover a Gotcha","text":"

    While debugging, you find that the API's retry loop silently drops the last error when the transport times out. This is the kind of thing you'd normally add to LEARNINGS.md in api/. But it's useful across every Go service you'll ever write, not just this one. So:

    ctx add learning --share \\\n  --context \"Go http.Client retries mask the final error\" \\\n  --lesson  \"Transport timeouts don't surface as errors when the retry loop re-assigns err without wrapping. Check for context.DeadlineExceeded on the request context instead.\" \\\n  --application \"Any retry loop over http.Client.Do that uses a per-attempt timeout\"\n

    The --share flag does two things:

    1. Writes the learning to api/.context/LEARNINGS.md locally (as a normal ctx add learning would).
    2. Publishes the same entry to the ctx Hub, which stores it in the append-only JSONL and fans it out to every subscribed client.

    Within seconds, cli/.context/hub/learnings.md and dotfiles/.context/hub/learnings.md both contain a copy of this learning (the ctx connection listen daemon picks it up from the ctx Hub's Listen stream).

    ","path":["Recipes","Hub","Personal Cross-Project Brain"],"tags":[]},{"location":"recipes/hub-personal/#1200-you-switch-to-cli","level":3,"title":"12:00 - You Switch to cli","text":"

    cd ~/projects/cli, open a new session. The agent packet for cli now includes the learning you just recorded in api, because cli is subscribed to learning and the entry has already been synced into cli/.context/hub/learnings.md.

    You don't have to re-explain the retry-loop gotcha. Claude already sees it.

    ","path":["Recipes","Hub","Personal Cross-Project Brain"],"tags":[]},{"location":"recipes/hub-personal/#1400-you-codify-a-convention","level":3,"title":"14:00 - You Codify a Convention","text":"

    You've been writing error messages in api and decided you want a consistent pattern: lowercase start, no trailing period, single-sentence. This is a convention, not a decision; it applies to every Go project you touch. Record it in dotfiles (since that's your \"personal standards\" project), and share it:

    cd ~/projects/dotfiles\nctx add convention --share \\\n  \"Error messages: lowercase start, no trailing period, single sentence (follows Go's stdlib style)\"\n

    The convention lands in dotfiles/CONVENTIONS.md locally and fans out to api and cli via the hub. The next Claude Code session in either project gets the convention injected into the steering-adjacent slot of the agent packet.

    ","path":["Recipes","Hub","Personal Cross-Project Brain"],"tags":[]},{"location":"recipes/hub-personal/#1630-end-of-day","level":3,"title":"16:30 - End of Day","text":"

    You didn't run ctx connection sync once. You didn't git push anything between projects. You didn't remember to tell your agent about the retry-loop gotcha in the new project. The hub did all of it for you.

    ","path":["Recipes","Hub","Personal Cross-Project Brain"],"tags":[]},{"location":"recipes/hub-personal/#what-the-workflow-actually-looks-like","level":2,"title":"What the Workflow Actually Looks Like","text":"

    Stripped of prose, the day's commands were:

    # Morning: nothing. Agent loads --include-hub automatically.\n\n# Mid-morning: record a learning that should cross projects\nctx add learning --share \\\n  --context \"...\" --lesson \"...\" --application \"...\"\n\n# Afternoon: codify a convention in the \"standards\" project\nctx add convention --share \"...\"\n\n# Evening: nothing. Everything's already propagated.\n

    The hub is passive infrastructure. You never talk to it directly; you talk through it by using --share on commands you were already running.

    ","path":["Recipes","Hub","Personal Cross-Project Brain"],"tags":[]},{"location":"recipes/hub-personal/#tips-for-solo-use","level":2,"title":"Tips for Solo Use","text":"

    Pick a \"standards\" project. One of your projects should play the role of \"canonical source for rules you want everywhere.\" Your dotfiles, a personal scratch repo, or a dedicated ctx-standards project all work. Record cross-cutting conventions there and let the hub propagate them to everything else.

    Subscribe to task only if you want cross-project todos. The four subscribable types are decision, learning, convention, task. Tasks are usually project-local; subscribing makes every hub-shared task from every project show up in every other project's agent packet. That's probably not what you want. Skip task in ctx connection subscribe unless you have a specific reason.

    Run the hub as a user-level daemon so you don't have to remember to start it. On Linux with systemd:

    # ~/.config/systemd/user/ctx-hub.service\n[Unit]\nDescription=ctx Hub (personal)\n\n[Service]\nType=simple\nExecStart=/usr/local/bin/ctx hub start\nRestart=on-failure\n\n[Install]\nWantedBy=default.target\n
    systemctl --user enable --now ctx-hub.service\n

    Don't overthink subscription filters. For personal use, subscribe every project to all four types at first (or three, if you skip task). Tune later if the context packets get noisy.

    Local storage is fine; no TLS needed. The hub runs on localhost. No one else is on the network. Skip the TLS setup from the Multi-machine recipe; it's relevant when the hub is on a LAN host serving multiple workstations, not when it's a personal daemon.

    ","path":["Recipes","Hub","Personal Cross-Project Brain"],"tags":[]},{"location":"recipes/hub-personal/#what-this-recipe-is-not","level":2,"title":"What This Recipe Is Not","text":"

    Not a setup guide. For the one-time hub install and project registration, use Getting Started.

    Not a team guide. If you're sharing across humans, not just across your own projects, read Team knowledge bus instead; the trust model and operational concerns are different.

    Not production operations. For backup, log rotation, failure recovery, and HA, see Hub operations and Hub failure modes.

    ","path":["Recipes","Hub","Personal Cross-Project Brain"],"tags":[]},{"location":"recipes/hub-personal/#see-also","level":2,"title":"See Also","text":"
    • Hub overview: when to use the Hub and when not to.
    • Team knowledge bus: the multi-human companion recipe.
    • ctx connect: the client-side commands used above (subscribe, publish, sync, listen, status).
    • ctx add: the --share flag reference.
    • ctx hub: operator commands for starting, stopping, and inspecting the hub.
    ","path":["Recipes","Hub","Personal Cross-Project Brain"],"tags":[]},{"location":"recipes/hub-team/","level":1,"title":"Team Knowledge Bus","text":"","path":["Recipes","Hub","Team Knowledge Bus"],"tags":[]},{"location":"recipes/hub-team/#team-knowledge-bus","level":1,"title":"Team Knowledge Bus","text":"

    This recipe shows how a small trusted team uses a ctx Hub as a shared knowledge bus, the \"Story 2\" shape from the Hub overview. You're not building a wiki, you're not replacing your issue tracker, and you're not running a multi-tenant service. You're connecting 3-10 developers who trust each other so that lessons, decisions, and conventions flow between them without ceremony.

    Prerequisites:

    • A running ctx Hub on a LAN host or internal server everyone on the team can reach. See Multi-machine setup for the deployment guide.
    • Each team member has ctx installed and has ctx connection register-ed their working projects with the hub.
    ","path":["Recipes","Hub","Team Knowledge Bus"],"tags":[]},{"location":"recipes/hub-team/#trust-model-read-this-first","level":2,"title":"Trust Model: Read This First","text":"

    The hub assumes everyone holding a client token is friendly. There's no per-user attribution you can rely on, no read ACL beyond subscription filters, and Origin is self-asserted by the publishing client. Treat the hub like a team wiki: useful because everyone can write to it, not because it can prove who wrote what.

    If your team is:

    • ✅ 3-10 engineers, all known to each other, all trusted with production access
    • ✅ On a single internal network or behind a VPN
    • ✅ Comfortable with \"the hub assumes friendly participants\"

    …this recipe fits. If your team is:

    • ❌ Larger than ~15, with turnover
    • ❌ Includes contractors, untrusted agents, or compromised-workstation concerns
    • ❌ Needs audit trails that prove who published what
    • ❌ Requires per-team-member isolation

    …you're in \"Story 3\" territory, which the hub does not support today. Use a wiki or a dedicated knowledge platform instead.

    ","path":["Recipes","Hub","Team Knowledge Bus"],"tags":[]},{"location":"recipes/hub-team/#the-teams-three-verbs","level":2,"title":"The Team's Three Verbs","text":"

    Everyone on the team does three things, same as in the personal recipe, but with different social expectations:

    1. Record: when you learn something that would save a teammate time, capture it with ctx add --share.
    2. Subscribe: every engineer's project directories subscribe to the types the team cares about.
    3. Load: agents pick up shared entries automatically via the auto-sync hook and the --include-hub flag in the PreToolUse hook pipeline.

    The operational shape is identical to solo use. What's different is the culture around publishing: when do you --share, and what belongs on the hub vs. in your local .context/.

    ","path":["Recipes","Hub","Team Knowledge Bus"],"tags":[]},{"location":"recipes/hub-team/#what-goes-on-the-hub-team-rules-of-thumb","level":2,"title":"What Goes on the Hub (Team Rules of Thumb)","text":"

    Share it if it's true for more than one person. The central question: \"would the next teammate who hits this problem save time if they already knew this?\" If yes, --share. If no, record it locally and move on.

    Decisions:

    • ✅ Cross-service decisions (database choice, auth model, deployment pattern, monitoring stack).
    • ✅ Policy decisions that apply to all services (naming, API versioning, error-message format).
    • ❌ Internal implementation decisions inside a single service (\"chose a map over a slice here because lookups dominate\").
    • ❌ One-off tactical calls for a specific PR.

    Learnings:

    • ✅ Gotchas, surprising behavior, flaky infrastructure quirks, anything you'd tell a teammate over coffee with \"watch out for X\".
    • ✅ Lessons from incidents, right after the postmortem is the highest-value time to share.
    • ❌ Internal debugging notes that only make sense with context from your current branch.

    Conventions:

    • ✅ Repo layout, commit message format, pre-commit hooks, review expectations.
    • ✅ Language-level style decisions that apply across services.
    • ❌ Per-service idioms (\"in billing/ we prefer…\").

    Tasks: almost always project-local. Don't subscribe to task unless the team has a specific reason (e.g., a cross-cutting migration you want visible everywhere).

    ","path":["Recipes","Hub","Team Knowledge Bus"],"tags":[]},{"location":"recipes/hub-team/#a-realistic-week","level":2,"title":"A Realistic Week","text":"

    Monday, 3 AM incident, shared learning

    On-call engineer Alice gets paged: the payment service starts returning 500s after a dependency update. After an hour she finds the culprit: a breaking change in a transitive gRPC dep that only manifests under high concurrency. Postmortem on Tuesday, but right now she records the learning:

    ctx add learning --share \\\n  --context \"Payment service 3 AM incident, 2026-04-03\" \\\n  --lesson  \"grpc-go v1.62+ changes DialContext behavior under high \\\n  concurrency: connections from a single channel can deadlock if the \\\n  server emits GOAWAY mid-stream. Symptom: 500 errors cluster in \\\n  30s bursts, no error in grpc client logs.\" \\\n  --application \"Any service on grpc-go. Pin to v1.61 or patch with \\\n  keepalive: https://github.com/grpc/grpc-go/issues/...\" \n

    By Tuesday morning, every other engineer's agent context packet contains this learning. When Bob starts work on the ledger service (which also uses grpc-go), his Claude Code session already knows about the gotcha without Bob having to read the incident channel.

    Wednesday, cross-service decision

    The team agrees on a new pattern for API versioning: header-based instead of URL-based. Platform lead Carol records the decision:

    ctx add decision --share \\\n  --context \"Need consistent API versioning across all 6 services. \\\n  Current URL-based /v1/ isn't working for gradual rollouts.\" \\\n  --rationale \"Header-based versioning lets us route by header at the \\\n  edge, which makes canary rollouts trivial. URL-based versioning \\\n  forces clients to update their paths.\" \\\n  --consequence \"All new endpoints use X-API-Version header. \\\n  Existing /v1/ endpoints stay. Deprecation schedule in q3.\" \\\n  \"Use header-based API versioning for new endpoints\"\n

    Every engineer's next session knows about this decision automatically. When Dave starts adding endpoints to the inventory service on Thursday, Claude already prompts him for the header pattern instead of defaulting to /v1/.

    Friday, convention drift caught at review

    Dave notices that his PR auto-formatted some error messages to end with periods. He recalls the team convention is \"no trailing period\" but can't remember where it was documented. He runs ctx connection status, sees the hub is healthy, greps his local .context/hub/conventions.md, and finds:

    ## [2026-03-12] Error message format\nLowercase start, no trailing period, single sentence.\n

    He fixes the PR. No lookup on the wiki, no question in chat, no context-switch penalty.

    ","path":["Recipes","Hub","Team Knowledge Bus"],"tags":[]},{"location":"recipes/hub-team/#workflow-tips-for-teams","level":2,"title":"Workflow Tips for Teams","text":"

    Designate a \"champion\" for decisions. The team lead or platform engineer should be the person who explicitly --shares cross-cutting decisions. Other team members share learnings freely but should ask \"should this be a decision?\" in review before --sharing a decision. This keeps the decision stream signal-rich.

    Publish postmortem learnings immediately, not after the meeting. The postmortem itself is a document; the actionable rules that come out of it belong on the hub, and they should land within an hour of the incident. \"Share fast, edit later\" is the rule.

    Delete noisy entries, don't tolerate them. The hub is append-only, but the .context/hub/ mirror on each client is just markdown. If a shared learning turns out to be wrong or obsolete, remove it from local mirrors and stop the hub daemon to truncate entries.jsonl (see Hub operations). Noisy shared feeds lose trust fast.

    Don't subscribe every project to every type. For backend engineers, subscribing to decision + learning + convention is usually right. For platform or DevOps projects, adding task makes sense. For a prototype or experiment project, subscribing only to convention might be enough.

    Run a single hub, not one per team. If two teams need to share knowledge, they should share a hub. Splitting hubs by team creates silos, which is often exactly the thing you were trying to solve.

    ","path":["Recipes","Hub","Team Knowledge Bus"],"tags":[]},{"location":"recipes/hub-team/#operational-concerns","level":2,"title":"Operational Concerns","text":"

    The team recipe assumes someone owns the hub host. That person (or a small group) is responsible for:

    • Uptime: the hub is infrastructure; treat it like any other internal service you run. See Hub operations.
    • Backups: entries.jsonl is the source of truth. Snapshot it to the same backup tier as your other internal data.
    • Upgrades: cadence the team agrees on. Major upgrades may require everyone to re-register, so do them at natural breaks.
    • Failures: see Hub failure modes for the standard oncall playbook.

    Optional but recommended: run a 3-node Raft cluster so the hub survives individual node failures. See HA cluster. For teams under 10 people, a single-node hub with daily backups is usually fine.

    ","path":["Recipes","Hub","Team Knowledge Bus"],"tags":[]},{"location":"recipes/hub-team/#token-management","level":2,"title":"Token Management","text":"

    Every team member has a client token stored in their .context/.connect.enc. Rules of thumb:

    • One token per engineer per project. Not one token per team; not one shared token. Each engineer registers each of their working projects separately.
    • Token compromise = revoke immediately. When an engineer leaves, their tokens should be removed from clients.json on the hub. This is a manual operation today; see Hub security for the revocation steps.
    • No checked-in tokens. .context/.connect.enc is encrypted with the local machine key, but don't push it to shared repos; it's per-workstation.
    ","path":["Recipes","Hub","Team Knowledge Bus"],"tags":[]},{"location":"recipes/hub-team/#what-this-recipe-is-not","level":2,"title":"What This Recipe Is Not","text":"

    Not a wiki replacement. The hub is for structured entries, not prose. Put your architecture overviews, onboarding docs, and design discussions in a real wiki.

    Not an audit log. Origin on the hub is self-asserted. If compliance requires provenance, the hub is the wrong tool.

    Not a ticket system. Task sharing works, but mature teams already have Jira/Linear/GitHub Issues. Don't try to replace those with hub tasks; use the hub for lightweight cross-project todos that your existing tracker doesn't capture well.

    Not a production service for end users. This is internal team infrastructure. Do not expose the hub to customers, partners, or the open internet.

    ","path":["Recipes","Hub","Team Knowledge Bus"],"tags":[]},{"location":"recipes/hub-team/#see-also","level":2,"title":"See Also","text":"
    • Hub overview: when to use the hub and when not to.
    • Personal cross-project brain: the single-developer companion recipe.
    • Multi-machine setup: standing up the hub on a LAN host.
    • HA cluster: optional redundancy for larger teams.
    • Hub operations: backup, rotation, monitoring.
    • Hub security: threat model and hardening checklist.
    ","path":["Recipes","Hub","Team Knowledge Bus"],"tags":[]},{"location":"recipes/import-plans/","level":1,"title":"Importing Claude Code Plans","text":"","path":["Recipes","Knowledge and Tasks","Importing Claude Code Plans"],"tags":[]},{"location":"recipes/import-plans/#the-problem","level":2,"title":"The Problem","text":"

    Claude Code plan files (~/.claude/plans/*.md) are ephemeral: they have structured context, approach, and file lists, but they're orphaned after the session ends. The filenames are UUIDs, so you can't tell what's in them without opening each one.

    How do you turn a useful plan into a permanent project spec?

    ","path":["Recipes","Knowledge and Tasks","Importing Claude Code Plans"],"tags":[]},{"location":"recipes/import-plans/#tldr","level":2,"title":"TL;DR","text":"
    You: /ctx-plan-import\nAgent: [lists plans with dates and titles]\n       1. 2026-02-28  Add authentication middleware\n       2. 2026-02-27  Refactor database connection pool\nYou: \"import 1\"\nAgent: [copies to specs/add-authentication-middleware.md]\n

    Plans are copied (not moved) to specs/, slugified by their H1 heading.

    ","path":["Recipes","Knowledge and Tasks","Importing Claude Code Plans"],"tags":[]},{"location":"recipes/import-plans/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Tool Type Purpose /ctx-plan-import Skill List, filter, and import plan files to specs /ctx-task-add Skill Optionally add a task referencing the spec","path":["Recipes","Knowledge and Tasks","Importing Claude Code Plans"],"tags":[]},{"location":"recipes/import-plans/#the-workflow","level":2,"title":"The Workflow","text":"","path":["Recipes","Knowledge and Tasks","Importing Claude Code Plans"],"tags":[]},{"location":"recipes/import-plans/#step-1-list-available-plans","level":3,"title":"Step 1: List Available Plans","text":"

    Invoke the skill and it lists plans with modification dates and titles:

    You: /ctx-plan-import\n\nAgent: Found 3 plan files:\n         1. 2026-02-28  Add authentication middleware\n         2. 2026-02-27  Refactor database connection pool\n         3. 2026-02-25  Import plans skill\n       Which plans would you like to import?\n
    ","path":["Recipes","Knowledge and Tasks","Importing Claude Code Plans"],"tags":[]},{"location":"recipes/import-plans/#step-2-filter-optional","level":3,"title":"Step 2: Filter (Optional)","text":"

    You can narrow the list with arguments:

    Argument Effect --today Only plans modified today --since YYYY-MM-DD Only plans modified on or after the date --all Import everything without prompting (none) Interactive selection
    You: /ctx-plan-import --today\nYou: /ctx-plan-import --since 2026-02-27\nYou: /ctx-plan-import --all\n
    ","path":["Recipes","Knowledge and Tasks","Importing Claude Code Plans"],"tags":[]},{"location":"recipes/import-plans/#step-3-select-and-import","level":3,"title":"Step 3: Select and Import","text":"

    Pick one or more plans by number:

    You: \"import 1 and 3\"\n\nAgent: Imported 2 plan(s):\n         ~/.claude/plans/abc123.md -> specs/add-authentication-middleware.md\n         ~/.claude/plans/ghi789.md -> specs/import-plans-skill.md\n       Want me to add tasks referencing these specs?\n

    The agent reads the H1 heading from each plan and slugifies it for the filename. If a plan has no H1 heading, the original filename (minus extension) is used as the slug.

    ","path":["Recipes","Knowledge and Tasks","Importing Claude Code Plans"],"tags":[]},{"location":"recipes/import-plans/#step-4-add-follow-up-tasks-optional","level":3,"title":"Step 4: Add Follow-Up Tasks (Optional)","text":"

    If you say yes, the agent creates tasks in TASKS.md that reference the imported specs:

    You: \"yes, add tasks\"\n\nAgent: [runs /ctx-task-add for each spec]\n       Added:\n         - [ ] Implement authentication middleware (spec: specs/add-authentication-middleware.md)\n         - [ ] Import plans skill (spec: specs/import-plans-skill.md)\n
    ","path":["Recipes","Knowledge and Tasks","Importing Claude Code Plans"],"tags":[]},{"location":"recipes/import-plans/#conversational-approach","level":2,"title":"Conversational Approach","text":"

    You don't need to remember the exact skill name:

    You say What happens \"import my plans\" /ctx-plan-import (interactive) \"save today's plans as specs\" /ctx-plan-import --today \"import all plans from this week\" /ctx-plan-import --since ... \"turn that plan into a spec\" /ctx-plan-import (filtered)","path":["Recipes","Knowledge and Tasks","Importing Claude Code Plans"],"tags":[]},{"location":"recipes/import-plans/#tips","level":2,"title":"Tips","text":"
    • Plans are copied, not moved: The originals stay in ~/.claude/plans/. Claude Code manages that directory; ctx doesn't delete from it.
    • Conflict handling: If specs/{slug}.md already exists, the agent asks whether to overwrite or pick a different name.
    • Specs are project memory: Once imported, specs are tracked in git and available to future sessions. Reference them from TASKS.md phase headers with Spec: specs/slug.md.
    • Pair with /ctx-implement: After importing a plan as a spec, use /ctx-implement to execute it step-by-step with verification.
    ","path":["Recipes","Knowledge and Tasks","Importing Claude Code Plans"],"tags":[]},{"location":"recipes/import-plans/#see-also","level":2,"title":"See Also","text":"
    • Skills Reference: /ctx-plan-import: full skill description
    • The Complete Session: where plan import fits in the session flow
    • Tracking Work Across Sessions: managing tasks that reference imported specs
    ","path":["Recipes","Knowledge and Tasks","Importing Claude Code Plans"],"tags":[]},{"location":"recipes/knowledge-capture/","level":1,"title":"Persisting Decisions, Learnings, and Conventions","text":"","path":["Recipes","Knowledge and Tasks","Persisting Decisions, Learnings, and Conventions"],"tags":[]},{"location":"recipes/knowledge-capture/#the-problem","level":2,"title":"The Problem","text":"

    You debug a subtle issue, discover the root cause, and move on.

    Three weeks later, a different session hits the same issue. The knowledge existed briefly in one session's memory but was never written down.

    Architectural decisions suffer the same fate: you weigh trade-offs, pick an approach, and six sessions later the AI suggests the alternative you already rejected.

    How do you make sure important context survives across sessions?

    Prefer Skills to Raw Commands

    Use /ctx-decision-add and /ctx-learning-add instead of raw ctx add commands. The agent automatically picks up session ID, branch, and commit hash from its context, so no manual flags are needed.

    ","path":["Recipes","Knowledge and Tasks","Persisting Decisions, Learnings, and Conventions"],"tags":[]},{"location":"recipes/knowledge-capture/#tldr","level":2,"title":"TL;DR","text":"
    /ctx-reflect               # surface items worth persisting\n/ctx-decision-add \"Title\"  # record with context/rationale/consequence\n/ctx-learning-add \"Title\"  # record with context/lesson/application\n

    Or just tell your agent: \"What have we learned this session?\"

    ","path":["Recipes","Knowledge and Tasks","Persisting Decisions, Learnings, and Conventions"],"tags":[]},{"location":"recipes/knowledge-capture/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Tool Type Purpose ctx add decision Command Record an architectural decision ctx add learning Command Record a gotcha, tip, or lesson ctx add convention Command Record a coding pattern or standard ctx reindex Command Rebuild both quick-reference indices ctx decision reindex Command Rebuild the DECISIONS.md index ctx learning reindex Command Rebuild the LEARNINGS.md index /ctx-decision-add Skill AI-guided decision capture with validation /ctx-learning-add Skill AI-guided learning capture with validation /ctx-convention-add Skill AI-guided convention recording with placement /ctx-reflect Skill Surface items worth persisting at breakpoints","path":["Recipes","Knowledge and Tasks","Persisting Decisions, Learnings, and Conventions"],"tags":[]},{"location":"recipes/knowledge-capture/#the-workflow","level":2,"title":"The Workflow","text":"

    Decision, Learning, or Convention?

    • If you chose between alternatives, it is a decision.
    • If you discovered something surprising, it is a learning.
    • If you are codifying a repeated pattern, it is a convention.
    ","path":["Recipes","Knowledge and Tasks","Persisting Decisions, Learnings, and Conventions"],"tags":[]},{"location":"recipes/knowledge-capture/#step-1-understand-what-to-persist","level":3,"title":"Step 1: Understand What to Persist","text":"

    Three context files serve different purposes:

    Decisions (DECISIONS.md) answer \"why is it this way?\" They record trade-offs between alternatives with structured fields: context, rationale, and consequences. Decisions prevent re-debating settled questions.

    Learnings (LEARNINGS.md) answer \"what did we discover the hard way?\" They record gotchas and debugging insights specific to this project with structured fields: context, lesson, and application.

    Learnings prevent repeating past mistakes.

    Conventions (CONVENTIONS.md) answer \"how do we do things here?\" They record patterns and standards. No structured fields required: just a name, a rule, and an example. Conventions keep code consistent across sessions.

    ","path":["Recipes","Knowledge and Tasks","Persisting Decisions, Learnings, and Conventions"],"tags":[]},{"location":"recipes/knowledge-capture/#step-2-record-decisions","level":3,"title":"Step 2: Record Decisions","text":"

    Decisions have three structured fields:

    • context,
    • rationale,
    • and consequences.

    The easiest way to record one is to tell your agent:

    You: \"Save that as a decision: we picked file-based cooldown\n      tokens over env vars.\"\n\nAgent: \"Recorded in DECISIONS.md:\n  - Decision: Use file-based cooldown tokens instead of env vars\n  - Context: Hook subprocesses cannot persist env vars to parent shell\n  - Rationale: File tokens survive across processes. Simpler than\n    IPC. Cleanup is automatic via TTL.\n  - Consequence: Tombstone files accumulate in /tmp. Cannot share\n    state across machines.\"\n

    The agent was present for the trade-off discussion. It fills in the structured fields from what it observed, which is hard to reconstruct later as a terminal one-liner.

    When multiple options were considered, the agent includes rejected alternatives in the rationale automatically.

    The /ctx-decision-add skill also guides you through the fields interactively. For quick decisions, it supports a Y-statement:

    \"In the context of [situation], facing [constraint], we decided for [choice] and against [alternatives], to achieve [benefit], accepting that [trade-off].\"

    CLI Command for Scripting and Automation

    When no agent is in the loop (CI pipelines, shell scripts, hooks), use the CLI directly with structured flags:

    ctx add decision \"Use file-based cooldown tokens instead of env vars\" \\\n  --context \"Hook subprocesses cannot persist env vars to parent shell\" \\\n  --rationale \"File tokens survive across processes. Simpler than IPC. Cleanup is automatic via TTL.\" \\\n  --consequence \"Tombstone files accumulate in /tmp. Cannot share state across machines.\" \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n
    ","path":["Recipes","Knowledge and Tasks","Persisting Decisions, Learnings, and Conventions"],"tags":[]},{"location":"recipes/knowledge-capture/#step-3-record-learnings","level":3,"title":"Step 3: Record Learnings","text":"

    Learnings have three structured fields: context, lesson, and application.

    Again, your agent is the best tool for this. It was there when you discovered the gotcha and can express it more completely than a backslash-escaped terminal command:

    You: \"That subprocess thing was painful. Save it as a learning.\"\n\nAgent: \"Added to LEARNINGS.md:\n  - Learning: Claude Code hooks run in a subprocess\n  - Context: Set env var in PreToolUse hook, but it was not visible\n    in the main session\n  - Lesson: Hook scripts execute in a child process. Env changes do\n    not propagate to parent.\n  - Application: Use tombstone files for hook-to-session\n    communication. Never rely on hook env vars.\"\n

    You said one sentence. The agent produced a structured entry with all three fields filled from the session context.

    That is the point: the agent has the full conversation and can be far more expressive than what you would type at a command prompt.

    The /ctx-learning-add skill applies three quality filters:

    1. Could someone Google this in 5 minutes?
    2. Is it specific to this codebase?
    3. Did it take real effort to discover?

    All three must pass.

    Learnings capture principles and heuristics, not code snippets.

    CLI Command for Scripting and Automation

    When no agent is in the loop:

    ctx add learning \"Claude Code hooks run in a subprocess\" \\\n  --context \"Set env var in PreToolUse hook, but it was not visible in the main session\" \\\n  --lesson \"Hook scripts execute in a child process. Env changes do not propagate to parent.\" \\\n  --application \"Use tombstone files for hook-to-session communication. Never rely on hook env vars.\" \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n
    ","path":["Recipes","Knowledge and Tasks","Persisting Decisions, Learnings, and Conventions"],"tags":[]},{"location":"recipes/knowledge-capture/#step-4-record-conventions","level":3,"title":"Step 4: Record Conventions","text":"

    Conventions are simpler: just a name, a rule, and optionally a section.

    These are short enough that either approach works:

    You: \"We've been using kebab-case for every CLI flag. Codify that.\"\n\nAgent: \"Added to CONVENTIONS.md under Naming:\n  'Use kebab-case for all CLI flag names.'\"\n

    Or from the terminal:

    ctx add convention \"Use kebab-case for all CLI flag names\" --section \"Naming\"\n

    Conventions work best for rules that come up repeatedly. Codify a pattern the third time you see it, not the first.

    ","path":["Recipes","Knowledge and Tasks","Persisting Decisions, Learnings, and Conventions"],"tags":[]},{"location":"recipes/knowledge-capture/#step-5-reindex-after-manual-edits","level":3,"title":"Step 5: Reindex After Manual Edits","text":"

    DECISIONS.md and LEARNINGS.md maintain a quick-reference index at the top: a compact table of date and title for each entry. The index updates automatically via ctx add, but falls out of sync after hand edits.

    ctx reindex\n

    This single command regenerates both indices. You can also reindex individually with ctx decision reindex or ctx learning reindex.

    Run reindex after any manual edit. The index lets AI tools scan all entries without reading the full file, which matters when token budgets are tight.

    ","path":["Recipes","Knowledge and Tasks","Persisting Decisions, Learnings, and Conventions"],"tags":[]},{"location":"recipes/knowledge-capture/#step-6-use-ctx-reflect-to-surface-what-to-capture","level":3,"title":"Step 6: Use /ctx-reflect to Surface What to Capture","text":"

    Keep It Conversational

    /ctx-reflect is not the only way to trigger reflection.

    Agents trained on the ctx playbook naturally surface persist-worthy items at breakpoints, even without invoking the skill explicitly.

    A conversational prompt like \"anything worth saving?\" or \"let's wrap up\" can trigger the same review.

    The skill provides a structured checklist, but the behavior is available through natural conversation.

    At natural breakpoints (after completing a feature, fixing a bug, or before ending a session) use /ctx-reflect to identify items worth persisting.

    /ctx-reflect\n

    The skill walks through learnings, decisions, tasks, and session notes, skipping categories with nothing to report. The output includes specific commands for each suggested persist:

    This session implemented file-based cooldown for `ctx agent` and\ndiscovered that hook subprocesses cannot set env vars in the parent.\n\nI'd suggest persisting:\n- **Learning**: Hook subprocesses cannot propagate env vars\n  `ctx add learning \"...\" --context \"...\" --lesson \"...\" --application \"...\" --session-id abc12345 --branch main --commit 68fbc00a`\n- **Decision**: File-based cooldown tokens over env vars\n  `ctx add decision \"...\" --context \"...\" --rationale \"...\" --consequence \"...\" --session-id abc12345 --branch main --commit 68fbc00a`\n\nWant me to persist any of these?\n

    The skill always asks before persisting.

    ","path":["Recipes","Knowledge and Tasks","Persisting Decisions, Learnings, and Conventions"],"tags":[]},{"location":"recipes/knowledge-capture/#step-7-the-conversational-approach","level":3,"title":"Step 7: The Conversational Approach","text":"

    The commands shown in Steps 2 to 4 are precise, but you rarely need to type them yourself. When working with an agent that has loaded the ctx playbook, natural language triggers the same operations.

    You ask, the agent acts:

    You say What the agent does \"What have we learned this session?\" Reviews recent work, identifies learnings, offers to persist \"Save that as a decision\" Captures the trade-off you just discussed with structured fields \"Any gotchas worth remembering?\" Scans recent work for non-obvious lessons \"Record that convention\" Adds the pattern you just agreed on to CONVENTIONS.md \"Let's wrap up\" Runs through the reflect checklist conversationally

    Example exchange:

    You: \"What did we learn from that debugging session?\"\n\nAgent: \"We discovered three things worth persisting:\n  1. Hook subprocesses can't propagate env vars to the parent. We\n     spent 20 minutes on this before switching to tombstone files.\n  2. Tombstone files need a TTL or they accumulate in /tmp.\n  3. The cooldown logic must check file mtime, not just existence.\n  Want me to save these as learnings?\"\n\nYou: \"Yes, all of them.\"\n\nAgent: \"Done. Added all three to LEARNINGS.md with structured fields.\"\n

    The agent can also be proactive without being asked. The playbook encourages agents to persist context at natural milestones.

    You may see:

    • After fixing a tricky bug: \"I noticed this was a non-obvious issue. Want me to save it as a learning?\"
    • After choosing between approaches: \"We just made a trade-off between file-based tokens and IPC. Should I record this as a decision?\"
    • At session milestones: the agent checks what is worth persisting and offers a brief summary.

    When agents run unattended (no human in the loop), they may persist directly:

    • \"I've added the subprocess env var gotcha to LEARNINGS.md so we don't hit it again.\"
    • \"Recorded the decision to use file-based tokens over env vars in DECISIONS.md.\"
    • \"Marked the cooldown task done and added a follow-up for TTL cleanup.\"

    This behavior is by design.

    The playbook's self-check prompt, \"If this session ended right now, would the next session know what happened?\" drives agents to persist early and often rather than waiting for explicit instructions.

    ","path":["Recipes","Knowledge and Tasks","Persisting Decisions, Learnings, and Conventions"],"tags":[]},{"location":"recipes/knowledge-capture/#putting-it-all-together","level":2,"title":"Putting It All Together","text":"","path":["Recipes","Knowledge and Tasks","Persisting Decisions, Learnings, and Conventions"],"tags":[]},{"location":"recipes/knowledge-capture/#command-line-approach-scripting-and-automation","level":3,"title":"Command-Line Approach (Scripting and Automation)","text":"
    # Decision: record the trade-off\nctx add decision \"Use PostgreSQL over SQLite\" \\\n  --context \"Need concurrent multi-user access\" \\\n  --rationale \"SQLite locks on writes; Postgres handles concurrency\" \\\n  --consequence \"Requires a database server; team needs Postgres training\" \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n\n# Learning: record the gotcha\nctx add learning \"SQL migrations must be idempotent\" \\\n  --context \"Deploy failed when migration ran twice after rollback\" \\\n  --lesson \"CREATE TABLE without IF NOT EXISTS fails on retry\" \\\n  --application \"Always use IF NOT EXISTS guards in migrations\" \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n\n# Convention: record the pattern\nctx add convention \"API handlers return structured errors\" --section \"API\"\n\n# Reindex after manual edits\nctx reindex\n\n# Reflect at breakpoints (from AI assistant)\n# /ctx-reflect\n
    ","path":["Recipes","Knowledge and Tasks","Persisting Decisions, Learnings, and Conventions"],"tags":[]},{"location":"recipes/knowledge-capture/#conversational-approach-interactive-sessions","level":3,"title":"Conversational Approach (Interactive Sessions)","text":"
    You: \"We just picked Postgres over SQLite for the concurrency\n      reasons. Save that?\"\n\nAgent: \"Recorded. Added to DECISIONS.md:\n  - Decision: Use PostgreSQL over SQLite\n  - Context: Need concurrent multi-user access\n  - Rationale: SQLite locks on writes; Postgres handles concurrency\n  - Consequence: Requires a database server; team needs Postgres\n    training\"\n\nYou: \"Also that migration thing. The deploy blew up when the\n      migration ran twice.\"\n\nAgent: \"Got it. Added to LEARNINGS.md:\n  - Learning: SQL migrations must be idempotent\n  - Lesson: CREATE TABLE without IF NOT EXISTS fails on retry\n  - Application: Always use IF NOT EXISTS guards in migrations\"\n\nYou: \"Good. Anything else worth saving before we wrap up?\"\n\nAgent: \"We've been using structured error returns in every API\n  handler this session. Want me to codify that as a convention?\"\n\nYou: \"Do it.\"\n\nAgent: \"Done. Added 'API handlers return structured errors' to\n  CONVENTIONS.md under the API section.\"\n

    Both approaches produce the same structured entries in the same context files.

    • The conversational approach is the natural fit for interactive sessions;
    • the CLI commands are better suited for scripts, hooks, and automation pipelines.
    ","path":["Recipes","Knowledge and Tasks","Persisting Decisions, Learnings, and Conventions"],"tags":[]},{"location":"recipes/knowledge-capture/#tips","level":2,"title":"Tips","text":"
    • Record decisions at the moment of choice. The alternatives you considered and the reasons you rejected them fade quickly. Capture trade-offs while they are fresh.
    • Learnings should fail the Google test. If someone could find it in a 5-minute Google search, it does not belong in LEARNINGS.md.
    • Conventions earn their place through repetition. Add a convention the third time you see a pattern, not the first.
    • Use /ctx-reflect at natural breakpoints. The checklist catches items you might otherwise lose.
    • Keep the entries self-contained. Each entry should make sense on its own. A future session may load only one due to token budget constraints.
    • Reindex after every hand edit. It takes less than a second. A stale index causes AI tools to miss entries.
    • Prefer the structured fields. The verbosity forces clarity. A decision without a rationale is just a fact. A learning without an application is just a story.
    • Talk to your agent, do not type commands. In interactive sessions, the conversational approach is the recommended way to capture knowledge. Say \"save that as a learning\" or \"any decisions worth recording?\" and let the agent handle the structured fields. Reserve the CLI commands for scripting, automation, and CI/CD pipelines where there is no agent in the loop.
    • Trust the agent's proactive instincts. Agents trained on the ctx playbook will offer to persist context at milestones. A brief \"want me to save this?\" is cheaper than re-discovering the same lesson three sessions later.
    • Relax provenance per-project if --session-id, --branch, or --commit are impractical (e.g., manual notes outside an AI session). Add to .ctxrc:

      provenance_required:\n  session_id: false   # allow entries without --session-id\n  branch: true        # still require --branch\n  commit: true        # still require --commit\n

      Default is all three required. Only human configuration can relax these requirements: agents cannot bypass them, and that's by design.

    ","path":["Recipes","Knowledge and Tasks","Persisting Decisions, Learnings, and Conventions"],"tags":[]},{"location":"recipes/knowledge-capture/#next-up","level":2,"title":"Next Up","text":"

    Tracking Work Across Sessions →: Add, prioritize, complete, and archive tasks across sessions.

    ","path":["Recipes","Knowledge and Tasks","Persisting Decisions, Learnings, and Conventions"],"tags":[]},{"location":"recipes/knowledge-capture/#see-also","level":2,"title":"See Also","text":"
    • Tracking Work Across Sessions: managing the tasks that decisions and learnings support
    • The Complete Session: full session lifecycle including reflection and context persistence
    • Detecting and Fixing Drift: keeping knowledge files accurate as the codebase evolves
    • CLI Reference: full documentation for ctx add, ctx decision, ctx learning
    • Context Files: format and conventions for DECISIONS.md, LEARNINGS.md, and CONVENTIONS.md
    ","path":["Recipes","Knowledge and Tasks","Persisting Decisions, Learnings, and Conventions"],"tags":[]},{"location":"recipes/memory-bridge/","level":1,"title":"Bridging Claude Code Auto Memory","text":"","path":["Recipes","Knowledge and Tasks","Bridging Claude Code Auto Memory"],"tags":[]},{"location":"recipes/memory-bridge/#the-problem","level":2,"title":"The Problem","text":"

    Claude Code maintains per-project auto memory at ~/.claude/projects/<slug>/memory/MEMORY.md. This file is:

    • Outside the repo - not version-controlled, not portable
    • Machine-specific - tied to one ~/.claude/ directory
    • Invisible to ctx - context loading and hooks don't read it

    Meanwhile, ctx maintains structured context files (DECISIONS.md, LEARNINGS.md, CONVENTIONS.md) that are git-tracked, portable, and token-budgeted - but Claude Code doesn't automatically write to them.

    The two systems hold complementary knowledge with no bridge between them.

    ","path":["Recipes","Knowledge and Tasks","Bridging Claude Code Auto Memory"],"tags":[]},{"location":"recipes/memory-bridge/#tldr","level":2,"title":"TL;DR","text":"
    ctx memory sync          # Mirror MEMORY.md into .context/memory/mirror.md\nctx memory status        # Check for drift\nctx memory diff          # See what changed since last sync\n

    The check-memory-drift hook nudges automatically when MEMORY.md changes - you don't need to remember to sync manually.

    ","path":["Recipes","Knowledge and Tasks","Bridging Claude Code Auto Memory"],"tags":[]},{"location":"recipes/memory-bridge/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Tool Type Purpose ctx memory sync CLI command Copy MEMORY.md to mirror, archive previous ctx memory status CLI command Show drift, timestamps, line counts ctx memory diff CLI command Show changes since last sync ctx memory import CLI command Classify and promote entries to .context/ files ctx memory publish CLI command Push curated .context/ content to MEMORY.md ctx memory unpublish CLI command Remove published block from MEMORY.md ctx system check-memory-drift Hook Nudge when MEMORY.md has changed (once/session)","path":["Recipes","Knowledge and Tasks","Bridging Claude Code Auto Memory"],"tags":[]},{"location":"recipes/memory-bridge/#how-it-works","level":2,"title":"How It Works","text":"","path":["Recipes","Knowledge and Tasks","Bridging Claude Code Auto Memory"],"tags":[]},{"location":"recipes/memory-bridge/#discovery","level":3,"title":"Discovery","text":"

    Claude Code encodes project paths as directory names under ~/.claude/projects/. The encoding replaces / with - and prefixes with -:

    /home/jose/WORKSPACE/ctx  →  ~/.claude/projects/-home-jose-WORKSPACE-ctx/\n

    ctx memory uses this encoding to locate MEMORY.md automatically from your project root - no configuration needed.

    ","path":["Recipes","Knowledge and Tasks","Bridging Claude Code Auto Memory"],"tags":[]},{"location":"recipes/memory-bridge/#mirroring","level":3,"title":"Mirroring","text":"

    When you run ctx memory sync:

    1. The previous mirror is archived to .context/memory/archive/mirror-<timestamp>.md
    2. MEMORY.md is copied to .context/memory/mirror.md
    3. Sync state is updated in .context/state/memory-import.json

    The mirror is git-tracked, so it travels with the project. Archives provide a fallback for projects that don't use git.

    ","path":["Recipes","Knowledge and Tasks","Bridging Claude Code Auto Memory"],"tags":[]},{"location":"recipes/memory-bridge/#drift-detection","level":3,"title":"Drift Detection","text":"

    The check-memory-drift hook compares MEMORY.md's modification time against the mirror. When drift is detected, the agent sees:

    ┌─ Memory Drift ────────────────────────────────────────────────\n│ MEMORY.md has changed since last sync.\n│ Run: ctx memory sync\n│ Context: .context\n└────────────────────────────────────────────────────────────────\n

    The nudge fires once per session to avoid noise.

    ","path":["Recipes","Knowledge and Tasks","Bridging Claude Code Auto Memory"],"tags":[]},{"location":"recipes/memory-bridge/#typical-workflow","level":2,"title":"Typical Workflow","text":"","path":["Recipes","Knowledge and Tasks","Bridging Claude Code Auto Memory"],"tags":[]},{"location":"recipes/memory-bridge/#at-session-start","level":3,"title":"At Session Start","text":"

    If the hook fires a drift nudge, sync before diving into work:

    ctx memory diff     # Review what changed\nctx memory sync     # Mirror the changes\n
    ","path":["Recipes","Knowledge and Tasks","Bridging Claude Code Auto Memory"],"tags":[]},{"location":"recipes/memory-bridge/#periodic-check","level":3,"title":"Periodic Check","text":"
    ctx memory status\n# Memory Bridge Status\n#   Source:      ~/.claude/projects/.../memory/MEMORY.md\n#   Mirror:      .context/memory/mirror.md\n#   Last sync:   2026-03-05 14:30 (2 hours ago)\n#\n#   MEMORY.md:  47 lines\n#   Mirror:     32 lines\n#   Drift:      detected (source is newer)\n#   Archives:   3 snapshots in .context/memory/archive/\n
    ","path":["Recipes","Knowledge and Tasks","Bridging Claude Code Auto Memory"],"tags":[]},{"location":"recipes/memory-bridge/#dry-run","level":3,"title":"Dry Run","text":"

    Preview what sync would do without writing:

    ctx memory sync --dry-run\n
    ","path":["Recipes","Knowledge and Tasks","Bridging Claude Code Auto Memory"],"tags":[]},{"location":"recipes/memory-bridge/#storage-layout","level":2,"title":"Storage Layout","text":"
    .context/\n├── memory/\n│   ├── mirror.md                          # Raw copy of MEMORY.md (often git-tracked)\n│   └── archive/\n│       ├── mirror-2026-03-05-143022.md    # Timestamped pre-sync snapshots\n│       └── mirror-2026-03-04-220015.md\n├── state/\n│   └── memory-import.json                 # Sync tracking state\n
    ","path":["Recipes","Knowledge and Tasks","Bridging Claude Code Auto Memory"],"tags":[]},{"location":"recipes/memory-bridge/#edge-cases","level":2,"title":"Edge Cases","text":"Scenario Behavior Auto memory not active sync exits 1 with message. status reports \"not active\". Hook skips silently. First sync (no mirror) Creates mirror without archiving. MEMORY.md is empty Syncs to empty mirror (valid). Not initialized Init guard rejects (same as all ctx commands).","path":["Recipes","Knowledge and Tasks","Bridging Claude Code Auto Memory"],"tags":[]},{"location":"recipes/memory-bridge/#importing-entries","level":2,"title":"Importing Entries","text":"

    Once you've synced, you can classify and promote entries into structured .context/ files:

    ctx memory import --dry-run    # Preview classification\nctx memory import              # Actually promote entries\n

    Each entry is classified by keyword heuristics:

    Keywords Target always use, prefer, never use, standard CONVENTIONS.md decided, chose, trade-off, approach DECISIONS.md gotcha, learned, watch out, bug, caveat LEARNINGS.md todo, need to, follow up TASKS.md Everything else Skipped

    Entries that don't match any pattern are skipped - they stay in the mirror for manual review. Deduplication (hash-based) prevents re-importing the same entry on subsequent runs.

    Review Before Importing

    Use --dry-run first. The heuristic classifier is deliberately simple - it may misclassify ambiguous entries. Review the plan, then import.

    ","path":["Recipes","Knowledge and Tasks","Bridging Claude Code Auto Memory"],"tags":[]},{"location":"recipes/memory-bridge/#full-workflow","level":3,"title":"Full Workflow","text":"
    ctx memory sync                # 1. Mirror MEMORY.md\nctx memory import --dry-run    # 2. Preview what would be imported\nctx memory import              # 3. Promote entries to .context/ files\n
    ","path":["Recipes","Knowledge and Tasks","Bridging Claude Code Auto Memory"],"tags":[]},{"location":"recipes/memory-bridge/#publishing-context-to-memorymd","level":2,"title":"Publishing Context to MEMORY.md","text":"

    Push curated .context/ content back into MEMORY.md so Claude Code sees structured project context on session start - without needing hooks.

    ctx memory publish --dry-run    # Preview what would be published\nctx memory publish              # Write to MEMORY.md\nctx memory publish --budget 40  # Tighter line budget\n

    Published content is wrapped in markers:

    <!-- ctx:published -->\n# Project Context (managed by ctx)\n\n## Pending Tasks\n- [ ] Implement feature X\n...\n<!-- ctx:end -->\n

    Rules:

    • ctx owns everything between the markers
    • Claude owns everything outside the markers
    • ctx memory import reads only outside the markers
    • ctx memory publish replaces only inside the markers

    To remove the published block entirely:

    ctx memory unpublish\n

    Publish at Wrap-Up, Not on Commit

    The best time to publish is during session wrap-up, after persisting decisions and learnings. Never auto-publish - give yourself a chance to review what's going into MEMORY.md.

    ","path":["Recipes","Knowledge and Tasks","Bridging Claude Code Auto Memory"],"tags":[]},{"location":"recipes/memory-bridge/#full-bidirectional-workflow","level":3,"title":"Full Bidirectional Workflow","text":"
    ctx memory sync                 # 1. Mirror MEMORY.md\nctx memory import --dry-run     # 2. Check what Claude wrote\nctx memory import               # 3. Promote entries to .context/\nctx memory publish --dry-run    # 4. Check what would be published\nctx memory publish              # 5. Push context to MEMORY.md\n
    ","path":["Recipes","Knowledge and Tasks","Bridging Claude Code Auto Memory"],"tags":[]},{"location":"recipes/multi-tool-setup/","level":1,"title":"Setup Across AI Tools","text":"","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multi-tool-setup/#the-problem","level":2,"title":"The Problem","text":"

    You have installed ctx and want to set it up with your AI coding assistant so that context persists across sessions. Different tools have different integration depths. For example:

    • Claude Code supports native hooks that load and save context automatically.
    • Cursor injects context via its system prompt.
    • Aider reads context files through its --read flag.

    This recipe walks through the complete setup for each tool, from initialization through verification, so you end up with a working memory layer regardless of which AI tool you use.

    ","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multi-tool-setup/#tldr","level":2,"title":"TL;DR","text":"
    cd your-project\nctx init                      # creates .context/\nsource <(ctx completion zsh)  # shell completion (or bash/fish)\n\n# ## Claude Code (automatic after plugin install) ##\nclaude /plugin marketplace add ActiveMemory/ctx\nclaude /plugin install ctx@activememory-ctx\n\n# ## Cursor / Aider / Copilot / Windsurf ##\nctx setup cursor # or: aider, copilot, windsurf\n\n# ## Companion tools (highly recommended) ##\nnpx gitnexus analyze          # code knowledge graph\n# Add Gemini Search MCP server for grounded web search\n

    Create a .ctxrc in your project root to configure token budgets, context directory, drift thresholds, and more.

    Then start your AI tool and ask: \"Do you remember?\"

    ","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multi-tool-setup/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Command/Skill Role in this workflow ctx init Create .context/ directory, templates, and permissions ctx setup Generate integration configuration for a specific AI tool ctx agent Print a token-budgeted context packet for AI consumption ctx load Output assembled context in read order (for manual pasting) ctx watch Auto-apply context updates from AI output (non-native tools) ctx completion Generate shell autocompletion for bash, zsh, or fish ctx journal import Import sessions to editable journal Markdown","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multi-tool-setup/#the-workflow","level":2,"title":"The Workflow","text":"","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multi-tool-setup/#step-1-initialize-ctx","level":3,"title":"Step 1: Initialize ctx","text":"

    Run ctx init in your project root. This creates the .context/ directory with all template files and seeds ctx permissions in settings.local.json.

    cd your-project\nctx init\n

    This produces the following structure:

    .context/\n  CONSTITUTION.md     # Hard rules the AI must never violate\n  TASKS.md            # Current and planned work\n  CONVENTIONS.md      # Code patterns and standards\n  ARCHITECTURE.md     # System overview\n  DECISIONS.md        # Architectural decisions with rationale\n  LEARNINGS.md        # Lessons learned, gotchas, tips\n  GLOSSARY.md         # Domain terms and abbreviations\n  AGENT_PLAYBOOK.md   # How AI tools should use this system\n

    Using a Different .context Directory

    The .context/ directory doesn't have to live inside your project. You can point ctx to an external folder via .ctxrc, the CTX_DIR environment variable, or the --context-dir CLI flag.

    This is useful for monorepos or shared context across repositories.

    See Configuration for details and External Context for a full recipe.

    For Claude Code, install the ctx plugin to get hooks and skills:

    claude /plugin marketplace add ActiveMemory/ctx\nclaude /plugin install ctx@activememory-ctx\n

    If you only need the core files (useful for lightweight setups), use the --minimal flag:

    ctx init --minimal\n

    This creates only TASKS.md, DECISIONS.md, and CONSTITUTION.md.

    ","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multi-tool-setup/#step-2-generate-tool-specific-hooks","level":3,"title":"Step 2: Generate Tool-Specific Hooks","text":"

    If you are using a tool other than Claude Code (which is configured automatically by ctx init), generate its integration configuration:

    # For Cursor\nctx setup cursor\n\n# For Aider\nctx setup aider\n\n# For GitHub Copilot\nctx setup copilot\n\n# For Windsurf\nctx setup windsurf\n

    Each command prints the configuration you need. How you apply it depends on the tool.

    ","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multi-tool-setup/#claude-code","level":4,"title":"Claude Code","text":"

    No action needed. Just install ctx from the Marketplace as ActiveMemory/ctx.

    Claude Code Is a First-Class Citizen

    With the ctx plugin installed, Claude Code gets hooks and skills automatically. The PreToolUse hook runs ctx agent --budget 4000 on every tool call (with a 10-minute cooldown so it only fires once per window).

    ","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multi-tool-setup/#cursor","level":4,"title":"Cursor","text":"

    Add the system prompt snippet to .cursor/settings.json:

    {\n  \"ai.systemPrompt\": \"Read .context/TASKS.md and .context/CONVENTIONS.md before responding. Follow rules in .context/CONSTITUTION.md.\"\n}\n

    Context files appear in Cursor's file tree. You can also paste a context packet directly into chat:

    ctx agent --budget 4000 | xclip    # Linux\nctx agent --budget 4000 | pbcopy   # macOS\n
    ","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multi-tool-setup/#aider","level":4,"title":"Aider","text":"

    Create .aider.conf.yml so context files are loaded on every session:

    read:\n  - .context/CONSTITUTION.md\n  - .context/TASKS.md\n  - .context/CONVENTIONS.md\n  - .context/DECISIONS.md\n

    Then start Aider normally:

    aider\n

    Or specify files on the command line:

    aider --read .context/TASKS.md --read .context/CONVENTIONS.md\n
    ","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multi-tool-setup/#step-3-set-up-shell-completion","level":3,"title":"Step 3: Set Up Shell Completion","text":"

    Shell completion lets you tab-complete ctx subcommands and flags, which is especially useful while learning the CLI.

    # Bash (add to ~/.bashrc)\nsource <(ctx completion bash)\n\n# Zsh (add to ~/.zshrc)\nsource <(ctx completion zsh)\n\n# Fish\nctx completion fish > ~/.config/fish/completions/ctx.fish\n

    After sourcing, typing ctx a<TAB> completes to ctx agent, and ctx journal <TAB> shows list, show, and export.

    ","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multi-tool-setup/#step-4-verify-the-setup-works","level":3,"title":"Step 4: Verify the Setup Works","text":"

    Start a fresh session in your AI tool and ask:

    \"Do you remember?\"

    A correctly configured tool responds with specific context: current tasks from TASKS.md, recent decisions, and previous session topics. It should not say \"I don't have memory\" or \"Let me search for files.\"

    This question checks the passive side of memory. A properly set-up agent is also proactive: it treats context maintenance as part of its job:

    • After a debugging session, it offers to save a learning.
    • After a trade-off discussion, it asks whether to record the decision.
    • After completing a task, it suggests follow-up items.

    The \"do you remember?\" check verifies both halves: recall and responsibility.

    For example, after resolving a tricky bug, a proactive agent might say:

    That Redis timeout issue was subtle. Want me to save this as a *learning*\nso we don't hit it again?\n

    If you see behavior like this, the setup is working end to end.

    In Claude Code, you can also invoke the /ctx-status skill:

    /ctx-status\n

    This prints a summary of all context files, token counts, and recent activity, confirming that hooks are loading context.

    If context is not loading, check the basics:

    Symptom Fix ctx: command not found Ensure ctx is in your PATH: which ctx Hook errors Verify plugin is installed: claude /plugin list Context not refreshing Cooldown may be active; wait 10 minutes or set --cooldown 0","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multi-tool-setup/#step-5-enable-watch-mode-for-non-native-tools","level":3,"title":"Step 5: Enable Watch Mode for Non-Native Tools","text":"

    Tools like Aider, Copilot, and Windsurf do not support native hooks for saving context automatically. For these, run ctx watch alongside your AI tool.

    Pipe the AI tool's output through ctx watch:

    # Terminal 1: Run Aider with output logged\naider 2>&1 | tee /tmp/aider.log\n\n# Terminal 2: Watch the log for context updates\nctx watch --log /tmp/aider.log\n

    Or for any generic tool:

    your-ai-tool 2>&1 | tee /tmp/ai.log &\nctx watch --log /tmp/ai.log\n

    When the AI emits structured update commands, ctx watch parses and applies them automatically:

    <context-update type=\"learning\"\n  context=\"Debugging rate limiter\"\n  lesson=\"Redis MULTI/EXEC does not roll back on error\"\n  application=\"Wrap rate-limit checks in Lua scripts instead\"\n>Redis Transaction Behavior</context-update>\n

    To preview changes without modifying files:

    ctx watch --dry-run --log /tmp/ai.log\n
    ","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multi-tool-setup/#step-6-import-session-transcripts-optional","level":3,"title":"Step 6: Import Session Transcripts (Optional)","text":"

    If you want to browse past session transcripts, import them to the journal:

    ctx journal import --all\n

    This converts raw session data into editable Markdown files in .context/journal/. You can then enrich them with metadata using /ctx-journal-enrich-all inside your AI assistant.

    ","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multi-tool-setup/#putting-it-all-together","level":2,"title":"Putting It All Together","text":"

    Here is the condensed setup for all three tools:

    # ## Common (run once per project) ##\ncd your-project\nctx init\nsource <(ctx completion zsh)       # or bash/fish\n\n# ## Claude Code (automatic, just verify) ##\n# Start Claude Code, then ask: \"Do you remember?\"\n\n# ## Cursor ##\nctx setup cursor\n# Add the system prompt to .cursor/settings.json\n# Paste context: ctx agent --budget 4000 | pbcopy\n\n# ## Aider ##\nctx setup aider\n# Create .aider.conf.yml with read: paths\n# Run watch mode alongside: ctx watch --log /tmp/aider.log\n\n# ## Verify Any Tool ##\n# Ask your AI: \"Do you remember?\"\n# Expect: specific tasks, decisions, recent context\n
    ","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multi-tool-setup/#tips","level":2,"title":"Tips","text":"
    • Start with ctx init (not --minimal) for your first project. The full template set gives the agent more to work with, and you can always delete files later.
    • For Claude Code, the token budget is configured in the plugin's hooks.json. To customize, adjust the --budget flag in the ctx agent hook command.
    • The --session $PPID flag isolates cooldowns per Claude Code process, so parallel sessions do not suppress each other.
    • Commit your .context/ directory to version control. Several ctx features (journals, changelogs, blog generation) rely on git history.
    • For Cursor and Copilot, keep CONVENTIONS.md visible. These tools treat open files as higher-priority context.
    • Run ctx drift periodically to catch stale references before they confuse the agent.
    • The agent playbook instructs the agent to persist context at natural milestones (completed tasks, decisions, gotchas). In practice, this works best when you reinforce the habit: a quick \"anything worth saving?\" after a debugging session goes a long way.
    ","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multi-tool-setup/#companion-tools-highly-recommended","level":2,"title":"Companion Tools (Highly Recommended)","text":"

    ctx skills can leverage external MCP servers for web search and code intelligence. ctx works without them, but they significantly improve agent behavior across sessions. The investment is small and the benefits compound. Skills like /ctx-code-review, /ctx-explain, and /ctx-refactor all become noticeably better with these tools connected.

    ","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multi-tool-setup/#gemini-search","level":3,"title":"Gemini Search","text":"

    Provides grounded web search with citations. Used by skills and the agent playbook as the preferred search backend (faster and more accurate than built-in web search).

    Setup: Add the Gemini Search MCP server to your Claude Code settings. See the Gemini Search MCP documentation for installation.

    Verification:

    # The agent checks this automatically during /ctx-remember\n# Manual test: ask the agent to search for something\n

    ","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multi-tool-setup/#gitnexus","level":3,"title":"GitNexus","text":"

    Provides a code knowledge graph with symbol resolution, blast radius analysis, and domain clustering. Used by skills like /ctx-refactor (impact analysis) and /ctx-code-review (dependency awareness).

    Setup: Add the GitNexus MCP server to your Claude Code settings, then index your project:

    npx gitnexus analyze\n

    Verification:

    # The agent checks this automatically during /ctx-remember\n# If the index is stale, it will suggest rehydrating\n

    ","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multi-tool-setup/#suppressing-the-check","level":3,"title":"Suppressing the Check","text":"

    If you don't use companion tools and want to skip the availability check at session start, add to .ctxrc:

    companion_check: false\n
    ","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multi-tool-setup/#future-direction","level":3,"title":"Future Direction","text":"

    The companion tool integration is evolving toward a pluggable model: bring your own search engine, bring your own code intelligence. The current integration is MCP-based and limited to Gemini Search and GitNexus. If you use a different search or code intelligence tool, skills will degrade gracefully to built-in capabilities.

    ","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multi-tool-setup/#next-up","level":2,"title":"Next Up","text":"

    Keeping Context in a Separate Repo →: Store context files outside the project tree for multi-repo or open source setups.

    ","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multi-tool-setup/#see-also","level":2,"title":"See Also","text":"
    • The Complete Session: full session lifecycle recipe
    • Multilingual Session Parsing: configure session header prefixes for other languages
    • CLI Reference: all commands and flags
    • Integrations: detailed per-tool integration docs
    ","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multilingual-sessions/","level":1,"title":"Multilingual Session Parsing","text":"","path":["Recipes","Getting Started","Multilingual Session Parsing"],"tags":[]},{"location":"recipes/multilingual-sessions/#the-problem","level":2,"title":"The Problem","text":"

    Your team works across languages. Session files written by AI tools might use headers like # Oturum: 2026-01-15 - API Düzeltme (Turkish) or # セッション: 2026-01-15 - テスト (Japanese) instead of # Session: 2026-01-15 - Fix API.

    By default, ctx only recognizes Session: as a session header prefix. Files with other prefixes are silently skipped during journal import and journal generation: They look like regular Markdown, not sessions.

    ","path":["Recipes","Getting Started","Multilingual Session Parsing"],"tags":[]},{"location":"recipes/multilingual-sessions/#tldr","level":2,"title":"TL;DR","text":"

    Add recognized prefixes to .ctxrc:

    session_prefixes:\n  - \"Session:\"      # English (include to keep default)\n  - \"Oturum:\"       # Turkish\n  - \"セッション:\"     # Japanese\n

    Restart your session. All configured prefixes are now recognized.

    ","path":["Recipes","Getting Started","Multilingual Session Parsing"],"tags":[]},{"location":"recipes/multilingual-sessions/#how-it-works","level":2,"title":"How It Works","text":"

    The Markdown session parser detects session files by looking for an H1 header that starts with a known prefix followed by a date:

    # Session: 2026-01-15 - Fix API Rate Limiting\n# Oturum: 2026-01-15 - API Düzeltme\n# セッション: 2026-01-15 - テスト\n

    The list of recognized prefixes comes from session_prefixes in .ctxrc. When the key is absent or empty, ctx falls back to the built-in default: [\"Session:\"].

    Date-only headers (# 2026-01-15 - Morning Work) are always recognized regardless of prefix configuration.

    ","path":["Recipes","Getting Started","Multilingual Session Parsing"],"tags":[]},{"location":"recipes/multilingual-sessions/#configuration","level":2,"title":"Configuration","text":"","path":["Recipes","Getting Started","Multilingual Session Parsing"],"tags":[]},{"location":"recipes/multilingual-sessions/#adding-a-language","level":3,"title":"Adding a Language","text":"

    Add the prefix with a trailing colon to your .ctxrc:

    session_prefixes:\n  - \"Session:\"\n  - \"Sesión:\"       # Spanish\n

    Include Session: Explicitly

    When you override session_prefixes, the default is replaced, not extended. If you still want English headers recognized, include \"Session:\" in your list.

    ","path":["Recipes","Getting Started","Multilingual Session Parsing"],"tags":[]},{"location":"recipes/multilingual-sessions/#team-setup","level":3,"title":"Team Setup","text":"

    Commit .ctxrc to the repo so all team members share the same prefix list. This ensures ctx journal import and journal generation pick up sessions from all team members regardless of language.

    ","path":["Recipes","Getting Started","Multilingual Session Parsing"],"tags":[]},{"location":"recipes/multilingual-sessions/#common-prefixes","level":3,"title":"Common Prefixes","text":"Language Prefix English Session: Turkish Oturum: Spanish Sesión: French Session: German Sitzung: Japanese セッション: Korean 세션: Portuguese Sessão: Chinese 会话:","path":["Recipes","Getting Started","Multilingual Session Parsing"],"tags":[]},{"location":"recipes/multilingual-sessions/#verifying","level":3,"title":"Verifying","text":"

    After configuring, test with ctx journal source. Sessions with the new prefixes should appear in the output.

    ","path":["Recipes","Getting Started","Multilingual Session Parsing"],"tags":[]},{"location":"recipes/multilingual-sessions/#what-this-does-not-do","level":2,"title":"What This Does NOT Do","text":"
    • Change the interface language: ctx output is always English. This setting only controls which session files ctx can parse.
    • Generate headers: ctx never writes session headers. The prefix list is recognition-only (input, not output).
    • Affect JSONL sessions: Claude Code JSONL transcripts don't use header prefixes. This only applies to Markdown session files in .context/sessions/.
    ","path":["Recipes","Getting Started","Multilingual Session Parsing"],"tags":[]},{"location":"recipes/multilingual-sessions/#see-also","level":2,"title":"See Also","text":"

    See also: Setup Across AI Tools - complete multi-tool setup including Markdown session configuration.

    See also: CLI Reference - full .ctxrc field reference including session_prefixes.

    ","path":["Recipes","Getting Started","Multilingual Session Parsing"],"tags":[]},{"location":"recipes/parallel-worktrees/","level":1,"title":"Parallel Agent Development with Git Worktrees","text":"","path":["Recipes","Agents and Automation","Parallel Agent Development with Git Worktrees"],"tags":[]},{"location":"recipes/parallel-worktrees/#the-problem","level":2,"title":"The Problem","text":"

    You have a large backlog (10, 20, 30 open tasks) and many of them are independent: docs work that doesn't touch Go code, a new package that doesn't overlap with existing ones, test coverage for a stable module.

    Running one agent at a time means serial execution. You want 3-4 agents working in parallel, each on its own track, without stepping on each other's files.

    Git worktrees solve this.

    Each worktree is a separate working directory with its own branch, but they share the same .git object database. Combined with ctx's persistent context, each agent session picks up the full project state and works independently.

    ","path":["Recipes","Agents and Automation","Parallel Agent Development with Git Worktrees"],"tags":[]},{"location":"recipes/parallel-worktrees/#tldr","level":2,"title":"TL;DR","text":"
    /ctx-worktree                                   # 1. group tasks by file overlap\ngit worktree add ../myproject-docs -b work/docs # 2. create worktrees\ncd ../myproject-docs && claude                  # 3. launch agents (one per track)\n/ctx-worktree teardown docs                     # 4. merge back and clean up\n

    TASKS.md will conflict on merge: Accept all [x] completions from both sides.

    ","path":["Recipes","Agents and Automation","Parallel Agent Development with Git Worktrees"],"tags":[]},{"location":"recipes/parallel-worktrees/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Tool Type Purpose /ctx-worktree Skill Create, list, and tear down worktrees /ctx-next Skill Pick tasks from the backlog for each track git worktree Command Underlying git worktree management git merge Command Merge completed tracks back to main","path":["Recipes","Agents and Automation","Parallel Agent Development with Git Worktrees"],"tags":[]},{"location":"recipes/parallel-worktrees/#the-workflow","level":2,"title":"The Workflow","text":"","path":["Recipes","Agents and Automation","Parallel Agent Development with Git Worktrees"],"tags":[]},{"location":"recipes/parallel-worktrees/#step-1-assess-the-backlog","level":3,"title":"Step 1: Assess the Backlog","text":"

    Start in your main checkout. Ask the agent to analyze your tasks and group them by blast radius: which files and directories each task touches.

    /ctx-worktree\nLook at TASKS.md and group the pending tasks into 2-3 independent\ntracks based on which files they'd touch. Show me the grouping\nbefore creating anything.\n

    The agent reads TASKS.md, estimates file overlap, and proposes groups:

    Proposed worktree groups:\n\n  work/docs   # recipe updates, blog post (touches: docs/)\n  work/crypto # scratchpad encryption infra (touches: internal/crypto/)\n  work/tests  # journal test coverage (touches: internal/cli/journal/)\n
    ","path":["Recipes","Agents and Automation","Parallel Agent Development with Git Worktrees"],"tags":[]},{"location":"recipes/parallel-worktrees/#step-2-create-the-worktrees","level":3,"title":"Step 2: Create the Worktrees","text":"

    Once you approve the grouping, the agent creates worktrees as sibling directories:

    Create the worktrees for those three groups.\n

    Behind the scenes:

    git worktree add ../myproject-docs -b work/docs\ngit worktree add ../myproject-crypto -b work/crypto\ngit worktree add ../myproject-tests -b work/tests\n

    Each worktree is a full working copy on its own branch.

    ","path":["Recipes","Agents and Automation","Parallel Agent Development with Git Worktrees"],"tags":[]},{"location":"recipes/parallel-worktrees/#step-3-launch-agents","level":3,"title":"Step 3: Launch Agents","text":"

    Open a separate terminal (or editor window) for each worktree and start a Claude Code session:

    # Terminal 1\ncd ../myproject-docs\nclaude\n\n# Terminal 2\ncd ../myproject-crypto\nclaude\n\n# Terminal 3\ncd ../myproject-tests\nclaude\n

    Each agent sees the full project, including .context/, and can work independently.

    Do Not Initialize Context in Worktrees

    Do not run ctx init in worktrees: The .context directory is already tracked in git.

    ","path":["Recipes","Agents and Automation","Parallel Agent Development with Git Worktrees"],"tags":[]},{"location":"recipes/parallel-worktrees/#step-4-work","level":3,"title":"Step 4: Work","text":"

    Each agent works through its assigned tasks. They can read TASKS.md to know what's assigned to their track, use /ctx-next to pick the next item, and commit normally on their work/* branch.

    ","path":["Recipes","Agents and Automation","Parallel Agent Development with Git Worktrees"],"tags":[]},{"location":"recipes/parallel-worktrees/#step-5-merge-back","level":3,"title":"Step 5: Merge Back","text":"

    As each track finishes, return to the main checkout and merge:

    /ctx-worktree teardown docs\n

    The agent checks for uncommitted changes, merges work/docs into your current branch, removes the worktree, and deletes the branch.

    ","path":["Recipes","Agents and Automation","Parallel Agent Development with Git Worktrees"],"tags":[]},{"location":"recipes/parallel-worktrees/#step-6-handle-tasksmd-conflicts","level":3,"title":"Step 6: Handle TASKS.md Conflicts","text":"

    TASKS.md will almost always conflict when merging: Multiple agents will mark different tasks as [x]. This is expected and easy to resolve:

    Accept all completions from both sides. No task should go from [x] back to [ ]. The merge resolution is always additive.

    ","path":["Recipes","Agents and Automation","Parallel Agent Development with Git Worktrees"],"tags":[]},{"location":"recipes/parallel-worktrees/#step-7-cleanup","level":3,"title":"Step 7: Cleanup","text":"

    After all tracks are merged, verify everything is clean:

    /ctx-worktree list\n

    Should show only the main working tree. All work/* branches should be gone.

    ","path":["Recipes","Agents and Automation","Parallel Agent Development with Git Worktrees"],"tags":[]},{"location":"recipes/parallel-worktrees/#conversational-approach","level":2,"title":"Conversational Approach","text":"

    You don't have to use the skill directly for every step. These natural prompts work:

    • \"I have a big backlog. Can we split it across worktrees?\"
    • \"Which of these tasks can run in parallel without conflicts?\"
    • \"Merge the docs track back in.\"
    • \"Clean up all the worktrees, we're done.\"
    ","path":["Recipes","Agents and Automation","Parallel Agent Development with Git Worktrees"],"tags":[]},{"location":"recipes/parallel-worktrees/#what-works-differently-in-worktrees","level":2,"title":"What Works Differently in Worktrees","text":"

    The encryption key lives at ~/.ctx/.ctx.key (user-level, outside the project). Because all worktrees on the same machine share this path, ctx pad and ctx hook notify work in worktrees automatically - no special setup needed.

    One thing to watch:

    • Journal enrichment: ctx journal import and ctx journal enrich write files relative to the current working directory. Enrichments created in a worktree stay there and are discarded on teardown. Enrich journals on the main branch after merging: the JSONL session logs are always intact, and you don't lose any data.

    Context Files Will Merge Just Fine

    Tracked context files (TASKS.md, DECISIONS.md, LEARNINGS.md, CONVENTIONS.md) work normally; git handles them.

    ","path":["Recipes","Agents and Automation","Parallel Agent Development with Git Worktrees"],"tags":[]},{"location":"recipes/parallel-worktrees/#tips","level":2,"title":"Tips","text":"
    • 3-4 worktrees max. Beyond that, merge complexity outweighs the parallelism benefit. The skill enforces this limit.
    • Group by package or directory, not by priority. Two high-priority tasks that touch the same files must be in the same track.
    • TASKS.md will conflict on merge. This is normal. Accept all [x] completions: The resolution is always additive.
    • Don't run ctx init in worktrees. The .context/ directory is tracked in git. Running init overwrites shared context files.
    • Name worktrees by concern, not by number. work/docs and work/crypto are more useful than work/track-1 and work/track-2.
    • Commit frequently in each worktree. Smaller commits make merge conflicts easier to resolve.
    ","path":["Recipes","Agents and Automation","Parallel Agent Development with Git Worktrees"],"tags":[]},{"location":"recipes/parallel-worktrees/#next-up","level":2,"title":"Next Up","text":"

    Back to the beginning: Guide Your Agent →

    Or explore the full recipe list.

    ","path":["Recipes","Agents and Automation","Parallel Agent Development with Git Worktrees"],"tags":[]},{"location":"recipes/parallel-worktrees/#see-also","level":2,"title":"See Also","text":"
    • Running an Unattended AI Agent: for serial autonomous loops instead of parallel tracks
    • Tracking Work Across Sessions: managing the task backlog that feeds into parallelization
    • The Complete Session: the complete session workflow end-to-end, with examples
    ","path":["Recipes","Agents and Automation","Parallel Agent Development with Git Worktrees"],"tags":[]},{"location":"recipes/permission-snapshots/","level":1,"title":"Permission Snapshots","text":"","path":["Recipes","Maintenance","Permission Snapshots"],"tags":[]},{"location":"recipes/permission-snapshots/#the-problem","level":2,"title":"The Problem","text":"

    Claude Code's .claude/settings.local.json accumulates one-off permissions every time you click \"Allow\". After busy sessions, the file is full of session-specific entries that expand the agent's surface area beyond your intent.

    Since settings.local.json is .gitignored, there is no PR review or CI check. The file drifts independently on every machine, and there is no built-in way to reset to a known-good state.

    ","path":["Recipes","Maintenance","Permission Snapshots"],"tags":[]},{"location":"recipes/permission-snapshots/#tldr","level":2,"title":"TL;DR","text":"
    /ctx-permission-sanitize           # audit for dangerous patterns\nctx permission snapshot            # save golden image\n# ... sessions accumulate cruft ...\nctx permission restore             # reset to golden state\n
    ","path":["Recipes","Maintenance","Permission Snapshots"],"tags":[]},{"location":"recipes/permission-snapshots/#the-solution","level":2,"title":"The Solution","text":"

    Save a curated settings.local.json as a golden image, then restore from it to drop session-accumulated permissions. The golden file (.claude/settings.golden.json) is committed to version control and shared with the team.

    ","path":["Recipes","Maintenance","Permission Snapshots"],"tags":[]},{"location":"recipes/permission-snapshots/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Command/Skill Role in this workflow ctx permission snapshot Save settings.local.json as golden image ctx permission restore Reset settings.local.json from golden image /ctx-permission-sanitize Audit for dangerous patterns before snapshotting","path":["Recipes","Maintenance","Permission Snapshots"],"tags":[]},{"location":"recipes/permission-snapshots/#step-by-step","level":2,"title":"Step by Step","text":"","path":["Recipes","Maintenance","Permission Snapshots"],"tags":[]},{"location":"recipes/permission-snapshots/#1-curate-your-permissions","level":3,"title":"1. Curate Your Permissions","text":"

    Start with a clean settings.local.json. Optionally run /ctx-permission-sanitize to remove dangerous patterns first.

    Review the file manually. Every entry should be there because you decided it belongs, not because you clicked \"Allow\" once during debugging.

    See the Permission Hygiene recipe for recommended defaults.

    ","path":["Recipes","Maintenance","Permission Snapshots"],"tags":[]},{"location":"recipes/permission-snapshots/#2-take-a-snapshot","level":3,"title":"2. Take a Snapshot","text":"
    ctx permission snapshot\n# Saved golden image: .claude/settings.golden.json\n

    This creates a byte-for-byte copy. No re-encoding, no indent changes.

    ","path":["Recipes","Maintenance","Permission Snapshots"],"tags":[]},{"location":"recipes/permission-snapshots/#3-commit-the-golden-file","level":3,"title":"3. Commit the Golden File","text":"
    git add .claude/settings.golden.json\ngit commit -m \"Add permission golden image\"\n

    The golden file is not gitignored (unlike settings.local.json). This is intentional: it becomes a team-shared baseline.

    ","path":["Recipes","Maintenance","Permission Snapshots"],"tags":[]},{"location":"recipes/permission-snapshots/#4-auto-restore-at-the-session-start","level":3,"title":"4. Auto-Restore at the Session Start","text":"

    Add this instruction to your CLAUDE.md:

    ## On Session Start\n\nRun `ctx permission restore` to reset permissions to the golden image.\n

    The agent will restore the golden image at the start of every session, automatically dropping any permissions accumulated during previous sessions.

    ","path":["Recipes","Maintenance","Permission Snapshots"],"tags":[]},{"location":"recipes/permission-snapshots/#5-update-when-intentional-changes-are-made","level":3,"title":"5. Update When Intentional Changes Are Made","text":"

    When you add a new permanent permission (not a one-off debugging entry):

    # Edit settings.local.json with the new permission\n# Then update the golden image:\nctx permission snapshot\ngit add .claude/settings.golden.json\ngit commit -m \"Update permission golden image: add cargo test\"\n
    ","path":["Recipes","Maintenance","Permission Snapshots"],"tags":[]},{"location":"recipes/permission-snapshots/#conversational-approach","level":2,"title":"Conversational Approach","text":"

    You don't need to remember exact commands. These natural-language prompts work with agents trained on the ctx playbook:

    What you say What happens \"Save my current permissions as baseline\" Agent runs ctx permission snapshot \"Reset permissions to the golden image\" Agent runs ctx permission restore \"Clean up my permissions\" Agent runs /ctx-permission-sanitize then snapshot \"What permissions did I accumulate?\" Agent diffs local vs golden","path":["Recipes","Maintenance","Permission Snapshots"],"tags":[]},{"location":"recipes/permission-snapshots/#next-up","level":2,"title":"Next Up","text":"

    Turning Activity into Content →: Generate blog posts, changelogs, and journal sites from your project activity.

    ","path":["Recipes","Maintenance","Permission Snapshots"],"tags":[]},{"location":"recipes/permission-snapshots/#see-also","level":2,"title":"See Also","text":"
    • Permission Hygiene: recommended defaults and maintenance workflow
    • CLI Reference: ctx permission: full command documentation
    ","path":["Recipes","Maintenance","Permission Snapshots"],"tags":[]},{"location":"recipes/publishing/","level":1,"title":"Turning Activity into Content","text":"","path":["Recipes","Maintenance","Turning Activity into Content"],"tags":[]},{"location":"recipes/publishing/#the-problem","level":2,"title":"The Problem","text":"

    Your .context/ directory is full of decisions, learnings, and session history.

    Your git log tells the story of a project evolving.

    But none of this is visible to anyone outside your terminal.

    You want to turn this raw activity into:

    • a browsable journal site,
    • blog posts,
    • changelog posts.
    ","path":["Recipes","Maintenance","Turning Activity into Content"],"tags":[]},{"location":"recipes/publishing/#tldr","level":2,"title":"TL;DR","text":"
    ctx journal import --all             # 1. import sessions to markdown\n\n/ctx-journal-enrich-all             # 2. add metadata and tags\n\nctx journal site --serve            # 3. build and serve the journal\n\n/ctx-blog about the caching layer   # 4. draft a blog post\n/ctx-blog-changelog v0.1.0 \"v0.2\"   # 5. write a changelog post\n

    Read on for details on each stage.

    ","path":["Recipes","Maintenance","Turning Activity into Content"],"tags":[]},{"location":"recipes/publishing/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Tool Type Purpose ctx journal import Command Import session JSONL to editable markdown ctx journal site Command Generate a static site from journal entries ctx journal obsidian Command Generate an Obsidian vault from journal entries ctx serve Command Serve any zensical directory (default: journal) ctx site feed Command Generate Atom feed from finalized blog posts make journal Makefile Shortcut for import + site rebuild /ctx-journal-enrich-all Skill Full pipeline: import if needed, then batch-enrich (recommended) /ctx-journal-enrich Skill Add metadata, summaries, and tags to one entry /ctx-blog Skill Draft a blog post from recent project activity /ctx-blog-changelog Skill Write a themed post from a commit range","path":["Recipes","Maintenance","Turning Activity into Content"],"tags":[]},{"location":"recipes/publishing/#the-workflow","level":2,"title":"The Workflow","text":"","path":["Recipes","Maintenance","Turning Activity into Content"],"tags":[]},{"location":"recipes/publishing/#step-1-import-sessions-to-markdown","level":3,"title":"Step 1: Import Sessions to Markdown","text":"

    Raw session data lives as JSONL files in Claude Code's internal storage. The first step is converting these into readable, editable markdown.

    # Import all sessions from the current project\nctx journal import --all\n\n# Import from all projects (if you work across multiple repos)\nctx journal import --all --all-projects\n\n# Import a single session by ID or slug\nctx journal import abc123\nctx journal import gleaming-wobbling-sutherland\n

    Imported files land in .context/journal/ as individual Markdown files with session metadata and the full conversation transcript.

    --all is safe by default: Only new sessions are imported. Existing files are skipped. Use --regenerate to re-import existing files (YAML frontmatter is preserved). Use --regenerate --keep-frontmatter=false -y to regenerate everything including frontmatter.

    ","path":["Recipes","Maintenance","Turning Activity into Content"],"tags":[]},{"location":"recipes/publishing/#step-2-enrich-entries-with-metadata","level":3,"title":"Step 2: Enrich Entries with Metadata","text":"

    Raw entries have timestamps and conversations but lack the structured metadata that makes a journal searchable. Use /ctx-journal-enrich-all to process your entire backlog at once:

    /ctx-journal-enrich-all\n

    The skill finds all unenriched entries, filters out noise (suggestion sessions, very short sessions, multipart continuations), and processes each one by extracting titles, topics, technologies, and summaries from the conversation.

    For large backlogs (20+ entries), it can spawn subagents to process entries in parallel.

    To enrich a single entry instead:

    /ctx-journal-enrich twinkly-stirring-kettle\n/ctx-journal-enrich 2026-01-24\n

    After enrichment, an entry gains YAML frontmatter:

    ---\ntitle: \"Implement Redis caching for API endpoints\"\ndate: 2026-01-24\ntype: feature\noutcome: completed\ntopics:\n  - caching\n  - api-performance\ntechnologies:\n  - go\n  - redis\nkey_files:\n  - internal/api/middleware/cache.go\n  - internal/cache/redis.go\n---\n

    This metadata powers better navigation in the journal site:

    • titles replace slugs,
    • summaries appear in the index,
    • and search covers topics and technologies.
    ","path":["Recipes","Maintenance","Turning Activity into Content"],"tags":[]},{"location":"recipes/publishing/#step-3-generate-the-journal-site","level":3,"title":"Step 3: Generate the Journal Site","text":"

    With entries exported and enriched, generate the static site:

    # Generate site files\nctx journal site\n\n# Generate and build static HTML\nctx journal site --build\n\n# Generate and serve locally (opens at http://localhost:8000)\nctx journal site --serve\n\n# Custom output directory\nctx journal site --output ~/my-journal\n

    The site is generated in .context/journal-site/ by default. It uses zensical for static site generation (pipx install zensical).

    Or use the Makefile shortcut that combines export and rebuild:

    make journal\n

    This runs ctx journal import --all followed by ctx journal site --build, then reminds you to enrich before rebuilding. To serve the built site, use make journal-serve or ctx serve (serve-only, no regeneration).

    ","path":["Recipes","Maintenance","Turning Activity into Content"],"tags":[]},{"location":"recipes/publishing/#alternative-export-to-obsidian-vault","level":3,"title":"Alternative: Export to Obsidian Vault","text":"

    If you use Obsidian for knowledge management, generate a vault instead of (or alongside) the static site:

    ctx journal obsidian\nctx journal obsidian --output ~/vaults/ctx-journal\n

    This produces an Obsidian-ready directory with wikilinks, MOC (Map of Content) pages for topics/files/types, and a \"Related Sessions\" footer on each entry for graph connectivity. Open the output directory in Obsidian as a vault.

    The vault uses the same enriched source entries as the static site. Both outputs can coexist: The static site goes to .context/journal-site/, the vault to .context/journal-obsidian/.

    ","path":["Recipes","Maintenance","Turning Activity into Content"],"tags":[]},{"location":"recipes/publishing/#step-4-draft-blog-posts-from-activity","level":3,"title":"Step 4: Draft Blog Posts from Activity","text":"

    When your project reaches a milestone worth sharing, use /ctx-blog to draft a post from recent activity. The skill gathers context from multiple sources: git log, DECISIONS.md, LEARNINGS.md, completed tasks, and journal entries.

    /ctx-blog about the caching layer we just built\n/ctx-blog last week's refactoring work\n/ctx-blog lessons learned from the migration\n

    The skill gathers recent commits, decisions, and learnings; identifies a narrative arc; drafts an outline for approval; writes the full post; and saves it to docs/blog/YYYY-MM-DD-slug.md.

    Posts are written in first person with code snippets, commit references, and an honest discussion of what went wrong.

    The Output Is zensical-Flavored Markdown

    The blog skills produce Markdown tuned for a zensical site: topics: frontmatter (zensical's tag field), a docs/blog/ output path, and a banner image reference.

    The content is still standard Markdown and can be adapted to other static site generators, but the defaults assume a zensical project structure.

    ","path":["Recipes","Maintenance","Turning Activity into Content"],"tags":[]},{"location":"recipes/publishing/#step-5-write-changelog-posts-from-commit-ranges","level":3,"title":"Step 5: Write Changelog Posts from Commit Ranges","text":"

    For release notes or \"what changed\" posts, /ctx-blog-changelog takes a starting commit and a theme, then analyzes everything that changed:

    /ctx-blog-changelog 040ce99 \"building the journal system\"\n/ctx-blog-changelog HEAD~30 \"what's new in v0.2.0\"\n/ctx-blog-changelog v0.1.0 \"the road to v0.2.0\"\n

    The skill diffs the commit range, identifies the most-changed files, and constructs a narrative organized by theme rather than chronology, including a key commits table and before/after comparisons.

    ","path":["Recipes","Maintenance","Turning Activity into Content"],"tags":[]},{"location":"recipes/publishing/#step-6-generate-the-blog-feed","level":3,"title":"Step 6: Generate the Blog Feed","text":"

    After publishing blog posts, generate the Atom feed so readers and automation can discover new content:

    ctx site feed\n

    This scans docs/blog/ for finalized posts (reviewed_and_finalized: true), extracts title, date, author, topics, and summary, and writes a valid Atom 1.0 feed to site/feed.xml. The feed is also generated automatically as part of make site.

    The feed is available at ctx.ist/feed.xml.

    ","path":["Recipes","Maintenance","Turning Activity into Content"],"tags":[]},{"location":"recipes/publishing/#the-conversational-approach","level":2,"title":"The Conversational Approach","text":"

    You can also drive your publishing anytime with natural language:

    \"write about what we did this week\"\n\"turn today's session into a blog post\"\n\"make a changelog post covering everything since the last release\"\n\"enrich the last few journal entries\"\n

    The agent has full visibility into your .context/ state (tasks completed, decisions recorded, learnings captured), so its suggestions are grounded in what actually happened.

    ","path":["Recipes","Maintenance","Turning Activity into Content"],"tags":[]},{"location":"recipes/publishing/#putting-it-all-together","level":2,"title":"Putting It All Together","text":"

    The full pipeline from raw transcripts to published content:

    # 1. Import all sessions\nctx journal import --all\n\n# 2. In Claude Code: enrich all entries with metadata\n/ctx-journal-enrich-all\n\n# 3. Build and serve the journal site\nmake journal\nmake journal-serve\n\n# 3b. Or generate an Obsidian vault\nctx journal obsidian\n\n# 4. In Claude Code: draft a blog post\n/ctx-blog about the features we shipped this week\n\n# 5. In Claude Code: write a changelog post\n/ctx-blog-changelog v0.1.0 \"what's new in v0.2.0\"\n

    The journal pipeline is idempotent at every stage. You can rerun ctx journal import --all without losing enrichment. You can rebuild the site as many times as you want.

    ","path":["Recipes","Maintenance","Turning Activity into Content"],"tags":[]},{"location":"recipes/publishing/#tips","level":2,"title":"Tips","text":"
    • Import regularly. Run ctx journal import --all after each session to keep your journal current. Only new sessions are imported: Existing files are skipped by default.
    • Use batch enrichment. /ctx-journal-enrich-all filters noise (suggestion sessions, trivial sessions, multipart continuations) so you do not have to decide what is worth enriching.
    • Keep journal files in .gitignore. Session journals can contain sensitive data: file contents, commands, internal discussions, and error messages with stack traces. Add .context/journal/ and .context/journal-site/ to .gitignore.
    • Use /ctx-blog for narrative posts and /ctx-blog-changelog for release posts. One finds a story in recent activity, the other explains a commit range by theme.
    • Edit the drafts. These skills produce drafts, not final posts. Review the narrative, add your perspective, and remove anything that does not serve the reader.
    ","path":["Recipes","Maintenance","Turning Activity into Content"],"tags":[]},{"location":"recipes/publishing/#next-up","level":2,"title":"Next Up","text":"

    Running an Unattended AI Agent →: Set up an AI agent that works through tasks overnight without you at the keyboard.

    ","path":["Recipes","Maintenance","Turning Activity into Content"],"tags":[]},{"location":"recipes/publishing/#see-also","level":2,"title":"See Also","text":"
    • Session Journal: journal system, enrichment schema
    • CLI Reference: ctx journal: import, list, show session history
    • CLI Reference: ctx journal site: static site generation
    • CLI Reference: ctx journal obsidian: Obsidian vault export
    • CLI Reference: ctx serve: serve-only (no regeneration)
    • Browsing and Enriching Past Sessions: journal browsing workflow
    • The Complete Session: capturing context during a session
    ","path":["Recipes","Maintenance","Turning Activity into Content"],"tags":[]},{"location":"recipes/scratchpad-sync/","level":1,"title":"Syncing Scratchpad Notes Across Machines","text":"","path":["Recipes","Knowledge and Tasks","Syncing Scratchpad Notes Across Machines"],"tags":[]},{"location":"recipes/scratchpad-sync/#the-problem","level":2,"title":"The Problem","text":"

    You work from multiple machines: a desktop and a laptop, or a local machine and a remote dev server.

    The scratchpad entries are encrypted. The ciphertext (.context/scratchpad.enc) travels with git, but the encryption key lives outside the project at ~/.ctx/.ctx.key and is never committed. Without the key on each machine, you cannot read or write entries.

    How do you distribute the key and keep the scratchpad in sync?

    ","path":["Recipes","Knowledge and Tasks","Syncing Scratchpad Notes Across Machines"],"tags":[]},{"location":"recipes/scratchpad-sync/#tldr","level":2,"title":"TL;DR","text":"
    ctx init                                                  # 1. generates key\nscp ~/.ctx/.ctx.key user@machine-b:~/.ctx/.ctx.key        # 2. copy key\nchmod 600 ~/.ctx/.ctx.key                                 # 3. secure it\n# Normal git push/pull syncs the encrypted scratchpad.enc\n# On conflict: ctx pad resolve → rebuild → git add + commit\n

    Finding Your Key File

    The key is always at ~/.ctx/.ctx.key - one key, one machine.

    Treat the Key like a Password

    The scratchpad key is the only thing protecting your encrypted entries.

    Store a backup in a secure enclave such as a password manager, and treat it with the same care you would give passwords, certificates, or API tokens.

    Anyone with the key can decrypt every scratchpad entry.

    ","path":["Recipes","Knowledge and Tasks","Syncing Scratchpad Notes Across Machines"],"tags":[]},{"location":"recipes/scratchpad-sync/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Tool Type Purpose ctx init CLI command Initialize context (generates the key automatically) ctx pad add CLI command Add a scratchpad entry ctx pad rm CLI command Remove entries by stable ID (supports ranges) ctx pad edit CLI command Edit a scratchpad entry ctx pad resolve CLI command Show both sides of a merge conflict ctx pad merge CLI command Merge entries from other scratchpad files ctx pad import CLI command Bulk-import lines from a file ctx pad export CLI command Export blob entries to a directory scp Shell Copy the key file between machines git push / git pull Shell Sync the encrypted file via git/ctx-pad Skill Natural language interface to pad commands","path":["Recipes","Knowledge and Tasks","Syncing Scratchpad Notes Across Machines"],"tags":[]},{"location":"recipes/scratchpad-sync/#the-workflow","level":2,"title":"The Workflow","text":"","path":["Recipes","Knowledge and Tasks","Syncing Scratchpad Notes Across Machines"],"tags":[]},{"location":"recipes/scratchpad-sync/#step-1-initialize-on-machine-a","level":3,"title":"Step 1: Initialize on Machine A","text":"

    Run ctx init on your first machine. The key is created automatically at ~/.ctx/.ctx.key:

    ctx init\n# ...\n# Created ~/.ctx/.ctx.key (0600)\n# Created .context/scratchpad.enc\n

    The key lives outside the project directory and is never committed. The .enc file is tracked in git.

    Key Folder Change (v0.7.0+)

    If you built ctx from source or upgraded past v0.6.0, the key location changed to ~/.ctx/.ctx.key. Check these legacy folders and copy your key manually:

    # Old locations (pick whichever exists)\nls ~/.local/ctx/keys/        # pre-v0.7.0 user-level\nls .context/.ctx.key         # pre-v0.6.0 project-local\n\n# Copy to the new location\nmkdir -p ~/.ctx && chmod 700 ~/.ctx\ncp <old-key-path> ~/.ctx/.ctx.key\nchmod 600 ~/.ctx/.ctx.key\n
    ","path":["Recipes","Knowledge and Tasks","Syncing Scratchpad Notes Across Machines"],"tags":[]},{"location":"recipes/scratchpad-sync/#step-2-copy-the-key-to-machine-b","level":3,"title":"Step 2: Copy the Key to Machine B","text":"

    Use any secure transfer method. The key is always at ~/.ctx/.ctx.key:

    # scp - create the target directory first\nssh user@machine-b \"mkdir -p ~/.ctx && chmod 700 ~/.ctx\"\nscp ~/.ctx/.ctx.key user@machine-b:~/.ctx/.ctx.key\n\n# Or use a password manager, USB drive, etc.\n

    Set permissions on Machine B:

    chmod 600 ~/.ctx/.ctx.key\n

    Secure the Transfer

    The key is a raw 256-bit AES key. Anyone with the key can decrypt the scratchpad. Use an encrypted channel (SSH, password manager, vault).

    Never paste it in plaintext over email or chat.

    ","path":["Recipes","Knowledge and Tasks","Syncing Scratchpad Notes Across Machines"],"tags":[]},{"location":"recipes/scratchpad-sync/#step-3-normal-pushpull-workflow","level":3,"title":"Step 3: Normal Push/Pull Workflow","text":"

    The encrypted file is committed, so standard git sync works:

    # Machine A: add entries and push\nctx pad add \"staging API key: sk-test-abc123\"\ngit add .context/scratchpad.enc\ngit commit -m \"Update scratchpad\"\ngit push\n\n# Machine B: pull and read\ngit pull\nctx pad\n#   1. staging API key: sk-test-abc123\n

    Both machines have the same key, so both can decrypt the same .enc file.

    ","path":["Recipes","Knowledge and Tasks","Syncing Scratchpad Notes Across Machines"],"tags":[]},{"location":"recipes/scratchpad-sync/#step-4-read-and-write-from-either-machine","level":3,"title":"Step 4: Read and Write from Either Machine","text":"

    Once the key is distributed, all ctx pad commands work identically on both machines. Entries added on Machine A are visible on Machine B after a git pull, and vice versa.

    ","path":["Recipes","Knowledge and Tasks","Syncing Scratchpad Notes Across Machines"],"tags":[]},{"location":"recipes/scratchpad-sync/#step-5-handle-merge-conflicts","level":3,"title":"Step 5: Handle Merge Conflicts","text":"

    If both machines add entries between syncs, pulling will create a merge conflict on .context/scratchpad.enc. Git cannot merge binary (encrypted) content automatically.

    The fastest approach is ctx pad merge: It reads both conflict sides, deduplicates, and writes the union:

    # Extract theirs to a temp file, then merge it in\ngit show :3:.context/scratchpad.enc > /tmp/theirs.enc\ngit checkout --ours .context/scratchpad.enc\nctx pad merge /tmp/theirs.enc\n\n# Done: Commit the resolved scratchpad:\ngit add .context/scratchpad.enc\ngit commit -m \"Resolve scratchpad merge conflict\"\n

    Alternatively, use ctx pad resolve to inspect both sides manually:

    ctx pad resolve\n# === Ours (this machine) ===\n#   1. staging API key: sk-test-abc123\n#   2. check DNS after deploy\n#\n# === Theirs (incoming) ===\n#   1. staging API key: sk-test-abc123\n#   2. new endpoint: api.example.com/v2\n

    Then reconstruct the merged scratchpad:

    # Start fresh with all entries from both sides\nctx pad add \"staging API key: sk-test-abc123\"\nctx pad add \"check DNS after deploy\"\nctx pad add \"new endpoint: api.example.com/v2\"\n\n# Mark the conflict resolved\ngit add .context/scratchpad.enc\ngit commit -m \"Resolve scratchpad merge conflict\"\n
    ","path":["Recipes","Knowledge and Tasks","Syncing Scratchpad Notes Across Machines"],"tags":[]},{"location":"recipes/scratchpad-sync/#merge-conflict-walkthrough","level":2,"title":"Merge Conflict Walkthrough","text":"

    Here's a full scenario showing how conflicts arise and how to resolve them:

    1. Both machines start in sync (1 entry):

    Machine A: 1. staging API key: sk-test-abc123\nMachine B: 1. staging API key: sk-test-abc123\n

    2. Both add entries independently:

    Machine A adds: \"check DNS after deploy\"\nMachine B adds: \"new endpoint: api.example.com/v2\"\n

    3. Machine A pushes first. Machine B pulls and gets a conflict:

    git pull\n# CONFLICT (content): Merge conflict in .context/scratchpad.enc\n

    4. Machine B runs ctx pad resolve:

    ctx pad resolve\n# === Ours ===\n#   1. staging API key: sk-test-abc123\n#   2. new endpoint: api.example.com/v2\n#\n# === Theirs ===\n#   1. staging API key: sk-test-abc123\n#   2. check DNS after deploy\n

    5. Rebuild with entries from both sides and commit:

    # Clear and rebuild (or use the skill to guide you)\nctx pad add \"staging API key: sk-test-abc123\"\nctx pad add \"check DNS after deploy\"\nctx pad add \"new endpoint: api.example.com/v2\"\n\ngit add .context/scratchpad.enc\ngit commit -m \"Merge scratchpad: keep entries from both machines\"\n
    ","path":["Recipes","Knowledge and Tasks","Syncing Scratchpad Notes Across Machines"],"tags":[]},{"location":"recipes/scratchpad-sync/#conversational-approach","level":3,"title":"Conversational Approach","text":"

    When working with an AI assistant, you can resolve conflicts naturally:

    You: \"I have a scratchpad merge conflict. Can you resolve it?\"\n\nAgent: \"Let me extract theirs and merge it in.\"\n       [runs git show :3:.context/scratchpad.enc > /tmp/theirs.enc]\n       [runs git checkout --ours .context/scratchpad.enc]\n       [runs ctx pad merge /tmp/theirs.enc]\n       \"Merged 2 new entries (1 duplicate skipped). Want me to\n       commit the resolution?\"\n
    ","path":["Recipes","Knowledge and Tasks","Syncing Scratchpad Notes Across Machines"],"tags":[]},{"location":"recipes/scratchpad-sync/#tips","level":2,"title":"Tips","text":"
    • Back up the key: If you lose it, you lose access to all encrypted entries. Store a copy in your password manager.
    • One key per project: Each ctx init generates a unique key. Don't reuse keys across projects.
    • Keys work in worktrees: Because the key lives at ~/.ctx/.ctx.key (outside the project), git worktrees on the same machine share the key automatically. No special setup needed.
    • Plaintext fallback for non-sensitive projects: If encryption adds friction and you have nothing sensitive, set scratchpad_encrypt: false in .ctxrc. Merge conflicts become trivial text merges.
    • Never commit the key: The key is stored outside the project at ~/.ctx/.ctx.key and should never be copied into the repository.
    ","path":["Recipes","Knowledge and Tasks","Syncing Scratchpad Notes Across Machines"],"tags":[]},{"location":"recipes/scratchpad-sync/#next-up","level":2,"title":"Next Up","text":"

    Hook Output Patterns →: Choose the right output pattern for your Claude Code hooks.

    ","path":["Recipes","Knowledge and Tasks","Syncing Scratchpad Notes Across Machines"],"tags":[]},{"location":"recipes/scratchpad-sync/#see-also","level":2,"title":"See Also","text":"
    • Scratchpad: feature overview, all commands, when to use scratchpad vs context files
    • Persisting Decisions, Learnings, and Conventions: for structured knowledge that outlives the scratchpad
    ","path":["Recipes","Knowledge and Tasks","Syncing Scratchpad Notes Across Machines"],"tags":[]},{"location":"recipes/scratchpad-with-claude/","level":1,"title":"Using the Scratchpad","text":"","path":["Recipes","Knowledge and Tasks","Using the Scratchpad"],"tags":[]},{"location":"recipes/scratchpad-with-claude/#the-problem","level":2,"title":"The Problem","text":"

    During a session you accumulate quick notes, reminders, intermediate values, and sometimes sensitive tokens. They don't fit TASKS.md (not work items) or DECISIONS.md (not decisions). They don't have the structured fields that LEARNINGS.md requires.

    Without somewhere to put them, they get lost between sessions.

    How do you capture working memory that persists across sessions without polluting your structured context files?

    ","path":["Recipes","Knowledge and Tasks","Using the Scratchpad"],"tags":[]},{"location":"recipes/scratchpad-with-claude/#tldr","level":2,"title":"TL;DR","text":"
    ctx pad add \"check DNS propagation after deploy\"\nctx pad         # list entries\nctx pad show 1  # print entry (pipe-friendly)\n

    Entries are encrypted at rest and travel with git.

    Use the /ctx-pad skill to manage entries from inside your AI session.

    ","path":["Recipes","Knowledge and Tasks","Using the Scratchpad"],"tags":[]},{"location":"recipes/scratchpad-with-claude/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Tool Type Purpose ctx pad CLI command List all scratchpad entries ctx pad show N CLI command Output raw text of entry N (pipe-friendly) ctx pad add CLI command Add a new entry ctx pad edit CLI command Replace, append to, or prepend to an entry ctx pad add --file CLI command Ingest a file as a blob entry ctx pad show N --out CLI command Extract a blob entry to a file ctx pad rm CLI command Remove entries by stable ID (supports ranges) ctx pad normalize CLI command Reassign entry IDs as 1..N ctx pad mv CLI command Reorder entries ctx pad --tag CLI command Filter entries by tag ctx pad tags CLI command List all tags with counts ctx pad import CLI command Bulk-import lines from a file (or stdin) ctx pad export CLI command Export all blob entries to a directory /ctx-pad Skill Natural language interface to all pad commands","path":["Recipes","Knowledge and Tasks","Using the Scratchpad"],"tags":[]},{"location":"recipes/scratchpad-with-claude/#the-workflow","level":2,"title":"The Workflow","text":"","path":["Recipes","Knowledge and Tasks","Using the Scratchpad"],"tags":[]},{"location":"recipes/scratchpad-with-claude/#step-1-add-a-note-naturally","level":3,"title":"Step 1: Add a Note Naturally","text":"

    You don't need to remember any syntax. Just tell your assistant what to jot down:

    You: \"jot down: check DNS propagation after deploy\"\n\nAgent: \"Added to scratchpad:\n  1. check DNS propagation after deploy\"\n

    Behind the scenes, the agent runs ctx pad add \"check DNS propagation after deploy\".

    ","path":["Recipes","Knowledge and Tasks","Using the Scratchpad"],"tags":[]},{"location":"recipes/scratchpad-with-claude/#step-2-view-your-scratchpad","level":3,"title":"Step 2: View Your Scratchpad","text":"
    You: \"show my scratchpad\"\n\nAgent: \"Your scratchpad has 3 entries:\n  1. check DNS propagation after deploy\n  2. staging API endpoint: api.staging.example.com/v2\n  3. retry limit should be 5, not 3\"\n
    ","path":["Recipes","Knowledge and Tasks","Using the Scratchpad"],"tags":[]},{"location":"recipes/scratchpad-with-claude/#step-3-edit-an-entry","level":3,"title":"Step 3: Edit an Entry","text":"
    You: \"update entry 2 to include the new endpoint\"\n\nAgent: \"Updated entry 2:\n  was: staging API endpoint: api.staging.example.com/v2\n  now: staging API endpoint: api.prod.example.com/v2\"\n

    For partial edits, the agent can use --append or --prepend:

    You: \"append the port number to entry 2\"\n\nAgent: \"Updated entry 2:\n  staging API endpoint: api.prod.example.com/v2 :8443\"\n
    ","path":["Recipes","Knowledge and Tasks","Using the Scratchpad"],"tags":[]},{"location":"recipes/scratchpad-with-claude/#step-4-compose-entries","level":3,"title":"Step 4: Compose Entries","text":"

    Use ctx pad show N to pipe entry content into other operations:

    You: \"append the content of entry 3 to entry 1\"\n\nAgent: [runs ctx pad edit 1 --append \"$(ctx pad show 3)\"]\n       \"Done. Entry 1 is now:\n        check DNS propagation after deploy retry limit should be 5, not 3\"\n
    ","path":["Recipes","Knowledge and Tasks","Using the Scratchpad"],"tags":[]},{"location":"recipes/scratchpad-with-claude/#step-5-reorder-by-priority","level":3,"title":"Step 5: Reorder by Priority","text":"
    You: \"move entry 4 to the top\"\n\nAgent: [runs ctx pad mv 4 1]\n       \"Moved entry 4 to position 1. Scratchpad reordered.\"\n
    ","path":["Recipes","Knowledge and Tasks","Using the Scratchpad"],"tags":[]},{"location":"recipes/scratchpad-with-claude/#step-6-clean-up","level":3,"title":"Step 6: Clean Up","text":"
    You: \"remove entry 2, it's done\"\n\nAgent: [runs ctx pad rm 2]\n       \"Removed entry 2. 3 entries remaining.\n        (IDs are stable; remaining entries keep their IDs.)\"\n
    ","path":["Recipes","Knowledge and Tasks","Using the Scratchpad"],"tags":[]},{"location":"recipes/scratchpad-with-claude/#step-7-store-a-file-as-a-blob","level":3,"title":"Step 7: Store a File as a Blob","text":"

    The scratchpad can hold small files (up to 64 KB) as encrypted blob entries. The file is base64-encoded and stored alongside a label you provide:

    # Ingest a file: the first argument is the label\nctx pad add \"deploy config\" --file ./deploy.yaml\n\n# List shows the label with a [BLOB] marker\nctx pad\n#   1. check DNS propagation after deploy\n#   2. deploy config [BLOB]\n
    ","path":["Recipes","Knowledge and Tasks","Using the Scratchpad"],"tags":[]},{"location":"recipes/scratchpad-with-claude/#step-8-extract-a-blob","level":3,"title":"Step 8: Extract a Blob","text":"

    Use show --out to write the decoded file back to disk:

    # Write blob entry to a file\nctx pad show 2 --out ./recovered-deploy.yaml\n\n# Or print to stdout (for piping)\nctx pad show 2 | head -5\n

    Blob entries are encrypted identically to text entries: They're just base64-encoded before encryption. The --out flag decodes and writes the raw bytes.

    ","path":["Recipes","Knowledge and Tasks","Using the Scratchpad"],"tags":[]},{"location":"recipes/scratchpad-with-claude/#step-9-bulk-import-notes","level":3,"title":"Step 9: Bulk Import Notes","text":"

    When you have a file with many notes (one per line), import them in bulk instead of adding one at a time:

    # Import from a file: Each non-empty line becomes an entry\nctx pad import notes.txt\n\n# Or pipe from stdin\ngrep TODO *.go | ctx pad import -\n

    All entries are written in a single encrypt/write cycle, regardless of how many lines the file contains.

    ","path":["Recipes","Knowledge and Tasks","Using the Scratchpad"],"tags":[]},{"location":"recipes/scratchpad-with-claude/#step-10-export-blobs-to-disk","level":3,"title":"Step 10: Export Blobs to Disk","text":"

    Export all blob entries to a directory as individual files. Each blob's label becomes the filename:

    # Export to a directory (created if needed)\nctx pad export ./ideas\n\n# Preview what would be exported\nctx pad export --dry-run ./ideas\n\n# Force overwrite existing files\nctx pad export --force ./backup\n

    When a file already exists, a unix timestamp is prepended to the filename to avoid collisions. Use --force to overwrite instead.

    ","path":["Recipes","Knowledge and Tasks","Using the Scratchpad"],"tags":[]},{"location":"recipes/scratchpad-with-claude/#step-11-tag-entries-for-organization","level":3,"title":"Step 11: Tag Entries for Organization","text":"

    Tags let you categorize entries without any structure beyond a #word token in the text. Add them when creating or editing entries:

    You: \"jot down: check DNS propagation #later\"\nYou: \"tag entry 2 as urgent\"\n\nAgent: [runs ctx pad edit 2 --tag urgent]\n       \"Updated entry 2.\"\n

    Filter your scratchpad by tag:

    You: \"show me everything tagged later\"\n\nAgent: [runs ctx pad --tag later]\n       \"  1. check DNS propagation #later\n        3. review PR feedback #later #ci\"\n

    Entry IDs are stable; they don't shift when other entries are deleted, so ctx pad rm 3 always targets the same entry regardless of deletions or active filters. Use ctx pad normalize to reassign IDs as 1..N.

    Exclude a tag with ~:

    ctx pad --tag ~later         # everything NOT tagged #later\nctx pad --tag later --tag ci # entries with BOTH tags (AND logic)\n

    See what tags you're using:

    You: \"what tags do I have?\"\n\nAgent: [runs ctx pad tags]\n       \"ci       1\n        later    2\n        urgent   1\"\n

    Tags work on blob entries too; they're extracted from the label:

    ctx pad add \"deploy config #prod\" --file ./deploy.yaml\nctx pad --tag prod\n#   1. deploy config #prod [BLOB]\n
    ","path":["Recipes","Knowledge and Tasks","Using the Scratchpad"],"tags":[]},{"location":"recipes/scratchpad-with-claude/#using-ctx-pad-in-a-session","level":2,"title":"Using /ctx-pad in a Session","text":"

    Invoke the /ctx-pad skill first, then describe what you want in natural language. Without the skill prefix, the agent may route your request to TASKS.md or another context file instead of the scratchpad.

    You: /ctx-pad jot down: check DNS after deploy\nYou: /ctx-pad show my scratchpad\nYou: /ctx-pad delete entry 3\n

    Once the skill is active, it translates intent into commands:

    You say (after /ctx-pad) What the agent does \"jot down: check DNS after deploy\" ctx pad add \"check DNS after deploy\" \"remember this: retry limit is 5\" ctx pad add \"retry limit is 5\" \"show my scratchpad\" / \"what's on my pad\" ctx pad \"show me entry 3\" ctx pad show 3 \"delete the third one\" / \"remove entry 3\" ctx pad rm 3 \"remove entries 3 through 5\" ctx pad rm 3-5 \"renumber my scratchpad\" ctx pad normalize \"change entry 2 to ...\" ctx pad edit 2 \"new text\" \"append ' +important' to entry 3\" ctx pad edit 3 --append \" +important\" \"prepend 'URGENT:' to entry 1\" ctx pad edit 1 --prepend \"URGENT: \" \"prioritize entry 4\" / \"move to the top\" ctx pad mv 4 1 \"import my notes from notes.txt\" ctx pad import notes.txt \"export all blobs to ./ideas\" ctx pad export ./ideas \"show entries tagged later\" ctx pad --tag later \"show everything except later\" ctx pad --tag ~later \"what tags do I have\" ctx pad tags \"tag entry 5 as urgent\" ctx pad edit 5 --tag urgent

    When in Doubt, Use the CLI Directly

    The ctx pad commands work the same whether you run them yourself or let the skill invoke them.

    If the agent misroutes a request, fall back to ctx pad add \"...\" in your terminal.

    ","path":["Recipes","Knowledge and Tasks","Using the Scratchpad"],"tags":[]},{"location":"recipes/scratchpad-with-claude/#when-to-use-scratchpad-vs-context-files","level":2,"title":"When to Use Scratchpad vs Context Files","text":"Situation Use Temporary reminders (\"check X after deploy\") Scratchpad Session-start reminders (\"remind me next session\") ctx remind Working values during debugging (ports, endpoints, counts) Scratchpad Sensitive tokens or API keys (short-term storage) Scratchpad Quick notes that don't fit anywhere else Scratchpad Work items with completion tracking TASKS.md Trade-offs between alternatives with rationale DECISIONS.md Reusable lessons with context/lesson/application LEARNINGS.md Codified patterns and standards CONVENTIONS.md

    Decision Guide

    • If it has structured fields (context, rationale, lesson, application), it belongs in a context file like DECISIONS.md or LEARNINGS.md.
    • If it's a work item you'll mark done, it belongs in TASKS.md.
    • If you want a message relayed VERBATIM at the next session start, it belongs in ctx remind.
    • If it's a quick note, reminder, or working value (especially if it's sensitive or ephemeral) it belongs on the scratchpad.

    Scratchpad Is Not a Junk Drawer

    The scratchpad is for working memory, not long-term storage.

    If a note is still relevant after several sessions, promote it:

    A persistent reminder becomes a task, a recurring value becomes a convention, a hard-won insight becomes a learning.

    ","path":["Recipes","Knowledge and Tasks","Using the Scratchpad"],"tags":[]},{"location":"recipes/scratchpad-with-claude/#tips","level":2,"title":"Tips","text":"
    • Entries persist across sessions: The scratchpad is committed (encrypted) to git, so entries survive session boundaries. Pick up where you left off.
    • Entries are numbered and reorderable: Use ctx pad mv to put high-priority items at the top.
    • ctx pad show N enables unix piping: Output raw entry text with no numbering prefix. Compose with --append, --prepend, or other shell tools.
    • Never mention the key file contents to the AI: The agent knows how to use ctx pad commands but should never read or print the encryption key (~/.ctx/.ctx.key) directly.
    • Encryption is transparent: You interact with plaintext; the encryption/decryption happens automatically on every read/write.
    ","path":["Recipes","Knowledge and Tasks","Using the Scratchpad"],"tags":[]},{"location":"recipes/scratchpad-with-claude/#next-up","level":2,"title":"Next Up","text":"

    Syncing Scratchpad Notes Across Machines →: Distribute encryption keys and scratchpad data across environments.

    ","path":["Recipes","Knowledge and Tasks","Using the Scratchpad"],"tags":[]},{"location":"recipes/scratchpad-with-claude/#see-also","level":2,"title":"See Also","text":"
    • Scratchpad: feature overview, all commands, encryption details, plaintext override
    • Persisting Decisions, Learnings, and Conventions: for structured knowledge that outlives the scratchpad
    • The Complete Session: full session lifecycle showing how the scratchpad fits into the broader workflow
    ","path":["Recipes","Knowledge and Tasks","Using the Scratchpad"],"tags":[]},{"location":"recipes/session-archaeology/","level":1,"title":"Browsing and Enriching Past Sessions","text":"","path":["Recipes","Sessions","Browsing and Enriching Past Sessions"],"tags":[]},{"location":"recipes/session-archaeology/#the-problem","level":2,"title":"The Problem","text":"

    After weeks of AI-assisted development you have dozens of sessions scattered across JSONL files in ~/.claude/projects/. Finding the session where you debugged the Redis connection pool, or remembering what you decided about the caching strategy three Tuesdays ago, often means grepping raw JSON.

    There is no table of contents, no search, and no summaries.

    This recipe shows how to turn that raw session history into a browsable, searchable, and enriched journal site you can navigate in your browser.

    ","path":["Recipes","Sessions","Browsing and Enriching Past Sessions"],"tags":[]},{"location":"recipes/session-archaeology/#tldr","level":2,"title":"TL;DR","text":"

    Export and Generate

    ctx journal import --all\nctx journal site --serve\n

    Enrich

    /ctx-journal-enrich-all\n

    Rebuild

    ctx journal site --serve\n

    Read on for what each stage does and why.

    ","path":["Recipes","Sessions","Browsing and Enriching Past Sessions"],"tags":[]},{"location":"recipes/session-archaeology/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Tool Type Purpose ctx journal source Command List parsed sessions with metadata ctx journal source --show Command Inspect a specific session in detail ctx journal import Command Import sessions to editable journal Markdown ctx journal site Command Generate a static site from journal entries ctx journal obsidian Command Generate an Obsidian vault from journal entries ctx journal schema check Command Validate JSONL files and report schema drift ctx journal schema dump Command Print the embedded JSONL schema definition ctx serve Command Serve any zensical directory (default: journal) /ctx-history Skill Browse sessions inside your AI assistant /ctx-journal-enrich Skill Add frontmatter metadata to a single entry /ctx-journal-enrich-all Skill Full pipeline: import if needed, then batch-enrich","path":["Recipes","Sessions","Browsing and Enriching Past Sessions"],"tags":[]},{"location":"recipes/session-archaeology/#the-workflow","level":2,"title":"The Workflow","text":"

    The session journal follows a four-stage pipeline.

    Each stage is idempotent and safe to re-run:

    By default, each stage skips entries that have already been processed.

    import -> enrich -> rebuild\n
    Stage Tool What it does Skips if Where Import ctx journal import --all Converts session JSONL to Markdown File already exists (safe default) CLI or agent Enrich /ctx-journal-enrich-all Adds frontmatter, summaries, topic tags Frontmatter already present Agent only Rebuild ctx journal site --build Generates browsable static HTML N/A CLI only Obsidian ctx journal obsidian Generates Obsidian vault with wikilinks N/A CLI only

    Where Do You Run Each Stage?

    Import (Steps 1 to 3) works equally well from the terminal or inside your AI assistant via /ctx-history. The CLI is fine here: the agent adds no special intelligence; it just runs the same command.

    Enrich (Step 4) requires the agent: it reads conversation content and produces structured metadata.

    Rebuild and serve (Step 5) is a terminal operation that starts a long-running server.

    ","path":["Recipes","Sessions","Browsing and Enriching Past Sessions"],"tags":[]},{"location":"recipes/session-archaeology/#step-1-list-your-sessions","level":3,"title":"Step 1: List Your Sessions","text":"

    Start by seeing what sessions exist for the current project:

    ctx journal source\n

    Sample output:

    Sessions (newest first)\n=======================\n\n  Slug                           Project   Date         Duration  Turns  Tokens\n  gleaming-wobbling-sutherland   ctx       2026-02-07   1h 23m    47     82,341\n  twinkly-stirring-kettle        ctx       2026-02-06   0h 45m    22     38,102\n  bright-dancing-hopper          ctx       2026-02-05   2h 10m    63     124,500\n  quiet-flowing-dijkstra         ctx       2026-02-04   0h 18m    11     15,230\n  ...\n

    Slugs Look Cryptic?

    These auto-generated slugs (gleaming-wobbling-sutherland) are hard to recognize later.

    Use /ctx-journal-enrich to add human-readable titles, topic tags, and summaries to exported journal entries, making them easier to find.

    Filter by project or tool if you work across multiple codebases:

    ctx journal source --project ctx --limit 10\nctx journal source --tool claude-code\nctx journal source --all-projects\n
    ","path":["Recipes","Sessions","Browsing and Enriching Past Sessions"],"tags":[]},{"location":"recipes/session-archaeology/#step-2-inspect-a-specific-session","level":3,"title":"Step 2: Inspect a Specific Session","text":"

    Before exporting everything, inspect a single session to see its metadata and conversation summary:

    ctx journal source --show --latest\n

    Or look up a specific session by its slug, partial ID, or UUID:

    ctx journal source --show gleaming-wobbling-sutherland\nctx journal source --show twinkly\nctx journal source --show abc123\n

    Add --full to see the complete message content instead of the summary view:

    ctx journal source --show --latest --full\n

    This is useful for checking what happened before deciding whether to export and enrich it.

    ","path":["Recipes","Sessions","Browsing and Enriching Past Sessions"],"tags":[]},{"location":"recipes/session-archaeology/#step-3-import-sessions-to-the-journal","level":3,"title":"Step 3: Import Sessions to the Journal","text":"

    Import converts raw session data into editable Markdown files in .context/journal/:

    # Import all sessions from the current project\nctx journal import --all\n\n# Import a single session\nctx journal import gleaming-wobbling-sutherland\n\n# Include sessions from all projects\nctx journal import --all --all-projects\n

    --keep-frontmatter=false Discards Enrichments

    --keep-frontmatter=false discards enriched YAML frontmatter during regeneration.

    Back up your journal before using this flag.

    Each imported file contains session metadata (date, time, duration, model, project, git branch), a tool usage summary, and the full conversation transcript.

    Re-importing is safe. Running ctx journal import --all only imports new sessions: Existing files are never touched. Use --dry-run to preview what would be imported without writing anything.

    To re-import existing files (e.g., after a format improvement), use --regenerate: Conversation content is regenerated while preserving any YAML frontmatter you or the enrichment skill has added. You'll be prompted before any files are overwritten.

    --regenerate Replaces the Markdown Body

    --regenerate preserves YAML frontmatter but replaces the entire Markdown body with freshly generated content from the source JSONL.

    If you manually edited the conversation transcript (added notes, redacted sensitive content, restructured sections), those edits will be lost.

    BACK UP YOUR JOURNAL FIRST.

    To protect entries you've hand-edited, you can explicitly lock them:

    ctx journal lock <pattern>\n

    Locked entries are always skipped, regardless of flags.

    If you prefer to add locked: true directly in frontmatter during enrichment, run ctx journal sync to propagate the lock state to .state.json:

    ctx journal sync\n

    See ctx journal lock --help and ctx journal sync --help for details.

    ","path":["Recipes","Sessions","Browsing and Enriching Past Sessions"],"tags":[]},{"location":"recipes/session-archaeology/#step-4-enrich-with-metadata","level":3,"title":"Step 4: Enrich with Metadata","text":"

    Raw imports have timestamps and transcripts but lack the semantic metadata that makes sessions searchable: topics, technology tags, outcome status, and summaries. The /ctx-journal-enrich* skills add this structured frontmatter.

    Locked entries are skipped by enrichment skills, just as they are by import. Lock entries you want to protect before running batch enrichment.

    Batch enrichment (recommended):

    /ctx-journal-enrich-all\n

    The skill finds all unenriched entries, filters out noise (suggestion sessions, very short sessions, multipart continuations), and processes each one by extracting titles, topics, technologies, and summaries from the conversation.

    It shows you a grouped summary before applying changes so you can scan quickly rather than reviewing one by one.

    For large backlogs (20+ entries), the skill can spawn subagents to process entries in parallel.

    Single-entry enrichment:

    /ctx-journal-enrich twinkly\n/ctx-journal-enrich 2026-02-06\n

    Each enriched entry gets YAML frontmatter like this:

    ---\ntitle: \"Implement Redis caching middleware\"\ndate: 2026-02-06\ntype: feature\noutcome: completed\ntopics:\n  - caching\n  - api-performance\ntechnologies:\n  - go\n  - redis\nlibraries:\n  - go-redis/redis\nkey_files:\n  - internal/cache/redis.go\n  - internal/api/middleware/cache.go\n---\n

    The skill also generates a summary and can extract decisions, learnings, and tasks mentioned during the session.

    ","path":["Recipes","Sessions","Browsing and Enriching Past Sessions"],"tags":[]},{"location":"recipes/session-archaeology/#step-5-generate-and-serve-the-site","level":3,"title":"Step 5: Generate and Serve the Site","text":"

    With imported and enriched journal files, generate the static site:

    # Generate site structure only\nctx journal site\n\n# Generate and build static HTML\nctx journal site --build\n\n# Generate, build, and serve locally\nctx journal site --serve\n

    Then open http://localhost:8000 to browse.

    The site includes a date-sorted index, individual session pages with full conversations, search (press /), dark mode, and enriched titles in the navigation when frontmatter exists.

    You can also serve an already-generated site using ctx serve (serve-only, no regeneration).

    The site generator requires zensical (pipx install zensical).

    ","path":["Recipes","Sessions","Browsing and Enriching Past Sessions"],"tags":[]},{"location":"recipes/session-archaeology/#where-does-agent-add-value","level":2,"title":"Where Does the Agent Add Value?","text":"

    Export, list, and show are mechanical: The agent runs the same CLI commands you would, so you can stay in your terminal for those.

    The agent earns its keep in enrich. It reads conversation content, understands what happened, and produces structured metadata. That is agent work, not CLI work.

    You can also ask your agent to browse sessions conversationally instead of remembering flags:

    What did we work on last week?\nShow me the session about Redis.\nImport everything to the journal.\n

    This is convenient but not required: ctx journal source gives you the same inventory.

    Where the agent genuinely helps is chaining the pipeline:

    You:   What happened last Tuesday?\nAgent: Last Tuesday you worked on two sessions:\n       - bright-dancing-hopper (2h 10m): refactored the middleware\n         pipeline and added Redis caching\n       - quiet-flowing-dijkstra (18m): quick fix for a nil pointer\n         in the config loader\n       Want me to export and enrich them?\nYou:   Yes, do it.\nAgent: Exports both, enriches, then proposes frontmatter.\n

    The value is staying in one context while the agent runs import -> enrich without you manually switching tools.

    ","path":["Recipes","Sessions","Browsing and Enriching Past Sessions"],"tags":[]},{"location":"recipes/session-archaeology/#putting-it-all-together","level":2,"title":"Putting It All Together","text":"

    A typical pipeline from raw sessions to a browsable site:

    # Terminal: import and generate\nctx journal import --all\nctx journal site --serve\n
    # AI assistant: enrich\n/ctx-journal-enrich-all\n
    # Terminal: rebuild with enrichments\nctx journal site --serve\n

    If your project includes Makefile.ctx (deployed by ctx init), use make journal to combine import and rebuild stages. Then enrich inside Claude Code, then make journal again to pick up enrichments.

    ","path":["Recipes","Sessions","Browsing and Enriching Past Sessions"],"tags":[]},{"location":"recipes/session-archaeology/#session-retention-and-cleanup","level":2,"title":"Session Retention and Cleanup","text":"

    Claude Code does not keep JSONL transcripts forever. Understanding its cleanup behavior helps you avoid losing session history.

    ","path":["Recipes","Sessions","Browsing and Enriching Past Sessions"],"tags":[]},{"location":"recipes/session-archaeology/#default-behavior","level":3,"title":"Default Behavior","text":"

    Claude Code retains session transcripts for approximately 30 days. After that, JSONL files are automatically deleted during cleanup. Once deleted, ctx journal can no longer see those sessions - the data is gone.

    ","path":["Recipes","Sessions","Browsing and Enriching Past Sessions"],"tags":[]},{"location":"recipes/session-archaeology/#the-cleanupperioddays-setting","level":3,"title":"The cleanupPeriodDays Setting","text":"

    Claude Code exposes a cleanupPeriodDays setting in its configuration (~/.claude/settings.json) that controls retention:

    Value Behavior 30 (default) Transcripts older than 30 days are deleted 60, 90, etc. Extends the retention window 0 Disables writing new transcripts entirely - not \"keep forever\"

    Setting cleanupPeriodDays To 0

    Setting this to 0 does not mean \"never delete.\" It disables transcript creation altogether. No new JSONL files are written, which means ctx journal sees nothing new. This is rarely what you want.

    ","path":["Recipes","Sessions","Browsing and Enriching Past Sessions"],"tags":[]},{"location":"recipes/session-archaeology/#why-journal-import-matters","level":3,"title":"Why Journal Import Matters","text":"

    The journal import pipeline (Steps 1-4 above) is your archival mechanism. Imported Markdown files in .context/journal/ persist independently of Claude Code's cleanup cycle. Even after the source JSONL files are deleted, your journal entries remain.

    Recommendation: import regularly - weekly, or after any session worth revisiting. A quick ctx journal import --all takes seconds and ensures nothing falls through the 30-day window.

    ","path":["Recipes","Sessions","Browsing and Enriching Past Sessions"],"tags":[]},{"location":"recipes/session-archaeology/#quick-archival-checklist","level":3,"title":"Quick Archival Checklist","text":"
    1. Run ctx journal import --all at least weekly
    2. Enrich high-value sessions with /ctx-journal-enrich before the details fade from your own memory
    3. Lock enriched entries (ctx journal lock <pattern>) to protect them from accidental regeneration
    4. Rebuild the journal site periodically to keep it current
    ","path":["Recipes","Sessions","Browsing and Enriching Past Sessions"],"tags":[]},{"location":"recipes/session-archaeology/#tips","level":2,"title":"Tips","text":"
    • Start with /ctx-history inside your AI assistant. If you want to quickly check what happened in a recent session without leaving your editor, /ctx-history lets you browse interactively without importing.
    • Large sessions may be split automatically. Sessions with 200+ messages can be split into multiple parts (session-abc123.md, session-abc123-p2.md, session-abc123-p3.md) with navigation links between them. The site generator can handle this.
    • Suggestion sessions can be separated. Claude Code can generate short suggestion sessions for autocomplete. These may appear under a separate section in the site index, so they do not clutter your main session list.
    • Your agent is a good session browser. You do not need to remember slugs, dates, or flags. Ask \"what did we do yesterday?\" or \"find the session about Redis\" and it can map the question to recall commands.

    Journal Files Are Sensitive

    Journal files MUST be .gitignored.

    Session transcripts can contain sensitive data such as file contents, commands, error messages with stack traces, and potentially API keys.

    Add .context/journal/, .context/journal-site/, and .context/journal-obsidian/ to your .gitignore.

    ","path":["Recipes","Sessions","Browsing and Enriching Past Sessions"],"tags":[]},{"location":"recipes/session-archaeology/#next-up","level":2,"title":"Next Up","text":"

    Persisting Decisions, Learnings, and Conventions →: Record decisions, learnings, and conventions so they survive across sessions.

    ","path":["Recipes","Sessions","Browsing and Enriching Past Sessions"],"tags":[]},{"location":"recipes/session-archaeology/#see-also","level":2,"title":"See Also","text":"
    • The Complete Session: where session saving fits in the daily workflow
    • Turning Activity into Content: generating blog posts from session history
    • Session Journal: full documentation of the journal system
    • CLI Reference: ctx journal: all journal subcommands and flags
    • CLI Reference: ctx serve: serve-only (no regeneration)
    • Context Files: the .context/ directory structure
    ","path":["Recipes","Sessions","Browsing and Enriching Past Sessions"],"tags":[]},{"location":"recipes/session-ceremonies/","level":1,"title":"Session Ceremonies","text":"","path":["Recipes","Sessions","Session Ceremonies"],"tags":[]},{"location":"recipes/session-ceremonies/#the-problem","level":2,"title":"The Problem","text":"

    Sessions have two critical moments: the start and the end.

    • At the start, you need the agent to load context and confirm it knows what is going on.
    • At the end, you need to capture whatever the session produced before the conversation disappears.

    Most ctx skills work conversationally: \"jot down: check DNS after deploy\" is as good as /ctx-pad add \"check DNS after deploy\". But session boundaries are different. They are well-defined moments with specific requirements, and partial execution is costly.

    If the agent only half-loads context at the start, it works from stale assumptions. If it only half-persists at the end, learnings and decisions are lost.

    This Is One of the Few Times Being Explicit Matters

    Session ceremonies are the two bookend skills that mark these boundaries.

    They are the exception to the conversational rule:

    Invoke /ctx-remember and /ctx-wrap-up explicitly as slash commands.

    ","path":["Recipes","Sessions","Session Ceremonies"],"tags":[]},{"location":"recipes/session-ceremonies/#tldr","level":2,"title":"TL;DR","text":"

    Start: /ctx-remember: load context, get a structured readback.

    End: /ctx-wrap-up: review session, propose candidates, persist approved items.

    Use the slash commands, not conversational triggers, for completeness.

    ","path":["Recipes","Sessions","Session Ceremonies"],"tags":[]},{"location":"recipes/session-ceremonies/#explicit-invocation-matters","level":2,"title":"Explicit Invocation Matters","text":"

    Most ctx skills encourage natural language. These two are different:

    Well-defined moments: Sessions have clear boundaries. A slash command marks the boundary unambiguously.

    Ambiguity risk: \"Do you remember?\" could mean many things. /ctx-remember means exactly one thing: load context and present a structured readback.

    Completeness: Conversational triggers risk partial execution. The agent might load some files but skip the session history, or persist one learning but forget to check for uncommitted changes. The slash command runs the full ceremony.

    Muscle memory: Typing /ctx-remember at session start and /ctx-wrap-up at session end becomes a habit, like opening and closing braces.

    ","path":["Recipes","Sessions","Session Ceremonies"],"tags":[]},{"location":"recipes/session-ceremonies/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Tool Type Purpose /ctx-remember Skill Load context and present structured readback /ctx-wrap-up Skill Gather session signal, propose and persist context /ctx-commit Skill Commit with context capture (offered by wrap-up) ctx agent CLI Load token-budgeted context packet ctx journal source CLI List recent sessions ctx add CLI Persist learnings, decisions, conventions, tasks","path":["Recipes","Sessions","Session Ceremonies"],"tags":[]},{"location":"recipes/session-ceremonies/#session-start-ctx-remember","level":2,"title":"Session Start: /ctx-remember","text":"

    Invoke at the beginning of every session:

    /ctx-remember\n

    The skill silently:

    1. Loads the context packet via ctx agent --budget 4000
    2. Reads TASKS.md, DECISIONS.md, LEARNINGS.md
    3. Checks recent sessions via ctx journal source --limit 3

    Then presents a structured readback with four sections:

    • Last session: topic, date, what was accomplished
    • Active work: pending and in-progress tasks
    • Recent context: 1-2 relevant decisions or learnings
    • Next step: suggestion or question about what to focus on

    The readback should feel like recall, not a file system tour. If the agent says \"Let me check if there are files...\" instead of a confident summary, the skill is not working correctly.

    What about 'do you remember?'

    The conversational trigger still works. But /ctx-remember guarantees the full ceremony runs:

    • context packet,
    • file reads,
    • session history,
    • and all four readback sections.

    The conversational version may cut corners.

    ","path":["Recipes","Sessions","Session Ceremonies"],"tags":[]},{"location":"recipes/session-ceremonies/#session-end-ctx-wrap-up","level":2,"title":"Session End: /ctx-wrap-up","text":"

    Invoke before ending a session where meaningful work happened:

    /ctx-wrap-up\n

    The skill runs four phases:

    ","path":["Recipes","Sessions","Session Ceremonies"],"tags":[]},{"location":"recipes/session-ceremonies/#phase-1-gather-signal","level":3,"title":"Phase 1: Gather Signal","text":"

    Silently checks git diff --stat, recent commits, and scans the conversation for themes: architectural choices, gotchas, patterns established, follow-up work identified.

    ","path":["Recipes","Sessions","Session Ceremonies"],"tags":[]},{"location":"recipes/session-ceremonies/#phase-2-propose-candidates","level":3,"title":"Phase 2: Propose Candidates","text":"

    Presents a structured list grouped by type:

    ## Session Wrap-Up\n\n### Learnings (2 candidates)\n1. **PyMdownx details extension breaks pre/code rendering**\n   - Context: Journal site showed broken code blocks inside details tags\n   - Lesson: details extension wraps content in <details> HTML, which\n     interferes with <pre><code> rendering\n   - Application: Use fenced code blocks instead of indented code inside\n     admonitions when details extension is active\n\n2. **Hook subprocesses cannot propagate env vars**\n   - Context: Set env var in PreToolUse hook, invisible in main session\n   - Lesson: Hooks execute in child processes; env changes don't propagate\n   - Application: Use tombstone files for hook-to-session communication\n\n### Decisions (1 candidate)\n1. **File-based cooldown tokens over env vars**\n   - Context: Need session-scoped cooldown for ctx agent auto-loading\n   - Rationale: File tokens survive across processes, simpler than IPC\n   - Consequence: Tombstone files accumulate in /tmp; need TTL cleanup\n\nPersist all? Or select which to keep?\n

    Each candidate has complete structured fields, not just a title. Empty categories are omitted.

    ","path":["Recipes","Sessions","Session Ceremonies"],"tags":[]},{"location":"recipes/session-ceremonies/#phase-3-persist","level":3,"title":"Phase 3: Persist","text":"

    After you approve (all, some, or modified), the skill runs the appropriate ctx add commands and reports results.

    ","path":["Recipes","Sessions","Session Ceremonies"],"tags":[]},{"location":"recipes/session-ceremonies/#nudge-suppression","level":3,"title":"Nudge Suppression","text":"

    After persisting, the skill marks the session as wrapped up via ctx system mark-wrapped-up. This suppresses context checkpoint nudges for 2 hours so the wrap-up ceremony itself does not trigger noisy reminders.

    ","path":["Recipes","Sessions","Session Ceremonies"],"tags":[]},{"location":"recipes/session-ceremonies/#phase-4-commit-offer","level":3,"title":"Phase 4: Commit Offer","text":"

    If there are uncommitted changes, offers to run /ctx-commit. Does not auto-commit.

    ","path":["Recipes","Sessions","Session Ceremonies"],"tags":[]},{"location":"recipes/session-ceremonies/#when-to-skip","level":2,"title":"When to Skip","text":"

    Not every session needs ceremonies.

    Skip /ctx-remember when:

    • You are doing a quick one-off lookup (reading a file, checking a value)
    • Context was already loaded this session via /ctx-agent
    • You are continuing immediately after a previous session and context is still fresh

    Skip /ctx-wrap-up when:

    • Nothing meaningful happened (only read files, answered a question)
    • You already persisted everything manually during the session
    • The session was trivial (typo fix, quick config change)

    A good heuristic: if the session produced something a future session should know about, run /ctx-wrap-up. If not, just close.

    ","path":["Recipes","Sessions","Session Ceremonies"],"tags":[]},{"location":"recipes/session-ceremonies/#quick-reference","level":2,"title":"Quick Reference","text":"
    # Session start\n/ctx-remember\n\n# ... do work ...\n\n# Session end\n/ctx-wrap-up\n

    That is the complete ceremony. Two commands, bookending your session.

    ","path":["Recipes","Sessions","Session Ceremonies"],"tags":[]},{"location":"recipes/session-ceremonies/#relationship-to-other-skills","level":2,"title":"Relationship to Other Skills","text":"Skill When Purpose /ctx-remember Session start Load and confirm context /ctx-reflect Mid-session breakpoints Checkpoint at milestones /ctx-wrap-up Session end Full session review and persist /ctx-commit After completing work Commit with context capture

    /ctx-reflect is for mid-session checkpoints. /ctx-wrap-up is for end-of-session: it is more thorough, covers the full session arc, and includes the commit offer. If you already ran /ctx-reflect recently, /ctx-wrap-up avoids proposing the same candidates again.

    ","path":["Recipes","Sessions","Session Ceremonies"],"tags":[]},{"location":"recipes/session-ceremonies/#tips","level":2,"title":"Tips","text":"
    • Make it a habit: The value of ceremonies compounds over sessions. Each /ctx-wrap-up makes the next /ctx-remember richer.
    • Trust the candidates: The agent scans the full conversation. It often catches learnings you forgot about.
    • Edit before approving: If a proposed candidate is close but not quite right, tell the agent what to change. Do not settle for a vague learning when a precise one is possible.
    • Do not force empty ceremonies: If /ctx-wrap-up finds nothing worth persisting, that is fine. A session that only read files and answered questions does not need artificial learnings.
    ","path":["Recipes","Sessions","Session Ceremonies"],"tags":[]},{"location":"recipes/session-ceremonies/#next-up","level":2,"title":"Next Up","text":"

    Browsing and Enriching Past Sessions →: Export session history to a browsable journal and enrich entries with metadata.

    ","path":["Recipes","Sessions","Session Ceremonies"],"tags":[]},{"location":"recipes/session-ceremonies/#see-also","level":2,"title":"See Also","text":"
    • The Complete Session: the full session workflow that ceremonies bookend
    • Persisting Decisions, Learnings, and Conventions: deep dive on what gets persisted during wrap-up
    • Detecting and Fixing Drift: keeping context files accurate between ceremonies
    • Pausing Context Hooks: skip ceremonies entirely for quick tasks that don't need them
    ","path":["Recipes","Sessions","Session Ceremonies"],"tags":[]},{"location":"recipes/session-changes/","level":1,"title":"Reviewing Session Changes","text":"","path":["Reviewing Session Changes"],"tags":[]},{"location":"recipes/session-changes/#what-changed-while-you-were-away","level":2,"title":"What Changed While You Were Away?","text":"

    Between sessions, teammates commit code, context files get updated, and decisions pile up. ctx change gives you a single-command summary of everything that moved since your last session.

    ","path":["Reviewing Session Changes"],"tags":[]},{"location":"recipes/session-changes/#quick-start","level":2,"title":"Quick Start","text":"
    # Auto-detects your last session and shows what changed\nctx change\n\n# Check what changed in the last 48 hours\nctx change --since 48h\n\n# Check since a specific date\nctx change --since 2026-03-10\n
    ","path":["Reviewing Session Changes"],"tags":[]},{"location":"recipes/session-changes/#how-reference-time-works","level":2,"title":"How Reference Time Works","text":"

    ctx change needs a reference point to compare against. It tries these sources in order:

    1. --since flag: explicit duration (24h, 72h) or date (2026-03-10, RFC3339 timestamp)
    2. Session markers: ctx-loaded-* files in .context/state/; picks the second-most-recent (your previous session start)
    3. Event log: last context-load-gate event from .context/state/events.jsonl
    4. Fallback: 24 hours ago

    The marker-based detection means ctx change usually just works without any flags: it knows when you last loaded context and shows everything after that.

    ","path":["Reviewing Session Changes"],"tags":[]},{"location":"recipes/session-changes/#what-it-reports","level":2,"title":"What It Reports","text":"","path":["Reviewing Session Changes"],"tags":[]},{"location":"recipes/session-changes/#context-file-changes","level":3,"title":"Context File Changes","text":"

    Any .md file in .context/ modified after the reference time:

    ### Context File Changes\n- `TASKS.md` - modified 2026-03-11 14:30\n- `DECISIONS.md` - modified 2026-03-11 09:15\n
    ","path":["Reviewing Session Changes"],"tags":[]},{"location":"recipes/session-changes/#code-changes","level":3,"title":"Code Changes","text":"

    Git activity since the reference time:

    ### Code Changes\n- **12 commits** since reference point\n- **Latest**: Fix journal enrichment ordering\n- **Directories touched**: internal, docs, specs\n- **Authors**: jose, claude\n
    ","path":["Reviewing Session Changes"],"tags":[]},{"location":"recipes/session-changes/#integrating-into-session-start","level":2,"title":"Integrating into Session Start","text":"

    Pair ctx change with the /ctx-remember ceremony for a complete session-start picture:

    # 1. Load context (this also creates the session marker)\nctx agent --budget 4000\n\n# 2. See what changed since your last session\nctx change\n

    Or script it:

    # .context/hooks/session-start.sh\nctx agent --budget 4000\necho \"---\"\nctx change\n
    ","path":["Reviewing Session Changes"],"tags":[]},{"location":"recipes/session-changes/#team-workflows","level":2,"title":"Team Workflows","text":"

    When multiple people share a .context/ directory, ctx change shows who changed what:

    # After pulling from remote\ngit pull\nctx change --since 72h\n

    This surfaces context file changes from teammates that you might otherwise miss in the commit log.

    ","path":["Reviewing Session Changes"],"tags":[]},{"location":"recipes/session-changes/#tips","level":2,"title":"Tips","text":"
    • No changes? If nothing shows up, the reference time might be wrong. Use --since 48h to widen the window.
    • Works without git. Context file changes are detected by filesystem mtime, not git. Code changes require git.
    • Hook integration. The context-load-gate hook writes the session marker that ctx change uses for auto-detection. If you're not using the ctx plugin, markers won't exist and it falls back to the event log or 24h window.
    ","path":["Reviewing Session Changes"],"tags":[]},{"location":"recipes/session-lifecycle/","level":1,"title":"The Complete Session","text":"","path":["Recipes","Sessions","The Complete Session"],"tags":[]},{"location":"recipes/session-lifecycle/#the-problem","level":2,"title":"The Problem","text":"

    \"What does a full ctx session look like from start to finish?\"

    You have ctx installed and your .context/ directory initialized, but the individual commands and skills feel disconnected.

    How do they fit together into a coherent workflow?

    This recipe walks through a complete session, from opening your editor to persisting context before you close it, so you can see how each piece connects.

    ","path":["Recipes","Sessions","The Complete Session"],"tags":[]},{"location":"recipes/session-lifecycle/#tldr","level":2,"title":"TL;DR","text":"
    1. Load: /ctx-remember: load context, get structured readback.
    2. Orient: /ctx-status: check file health and token usage.
    3. Pick: /ctx-next: choose what to work on.
    4. Work: implement, test, iterate.
    5. Commit: /ctx-commit: commit and capture decisions/learnings.
    6. Reflect: /ctx-reflect: identify what to persist (at milestones).
    7. Wrap up: /ctx-wrap-up: end-of-session ceremony.

    Read on for the full walkthrough with examples.

    What Is a Readback?

    A readback is a structured summary where the agent plays back what it knows:

    • last session,
    • active tasks,
    • recent decisions.

    This way, you can confirm it loaded the right context.

    The term \"readback\" comes from aviation, where pilots repeat instructions back to air traffic control to confirm they heard correctly.

    Same idea in ctx: The agent tells you what it \"thinks\" is going on, and you correct anything that's off before the work begins.

    • Last session: topic, date, what was accomplished
    • Active work: pending and in-progress tasks
    • Recent context: 1-2 decisions or learnings that matter now
    • Next step: suggestion or question about what to focus on
    ","path":["Recipes","Sessions","The Complete Session"],"tags":[]},{"location":"recipes/session-lifecycle/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Tool Type Purpose ctx status CLI command Quick health check on context files ctx agent CLI command Load token-budgeted context packet ctx journal source CLI command List previous sessions ctx journal source --show CLI command Inspect a specific session in detail /ctx-remember Skill Recall project context with structured readback /ctx-agent Skill Load full context packet inside the assistant /ctx-status Skill Show context summary with commentary /ctx-next Skill Suggest what to work on with rationale /ctx-commit Skill Commit code and prompt for context capture /ctx-reflect Skill Structured reflection checkpoint /ctx-history Skill Browse session history inside your AI assistant","path":["Recipes","Sessions","The Complete Session"],"tags":[]},{"location":"recipes/session-lifecycle/#the-workflow","level":2,"title":"The Workflow","text":"

    The session lifecycle has seven steps. You will not always use every step (for example, a quick bugfix might skip reflection, and a research session might skip committing), but the full arc looks like this:

    Load context > Orient > Pick a Task > Work > Commit > Reflect > Wrap up

    ","path":["Recipes","Sessions","The Complete Session"],"tags":[]},{"location":"recipes/session-lifecycle/#step-1-load-context","level":3,"title":"Step 1: Load Context","text":"

    Start every session by loading what you know. The fastest way is a single prompt:

    Do you remember what we were working on?\n

    This triggers the /ctx-remember skill. Behind the scenes, the assistant runs ctx agent --budget 4000, reads the files listed in the context packet (TASKS.md, DECISIONS.md, LEARNINGS.md, CONVENTIONS.md), checks ctx journal source --limit 3 for recent sessions, and then presents a structured readback.

    The readback should feel like a recall, not a file system tour. If you see \"Let me check if there are files...\" instead of a confident summary, the context system is not loaded properly.

    As an alternative, if you want raw data instead of a readback, run ctx status in your terminal or invoke /ctx-status for a summarized health check showing file counts, token usage, and recent activity.

    ","path":["Recipes","Sessions","The Complete Session"],"tags":[]},{"location":"recipes/session-lifecycle/#step-2-orient","level":3,"title":"Step 2: Orient","text":"

    After loading context, verify you understand the current state.

    /ctx-status\n

    The status output shows which context files are populated, how many tokens they consume, and which files were recently modified. Look for:

    • Empty core files: TASKS.md or CONVENTIONS.md with no content means the context is sparse
    • High token count (over 30k): the context is bloated and might need ctx compact
    • No recent activity: files may be stale and need updating

    If the status looks healthy and the readback from Step 1 gave you enough context, skip ahead.

    If something seems off (stale tasks, missing decisions...), spend a minute reading the relevant file before proceeding.

    ","path":["Recipes","Sessions","The Complete Session"],"tags":[]},{"location":"recipes/session-lifecycle/#step-3-pick-what-to-work-on","level":3,"title":"Step 3: Pick What to Work On","text":"

    With context loaded, choose a task. You can pick one yourself, or ask the assistant to recommend:

    /ctx-next\n

    The skill reads TASKS.md, checks recent sessions to avoid re-suggesting completed work, and presents 1-3 ranked recommendations with rationale.

    It prioritizes in-progress tasks over new starts (finishing is better than starting), respects explicit priority tags, and favors momentum: continuing a thread from a recent session is cheaper than context-switching.

    If you already know what you want to work on, state it directly:

    Let's work on the session enrichment feature.\n
    ","path":["Recipes","Sessions","The Complete Session"],"tags":[]},{"location":"recipes/session-lifecycle/#step-4-do-the-work","level":3,"title":"Step 4: Do the Work","text":"

    This is the main body of the session: write code, fix bugs, refactor, research: whatever the task requires.

    During this phase, a few ctx-specific patterns help:

    Check decisions before choosing: when you face a design choice, check if a prior decision covers it.

    Is this consistent with our decisions?\n

    Constrain scope: keep the assistant focused on the task at hand.

    Only change files in internal/cli/session/. Nothing else.\n

    Use /ctx-implement for multistep plans: if the task has multiple steps, this skill executes them one at a time with build/test verification between each step.

    Context monitoring runs automatically: the check-context-size hook monitors context capacity at adaptive intervals. Early in a session it stays silent. After 16+ prompts it starts monitoring, and past 30 prompts it checks frequently. If context capacity is running high, it will suggest saving unsaved work. No manual invocation is needed.

    ","path":["Recipes","Sessions","The Complete Session"],"tags":[]},{"location":"recipes/session-lifecycle/#step-5-commit-with-context","level":3,"title":"Step 5: Commit with Context","text":"

    When the work is ready, use the context-aware commit instead of raw git commit:

    /ctx-commit\n

    The Agent May Recommend Committing

    You do not always need to invoke /ctx-commit explicitly.

    After a commit, the agent may proactively offer to capture context:

    \"We just made a trade-off there. Want me to record it as a decision?\"

    This is normal: The Agent Playbook encourages persisting at milestones, and a commit is a natural milestone.

    As an alternative, you can ask the assistant \"can we commit this?\" and it will pick up the /ctx-commit skill for you.

    The skill runs a pre-commit build check (for Go projects, go build), reviews the staged changes, drafts a commit message focused on \"why\" rather than \"what\", and then commits.

    After the commit succeeds, it prompts you:

    **Any context to capture?**\n\n- **Decision**: Did you make a design choice or trade-off?\n- **Learning**: Did you hit a gotcha or discover something?\n- **Neither**: No context to capture; we are done.\n

    If you made a decision, the skill records it with ctx add decision. If you learned something, it records it with ctx add learning including context, lesson, and application fields. This is the bridge between committing code and remembering why the code looks the way it does.

    If source code changed in areas that affect documentation, the skill also offers to check for doc drift.

    ","path":["Recipes","Sessions","The Complete Session"],"tags":[]},{"location":"recipes/session-lifecycle/#step-6-reflect","level":3,"title":"Step 6: Reflect","text":"

    At natural breakpoints (after finishing a feature, resolving a complex bug, or before switching tasks) pause to reflect:

    /ctx-reflect\n

    Agents Reflect at Milestones

    Agents often reflect without explicit invocation.

    After completing a significant piece of work, the agent may naturally surface items worth persisting:

    \"We discovered that $PPID resolves differently inside hooks. Should I save that as a learning?\"

    This is the agent following the Work-Reflect-Persist cycle from the Agent Playbook.

    You do not need to say /ctx-reflect for this to happen; the agent treats milestones as reflection triggers on its own.

    The skill works through a checklist: learnings discovered, decisions made, tasks completed or created, and whether there are items worth persisting. It then presents a summary with specific items to persist, each with the exact command to run:

    I would suggest persisting:\n\n- **Learning**: `$PPID` in PreToolUse hooks resolves to the Claude Code PID\n  `ctx add learning --context \"...\" --lesson \"...\" --application \"...\" --session-id abc12345 --branch main --commit 68fbc00a`\n- **Task**: mark \"Add cooldown to ctx agent\" as done\n- **Decision**: tombstone-based cooldown with 10m default\n  `ctx add decision \"...\" --session-id abc12345 --branch main --commit 68fbc00a`\n\nWant me to persist any of these?\n

    The skill asks before persisting anything. You choose what to keep.

    Not every commit needs reflection. A typo fix does not. But when you have been debugging for an hour and finally understand the root cause, that is worth a reflection checkpoint.

    ","path":["Recipes","Sessions","The Complete Session"],"tags":[]},{"location":"recipes/session-lifecycle/#step-7-persist-before-ending","level":3,"title":"Step 7: Persist Before Ending","text":"

    Before ending the session, run the wrap-up ceremony to capture outstanding learnings, decisions, conventions, and tasks:

    /ctx-wrap-up\n

    Ceremony Skills

    /ctx-remember at session start and /ctx-wrap-up at session end are ceremony skills: Invoke them as explicit slash commands for completeness, not conversationally.

    See Session Ceremonies for details on why these two skills are the exception to the conversational rule.

    The skill gathers signal from git diff, recent commits, and the conversation, then proposes structured candidates for your approval. After you select which to keep, it persists them via ctx add and offers /ctx-commit if uncommitted changes remain.

    Session transcripts are automatically captured by Claude Code and can be browsed later with ctx journal source and ctx journal source --show.

    ","path":["Recipes","Sessions","The Complete Session"],"tags":[]},{"location":"recipes/session-lifecycle/#conversational-session-flow","level":2,"title":"Conversational Session Flow","text":"

    The steps above describe the mechanics. In practice, a session with a ctx-aware agent feels more like a conversation than a command sequence.

    The agent knows the lifecycle and moves through it naturally, often without explicit invocation. Here is a realistic example:

    You: Do you remember what we were working on?

    Agent (loads context silently, presents structured readback):

    Last session (Feb 7, caching-layer): we implemented the in-memory cache with\nTTL expiration and wrote tests for the eviction policy. \n\nYou have 3 pending tasks; the biggest one is adding Redis as a backend.\n\nRecent decision:<br>\nwe chose LRU over LFU because the access pattern is mostly recency-based.\n\nWant to continue with the Redis backend, or tackle something else?\n

    You: Let's work on the Redis caching backend.

    Agent (picks up the task, begins implementation):

    Got it. I see the task in TASKS.md: \"Add Redis backend for cache layer.\"\n\nI will start with the connection pool setup. Only touching\n`internal/cache/redis/` for now.\n

    (The agent works... writes code, runs tests, iterates.)

    Agent (after implementing the connection pool and choosing a retry strategy):

    The connection pool is working. I went with exponential backoff with jitter\nfor retries instead of fixed intervals; it handles thundering herd better\nunder reconnection storms. That is a design choice worth recording. \n\nWant me to save it as a decision?\n

    You: Yes, go ahead.

    Agent:

    Done. Added to DECISIONS.md: \"Exponential backoff with jitter for Redis\nreconnection, to avoid thundering herd on pool recovery.\"\n

    (More work happens... the agent implements the cache read/write paths.)

    You: What else did we learn during this session?

    Agent:

    A few things worth capturing:\n\n* **Learning**: Redis `SET` with `NX` and `PX` flags handles atomic\n  set-if-not-exists with TTL in a single call. No need for a separate\n  `EXPIRE` command. We initially did it in two calls and hit a race condition.\n* **Learning**: The `go-redis` library's `PoolSize` default is 10 per CPU.\n  For our workload, explicit sizing at 20 was more predictable.\n* **Task update**: The connection pool subtask is done; the read/write path\n  is implemented but needs integration tests.\n\nWant me to persist these?\n

    You: Yes. And let's wrap up.

    Agent (reflects and persists... all in one flow):

    Persisted:\n\n* 2 learnings added to LEARNINGS.md\n* Marked \"connection pool setup\" done in TASKS.md\n* Added \"write integration tests for Redis cache\" as a new task\n\nContext is up to date. You are good to close.\n

    Notice What Happened

    In the above workflow, the user never typed /ctx-reflect or ctx add learning.

    The agent moved through Load, Orient, Pick, Work, Commit, and Reflect driven by natural conversation.

    \"Let's wrap up\" was enough to trigger the full reflect-and-persist flow.

    The agent surfaced persist-worthy items at milestones: after a design choice, after discovering a gotcha: without waiting to be asked.

    This is the intended experience.

    The commands and skills still exist for when you want precise control, but the agent is a proactive partner in the lifecycle, not a passive executor of slash commands.

    ","path":["Recipes","Sessions","The Complete Session"],"tags":[]},{"location":"recipes/session-lifecycle/#putting-it-all-together","level":2,"title":"Putting It All Together","text":"

    Quick-reference checklist for a complete session:

    • Load: /ctx-remember: load context and confirm readback
    • Orient: /ctx-status: check file health and token usage
    • Pick: /ctx-next: choose what to work on
    • Work: implement, test, iterate (scope with \"only change X\")
    • Commit: /ctx-commit: commit and capture decisions/learnings
    • Reflect: /ctx-reflect: identify what to persist (at milestones)
    • Wrap up: /ctx-wrap-up: end-of-session ceremony

    Conversational equivalents: you can drive the same lifecycle with plain language:

    Step Slash command Natural language Load /ctx-remember \"Do you remember?\" / \"What were we working on?\" Orient /ctx-status \"How's our context looking?\" Pick /ctx-next \"What should we work on?\" / \"Let's do the caching task\" Work -- \"Only change files in internal/cache/\" Commit /ctx-commit \"Commit this\" / \"Ship it\" Reflect /ctx-reflect \"What did we learn?\" / (agent offers at milestones) Wrap up /ctx-wrap-up (use the slash command for completeness)

    The agent understands both columns.

    In practice, most sessions use a mix:

    • Explicit Commands when you want precision;
    • Natural Language when you want flow and agentic autonomy.

    The agent will also initiate steps on its own (particularly \"Reflect\") when it recognizes a milestone.

    Short sessions (quick bugfix) might only use: Load, Work, Commit.

    Long sessions should Reflect after each major milestone and persist learnings and decisions before ending.

    ","path":["Recipes","Sessions","The Complete Session"],"tags":[]},{"location":"recipes/session-lifecycle/#tips","level":2,"title":"Tips","text":"

    Persist early if context is running low. A hook monitors context capacity and notifies you when it gets high, but do not wait for the notification. If you have been working for a while and have unpersisted learnings, persist proactively.

    Browse previous sessions by topic. If you need context from a prior session, ctx journal source --show auth will match by keyword. You do not need to remember the exact date or slug.

    Reflection is optional but valuable. You can skip /ctx-reflect for small changes, but always persist learnings and decisions before ending a session where you did meaningful work. These are what the next session loads.

    Let the hook handle context loading. The PreToolUse hook runs ctx agent automatically with a cooldown, so context loads on first tool use without you asking. The /ctx-remember prompt at session start is for your benefit (to get a readback), not because the assistant needs it.

    The agent is a proactive partner, not a passive tool. A ctx-aware agent follows the Agent Playbook: it watches for milestones (completed tasks, design decisions, discovered gotchas) and offers to persist them without being asked. If you finish a tricky debugging session, it may say \"That root cause is worth saving as a learning. Want me to record it?\" before you think to ask. This is by design.

    ","path":["Recipes","Sessions","The Complete Session"],"tags":[]},{"location":"recipes/session-lifecycle/#next-up","level":2,"title":"Next Up","text":"

    Session Ceremonies →: The two bookend rituals for every session: /ctx-remember at the start, /ctx-wrap-up at the end.

    ","path":["Recipes","Sessions","The Complete Session"],"tags":[]},{"location":"recipes/session-lifecycle/#see-also","level":2,"title":"See Also","text":"
    • Session Ceremonies: why /ctx-remember and /ctx-wrap-up are explicit slash commands, not conversational
    • CLI Reference: full documentation for all ctx commands
    • Prompting Guide: effective prompts for ctx-enabled projects
    • Tracking Work Across Sessions: deep dive on task management
    • Persisting Decisions, Learnings, and Conventions: deep dive on knowledge capture
    • Detecting and Fixing Drift: keeping context files accurate
    • Pausing Context Hooks: shortcut the full lifecycle for quick tasks that don't need ceremony overhead
    ","path":["Recipes","Sessions","The Complete Session"],"tags":[]},{"location":"recipes/session-pause/","level":1,"title":"Pausing Context Hooks","text":"","path":["Recipes","Sessions","Pausing Context Hooks"],"tags":[]},{"location":"recipes/session-pause/#the-problem","level":2,"title":"The Problem","text":"

    Not every session needs the full ceremony. Quick investigations, one-off questions, small fixes unrelated to active project work: These tasks don't benefit from persistence nudges, ceremony reminders, or knowledge checks. Every hook still fires, consuming tokens and attention on work that won't produce learnings or decisions worth capturing.

    ","path":["Recipes","Sessions","Pausing Context Hooks"],"tags":[]},{"location":"recipes/session-pause/#tldr","level":2,"title":"TL;DR","text":"Command What it does ctx hook pause or /ctx-pause Silence all nudge hooks for this session ctx hook resume or /ctx-resume Restore normal hook behavior

    Pause is session-scoped: It only affects the current session. Other sessions (same project, different terminal) are unaffected.

    ","path":["Recipes","Sessions","Pausing Context Hooks"],"tags":[]},{"location":"recipes/session-pause/#what-gets-paused","level":2,"title":"What Gets Paused","text":"

    All nudge and reminder hooks go silent:

    • Context size checkpoints
    • Ceremony adoption nudges
    • Persistence reminders
    • Journal maintenance reminders
    • Knowledge growth nudges
    • Map staleness nudges
    • Version update nudges
    • Resource pressure warnings
    • QA reminders
    • Post-commit nudges
    • Specs nudges
    • Backup age warnings
    • Context load gate
    • Pending reminders relay
    ","path":["Recipes","Sessions","Pausing Context Hooks"],"tags":[]},{"location":"recipes/session-pause/#what-still-fires","level":2,"title":"What Still Fires","text":"

    Security hooks always run, even when paused:

    • block-non-path-ctx: prevents ./ctx invocations
    • block-dangerous-commands: blocks sudo, force push, etc.
    ","path":["Recipes","Sessions","Pausing Context Hooks"],"tags":[]},{"location":"recipes/session-pause/#workflow","level":2,"title":"Workflow","text":"
    # 1. Session starts: Context loads normally.\n\n# 2. You realize this is a quick task\nctx hook pause\n\n# 3. Work without interruption: hooks are silent\n\n# 4. Session evolves into real work? Resume first\nctx hook resume\n\n# 5. Now wrap up normally\n# /ctx-wrap-up\n
    ","path":["Recipes","Sessions","Pausing Context Hooks"],"tags":[]},{"location":"recipes/session-pause/#graduated-reminder","level":2,"title":"Graduated Reminder","text":"

    Paused hooks aren't completely invisible. A minimal indicator appears so you always know the state:

    Paused turns What you see 1-5 ctx:paused 6+ ctx:paused (N turns): resume with /ctx-resume

    This prevents the \"forgot I paused\" problem during long sessions.

    ","path":["Recipes","Sessions","Pausing Context Hooks"],"tags":[]},{"location":"recipes/session-pause/#tips","level":2,"title":"Tips","text":"
    • Resume before wrapping up. If your quick task turns into real work, resume hooks before running /ctx-wrap-up. The wrap-up ceremony needs active hooks to capture learnings properly.

    • Initial context load is unaffected. The ~8k token startup injection (CLAUDE.md, playbook, constitution) happens before any command runs. Pause only affects hooks that fire during the session.

    • Use for quick investigations. Debugging a stack trace? Checking a git log? Answering a colleague's question? Pause, do the work, close the session. No ceremony needed.

    • Don't use for real work. If you're implementing features, fixing bugs, or making decisions: keep hooks active. The nudges exist to prevent context loss.

    ","path":["Recipes","Sessions","Pausing Context Hooks"],"tags":[]},{"location":"recipes/session-pause/#see-also","level":2,"title":"See Also","text":"

    See also: Session Ceremonies: the bookend rituals that pause lets you skip when they aren't needed.

    See also: Customizing Hook Messages: if you want to change what hooks say rather than silencing them entirely.

    See also: The Complete Session: the full session workflow that pause shortcuts for quick tasks.

    ","path":["Recipes","Sessions","Pausing Context Hooks"],"tags":[]},{"location":"recipes/session-reminders/","level":1,"title":"Session Reminders","text":"","path":["Recipes","Sessions","Session Reminders"],"tags":[]},{"location":"recipes/session-reminders/#the-problem","level":2,"title":"The Problem","text":"

    You're deep in a session and realize: \"I need to refactor the swagger definitions next time.\" You could add a task, but this isn't a work item: it's a note to future-you. You could jot it on the scratchpad, but scratchpad entries don't announce themselves.

    How do you leave a message that your next session opens with?

    ","path":["Recipes","Sessions","Session Reminders"],"tags":[]},{"location":"recipes/session-reminders/#tldr","level":2,"title":"TL;DR","text":"
    ctx remind \"refactor the swagger definitions\"\nctx remind list\nctx remind dismiss 1       # or batch: ctx remind dismiss 1 3-5\n

    Reminders surface automatically at session start: VERBATIM, every session, until you dismiss them.

    ","path":["Recipes","Sessions","Session Reminders"],"tags":[]},{"location":"recipes/session-reminders/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Tool Type Purpose ctx remind CLI command Add a reminder (default action) ctx remind list CLI command Show all pending reminders ctx remind dismiss CLI command Remove a reminder by ID (or --all) /ctx-remind Skill Natural language interface to reminders","path":["Recipes","Sessions","Session Reminders"],"tags":[]},{"location":"recipes/session-reminders/#the-workflow","level":2,"title":"The Workflow","text":"","path":["Recipes","Sessions","Session Reminders"],"tags":[]},{"location":"recipes/session-reminders/#step-1-leave-a-reminder","level":3,"title":"Step 1: Leave a Reminder","text":"

    Tell your agent what to remember, or run it directly:

    You: \"remind me to refactor the swagger definitions\"\n\nAgent: [runs ctx remind \"refactor the swagger definitions\"]\n       \"Reminder set:\n         + [1] refactor the swagger definitions\"\n

    Or from the terminal:

    ctx remind \"refactor the swagger definitions\"\n
    ","path":["Recipes","Sessions","Session Reminders"],"tags":[]},{"location":"recipes/session-reminders/#step-2-set-a-date-gate-optional","level":3,"title":"Step 2: Set a Date Gate (Optional)","text":"

    If the reminder shouldn't fire until a specific date:

    You: \"remind me to check the deploy logs after Tuesday\"\n\nAgent: [runs ctx remind \"check the deploy logs\" --after 2026-02-25]\n       \"Reminder set:\n         + [2] check the deploy logs  (after 2026-02-25)\"\n

    The reminder stays silent until that date, then fires every session.

    The agent converts natural language dates (\"tomorrow\", \"next week\", \"after the release on Friday\") to YYYY-MM-DD. If it's ambiguous, it asks.

    ","path":["Recipes","Sessions","Session Reminders"],"tags":[]},{"location":"recipes/session-reminders/#step-3-start-a-new-session","level":3,"title":"Step 3: Start a New Session","text":"

    Next session, the reminder appears automatically before anything else:

    ┌─ Reminders ──────────────────────────────────────\n│  [1] refactor the swagger definitions\n│\n│ Dismiss: ctx remind dismiss <id>\n│ Dismiss all: ctx remind dismiss --all\n└──────────────────────────────────────────────────\n

    No action needed: The check-reminders hook fires on UserPromptSubmit and the agent relays the box verbatim.

    ","path":["Recipes","Sessions","Session Reminders"],"tags":[]},{"location":"recipes/session-reminders/#step-4-dismiss-when-done","level":3,"title":"Step 4: Dismiss When Done","text":"

    After you've acted on a reminder (or decided to skip it):

    You: \"dismiss reminder 1\"\n\nAgent: [runs ctx remind dismiss 1]\n       \"Dismissed:\n         - [1] refactor the swagger definitions\"\n\n# Batch dismiss also works:\n# \"dismiss reminders 3, 5 through 7\"\n# → ctx remind dismiss 3 5-7\n

    Or clear everything:

    ctx remind dismiss --all\n
    ","path":["Recipes","Sessions","Session Reminders"],"tags":[]},{"location":"recipes/session-reminders/#step-5-check-whats-pending","level":3,"title":"Step 5: Check What's Pending","text":"
    ctx remind list\n
      [1] refactor the swagger definitions\n  [3] review auth token expiry logic\n  [4] check deploy logs  (after 2026-02-25, not yet due)\n

    Date-gated reminders that haven't reached their date show (not yet due).

    ","path":["Recipes","Sessions","Session Reminders"],"tags":[]},{"location":"recipes/session-reminders/#using-ctx-remind-in-a-session","level":2,"title":"Using /ctx-remind in a Session","text":"

    Invoke the /ctx-remind skill, then describe what you want:

    You: /ctx-remind remind me to update the API docs\nYou: /ctx-remind what reminders do I have?\nYou: /ctx-remind dismiss reminder 3\n
    You say (after /ctx-remind) What the agent does \"remind me to update the API docs\" ctx remind \"update the API docs\" \"remind me next week to check staging\" ctx remind \"check staging\" --after 2026-03-02 \"what reminders do I have?\" ctx remind list \"dismiss reminder 3\" ctx remind dismiss 3 \"dismiss reminders 3, 5 through 7\" ctx remind dismiss 3 5-7 \"clear all reminders\" ctx remind dismiss --all","path":["Recipes","Sessions","Session Reminders"],"tags":[]},{"location":"recipes/session-reminders/#reminders-vs-scratchpad-vs-tasks","level":2,"title":"Reminders vs Scratchpad vs Tasks","text":"You want to... Use Leave a note that announces itself next session ctx remind Jot down a quick value or sensitive token ctx pad Track work with status and completion TASKS.md Record a decision or lesson for all sessions Context files

    Decision guide:

    • If it should announce itself at session start → ctx remind
    • If it's a quiet note you'll check manually → ctx pad
    • If it's a work item you'll mark done → TASKS.md

    Reminders Are Sticky Notes, Not Tasks

    A reminder has no status, no priority, no lifecycle. It's a message to \"future you\" that fires until dismissed.

    If you need tracking, use a task in TASKS.md.

    ","path":["Recipes","Sessions","Session Reminders"],"tags":[]},{"location":"recipes/session-reminders/#tips","level":2,"title":"Tips","text":"
    • Reminders fire every session: Unlike nudges (which throttle to once per day), reminders repeat until you dismiss them. This is intentional: You asked to be reminded.
    • Date gating is session-scoped, not clock-scoped: --after 2026-02-25 means \"don't show until sessions on or after Feb 25.\" It does not mean \"alarm at midnight on Feb 25.\"
    • The agent handles date parsing: Say \"next week\" or \"after Friday\": The agent converts it to YYYY-MM-DD. The CLI only accepts the explicit date format.
    • Reminders are committed to git: They travel with the repo. If you switch machines, your reminders follow.
    • IDs never reuse: After dismissing reminder 3, the next reminder gets ID 4 (or higher). No confusion from recycled numbers.
    ","path":["Recipes","Sessions","Session Reminders"],"tags":[]},{"location":"recipes/session-reminders/#next-up","level":2,"title":"Next Up","text":"

    Using the Scratchpad →: For quiet notes and sensitive values that don't need session-start announcements.

    ","path":["Recipes","Sessions","Session Reminders"],"tags":[]},{"location":"recipes/session-reminders/#see-also","level":2,"title":"See Also","text":"
    • CLI Reference: ctx remind: full command syntax and flags
    • The Complete Session: how reminders fit into the session lifecycle
    • Managing Tasks: for work items that need status tracking
    ","path":["Recipes","Sessions","Session Reminders"],"tags":[]},{"location":"recipes/state-maintenance/","level":1,"title":"State Directory Maintenance","text":"","path":["State Directory Maintenance"],"tags":[]},{"location":"recipes/state-maintenance/#the-problem","level":2,"title":"The Problem","text":"

    Every session creates tombstone files in .context/state/ - small markers that suppress repeat hook nudges (\"already checked context size\", \"already sent persistence reminder\"). Over days and weeks, these accumulate into hundreds of files from long-dead sessions.

    The files are harmless individually, but the clutter makes it harder to reason about state, and stale global tombstones can suppress nudges across sessions entirely.

    ","path":["State Directory Maintenance"],"tags":[]},{"location":"recipes/state-maintenance/#tldr","level":2,"title":"TL;DR","text":"
    ctx prune --dry-run     # preview what would be removed\nctx prune               # prune files older than 7 days\nctx prune --days 1      # more aggressive: keep only today\n
    ","path":["State Directory Maintenance"],"tags":[]},{"location":"recipes/state-maintenance/#commands-used","level":2,"title":"Commands Used","text":"Tool Type Purpose ctx prune Command Remove old per-session state files ctx status Command Quick health overview including state dir","path":["State Directory Maintenance"],"tags":[]},{"location":"recipes/state-maintenance/#understanding-state-files","level":2,"title":"Understanding State Files","text":"

    State files fall into two categories:

    Session-scoped (contain a UUID in the filename): Created per-session to suppress repeat nudges. Safe to prune once the session ends. Examples:

    context-check-11e94c1d-1639-4c04-bf77-63dcf1f50ec7\nheartbeat-11e94c1d-1639-4c04-bf77-63dcf1f50ec7\npersistence-nudge-11e94c1d-1639-4c04-bf77-63dcf1f50ec7\n

    Global (no UUID): Persist across sessions. ctx prune preserves these automatically. Some are legitimate state (events.jsonl, memory-import.json); others may be stale tombstones that need manual review.

    ","path":["State Directory Maintenance"],"tags":[]},{"location":"recipes/state-maintenance/#the-workflow","level":2,"title":"The Workflow","text":"","path":["State Directory Maintenance"],"tags":[]},{"location":"recipes/state-maintenance/#step-1-preview","level":3,"title":"Step 1: Preview","text":"

    Always dry-run first to see what would be removed:

    ctx prune --dry-run\n

    The output shows each file, its age, and a summary:

      would prune: context-check-abc123... (age: 3d)\n  would prune: heartbeat-abc123... (age: 3d)\n\nDry run - would prune 150 files (skip 70 recent, preserve 14 global)\n
    ","path":["State Directory Maintenance"],"tags":[]},{"location":"recipes/state-maintenance/#step-2-prune","level":3,"title":"Step 2: Prune","text":"

    Choose an age threshold. The default is 7 days:

    ctx prune               # older than 7 days\nctx prune --days 3      # older than 3 days\nctx prune --days 1      # older than 1 day (aggressive)\n
    ","path":["State Directory Maintenance"],"tags":[]},{"location":"recipes/state-maintenance/#step-3-review-global-files","level":3,"title":"Step 3: Review Global Files","text":"

    After pruning, check what prune preserved:

    ls .context/state/ | grep -v '[0-9a-f]\\{8\\}-[0-9a-f]\\{4\\}'\n

    Legitimate global files (keep):

    • events.jsonl - event log
    • memory-import.json - import tracking state

    Stale global tombstones (safe to delete):

    • Files like backup-reminded, ceremony-reminded, version-checked with no session UUID are one-shot markers. If they are from a previous session, they are stale and can be removed manually.
    rm .context/state/backup-reminded .context/state/ceremony-reminded\n
    ","path":["State Directory Maintenance"],"tags":[]},{"location":"recipes/state-maintenance/#step-4-verify","level":3,"title":"Step 4: Verify","text":"
    ls .context/state/ | wc -l    # should be manageable\n
    ","path":["State Directory Maintenance"],"tags":[]},{"location":"recipes/state-maintenance/#when-to-prune","level":2,"title":"When to Prune","text":"
    • Weekly: ctx prune with default 7-day threshold
    • After heavy parallel work: Multiple concurrent sessions create many tombstones. Prune with --days 1 afterward.
    • When state directory exceeds ~100 files: A sign that pruning hasn't run recently
    ","path":["State Directory Maintenance"],"tags":[]},{"location":"recipes/state-maintenance/#tips","level":2,"title":"Tips","text":"

    Pruning active sessions is safe but noisy: If you prune a file belonging to a still-running session, the corresponding hook will re-fire its nudge on the next prompt. Minor UX annoyance, not data loss.

    No context files are stored in state: The state directory contains only tombstones, counters, and diagnostic data. Nothing in .context/state/ affects your decisions, learnings, tasks, or conventions.

    Test artifacts sneak in: Files like context-check-statstest or heartbeat-unknown are artifacts from development or testing. They lack UUIDs so prune preserves them. Delete manually.

    ","path":["State Directory Maintenance"],"tags":[]},{"location":"recipes/state-maintenance/#see-also","level":2,"title":"See Also","text":"
    • Detecting and Fixing Drift: broader context maintenance including drift detection and archival
    • Troubleshooting: diagnostic workflow using ctx doctor and event logs
    • CLI Reference: system: full flag documentation for ctx prune and related commands
    ","path":["State Directory Maintenance"],"tags":[]},{"location":"recipes/steering/","level":1,"title":"Writing Steering Files","text":"","path":["Recipes","Agents and Automation","Writing Steering Files"],"tags":[]},{"location":"recipes/steering/#writing-steering-files","level":1,"title":"Writing Steering Files","text":"

    Steering files tell your AI assistant how to behave, not what was decided or how the codebase is written. This recipe walks through writing a steering file from scratch, validating which prompts will trigger it, and syncing it out to your configured AI tools.

    Before You Start

    If you're unsure whether a rule belongs in steering/, DECISIONS.md, or CONVENTIONS.md, read the \"Steering vs decisions vs conventions\" admonition on the ctx steering reference page. The short version: if the rule is \"the AI should always do X when asked about Y,\" that's steering. Otherwise it's probably a decision or convention.

    ","path":["Recipes","Agents and Automation","Writing Steering Files"],"tags":[]},{"location":"recipes/steering/#start-here-customize-the-foundation-files","level":2,"title":"Start Here: Customize the Foundation Files","text":"

    ctx init scaffolds four foundation steering files for you the first time you initialize a project:

    File Purpose .context/steering/product.md Product context, goals, target users .context/steering/tech.md Tech stack, constraints, key dependencies .context/steering/structure.md Directory layout, naming conventions .context/steering/workflow.md Branch strategy, commit rules, pre-commit

    Each file opens with an inline HTML comment that explains the three inclusion modes, what priority means, and the tools scope. The comment is invisible in rendered markdown but visible when you edit the file. Delete it once the file is yours.

    All four default to inclusion: always and priority: 10, so they fire on every AI tool call until you customize them. If you're reading this recipe and haven't touched them yet, open each one now and replace the placeholder bullet list with actual rules for your project. That's the highest-leverage five minutes you can spend in a new ctx setup.

    What to fill in, by file:

    product.md: The elevator pitch plus hard scope:

    • One-sentence product description.
    • Primary users and their top job-to-be-done.
    • Two or three \"this is explicitly out of scope\" items so the AI doesn't wander.

    tech.md: Technology and constraints:

    • Languages and versions (Go 1.22, Node 20, etc.).
    • Frameworks and key libraries.
    • Runtime and deployment target.
    • Hard constraints: \"no CGO\", \"no network at test time\", \"no external DB for unit tests\". These are the things that burn agents when they don't know them.

    structure.md: Layout and naming:

    • Top-level directories and their purpose.
    • Where new files should go (and where they should NOT).
    • Naming conventions for packages, files, types.

    workflow.md: Process rules:

    • Branch strategy (main-only, trunk-based, feature branches).
    • Commit message format, signed-off-by requirement.
    • Pre-commit and pre-push checks.
    • Review expectations.

    After editing, the next AI tool call in Claude Code will pick up the new rules automatically via the plugin's PreToolUse hook, with no sync step and no restart. Other tools (Cursor, Cline, Kiro) need ctx steering sync to export into their native format.

    Prefer a Bare .context/steering/ Directory?

    Re-run ctx init --no-steering-init and delete the scaffolded files. ctx init leaves existing files alone, so the flag is only needed if you want to opt out of the initial scaffold.

    The rest of this recipe walks through creating an additional, scenario-specific steering file beyond the four foundation defaults.

    ","path":["Recipes","Agents and Automation","Writing Steering Files"],"tags":[]},{"location":"recipes/steering/#scenario","level":2,"title":"Scenario","text":"

    You're working on a project with a strict input-validation policy: every new API handler must validate request bodies before touching the database. You want the AI to flag this concern automatically whenever it's asked to write an HTTP handler, without you having to remind it every session.

    Claude Code Users: Pick always, Not auto

    This walkthrough uses inclusion: auto because the scenario is a scoped rule that matches a specific kind of prompt. That works natively on Cursor, Cline, and Kiro (they resolve the description keyword match themselves).

    On Claude Code, auto does not fire through the plugin's PreToolUse hook. The hook passes an empty prompt to ctx agent, so only always files match. Claude can still reach an auto file by calling the ctx_steering_get MCP tool, but that requires Claude to decide to call it; there's no automatic injection.

    If Claude Code is your tool, set inclusion: always in Step 2 instead of auto. The rule will fire on every tool call regardless of topic. You may want to narrow the rule body so the extra tokens per turn aren't wasted on unrelated work.

    See the ctx steering reference \"Prefer inclusion: always for Claude Code\" section for the full trade-off.

    ","path":["Recipes","Agents and Automation","Writing Steering Files"],"tags":[]},{"location":"recipes/steering/#step-1-scaffold-the-file","level":2,"title":"Step 1: Scaffold the File","text":"
    ctx steering add api-validation\n

    That creates .context/steering/api-validation.md with default frontmatter:

    ---\nname: api-validation\ndescription:\ninclusion: manual\ntools: []\npriority: 50\n---\n

    The defaults are deliberately conservative: inclusion: manual means the file won't be applied until you opt in, which keeps the rules out of the prompt until you've reviewed them.

    ","path":["Recipes","Agents and Automation","Writing Steering Files"],"tags":[]},{"location":"recipes/steering/#step-2-fill-in-the-rule","level":2,"title":"Step 2: Fill in the Rule","text":"

    Open the file and write the rule body plus a focused description. The description is what inclusion: auto matches against later.

    ---\nname: api-validation\ndescription: HTTP handler input validation and request parsing\ninclusion: auto\ntools: []\npriority: 20\n---\n\n# API request validation\n\nEvery new HTTP handler MUST:\n\n1. Parse request bodies into typed structs, never `map[string]any`.\n2. Validate required fields before any database call.\n3. Return 400 with a machine-readable error for validation failures.\n4. Use `context.Context` from the request for all downstream calls.\n\nPrefer existing validation helpers in `internal/validate/`\nrather than inline checks.\n

    Notes on the choices:

    • inclusion: auto: this rule should fire automatically on HTTP-handler-shaped prompts, not always.
    • priority: 20: lower than the default, so this rule appears near the top of the prompt alongside other high-priority rules.
    • Description is keyword-rich (\"HTTP handler input validation and request parsing\"); the auto matcher scores prompts against these words.
    ","path":["Recipes","Agents and Automation","Writing Steering Files"],"tags":[]},{"location":"recipes/steering/#step-3-preview-which-prompts-match","level":2,"title":"Step 3: Preview Which Prompts Match","text":"

    Before committing the file, validate your description catches the prompts you care about:

    ctx steering preview \"add an endpoint for updating user email\"\n

    Expected output:

    Steering files matching prompt \"add an endpoint for updating user email\":\n  api-validation       inclusion=auto     priority=20  tools=all\n

    Good, the prompt matches. Try a negative case:

    ctx steering preview \"fix a bug in the JSON renderer\"\n

    Expected: empty match (or whatever else is currently auto). If api-validation incorrectly fires for unrelated prompts, tighten the description. If it misses prompts it should catch, add more keywords.

    ","path":["Recipes","Agents and Automation","Writing Steering Files"],"tags":[]},{"location":"recipes/steering/#step-4-list-to-confirm-metadata","level":2,"title":"Step 4: List to Confirm Metadata","text":"
    ctx steering list\n

    Should show api-validation alongside any other files, with its inclusion mode and priority. If the list is wrong, check the frontmatter for typos.

    ","path":["Recipes","Agents and Automation","Writing Steering Files"],"tags":[]},{"location":"recipes/steering/#step-5-get-the-rules-in-front-of-the-ai","level":2,"title":"Step 5: Get the Rules in Front of the AI","text":"

    Steering files are authored once in .context/steering/, but how they reach the AI depends on which tool you use. There are two delivery mechanisms:

    ","path":["Recipes","Agents and Automation","Writing Steering Files"],"tags":[]},{"location":"recipes/steering/#path-a-native-rules-tools-cursor-cline-kiro","level":3,"title":"Path A: Native-Rules Tools (Cursor, Cline, Kiro)","text":"

    These tools read a specific directory for rules. ctx steering sync exports your files into that directory with tool-specific frontmatter:

    ctx steering sync\n

    Depending on the active tool in .ctxrc or --tool:

    Tool Target Cursor .cursor/rules/ Cline .clinerules/ Kiro .kiro/steering/

    The sync is idempotent; unchanged files are skipped. Run it whenever you edit a steering file.

    ","path":["Recipes","Agents and Automation","Writing Steering Files"],"tags":[]},{"location":"recipes/steering/#path-b-claude-code-and-codex-hook-mcp","level":3,"title":"Path B: Claude Code and Codex (Hook + MCP)","text":"

    Claude Code and Codex have no native rules primitive, so ctx steering sync is a no-op for them; it deliberately skips both. Instead, steering reaches these tools through two non-sync channels:

    1. PreToolUse hook (automatic). The ctx setup claude-code plugin installs a hook that runs ctx agent --budget 8000 before each tool call. ctx agent loads your steering files, filters them against the active prompt, and includes matching bodies as Tier 6 of the context packet. The packet gets injected into Claude's context automatically.

    2. ctx_steering_get MCP tool (on-demand). Claude can call this MCP tool mid-task to fetch matching steering files for a specific prompt. Automatic activation comes from Claude's judgment, not a hook.

    Both channels activate when you run:

    ctx setup claude-code --write\n

    That installs the plugin, wires the hook, and registers the MCP server. After that, steering files you edit are picked up on the next tool call, with no sync step needed.

    Running ctx steering sync with Claude Code

    It won't error; it will simply report that Claude and Codex aren't sync targets and skip them. If Claude Code is your only tool, you never need to run sync. If you use both Claude Code and (say) Cursor, run sync to keep Cursor up to date; the Claude pipeline takes care of itself via the hook.

    ","path":["Recipes","Agents and Automation","Writing Steering Files"],"tags":[]},{"location":"recipes/steering/#step-6-verify-the-ai-sees-it","level":2,"title":"Step 6: Verify the AI Sees It","text":"

    Open your AI tool and ask it something the rule should fire on:

    \"Add a POST /users endpoint that accepts email and name.\"

    If the rule is working, the AI's first response should mention input validation, typed structs, and the internal/validate/ package, because that's what the steering file told it to do.

    If nothing happens, the fix depends on which path you're on:

    Path A (Cursor/Cline/Kiro):

    1. Re-run ctx steering preview with the literal prompt to confirm the match.
    2. Run ctx steering list and verify inclusion is auto, not manual.
    3. Check the tool's own config directory (e.g. .cursor/rules/); the file should be there after ctx steering sync.

    Path B (Claude Code):

    1. Re-run ctx steering preview with the literal prompt to confirm the match.
    2. Verify the plugin is installed: cat .claude/hooks.json should include ctx agent --budget 8000 under PreToolUse. If not, re-run ctx setup claude-code --write.
    3. Run ctx agent --budget 8000 manually and grep the output for your rule body. If it's there, the data is fine; if it's missing, the inclusion mode or description is at fault.
    4. As a last resort, ask Claude directly: \"Call the ctx_steering_get MCP tool with my prompt and show me the result.\" If the MCP tool returns your rule, Claude has access but isn't pulling it into the initial context packet; tighten the description keywords.
    ","path":["Recipes","Agents and Automation","Writing Steering Files"],"tags":[]},{"location":"recipes/steering/#common-mistakes","level":2,"title":"Common Mistakes","text":"

    Too-generic descriptions. description: general coding will match almost every prompt and flood the context window. Keep descriptions specific to the scenario the rule applies to.

    Overlapping rules. If two steering files match the same prompt and contradict each other, the result is confusing. Use priority to resolve, but better: merge the files or narrow the descriptions so they don't overlap.

    Putting decisions in steering. \"We decided to use PostgreSQL\" is a decision, not a rule for the AI to follow on every prompt. Record decisions with ctx add decision, not ctx steering add.

    Committing inclusion: always without thinking. Rules marked always fire on every prompt, consuming tier-6 budget permanently. Only use always for true invariants (security, safety, licensing). Everything else should be auto or manual.

    ","path":["Recipes","Agents and Automation","Writing Steering Files"],"tags":[]},{"location":"recipes/steering/#see-also","level":2,"title":"See Also","text":"
    • ctx steering reference: full command, flag, and frontmatter reference.
    • ctx setup: configure which tools the steering sync writes to.
    • Authoring triggers: if you want script-based automation, not rule-based prompt injection.
    ","path":["Recipes","Agents and Automation","Writing Steering Files"],"tags":[]},{"location":"recipes/system-hooks-audit/","level":1,"title":"Auditing System Hooks","text":"","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#the-problem","level":2,"title":"The Problem","text":"

    ctx runs 14 system hooks behind the scenes: nudging your agent to persist context, warning about resource pressure, gating commits on QA. But these hooks are invisible by design. You never see them fire. You never know if they stopped working.

    How do you verify your hooks are actually running, audit what they do, and get alerted when they go silent?

    ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#tldr","level":2,"title":"TL;DR","text":"
    ctx system check-resources # run a hook manually\nls -la .context/logs/      # check hook execution logs\nctx hook notify setup      # get notified when hooks fire\n

    Or ask your agent: \"Are our hooks running?\"

    ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Tool Type Purpose ctx system <hook> CLI command Run a system hook manually ctx sysinfo CLI command Show system resource status ctx usage CLI command Stream or dump per-session token stats ctx hook notify setup CLI command Configure webhook for audit trail ctx hook notify test CLI command Verify webhook delivery .ctxrcnotify.events Configuration Subscribe to relay for full hook audit .context/logs/ Log files Local hook execution ledger","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#what-are-system-hooks","level":2,"title":"What Are System Hooks?","text":"

    System hooks are plumbing commands that ctx registers with your AI tool (Claude Code, Cursor, etc.) via the plugin's hooks.json. They fire automatically at specific events during your AI session:

    Event When Hooks UserPromptSubmit Before the agent sees your prompt 10 check hooks + heartbeat PreToolUse Before the agent uses a tool block-non-path-ctx, qa-reminder PostToolUse After a tool call succeeds post-commit

    You never run these manually. Your AI tool runs them for you: That's the point.

    ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#the-complete-hook-catalog","level":2,"title":"The Complete Hook Catalog","text":"","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#prompt-time-checks-userpromptsubmit","level":3,"title":"Prompt-Time Checks (UserPromptSubmit)","text":"

    These fire before every prompt, but most are throttled to avoid noise.

    ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#check-context-size-context-capacity-warning","level":4,"title":"check-context-size: Context Capacity Warning","text":"

    What: Adaptive prompt counter. Silent for the first 15 prompts, then nudges with increasing frequency (every 5th, then every 3rd).

    Why: Long sessions lose coherence. The nudge reminds both you and the agent to persist context before the window fills up.

    Output: VERBATIM relay box with prompt count.

    ┌─ Context Checkpoint (prompt #20) ────────────────\n│ This session is getting deep. Consider wrapping up\n│ soon. If there are unsaved learnings, decisions, or\n│ conventions, now is a good time to persist them.\n│ ⏱ Context window: ~45k tokens (~22% of 200k)\n└──────────────────────────────────────────────────\n

    Usage: Every prompt records token usage to .context/state/stats-{session}.jsonl. Monitor live with ctx usage --follow or query with ctx usage --json. Usage is recorded even during wrap-up suppression (event: suppressed).

    Billing guard: When billing_token_warn is set in .ctxrc, a one-shot warning fires if session tokens exceed the threshold. This warning is independent of all other triggers - it fires even during wrap-up suppression.

    ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#check-persistence-context-staleness-nudge","level":4,"title":"check-persistence: Context Staleness Nudge","text":"

    What: Tracks when .context/*.md files were last modified. If too many prompts pass without a write, nudges the agent to persist.

    Why: Sessions produce insights that evaporate if not recorded. This catches the \"we talked about it but never wrote it down\" failure mode.

    Output: VERBATIM relay after 20+ prompts without a context file change.

    ┌─ Persistence Checkpoint (prompt #20) ───────────\n│ No context files updated in 20+ prompts.\n│ Have you discovered learnings, made decisions,\n│ established conventions, or completed tasks\n│ worth persisting?\n│\n│ Run /ctx-wrap-up to capture session context.\n└──────────────────────────────────────────────────\n
    ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#check-ceremonies-session-ritual-adoption","level":4,"title":"check-ceremonies: Session Ritual Adoption","text":"

    What: Scans your last 3 journal entries for /ctx-remember and /ctx-wrap-up usage. Nudges once per day if missing.

    Why: Session ceremonies are the highest-leverage habit in ctx. This hook bootstraps the habit until it becomes automatic.

    Output: Tailored nudge depending on which ceremony is missing.

    ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#check-journal-unimported-session-reminder","level":4,"title":"check-journal: Unimported Session Reminder","text":"

    What: Detects unimported Claude Code sessions and unenriched journal entries. Fires once per day.

    Why: Exported sessions become searchable history. Unenriched entries lack metadata for filtering. Both decay in value over time.

    Output: VERBATIM relay with counts and exact commands.

    ┌─ Journal Reminder ─────────────────────────────\n│ You have 3 new session(s) not yet exported.\n│ 5 existing entries need enrichment.\n│\n│ Export and enrich:\n│   ctx journal import --all\n│   /ctx-journal-enrich-all\n└────────────────────────────────────────────────\n
    ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#check-resources-system-resource-pressure","level":4,"title":"check-resources: System Resource Pressure","text":"

    What: Monitors memory, swap, disk, and CPU load. Only fires at DANGER severity (memory >= 90%, swap >= 75%, disk >= 95%, load >= 1.5x CPU count).

    Why: Resource exhaustion mid-session can corrupt work. This provides early warning to persist and exit.

    Output: VERBATIM relay listing critical resources.

    ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#check-knowledge-knowledge-file-growth","level":4,"title":"check-knowledge: Knowledge File Growth","text":"

    What: Counts entries in LEARNINGS.md, DECISIONS.md, and lines in CONVENTIONS.md. Fires once per day when thresholds are exceeded.

    Why: Large knowledge files dilute agent context. 35 learnings compete for attention; 15 focused ones get applied. Thresholds are configurable in .ctxrc.

    Default thresholds:

    # .ctxrc\nentry_count_learnings: 30\nentry_count_decisions: 20\nconvention_line_count: 200\n
    ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#check-version-binaryplugin-version-drift","level":4,"title":"check-version: Binary/Plugin Version Drift","text":"

    What: Compares the ctx binary version against the plugin version. Fires once per day. Also checks encryption key age for rotation nudge.

    Why: Version drift means hooks reference features the binary doesn't have. The key rotation nudge prevents indefinite key reuse.

    ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#check-reminders-pending-reminder-relay","level":4,"title":"check-reminders: Pending Reminder Relay","text":"

    What: Reads .context/reminders.json and surfaces any due reminders via VERBATIM relay. No throttle: fires every session until dismissed.

    Why: Reminders are sticky notes to future-you. Unlike nudges (which throttle to once per day), reminders repeat deliberately until the user dismisses them.

    Output: VERBATIM relay box listing due reminders.

    ┌─ Reminders ──────────────────────────────────────\n│  [1] refactor the swagger definitions\n│\n│ Dismiss: ctx remind dismiss <id>\n│ Dismiss all: ctx remind dismiss --all\n└──────────────────────────────────────────────────\n
    ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#check-freshness-technology-constant-staleness","level":4,"title":"check-freshness: Technology Constant Staleness","text":"

    What: Stats files listed in .ctxrc freshness_files and warns if any haven't been modified in over 6 months. Daily throttle. Silent when no files are configured (opt-in via .ctxrc).

    Why: Model capabilities evolve - token budgets, attention limits, and context window sizes that were accurate 6 months ago may no longer reflect best practices. This hook reminds you to review and touch the file to confirm values are still current.

    Config (.ctxrc):

    freshness_files:\n  - path: config/thresholds.yaml\n    desc: Model token limits and batch sizes\n    review_url: https://docs.example.com/limits  # optional\n

    Each entry has a path (relative to project root), desc (what constants live there), and optional review_url (where to check current values). When review_url is set, the nudge includes \"Review against: {url}\". When absent, just \"Touch the file to mark it as reviewed.\"

    Output: VERBATIM relay listing stale files, silent otherwise.

    ┌─ Technology Constants Stale ──────────────────────\n│   config/thresholds.yaml (210 days ago)\n│     - Model token limits and batch sizes\n│   Review against: https://docs.example.com/limits\n│ Touch each file to mark it as reviewed.\n└───────────────────────────────────────────────────\n
    ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#check-map-staleness-architecture-map-drift","level":4,"title":"check-map-staleness: Architecture Map Drift","text":"

    What: Checks whether map-tracking.json is older than 30 days and there are commits touching internal/ since the last map refresh. Daily throttle prevents repeated nudges.

    Why: Architecture documentation drifts silently as code evolves. This hook detects structural changes that the map hasn't caught up with and suggests running /ctx-architecture to refresh.

    Output: VERBATIM relay when stale and modules changed, silent otherwise.

    ┌─ Architecture Map Stale ────────────────────────────\n│ ARCHITECTURE.md hasn't been refreshed since 2026-01-15\n│ and there are commits touching 12 modules.\n│ /ctx-architecture keeps architecture docs drift-free.\n│\n│ Want me to run /ctx-architecture to refresh?\n└─────────────────────────────────────────────────────\n
    ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#heartbeat-session-heartbeat-webhook","level":4,"title":"heartbeat: Session Heartbeat Webhook","text":"

    What: Fires on every prompt. Sends a webhook notification with prompt count, session ID, context modification status, and token usage telemetry. Never produces stdout.

    Why: Other hooks only send webhooks when they \"speak\" (nudge/relay). When they are silent, you have no visibility into session activity. The heartbeat provides a continuous session-alive signal with token consumption data for observability dashboards or liveness monitoring.

    Output: None (webhook + event log only).

    Payload:

    {\n  \"event\": \"heartbeat\",\n  \"message\": \"heartbeat: prompt #7 (context_modified=false tokens=158k pct=79%)\",\n  \"detail\": {\n    \"hook\": \"heartbeat\",\n    \"variant\": \"pulse\",\n    \"variables\": {\n      \"prompt_count\": 7,\n      \"session_id\": \"abc...\",\n      \"context_modified\": false,\n      \"tokens\": 158000,\n      \"context_window\": 200000,\n      \"usage_pct\": 79\n    }\n  }\n}\n

    Token fields (tokens, context_window, usage_pct) are included when usage data is available from the session JSONL file.

    ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#tool-time-hooks-pretooluse-posttooluse","level":3,"title":"Tool-Time Hooks (PreToolUse / PostToolUse)","text":"","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#block-non-path-ctx-path-enforcement-hard-gate","level":4,"title":"block-non-path-ctx: PATH Enforcement (Hard Gate)","text":"

    What: Blocks any Bash command that invokes ./ctx, ./dist/ctx, go run ./cmd/ctx, or an absolute path to ctx. Only PATH invocations are allowed.

    Why: Enforces CONSTITUTION.md's invocation invariant. Running a dev-built binary in production context causes version confusion and silent behavior drift.

    Output: Block response (prevents the tool call):

    {\"decision\": \"block\", \"reason\": \"Use 'ctx' from PATH, not './ctx'...\"}\n
    ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#qa-reminder-pre-commit-qa-gate","level":4,"title":"qa-reminder: Pre-Commit QA Gate","text":"

    What: Fires on every Edit tool use. Reminds the agent to lint and test the entire project before committing.

    Why: Agents tend to \"I'll test later\" and then commit untested code. Repetition is intentional: the hook reinforces the habit on every edit, not just before commits.

    Output: Agent directive with hard QA gate instructions.

    ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#post-commit-context-capture-after-commit","level":4,"title":"post-commit: Context Capture After Commit","text":"

    What: Fires after any git commit (excludes --amend). Prompts the agent to offer context capture (decision? learning?) and suggest running lints/tests before pushing.

    Why: Commits are natural reflection points. The nudge converts mechanical git operations into context-capturing opportunities.

    ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#auditing-hooks-via-the-local-event-log","level":2,"title":"Auditing Hooks via the Local Event Log","text":"

    If you don't need an external audit trail, enable the local event log for a self-contained record of hook activity:

    # .ctxrc\nevent_log: true\n

    Once enabled, every hook that fires writes an entry to .context/state/events.jsonl. Query it with ctx hook event:

    ctx hook event                    # last 50 events\nctx hook event --hook qa-reminder # filter by hook\nctx hook event --session <id>     # filter by session\nctx hook event --json | jq '.'    # raw JSONL for processing\n

    The event log is local, queryable, and doesn't require any external service. For a full diagnostic workflow combining event logs with structural health checks, see Troubleshooting.

    ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#auditing-hooks-via-webhooks","level":2,"title":"Auditing Hooks via Webhooks","text":"

    The most powerful audit setup pipes all hook output to a webhook, giving you a real-time external record of what your agent is being told.

    ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#step-1-set-up-the-webhook","level":3,"title":"Step 1: Set Up the Webhook","text":"
    ctx hook notify setup\n# Enter your webhook URL (Slack, Discord, ntfy.sh, IFTTT, etc.)\n

    See Webhook Notifications for service-specific setup.

    ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#step-2-subscribe-to-relay-events","level":3,"title":"Step 2: Subscribe to relay Events","text":"
    # .ctxrc\nnotify:\n  events:\n    - relay   # all hook output: VERBATIM relays, directives, blocks\n    - nudge   # just the user-facing VERBATIM relays\n

    The relay event fires for every hook that produces output. This includes:

    Hook Event sent check-context-sizerelay + nudgecheck-persistencerelay + nudgecheck-ceremoniesrelay + nudgecheck-journalrelay + nudgecheck-resourcesrelay + nudgecheck-knowledgerelay + nudgecheck-versionrelay + nudgecheck-remindersrelay + nudgecheck-freshnessrelay + nudgecheck-map-stalenessrelay + nudgeheartbeatheartbeat only block-non-path-ctxrelay only post-commitrelay only qa-reminderrelay only","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#step-3-cross-reference","level":3,"title":"Step 3: Cross-Reference","text":"

    With relay enabled, your webhook receives a JSON payload every time a hook fires:

    {\n  \"event\": \"relay\",\n  \"message\": \"check-persistence: No context updated in 20+ prompts\",\n  \"session_id\": \"b854bd9c\",\n  \"timestamp\": \"2026-02-22T14:30:00Z\",\n  \"project\": \"my-project\"\n}\n

    This creates an external audit trail independent of the agent. You can now cross-verify: did the agent actually relay the checkpoint the hook told it to relay?

    ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#verifying-hooks-actually-fire","level":2,"title":"Verifying Hooks Actually Fire","text":"

    Hooks are invisible. An invisible thing that breaks is indistinguishable from an invisible thing that never existed. Three verification methods, from simplest to most robust:

    ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#method-1-ask-the-agent","level":3,"title":"Method 1: Ask the Agent","text":"

    The simplest check. After a few prompts into a session:

    \"Did you receive any hook output this session? Print the last\ncontext checkpoint or persistence nudge you saw.\"\n

    The agent should be able to recall recent hook output from its context window. If it says \"I haven't received any hook output\", either:

    • The hooks aren't firing (check installation);
    • The session is too short (hooks throttle early);
    • The hooks fired but the agent absorbed them silently.

    Limitation: You are trusting the agent to report accurately. Agents sometimes confabulate or miss context. Use this as a quick smoke test, not definitive proof.

    ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#method-2-check-the-webhook-trail","level":3,"title":"Method 2: Check the Webhook Trail","text":"

    If you have relay events enabled, check your webhook receiver. Every hook that fires sends a timestamped notification. No notification = no fire.

    This is the ground truth. The webhook is called directly by the ctx binary, not by the agent. The agent cannot fake, suppress, or modify webhook deliveries.

    Compare what the webhook received against what the agent claims to have relayed. Discrepancies mean the agent is absorbing nudges instead of surfacing them.

    ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#method-3-read-the-local-logs","level":3,"title":"Method 3: Read the Local Logs","text":"

    Hooks that support logging write to .context/logs/:

    # Check context-size hook activity\ncat .context/logs/check-context-size.log\n\n# Sample output:\n# [2026-02-22 09:15:00] [session:b854bd9c] prompt#1 silent\n# [2026-02-22 09:17:33] [session:b854bd9c] prompt#16 CHECKPOINT\n# [2026-02-22 09:20:01] [session:b854bd9c] prompt#20 CHECKPOINT\n
    # Check persistence nudge activity\ncat .context/logs/check-persistence.log\n\n# Sample output:\n# [2026-02-22 09:15:00] [session:b854bd9c] init count=1 mtime=1770646611\n# [2026-02-22 09:20:01] [session:b854bd9c] prompt#20 NUDGE since_nudge=20\n

    Logs are append-only and written by the ctx binary, not the agent.

    ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#detecting-silent-hook-failures","level":2,"title":"Detecting Silent Hook Failures","text":"

    The hardest failure mode: hooks that stop firing without error. The plugin config changes, a binary update drops a hook, or a PATH issue silently breaks execution. Nothing errors: The hook just never runs.

    ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#the-staleness-signal","level":3,"title":"The Staleness Signal","text":"

    If .context/logs/check-context-size.log has no entries newer than 5 days but you've been running sessions daily, something is wrong. The absence of evidence is evidence of absence, but only if you control for inactivity.

    ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#false-positive-protection","level":3,"title":"False Positive Protection","text":"

    A naive \"hooks haven't fired in N days\" alert fires incorrectly when you simply haven't used ctx. The correct check needs two inputs:

    1. Last hook fire time: from .context/logs/ or webhook history
    2. Last session activity: from journal entries or ctx journal source

    If sessions are happening but hooks aren't firing, that's a real problem. If neither sessions nor hooks are happening, that's a vacation.

    ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#what-to-check","level":3,"title":"What to Check","text":"

    When you suspect hooks aren't firing:

    # 1. Verify the plugin is installed\nls ~/.claude/plugins/\n\n# 2. Check hook registration\ncat ~/.claude/plugins/ctx/hooks.json | head -20\n\n# 3. Run a hook manually to see if it errors\necho '{\"session_id\":\"test\"}' | ctx system check-context-size\n\n# 4. Check for PATH issues\nwhich ctx\nctx --version\n
    ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#tips","level":2,"title":"Tips","text":"
    • Start with nudge, graduate to relay: The nudge event covers user-facing VERBATIM relays. Add relay when you want full visibility into agent directives and hard gates.
    • Webhooks are your trust anchor: The agent can ignore a nudge, but it can't suppress the webhook. If the webhook fired and the agent didn't relay, you have proof of a compliance gap.
    • Hooks are throttled by design: Most check hooks fire once per day or use adaptive frequency. Don't expect a notification every prompt: Silence usually means the throttle is working, not that the hook is broken.
    • Daily markers live in .context/state/: Throttle files are stored in .context/state/ alongside other project-scoped state. If you need to force a hook to re-fire during testing, delete the corresponding marker file.
    • The QA reminder is intentionally noisy: Unlike other hooks, qa-reminder fires on every Edit call with no throttle. This is deliberate: Commit quality degrades when the reminder loses salience.
    • Log files are safe to commit: .context/logs/ contains only timestamps, session IDs, and status keywords. No secrets, no code.
    ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#next-up","level":2,"title":"Next Up","text":"

    Detecting and Fixing Drift →: Keep context files accurate as your codebase evolves.

    ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#see-also","level":2,"title":"See Also","text":"
    • Troubleshooting: full diagnostic workflow using ctx doctor, event logs, and /ctx-doctor
    • Customizing Hook Messages: override what hooks say without changing what they do
    • Webhook Notifications: setting up and configuring the webhook system
    • Hook Output Patterns: understanding VERBATIM relays, agent directives, and hard gates
    • Detecting and Fixing Drift: structural checks that complement runtime hook auditing
    • CLI Reference: full ctx system command reference
    ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/task-management/","level":1,"title":"Tracking Work Across Sessions","text":"","path":["Recipes","Knowledge and Tasks","Tracking Work Across Sessions"],"tags":[]},{"location":"recipes/task-management/#the-problem","level":2,"title":"The Problem","text":"

    You have work that spans multiple sessions. Tasks get added during one session, partially finished in another, and completed days later.

    Without a system, follow-up items fall through the cracks, priorities drift, and you lose track of what was done versus what still needs doing. TASKS.md grows cluttered with completed checkboxes that obscure the remaining work.

    How do you manage work items that span multiple sessions without losing context?

    Prefer Skills over Raw Commands

    When working with an AI agent, use /ctx-task-add instead of raw ctx add task. The agent automatically picks up session ID, branch, and commit hash from its context, so no manual flags are needed.

    ","path":["Recipes","Knowledge and Tasks","Tracking Work Across Sessions"],"tags":[]},{"location":"recipes/task-management/#tldr","level":2,"title":"TL;DR","text":"

    Manage Tasks:

    ctx add task \"Fix race condition\" --priority high \\\n  --session-id abc12345 --branch main --commit 68fbc00a  # add\nctx add task \"Write tests\" --section \"Phase 2\" \\\n  --session-id abc12345 --branch main --commit 68fbc00a  # add to phase\nctx task complete \"race condition\"                      # mark done\nctx task snapshot \"before-refactor\"               # backup\nctx task archive                                  # clean up\n

    Pick Up the Next Task:

    /ctx-next # pick what's next\n

    Read on for the full workflow and conversational patterns.

    ","path":["Recipes","Knowledge and Tasks","Tracking Work Across Sessions"],"tags":[]},{"location":"recipes/task-management/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Tool Type Purpose ctx add task Command Add a new task to TASKS.mdctx task complete Command Mark a task as done by number or text ctx task snapshot Command Create a point-in-time backup of TASKS.mdctx task archive Command Move completed tasks to archive file /ctx-task-add Skill AI-assisted task creation with validation /ctx-archive Skill AI-guided archival with safety checks /ctx-next Skill Pick what to work on based on priorities","path":["Recipes","Knowledge and Tasks","Tracking Work Across Sessions"],"tags":[]},{"location":"recipes/task-management/#the-workflow","level":2,"title":"The Workflow","text":"","path":["Recipes","Knowledge and Tasks","Tracking Work Across Sessions"],"tags":[]},{"location":"recipes/task-management/#step-1-add-tasks-with-priorities","level":3,"title":"Step 1: Add Tasks with Priorities","text":"

    Every piece of follow-up work gets a task. Use ctx add task from the terminal or /ctx-task-add from your AI assistant. Tasks should start with a verb and be specific enough that someone unfamiliar with the session could act on them.

    # High-priority bug found during code review\nctx add task \"Fix race condition in session cooldown\" --priority high \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n\n# Medium-priority feature work\nctx add task \"Add --format json flag to ctx status for CI integration\" --priority medium \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n\n# Low-priority cleanup\nctx add task \"Remove deprecated --raw flag from ctx load\" --priority low \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n

    The /ctx-task-add skill validates your task before recording it. It checks that the description is actionable, not a duplicate, and specific enough for someone else to pick up.

    If you say \"fix the bug,\" it will ask you to clarify which bug and where.

    Tasks Are Often Created Proactively

    In practice, many tasks are created proactively by the agent rather than by explicit CLI commands.

    After completing a feature, the agent will often identify follow-up work: tests, docs, edge cases, error handling, and offer to add them as tasks.

    You do not need to dictate ctx add task commands; the agent picks up on work context and suggests tasks naturally.

    ","path":["Recipes","Knowledge and Tasks","Tracking Work Across Sessions"],"tags":[]},{"location":"recipes/task-management/#step-2-organize-with-phase-sections","level":3,"title":"Step 2: Organize with Phase Sections","text":"

    Tasks live in phase sections inside TASKS.md.

    Phases provide logical groupings that preserve order and enable replay.

    A task does not move between sections. It stays in its phase permanently, and status is tracked via checkboxes and inline tags.

    ## Phase 1: Core CLI\n\n- [x] Implement ctx add command\n- [x] Implement ctx task complete command\n- [ ] Add --section flag to ctx add task `#priority:medium`\n\n## Phase 2: AI Integration\n\n- [ ] Implement ctx agent cooldown `#priority:high` `#in-progress`\n- [ ] Add ctx watch XML parsing `#priority:medium`\n  - Blocked by: Need to finalize agent output format\n\n## Backlog\n\n- [ ] Performance optimization for large TASKS.md files `#priority:low`\n- [ ] Add metrics dashboard to ctx status `#priority:deferred`\n

    Use --section when adding a task to a specific phase:

    ctx add task \"Add ctx watch XML parsing\" --priority medium --section \\\n    \"Phase 2: AI Integration\" \\\n    --session-id abc12345 --branch main --commit 68fbc00a\n

    Without --section, the task is inserted before the first unchecked task in TASKS.md.

    ","path":["Recipes","Knowledge and Tasks","Tracking Work Across Sessions"],"tags":[]},{"location":"recipes/task-management/#step-3-pick-what-to-work-on","level":3,"title":"Step 3: Pick What to Work On","text":"

    At the start of a session, or after finishing a task, use /ctx-next to get prioritized recommendations.

    The skill reads TASKS.md, checks recent sessions, and ranks candidates using explicit priority, blocking status, in-progress state, momentum from recent work, and phase order.

    You can also ask naturally: \"what should we work on?\" or \"what's the highest priority right now?\"

    /ctx-next\n

    The output looks like this:

    **1. Implement ctx agent cooldown** `#priority:high`\n\n    Still in-progress from yesterday's session. The tombstone file approach is\n    half-built. Finishing is cheaper than context-switching.\n\n**2. Add --section flag to ctx add task** `#priority:medium`\n\n    Last Phase 1 item. Quick win that unblocks organized task entry.\n\n---\n\n*Based on 8 pending tasks across 3 phases.\n\nLast session: agent-cooldown (2026-02-06).*\n

    In-progress tasks almost always come first:

    Finishing existing work takes priority over starting new work.

    ","path":["Recipes","Knowledge and Tasks","Tracking Work Across Sessions"],"tags":[]},{"location":"recipes/task-management/#step-4-complete-tasks","level":3,"title":"Step 4: Complete Tasks","text":"

    When a task is done, mark it complete by number or partial text match:

    # By task number (as shown in TASKS.md)\nctx task complete 3\n\n# By partial text match\nctx task complete \"agent cooldown\"\n

    The task's checkbox changes from [ ] to [x]. Tasks are never deleted: they stay in their phase section so history is preserved.

    Be Conversational

    You rarely need to run ctx task complete yourself during an interactive session.

    When you say something like \"the rate limiter is done\" or \"we finished that,\" the agent marks the task complete and moves on to suggesting what is next.

    The CLI commands are most useful for manual housekeeping, scripted workflows, or when you want precision.

    ","path":["Recipes","Knowledge and Tasks","Tracking Work Across Sessions"],"tags":[]},{"location":"recipes/task-management/#step-5-snapshot-before-risky-changes","level":3,"title":"Step 5: Snapshot Before Risky Changes","text":"

    Before a major refactor or any change that might break things, snapshot your current task state. This creates a copy of TASKS.md in .context/archive/ without modifying the original.

    # Default snapshot\nctx task snapshot\n\n# Named snapshot (recommended before big changes)\nctx task snapshot \"before-refactor\"\n

    This creates a file like .context/archive/tasks-before-refactor-2026-02-08-1430.md. If the refactor goes sideways and you need to confirm what the task state looked like before you started, the snapshot is there.

    Snapshots are cheap: Take them before any change you might want to undo or review later.

    ","path":["Recipes","Knowledge and Tasks","Tracking Work Across Sessions"],"tags":[]},{"location":"recipes/task-management/#step-6-archive-when-tasksmd-gets-cluttered","level":3,"title":"Step 6: Archive When TASKS.md Gets Cluttered","text":"

    After several sessions, TASKS.md accumulates completed tasks that make it hard to see what is still pending.

    Use ctx task archive to move all [x] items to a timestamped archive file.

    Start with a dry run to preview what will be moved:

    ctx task archive --dry-run\n

    Then archive:

    ctx task archive\n

    Completed tasks move to .context/archive/tasks-2026-02-08.md. Phase headers are preserved in the archive for traceability. Pending tasks ([ ]) remain in TASKS.md.

    The /ctx-archive skill adds two safety checks before archiving: it verifies that completed tasks are genuinely cluttering the view and that nothing was marked [x] prematurely.

    ","path":["Recipes","Knowledge and Tasks","Tracking Work Across Sessions"],"tags":[]},{"location":"recipes/task-management/#letting-the-agent-drive","level":2,"title":"Letting the Agent Drive","text":"

    The steps above show the CLI commands for task management.

    In practice, most task management happens conversationally:

    An agent that has loaded the context files does not need you to type ctx add task. It tracks work naturally and offers the right operations.

    ","path":["Recipes","Knowledge and Tasks","Tracking Work Across Sessions"],"tags":[]},{"location":"recipes/task-management/#use-natural-language-instead-of-commands","level":3,"title":"Use Natural Language Instead of Commands","text":"

    These conversational prompts replace explicit commands during interactive sessions:

    Instead of typing... Just say... ctx add task \"Write tests for X\" --session-id ID --branch BR --commit HASH \"We should add tests for this: track that?\" /ctx-next \"What should we work on?\" ctx task complete \"rate limiting\" \"The rate limiter is done, what's next?\" ctx task archive \"TASKS.md is getting long, can you clean it up?\" ctx add task ... --session-id ID --branch BR --commit HASH && ctx add task ... \"Add follow-ups for what we just built.\"

    The agent translates these into the right ctx operations behind the scenes.

    ","path":["Recipes","Knowledge and Tasks","Tracking Work Across Sessions"],"tags":[]},{"location":"recipes/task-management/#the-agent-creates-tasks-proactively","level":3,"title":"The Agent Creates Tasks Proactively","text":"

    After completing a piece of work, the agent does not wait for you to dictate follow-up items. It identifies them from the work context and offers:

    **Agent**: \"I've finished implementing the rate limiter middleware. I noticed\nthree follow-up items:\n\n1. Write integration tests for the rate limiter (the unit tests pass but\n   there's no end-to-end coverage)\n2. Add rate limit headers to API responses (X-RateLimit-Remaining, etc.)\n3. Document the rate limit configuration in the ops runbook\n\nWant me to add these as tasks?\"\n

    This is how an agent behaves once it has read the Agent Playbook.

    The playbook's \"Work, Reflect, Persist\" cycle means the agent asks itself after every milestone: Did I just complete a task? Did new work emerge?

    When the answer is yes, it acts.

    ","path":["Recipes","Knowledge and Tasks","Tracking Work Across Sessions"],"tags":[]},{"location":"recipes/task-management/#asking-the-agent-what-to-capture","level":3,"title":"Asking the Agent What to Capture","text":"

    You don't always know what's worth persisting.

    Instead of reviewing the session yourself and typing specific commands, ask the agent to do the thinking:

    You say What happens \"What tasks came out of this session?\" Agent reviews the work, identifies follow-ups, and offers to add them \"Are there any tasks, learnings, or decisions worth adding?\" Agent scans the full session for all three types and proposes a batch \"What should we remember from today?\" Agent surfaces everything persist-worthy: not just tasks \"Anything we should track before we move on?\" Agent checks for loose ends specific to the work just completed

    This is especially powerful after long or exploratory sessions where multiple threads were touched.

    The agent has the full conversation in context and can spot follow-ups you might miss:

    You:   \"Are there any tasks, learnings, or decisions worth adding\n        from this session?\"\n\nAgent: \"Yes. Here's what I'd suggest:\n\n  Tasks:\n  - Add retry logic to the webhook delivery (we discussed it when\n    the timeout test failed but never tracked it)\n  - Update the API docs to reflect the new rate limit headers\n\n  Learnings:\n  - net/http's default client has no timeout: we burned 15 minutes\n    on that before adding an explicit 30s timeout\n\n  Decisions:\n  - We chose token-bucket over sliding-window for rate limiting\n    because it handles bursts better and the library is simpler\n\n  Want me to add all of these?\"\n\nYou:   \"All of them.\"\n

    The agent fills in structured fields from what it observed during the session. You get the same quality entries as if you typed verbose commands, without needing to remember what happened or how to format it.

    ","path":["Recipes","Knowledge and Tasks","Tracking Work Across Sessions"],"tags":[]},{"location":"recipes/task-management/#the-agent-completes-tasks-and-moves-on","level":3,"title":"The Agent Completes Tasks and Moves On","text":"

    When you confirm that work is finished, or when the agent can see that it is, it marks the task done and suggests the next step:

    You: \n\"The cooldown logic is working now.\"\n\nAgent: \n\"Marked 'Implement ctx agent cooldown' as done. Your highest-\npriority remaining task is 'Add --section flag to ctx add task'.\n\nIt's the last item in Phase 1 and a quick win. Want to start on that?\"\n

    The agent chains completion into next-task selection.

    One sentence from you triggers two operations.

    ","path":["Recipes","Knowledge and Tasks","Tracking Work Across Sessions"],"tags":[]},{"location":"recipes/task-management/#end-of-session-capture","level":3,"title":"End-of-Session Capture","text":"

    At the end of a session, the agent reviews what happened and catches loose ends:

    You: \n\"I think we're good for today.\"\n\nAgent: \n\"Before we wrap up: during the middleware work you mentioned\nwanting to add metrics for rate limit hits. That never made it into TASKS.md.\n\nWant me to add it as a task so it's there next session?\"\n

    This is the proactive persistence model at work.

    The agent monitors the conversation for mentioned-but-not-tracked work and surfaces it before context is lost.

    ","path":["Recipes","Knowledge and Tasks","Tracking Work Across Sessions"],"tags":[]},{"location":"recipes/task-management/#a-conversational-session-example","level":3,"title":"A Conversational Session Example","text":"

    Here is what task management looks like in a real session when the agent is driving:

    You:   What's left to do?\nAgent: You have 5 pending tasks across 2 phases. The highest priority is\n       \"Implement ctx agent cooldown\": it's marked in-progress from\n       yesterday and half-built. Want to pick that up?\n\nYou:   Yeah, let's finish it.\n       [... 20 minutes of implementation work ...]\n\nYou:   That's working. Tests pass.\nAgent: Nice. I'll mark \"Implement ctx agent cooldown\" as done.\n       I noticed two things from the implementation:\n       1. The tombstone file cleanup should happen on a timer, not just\n          at startup. Want me to add that as a task?\n       2. We learned that file locks don't work reliably on NFS. I'll\n          add that to LEARNINGS.md.\n\nYou:   Yes to both. What's next?\nAgent: Added the cleanup task as high priority. Next up is \"Add --section\n       flag to ctx add task\": last item in Phase 1. It should be\n       straightforward since the flag parsing pattern already exists\n       in the codebase. Ready?\n

    It's All Conversational

    Notice what did not happen: The user never typed a ctx command.

    The agent handled task completion, follow-up creation, learning capture, and next-task selection: all from natural conversation.

    ","path":["Recipes","Knowledge and Tasks","Tracking Work Across Sessions"],"tags":[]},{"location":"recipes/task-management/#putting-it-all-together","level":2,"title":"Putting It All Together","text":"
    # Add a task\nctx add task \"Implement rate limiting for API endpoints\" --priority high \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n\n# Add to a specific phase\nctx add task \"Write integration tests for rate limiter\" --section \"Phase 2\" \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n\n# See what to work on\n# (from AI assistant) /ctx-next\n\n# Mark done by text\nctx task complete \"rate limiting\"\n\n# Mark done by number\nctx task complete 5\n\n# Snapshot before a risky refactor\nctx task snapshot \"before-middleware-rewrite\"\n\n# Archive completed tasks when the list gets long\nctx task archive --dry-run     # preview first\nctx task archive               # then archive\n
    ","path":["Recipes","Knowledge and Tasks","Tracking Work Across Sessions"],"tags":[]},{"location":"recipes/task-management/#tips","level":2,"title":"Tips","text":"
    • Start tasks with a verb: \"Add,\" \"Fix,\" \"Implement,\" \"Investigate\": not just a topic like \"Authentication.\"
    • Include the why in the task description. Future sessions lack the context of why you added the task. \"Add rate limiting\" is worse than \"Add rate limiting to prevent abuse on the public API after the load test showed 10x traffic spikes.\"
    • Use #in-progress sparingly. Only one or two tasks should carry this tag at a time. If everything is in-progress, nothing is.
    • Snapshot before, not after. The point of a snapshot is to capture the state before a change, not to celebrate what you just finished.
    • Archive regularly. Once completed tasks outnumber pending ones, it is time to archive. A clean TASKS.md helps both you and your AI assistant focus.
    • Never delete tasks. Mark them [x] (completed) or [-] (skipped with a reason). Deletion breaks the audit trail.
    • Trust the agent's task instincts. When the agent suggests follow-up items after completing work, it is drawing on the full context of what just happened.
    • Conversational prompts beat commands in interactive sessions. Saying \"what should we work on?\" is faster and more natural than running /ctx-next. Save explicit commands for scripts, CI, and unattended runs.
    • Let the agent chain operations. A single statement like \"that's done, what's next?\" can trigger completion, follow-up identification, and next-task selection in one flow.
    • Review proactive task suggestions before moving on. The best follow-ups come from items spotted in-context right after the work completes.
    ","path":["Recipes","Knowledge and Tasks","Tracking Work Across Sessions"],"tags":[]},{"location":"recipes/task-management/#next-up","level":2,"title":"Next Up","text":"

    Using the Scratchpad →: Store short-lived sensitive notes in an encrypted scratchpad.

    ","path":["Recipes","Knowledge and Tasks","Tracking Work Across Sessions"],"tags":[]},{"location":"recipes/task-management/#see-also","level":2,"title":"See Also","text":"
    • The Complete Session: full session lifecycle including task management in context
    • Persisting Decisions, Learnings, and Conventions: capturing the \"why\" behind your work
    • Detecting and Fixing Drift: keeping TASKS.md accurate over time
    • CLI Reference: full documentation for ctx add, ctx task complete, ctx task
    • Context Files: TASKS.md: format and conventions for TASKS.md
    ","path":["Recipes","Knowledge and Tasks","Tracking Work Across Sessions"],"tags":[]},{"location":"recipes/triggers/","level":1,"title":"Authoring Lifecycle Triggers","text":"","path":["Recipes","Agents and Automation","Authoring Lifecycle Triggers"],"tags":[]},{"location":"recipes/triggers/#authoring-lifecycle-triggers","level":1,"title":"Authoring Lifecycle Triggers","text":"

    Triggers are executable shell scripts that fire at specific events during an AI session. They're how you express \"when the AI saves a file, also do X\" or \"before the AI edits this path, check Y first.\" This recipe walks through writing your first trigger, testing it, and enabling it safely.

    Triggers Execute Arbitrary Code

    A trigger is a shell script with the executable bit set. It runs with the same privileges as your AI tool and receives JSON input on stdin. Treat triggers like pre-commit hooks:

    • Only enable scripts you have read and understand.
    • Never enable a trigger you downloaded from the internet without reviewing every line.
    • Avoid shelling out to user-controlled values (jq -r output, path field, tool field) without quoting.
    • A malicious or buggy trigger can block tool calls, corrupt context files, or exfiltrate data.

    The generated trigger template starts disabled (no executable bit) so you cannot accidentally run an unreviewed script. Enable it explicitly with ctx trigger enable.

    ","path":["Recipes","Agents and Automation","Authoring Lifecycle Triggers"],"tags":[]},{"location":"recipes/triggers/#scenario","level":2,"title":"Scenario","text":"

    You want a pre-tool-use trigger that blocks the AI from editing anything in internal/crypto/ without explicit confirmation. Cryptographic code is sensitive, accidental edits have caused outages before, and you want a hard gate.

    ","path":["Recipes","Agents and Automation","Authoring Lifecycle Triggers"],"tags":[]},{"location":"recipes/triggers/#step-1-scaffold-the-script","level":2,"title":"Step 1: Scaffold the Script","text":"
    ctx trigger add pre-tool-use protect-crypto\n

    That creates .context/hooks/pre-tool-use/protect-crypto.sh with a template:

    #!/usr/bin/env bash\nset -euo pipefail\n\n# Read the JSON event from stdin.\npayload=$(cat)\n\n# Parse fields with jq.\ntool=$(echo \"$payload\" | jq -r '.tool // empty')\npath=$(echo \"$payload\" | jq -r '.path // empty')\n\n# Your logic here.\n\n# Return a JSON result. action can be \"allow\", \"block\", or absent.\necho '{\"action\": \"allow\"}'\n

    Note: the directory is .context/hooks/pre-tool-use/; the on-disk layout still uses hooks/ even though the command is ctx trigger. If you ls .context/hooks/, that's where your triggers live.

    ","path":["Recipes","Agents and Automation","Authoring Lifecycle Triggers"],"tags":[]},{"location":"recipes/triggers/#step-2-write-the-logic","level":2,"title":"Step 2: Write the Logic","text":"

    Open the file and replace the template body:

    #!/usr/bin/env bash\nset -euo pipefail\n\npayload=$(cat)\ntool=$(echo \"$payload\" | jq -r '.tool // empty')\npath=$(echo \"$payload\" | jq -r '.path // empty')\n\n# Only gate write-family tools.\ncase \"$tool\" in\n  write_file|edit_file|apply_patch) ;;\n  *)\n    echo '{\"action\": \"allow\"}'\n    exit 0\n    ;;\nesac\n\n# Block any path under internal/crypto/.\ncase \"$path\" in\n  internal/crypto/*|*/internal/crypto/*)\n    jq -n --arg p \"$path\" '{\n      action: \"block\",\n      message: (\"Edits to \" + $p + \" require manual review. \" +\n                \"See CONVENTIONS.md for the crypto-change process.\")\n    }'\n    exit 0\n    ;;\nesac\n\necho '{\"action\": \"allow\"}'\n

    A few things to note:

    • set -euo pipefail: any unhandled error aborts the script. Critical for a security-relevant trigger.
    • Quote everything from jq: the path field comes from the AI tool; treat it as untrusted input.
    • Explicit allow case: the default is allow. An empty or missing response is a risky default.
    • Use jq -n --arg for output construction, as it is safer than string concatenation when the message may contain special characters.
    ","path":["Recipes","Agents and Automation","Authoring Lifecycle Triggers"],"tags":[]},{"location":"recipes/triggers/#step-3-test-with-a-mock-payload","level":2,"title":"Step 3: Test with a Mock Payload","text":"

    Before enabling the trigger, test it with a realistic mock input using ctx trigger test. This runs the script against a synthetic JSON payload without actually firing any AI tool.

    # Test the \"should block\" case\nctx trigger test pre-tool-use --tool write_file --path internal/crypto/aes.go\n

    Expected: the trigger returns {\"action\":\"block\", \"message\": \"...\"}.

    # Test the \"should allow\" case\nctx trigger test pre-tool-use --tool write_file --path internal/memory/mirror.go\n

    Expected: the trigger returns {\"action\":\"allow\"}.

    # Test that non-write tools pass through\nctx trigger test pre-tool-use --tool read_file --path internal/crypto/aes.go\n

    Expected: {\"action\":\"allow\"} because the case statement only gates write-family tools.

    If any of these cases misbehave, fix the trigger before enabling it. The trigger is disabled at this point, so misbehavior doesn't affect real AI sessions.

    ","path":["Recipes","Agents and Automation","Authoring Lifecycle Triggers"],"tags":[]},{"location":"recipes/triggers/#step-4-enable-it","level":2,"title":"Step 4: Enable It","text":"

    Once the test cases pass, enable the trigger:

    ctx trigger enable protect-crypto\n

    That sets the executable bit. The next time a pre-tool-use event occurs, the trigger will fire.

    Verify it's enabled:

    ctx trigger list\n

    Should show protect-crypto under pre-tool-use with an enabled indicator.

    ","path":["Recipes","Agents and Automation","Authoring Lifecycle Triggers"],"tags":[]},{"location":"recipes/triggers/#step-5-iterate-safely","level":2,"title":"Step 5: Iterate Safely","text":"

    If you discover a bug after enabling, disable first, fix second:

    ctx trigger disable protect-crypto\n# ...edit the script...\nctx trigger test pre-tool-use --tool write_file --path internal/crypto/aes.go\nctx trigger enable protect-crypto\n

    Disabling simply clears the executable bit; the script stays on disk, and ctx trigger enable re-enables it without rewriting anything.

    ","path":["Recipes","Agents and Automation","Authoring Lifecycle Triggers"],"tags":[]},{"location":"recipes/triggers/#patterns-worth-copying","level":2,"title":"Patterns Worth Copying","text":"","path":["Recipes","Agents and Automation","Authoring Lifecycle Triggers"],"tags":[]},{"location":"recipes/triggers/#logging-not-blocking","level":3,"title":"Logging, Not Blocking","text":"

    For auditing or analytics, return {\"action\":\"allow\"} always and append to a log as a side effect:

    #!/usr/bin/env bash\nset -euo pipefail\npayload=$(cat)\necho \"$payload\" >> .context/logs/tool-use.jsonl\necho '{\"action\":\"allow\"}'\n
    ","path":["Recipes","Agents and Automation","Authoring Lifecycle Triggers"],"tags":[]},{"location":"recipes/triggers/#context-injection-at-session-start","level":3,"title":"Context Injection at Session Start","text":"

    A session-start trigger can prepend text to the agent's initial prompt by emitting {\"action\":\"inject\", \"content\": \"...\"} . This is useful for injecting daily standup notes, open PRs, or rotating TODOs without storing them in a steering file.

    ","path":["Recipes","Agents and Automation","Authoring Lifecycle Triggers"],"tags":[]},{"location":"recipes/triggers/#chaining-triggers-of-the-same-type","level":3,"title":"Chaining Triggers of the Same Type","text":"

    Multiple scripts in the same type directory all run. If any returns action: block, the block wins. Keep individual triggers single-purpose and rely on composition.

    ","path":["Recipes","Agents and Automation","Authoring Lifecycle Triggers"],"tags":[]},{"location":"recipes/triggers/#common-mistakes","level":2,"title":"Common Mistakes","text":"

    Forgetting the shebang. Without #!/usr/bin/env bash, the trigger won't execute even with the executable bit set.

    Not quoting $path. If you use $path in a command substitution or a case glob without quoting, a file name with spaces or metacharacters will break the trigger in surprising ways.

    Enabling before testing. ctx trigger enable makes the script live immediately. Always ctx trigger test first.

    Outputting non-JSON. The trigger's stdout must be valid JSON or ctx's trigger runner will log a parse error. Use jq -n to construct output rather than hand-writing JSON strings.

    Mixing hook and trigger vocabulary. The command is ctx trigger but the on-disk directory is .context/hooks/. The feature was renamed; the directory name lags behind. Don't let this confuse you; they refer to the same thing.

    ","path":["Recipes","Agents and Automation","Authoring Lifecycle Triggers"],"tags":[]},{"location":"recipes/triggers/#see-also","level":2,"title":"See Also","text":"
    • ctx trigger reference: full command, flag, and event-type reference.
    • ctx steering: persistent rules, not scripts. Use steering when the thing you want is \"tell the AI to always do X\" rather than \"run a script when Y happens.\"
    • Writing steering files: the rule-based equivalent of this recipe.
    ","path":["Recipes","Agents and Automation","Authoring Lifecycle Triggers"],"tags":[]},{"location":"recipes/troubleshooting/","level":1,"title":"Troubleshooting","text":"","path":["Recipes","Maintenance","Troubleshooting"],"tags":[]},{"location":"recipes/troubleshooting/#the-problem","level":2,"title":"The Problem","text":"

    Something isn't working: a hook isn't firing, nudges are too noisy, context seems stale, or the agent isn't following instructions. The information to diagnose it exists (across status, drift, event logs, hook config, and session history), but assembling it manually is tedious.

    How do you figure out what's wrong and fix it?

    ","path":["Recipes","Maintenance","Troubleshooting"],"tags":[]},{"location":"recipes/troubleshooting/#tldr","level":2,"title":"TL;DR","text":"
    ctx doctor                   # structural health check\nctx hook event --last 20  # recent hook activity\n# or ask: \"something seems off, can you diagnose?\"\n
    ","path":["Recipes","Maintenance","Troubleshooting"],"tags":[]},{"location":"recipes/troubleshooting/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Tool Type Purpose ctx doctor CLI command Structural health report ctx doctor --json CLI command Machine-readable health report ctx hook event CLI command Query local event log /ctx-doctor Skill Agent-driven diagnosis with analysis","path":["Recipes","Maintenance","Troubleshooting"],"tags":[]},{"location":"recipes/troubleshooting/#the-workflow","level":2,"title":"The Workflow","text":"","path":["Recipes","Maintenance","Troubleshooting"],"tags":[]},{"location":"recipes/troubleshooting/#quick-check-ctx-doctor","level":3,"title":"Quick Check: ctx doctor","text":"

    Run ctx doctor for an instant structural health report. It checks context initialization, required files, drift, hook configuration, event logging, webhooks, reminders, task completion ratio, and context token size: all in one pass:

    ctx doctor\n
    ctx doctor\n==========\n\nStructure\n  ✓ Context initialized (.context/)\n  ✓ Required files present (4/4)\n\nQuality\n  ⚠ Drift: 2 warnings (stale path in ARCHITECTURE.md, high entry count in LEARNINGS.md)\n\nHooks\n  ✓ hooks.json valid (14 hooks registered)\n  ○ Event logging disabled (enable with event_log: true in .ctxrc)\n\nState\n  ✓ No pending reminders\n  ⚠ Task completion ratio high (18/22 = 82%): consider archiving\n\nSize\n  ✓ Context size: ~4200 tokens (budget: 8000)\n\nSummary: 2 warnings, 0 errors\n

    Warnings are non-critical but worth fixing. Errors need attention. Informational notes (○) flag optional features that aren't enabled.

    For scripting:

    ctx doctor --json | jq '.warnings'\n
    ","path":["Recipes","Maintenance","Troubleshooting"],"tags":[]},{"location":"recipes/troubleshooting/#deep-dive-ctx-doctor","level":3,"title":"Deep Dive: /ctx-doctor","text":"

    When you need the agent to reason about what's wrong, use the skill. Ask naturally or invoke directly:

    Why didn't my hook fire?\nSomething seems off, can you diagnose?\n/ctx-doctor\n

    The agent follows a triage sequence:

    1. Baseline: runs ctx doctor --json for structural health
    2. Events: runs ctx hook event --json --last 100 (if event logging enabled)
    3. Correlate: connects findings across both sources
    4. Present: structured findings with evidence
    5. Suggest: actionable next steps (but doesn't auto-fix)

    The skill degrades gracefully: without event logging enabled, it still runs structural checks and notes what you'd gain by enabling it.

    ","path":["Recipes","Maintenance","Troubleshooting"],"tags":[]},{"location":"recipes/troubleshooting/#raw-event-inspection","level":3,"title":"Raw Event Inspection","text":"

    For power users: ctx hook event with filters gives direct access to the event log.

    # Last 50 events (default)\nctx hook event\n\n# Events from a specific session\nctx hook event --session eb1dc9cd-0163-4853-89d0-785fbfaae3a6\n\n# Only QA reminder events\nctx hook event --hook qa-reminder\n\n# Raw JSONL for jq processing\nctx hook event --json | jq '.message'\n\n# Include rotated (older) events\nctx hook event --all --last 100\n

    Filters use AND logic: --hook qa-reminder --session abc123 returns only QA reminder events from that specific session.

    ","path":["Recipes","Maintenance","Troubleshooting"],"tags":[]},{"location":"recipes/troubleshooting/#common-problems","level":2,"title":"Common Problems","text":"","path":["Recipes","Maintenance","Troubleshooting"],"tags":[]},{"location":"recipes/troubleshooting/#ctx-not-initialized","level":3,"title":"\"ctx: Not Initialized\"","text":"

    Symptoms: Any ctx command fails with ctx: not initialized - run \"ctx init\" first.

    Cause: You're running ctx in a directory without an initialized .context/ directory. This guard runs on all user-facing commands to prevent confusing downstream errors.

    Fix:

    ctx init          # create .context/ with template files\nctx init --minimal  # or just the essentials (CONSTITUTION, TASKS, DECISIONS)\n

    Commands that work without initialization: ctx init, ctx setup, ctx doctor, and help-only grouping commands (ctx, ctx system).

    ","path":["Recipes","Maintenance","Troubleshooting"],"tags":[]},{"location":"recipes/troubleshooting/#my-hook-isnt-firing","level":3,"title":"\"My Hook Isn't Firing\"","text":"

    Symptoms: No nudges appearing, webhook silent, event log shows no entries for the expected hook.

    Diagnosis:

    # 1. Check if ctx is installed and on PATH\nwhich ctx && ctx --version\n\n# 2. Check if the hook is registered\ngrep \"check-persistence\" ~/.claude/plugins/ctx/hooks.json\n\n# 3. Run the hook manually to see if it errors\necho '{\"session_id\":\"test\"}' | ctx system check-persistence\n\n# 4. Check event log for the hook (if enabled)\nctx hook event --hook check-persistence\n

    Common causes:

    • Plugin is not installed: run ctx init --claude to reinstall
    • PATH issue: the hook invokes ctx from PATH; ensure it resolves
    • Throttle active: most hooks fire once per day: check .context/state/ for daily marker files
    • Hook silenced: a custom message override may be an empty file: check ctx hook message list for overrides
    ","path":["Recipes","Maintenance","Troubleshooting"],"tags":[]},{"location":"recipes/troubleshooting/#too-many-nudges","level":3,"title":"\"Too Many Nudges\"","text":"

    Symptoms: The agent is overwhelmed with hook output. Context checkpoints, persistence reminders, and QA gates fire constantly.

    Diagnosis:

    # Check how often hooks fired recently\nctx hook event --last 50\n\n# Count fires per hook\nctx hook event --json | jq -r '.detail.hook // \"unknown\"' \\\n  | sort | uniq -c | sort -rn\n

    Common causes:

    • QA reminder is noisy by design: it fires on every Edit call with no throttle. This is intentional. If it's too much, silence it with an empty override: ctx hook message edit qa-reminder gate, then empty the file
    • Long session: context checkpoint fires with increasing frequency after prompt 15. This is the system telling you the session is getting long: consider wrapping up
    • Short throttle window: if you deleted marker files in .context/state/, daily-throttled hooks will re-fire
    • Outdated Claude Code plugin: Update the plugin using Claude Code → /plugin → \"Marketplace\"
    • ctx version mismatch: Build (or download) and install the latest ctx version.
    ","path":["Recipes","Maintenance","Troubleshooting"],"tags":[]},{"location":"recipes/troubleshooting/#context-seems-stale","level":3,"title":"\"Context Seems Stale\"","text":"

    Symptoms: The agent references outdated information, paths that don't exist, or decisions that were reversed.

    Diagnosis:

    # Structural drift check\nctx drift\n\n# Full doctor check (includes drift + more)\nctx doctor\n\n# Check when context files were last modified\nctx status --verbose\n

    Common causes:

    • Drift accumulated: stale path references in ARCHITECTURE.md or CONVENTIONS.md. Fix with ctx drift --fix or ask the agent to clean up.
    • Task backlog: too many completed tasks diluting active context. Archive with ctx task archive or ctx compact --archive.
    • Large context files: LEARNINGS.md with 40+ entries competes for attention. Consolidate with /ctx-consolidate.
    • Missing session ceremonies: if /ctx-remember and /ctx-wrap-up aren't being used, context doesn't get refreshed. See Session Ceremonies.
    ","path":["Recipes","Maintenance","Troubleshooting"],"tags":[]},{"location":"recipes/troubleshooting/#the-agent-isnt-following-instructions","level":3,"title":"\"The Agent Isn't Following Instructions\"","text":"

    Symptoms: The agent ignores conventions, forgets decisions, or acts contrary to CONSTITUTION.md rules.

    Diagnosis:

    # Check context token size: Is it too large for the model?\nctx doctor --json | jq '.results[] | select(.name == \"context_size\")'\n\n# Check if context is actually being loaded\nctx hook event --hook context-load-gate\n

    Common causes:

    • Context too large: if total tokens exceed the model's effective attention, instructions get diluted. Check ctx doctor for the size check. Compact with ctx compact --archive.
    • Context not loading: if context-load-gate hasn't fired, the agent may not have received context. Verify the hook is registered.
    • Conflicting instructions: CONVENTIONS.md says one thing, AGENT_PLAYBOOK.md says another. Review both files for consistency.
    • Agent drift: the agent's behavior diverges from instructions over long sessions. This is normal. Use /ctx-reflect to re-anchor, or start a new session.
    ","path":["Recipes","Maintenance","Troubleshooting"],"tags":[]},{"location":"recipes/troubleshooting/#prerequisites","level":2,"title":"Prerequisites","text":"
    • Event logging (optional but recommended): event_log: true in .ctxrc
    • ctx initialized: ctx init

    Event logging is not required for ctx doctor or /ctx-doctor to work. Both degrade gracefully: structural checks run regardless, and the skill notes when event data is unavailable.

    ","path":["Recipes","Maintenance","Troubleshooting"],"tags":[]},{"location":"recipes/troubleshooting/#tips","level":2,"title":"Tips","text":"
    • Start with ctx doctor: It's the fastest way to get a comprehensive health picture. Save event log inspection for when you need to understand when and how often something happened.
    • Enable event logging early: The log is opt-in and low-cost (~250 bytes per event, 1MB rotation cap). Enable it before you need it: Diagnosing a problem without historical data is much harder.
    • Use the skill for correlation: ctx doctor tells you what is wrong. /ctx-doctor tells you why by correlating structural findings with event patterns. The agent can spot connections that individual commands miss.
    • Event log is gitignored: It's machine-local diagnostic data, not project context. Different machines produce different event streams.
    ","path":["Recipes","Maintenance","Troubleshooting"],"tags":[]},{"location":"recipes/troubleshooting/#next-up","level":2,"title":"Next Up","text":"

    Detecting and Fixing Drift →: Keep context files accurate as your codebase evolves.

    ","path":["Recipes","Maintenance","Troubleshooting"],"tags":[]},{"location":"recipes/troubleshooting/#see-also","level":2,"title":"See Also","text":"
    • Auditing System Hooks: the complete hook catalog and webhook-based audit trails
    • Detecting and Fixing Drift: structural and semantic drift detection and repair
    • Webhook Notifications: push notifications for hook activity
    • ctx doctor CLI: full command reference
    • ctx hook event CLI: event log query reference
    • /ctx-doctor skill: agent-driven diagnosis
    ","path":["Recipes","Maintenance","Troubleshooting"],"tags":[]},{"location":"recipes/webhook-notifications/","level":1,"title":"Webhook Notifications","text":"","path":["Recipes","Hooks and Notifications","Webhook Notifications"],"tags":[]},{"location":"recipes/webhook-notifications/#the-problem","level":2,"title":"The Problem","text":"

    Your agent runs autonomously (loops, implements, releases) while you are away from the terminal. You have no way to know when it finishes, hits a limit, or when a hook fires a nudge.

    How do you get notified about agent activity without watching the terminal?

    ","path":["Recipes","Hooks and Notifications","Webhook Notifications"],"tags":[]},{"location":"recipes/webhook-notifications/#tldr","level":2,"title":"TL;DR","text":"
    ctx hook notify setup  # configure webhook URL (encrypted)\nctx hook notify test   # verify delivery\n# Hooks auto-notify on: session-end, loop-iteration, resource-danger\n
    ","path":["Recipes","Hooks and Notifications","Webhook Notifications"],"tags":[]},{"location":"recipes/webhook-notifications/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Tool Type Purpose ctx hook notify setup CLI command Configure and encrypt webhook URL ctx hook notify test CLI command Send a test notification ctx hook notify --event <name> \"msg\" CLI command Send a notification from scripts/skills .ctxrcnotify.events Configuration Filter which events reach your webhook","path":["Recipes","Hooks and Notifications","Webhook Notifications"],"tags":[]},{"location":"recipes/webhook-notifications/#the-workflow","level":2,"title":"The Workflow","text":"","path":["Recipes","Hooks and Notifications","Webhook Notifications"],"tags":[]},{"location":"recipes/webhook-notifications/#step-1-get-a-webhook-url","level":3,"title":"Step 1: Get a Webhook URL","text":"

    Any service that accepts HTTP POST with JSON works. Common options:

    Service How to get a URL IFTTT Create an applet with the \"Webhooks\" trigger Slack Create an Incoming Webhook Discord Channel Settings > Integrations > Webhooks ntfy.sh Use https://ntfy.sh/your-topic (no signup) Pushover Use API endpoint with your user key

    The URL contains auth tokens. ctx encrypts it; it never appears in plaintext in your repo.

    ","path":["Recipes","Hooks and Notifications","Webhook Notifications"],"tags":[]},{"location":"recipes/webhook-notifications/#step-2-configure-the-webhook","level":3,"title":"Step 2: Configure the Webhook","text":"
    ctx hook notify setup\n# Enter webhook URL: https://maker.ifttt.com/trigger/ctx/json/with/key/YOUR_KEY\n# Webhook configured: https://maker.ifttt.com/***\n# Encrypted at: .context/.notify.enc\n

    This encrypts the URL with AES-256-GCM using the same key as the scratchpad (~/.ctx/.ctx.key). The encrypted file (.context/.notify.enc) is safe to commit. The key lives outside the project and is never committed.

    ","path":["Recipes","Hooks and Notifications","Webhook Notifications"],"tags":[]},{"location":"recipes/webhook-notifications/#step-3-test-it","level":3,"title":"Step 3: Test It","text":"
    ctx hook notify test\n# Webhook responded: HTTP 200 OK\n

    If you see No webhook configured, run ctx hook notify setup first.

    ","path":["Recipes","Hooks and Notifications","Webhook Notifications"],"tags":[]},{"location":"recipes/webhook-notifications/#step-4-configure-events","level":3,"title":"Step 4: Configure Events","text":"

    Notifications are opt-in: no events are sent unless you configure an event list in .ctxrc:

    # .ctxrc\nnotify:\n  events:\n    - loop       # loop completion or max-iteration hit\n    - nudge      # VERBATIM relay hooks (context checkpoint, persistence, etc.)\n    - relay      # all hook output (verbose, for debugging)\n    - heartbeat  # every-prompt session-alive signal with metadata\n

    Only listed events fire. Omitting an event silently drops it.

    ","path":["Recipes","Hooks and Notifications","Webhook Notifications"],"tags":[]},{"location":"recipes/webhook-notifications/#step-5-use-in-your-own-skills","level":3,"title":"Step 5: Use in Your Own Skills","text":"

    Add ctx hook notify calls to any skill or script:

    # In a release skill\nctx hook notify --event release \"v1.2.0 released successfully\" 2>/dev/null || true\n\n# In a backup script\nctx hook notify --event backup \"Nightly backup completed\" 2>/dev/null || true\n

    The 2>/dev/null || true suffix ensures the notification never breaks your script: If there's no webhook or the HTTP call fails, it's a silent noop.

    ","path":["Recipes","Hooks and Notifications","Webhook Notifications"],"tags":[]},{"location":"recipes/webhook-notifications/#event-types","level":2,"title":"Event Types","text":"

    ctx fires these events automatically:

    Event Source When loop Loop script Loop completes or hits max iterations nudge System hooks VERBATIM relay nudge is emitted (context checkpoint, persistence, ceremonies, journal, resources, knowledge, version) relay System hooks Any hook output (VERBATIM relays, agent directives, block responses) heartbeat System hook Every prompt: session-alive signal with prompt count and context modification status test ctx hook notify test Manual test notification (custom) Your skills You wire ctx hook notify --event <name> in your own scripts

    nudge vs relay: The nudge event fires only for VERBATIM relay hooks (the ones the agent is instructed to show verbatim). The relay event fires for all hook output: VERBATIM relays, agent directives, and hard gates. Subscribe to relay for debugging (\"did the agent get the post-commit nudge?\"), nudge for user-facing assurance (\"was the checkpoint emitted?\").

    Webhooks as a Hook Audit Trail

    Subscribe to relay events and you get an external record of every hook that fires, independent of the agent.

    This lets you verify hooks are running and catch cases where the agent absorbs a nudge instead of surfacing it.

    See Auditing System Hooks for the full workflow.

    ","path":["Recipes","Hooks and Notifications","Webhook Notifications"],"tags":[]},{"location":"recipes/webhook-notifications/#payload-format","level":2,"title":"Payload Format","text":"

    Every notification sends a JSON POST:

    {\n  \"event\": \"nudge\",\n  \"message\": \"check-context-size: Context window at 82%\",\n  \"detail\": {\n    \"hook\": \"check-context-size\",\n    \"variant\": \"window\",\n    \"variables\": {\"Percentage\": 82, \"TokenCount\": \"164k\"}\n  },\n  \"session_id\": \"abc123-...\",\n  \"timestamp\": \"2026-02-22T14:30:00Z\",\n  \"project\": \"ctx\"\n}\n

    The detail field is a structured template reference containing the hook name, variant, and any template variables. This lets receivers filter by hook or variant without parsing rendered text. The field is omitted when no template reference applies (e.g. custom ctx hook notify calls).

    ","path":["Recipes","Hooks and Notifications","Webhook Notifications"],"tags":[]},{"location":"recipes/webhook-notifications/#heartbeat-payload","level":3,"title":"Heartbeat Payload","text":"

    The heartbeat event fires on every prompt with session metadata and token usage telemetry:

    {\n  \"event\": \"heartbeat\",\n  \"message\": \"heartbeat: prompt #7 (context_modified=false tokens=158k pct=79%)\",\n  \"detail\": {\n    \"hook\": \"heartbeat\",\n    \"variant\": \"pulse\",\n    \"variables\": {\n      \"prompt_count\": 7,\n      \"session_id\": \"abc123-...\",\n      \"context_modified\": false,\n      \"tokens\": 158000,\n      \"context_window\": 200000,\n      \"usage_pct\": 79\n    }\n  },\n  \"session_id\": \"abc123-...\",\n  \"timestamp\": \"2026-02-28T10:15:00Z\",\n  \"project\": \"ctx\"\n}\n

    The tokens, context_window, and usage_pct fields are included when token data is available from the session JSONL file. They are omitted when no usage data has been recorded yet (e.g. first prompt).

    Unlike other events, heartbeat fires every prompt (not throttled). Use it for observability dashboards or liveness monitoring of long-running sessions.

    ","path":["Recipes","Hooks and Notifications","Webhook Notifications"],"tags":[]},{"location":"recipes/webhook-notifications/#security-model","level":2,"title":"Security Model","text":"Component Location Committed? Permissions Encryption key ~/.ctx/.ctx.key No (user-level) 0600 Encrypted URL .context/.notify.enc Yes (safe) 0600 Webhook URL Never on disk in plaintext N/A N/A

    The key is shared with the scratchpad. If you rotate the encryption key, re-run ctx hook notify setup to re-encrypt the webhook URL with the new key.

    ","path":["Recipes","Hooks and Notifications","Webhook Notifications"],"tags":[]},{"location":"recipes/webhook-notifications/#key-rotation","level":2,"title":"Key Rotation","text":"

    ctx checks the age of the encryption key once per day. If it's older than 90 days (configurable via key_rotation_days), a VERBATIM nudge is emitted suggesting rotation.

    # .ctxrc\nkey_rotation_days: 30   # nudge sooner (default: 90)\n
    ","path":["Recipes","Hooks and Notifications","Webhook Notifications"],"tags":[]},{"location":"recipes/webhook-notifications/#worktrees","level":2,"title":"Worktrees","text":"

    The webhook URL is encrypted with the same encryption key (~/.ctx/.ctx.key). Because the key lives at the user level, it is shared across all worktrees on the same machine - notifications work in worktrees automatically.

    This means agents running in worktrees can send webhook alerts without any extra setup. For autonomous runs, worktree agents report through the same webhook as the main branch - you still get notified when they finish or hit limits. Enrich journals and review results on the main branch after merging.

    ","path":["Recipes","Hooks and Notifications","Webhook Notifications"],"tags":[]},{"location":"recipes/webhook-notifications/#event-log-the-local-complement","level":2,"title":"Event Log: The Local Complement","text":"

    Don't need a webhook but want diagnostic visibility? Enable event_log: true in .ctxrc. The event log writes the same payload as webhooks to a local JSONL file (.context/state/events.jsonl) that you can query without any external service:

    ctx hook event --last 20          # recent hook activity\nctx hook event --hook qa-reminder # filter by hook\n

    Webhooks and event logging are independent: you can use either, both, or neither. Webhooks give you push notifications and an external audit trail. The event log gives you local queryability and ctx doctor integration.

    See Troubleshooting for how they work together.

    ","path":["Recipes","Hooks and Notifications","Webhook Notifications"],"tags":[]},{"location":"recipes/webhook-notifications/#tips","level":2,"title":"Tips","text":"
    • Fire-and-forget: Notifications never block. HTTP errors are silently ignored. No retry, no response parsing.
    • No webhook = no cost: When no webhook is configured, ctx hook notify exits immediately. System hooks that call notify.Send() add zero overhead.
    • Multiple projects: Each project has its own .notify.enc. You can point different projects at different webhooks.
    • Event filter is per-project: Configure notify.events in each project's .ctxrc independently.
    ","path":["Recipes","Hooks and Notifications","Webhook Notifications"],"tags":[]},{"location":"recipes/webhook-notifications/#next-up","level":2,"title":"Next Up","text":"

    Auditing System Hooks →: Verify your hooks are running, audit what they do, and get alerted when they go silent.

    ","path":["Recipes","Hooks and Notifications","Webhook Notifications"],"tags":[]},{"location":"recipes/webhook-notifications/#see-also","level":2,"title":"See Also","text":"
    • CLI Reference: ctx hook notify: full command reference
    • Configuration: .ctxrc settings including notify options
    • Running an Unattended AI Agent: how loops work and how notifications fit in
    • Hook Output Patterns: understanding VERBATIM relays, agent directives, and hard gates
    • Auditing System Hooks: using webhooks as an external audit trail for hook execution
    ","path":["Recipes","Hooks and Notifications","Webhook Notifications"],"tags":[]},{"location":"recipes/when-to-use-agent-teams/","level":1,"title":"When to Use a Team of Agents","text":"","path":["Recipes","Agents and Automation","When to Use a Team of Agents"],"tags":[]},{"location":"recipes/when-to-use-agent-teams/#the-problem","level":2,"title":"The Problem","text":"

    You have a task, and you are wondering: \"should I throw more agents at it?\"

    More agents can mean faster results, but they also mean coordination overhead, merge conflicts, divergent mental models, and wasted tokens re-reading context.

    The wrong setup costs more than it saves.

    This recipe is a decision framework: It helps you choose between a single agent, parallel worktrees, and a full agent team, and explains what ctx provides at each level.

    ","path":["Recipes","Agents and Automation","When to Use a Team of Agents"],"tags":[]},{"location":"recipes/when-to-use-agent-teams/#tldr","level":2,"title":"TL;DR","text":"
    • Single agent for most work;
    • Parallel worktrees when tasks touch disjoint file sets;
    • Agent teams only when tasks need real-time coordination. When in doubt, start with one agent.
    ","path":["Recipes","Agents and Automation","When to Use a Team of Agents"],"tags":[]},{"location":"recipes/when-to-use-agent-teams/#the-spectrum","level":2,"title":"The Spectrum","text":"

    There are three modes, ordered by complexity:

    ","path":["Recipes","Agents and Automation","When to Use a Team of Agents"],"tags":[]},{"location":"recipes/when-to-use-agent-teams/#1-single-agent-default","level":3,"title":"1. Single Agent (Default)","text":"

    One agent, one session, one branch. This is correct for most work.

    Use this when:

    • The task has linear dependencies (step 2 needs step 1's output);
    • Changes touch overlapping files;
    • You need tight feedback loops (review each change before the next);
    • The task requires deep understanding of a single area;
    • Total effort is less than a few hours of agent time.

    ctx provides: Full .context/: tasks, decisions, learnings, conventions, all in one session.

    The agent builds a coherent mental model and persists it as it goes.

    Example tasks: Bug fixes, feature implementation, refactoring a module, writing documentation for one area, debugging.

    ","path":["Recipes","Agents and Automation","When to Use a Team of Agents"],"tags":[]},{"location":"recipes/when-to-use-agent-teams/#2-parallel-worktrees-independent-tracks","level":3,"title":"2. Parallel Worktrees (Independent Tracks)","text":"

    2-4 agents, each in a separate git worktree on its own branch, working on non-overlapping parts of the codebase.

    Use this when:

    • You have 5+ independent tasks in the backlog;
    • Tasks group cleanly by directory or package;
    • File overlap between groups is zero or near-zero;
    • Each track can be completed and merged independently;
    • You want parallelism without coordination complexity.

    ctx provides: Shared .context/ via git (each worktree sees the same tasks, decisions, conventions). /ctx-worktree skill for setup and teardown. TASKS.md as a lightweight work queue.

    Example tasks: Docs + new package + test coverage (three tracks that don't touch the same files). Parallel recipe writing. Independent module development.

    See: Parallel Agent Development with Git Worktrees

    ","path":["Recipes","Agents and Automation","When to Use a Team of Agents"],"tags":[]},{"location":"recipes/when-to-use-agent-teams/#3-agent-team-coordinated-swarm","level":3,"title":"3. Agent Team (Coordinated Swarm)","text":"

    Multiple agents communicating via messages, sharing a task list, with a lead agent coordinating. Claude Code's team/swarm feature.

    Use this when:

    • Tasks have dependencies but can still partially overlap;
    • You need research and implementation happening simultaneously;
    • The work requires different roles (researcher, implementer, tester);
    • A lead agent needs to review and integrate others' work;
    • The task is large enough that coordination cost is justified.

    ctx provides: .context/ as shared state that all agents can read. Task tracking for work assignment. Decisions and learnings as team memory that survives individual agent turnover.

    Example tasks: Large refactor across modules where a lead reviews merges. Research and implementation where one agent explores options while another builds. Multi-file feature that needs integration testing after parallel implementation.

    ","path":["Recipes","Agents and Automation","When to Use a Team of Agents"],"tags":[]},{"location":"recipes/when-to-use-agent-teams/#the-decision-framework","level":2,"title":"The Decision Framework","text":"

    Ask these questions in order:

    Can one agent do this in a reasonable time?\n  YES → Single agent. Stop here.\n  NO  ↓\n\nCan the work be split into non-overlapping file sets?\n  YES → Parallel worktrees (2-4 tracks)\n  NO  ↓\n\nDo the subtasks need to communicate during execution?\n  YES → Agent team with lead coordination\n  NO  → Parallel worktrees with a merge step\n
    ","path":["Recipes","Agents and Automation","When to Use a Team of Agents"],"tags":[]},{"location":"recipes/when-to-use-agent-teams/#the-file-overlap-test","level":3,"title":"The File Overlap Test","text":"

    This is the critical decision point. Before choosing multi-agent, list the files each subtask would touch. If two subtasks modify the same file, they belong in the same track (or the same single-agent session).

    You: \"I want to parallelize these tasks. Which files would each one touch?\"\n\nAgent: [reads `TASKS.md`, analyzes codebase]\n       \"Task A touches internal/config/ and internal/cli/initialize/\n        Task B touches docs/ and site/\n        Task C touches internal/config/ and internal/cli/status/\n\n        Tasks A and C overlap on internal/config/ # they should be\n        in the same track. Task B is independent.\"\n

    When in doubt, keep things in one track. A merge conflict in a critical file costs more time than the parallelism saves.

    ","path":["Recipes","Agents and Automation","When to Use a Team of Agents"],"tags":[]},{"location":"recipes/when-to-use-agent-teams/#when-teams-make-things-worse","level":2,"title":"When Teams Make Things Worse","text":"

    \"More agents\" is not always better. Watch for these patterns:

    Merge hell: If you are spending more time resolving conflicts than the parallel work saved, you split wrong: Re-group by file overlap.

    Context divergence: Each agent builds its own mental model. After 30 minutes of independent work, agent A might make assumptions that contradict agent B's approach. Shorter tracks with frequent merges reduce this.

    Coordination theater: A lead agent spending most of its time assigning tasks, checking status, and sending messages instead of doing work. If the task list is clear enough, worktrees with no communication are cheaper.

    Re-reading overhead: Every agent reads .context/ on startup. A team of 4 agents each reading 4000 tokens of context = 16000 tokens before anyone does any work. For small tasks, that overhead dominates.

    ","path":["Recipes","Agents and Automation","When to Use a Team of Agents"],"tags":[]},{"location":"recipes/when-to-use-agent-teams/#what-ctx-gives-you-at-each-level","level":2,"title":"What ctx Gives You at Each Level","text":"ctx Feature Single Agent Worktrees Team .context/ files Full access Shared via git Shared via filesystem TASKS.md Work queue Split by track Assigned by lead Decisions/Learnings Persisted in session Persisted per branch Persisted by any agent /ctx-next Picks next task Picks within track Lead assigns /ctx-worktree N/A Setup + teardown Optional /ctx-commit Normal commits Per-branch commits Per-agent commits","path":["Recipes","Agents and Automation","When to Use a Team of Agents"],"tags":[]},{"location":"recipes/when-to-use-agent-teams/#team-composition-recipes","level":2,"title":"Team Composition Recipes","text":"

    Four practical team compositions for common workflows.

    ","path":["Recipes","Agents and Automation","When to Use a Team of Agents"],"tags":[]},{"location":"recipes/when-to-use-agent-teams/#feature-development-3-agents","level":3,"title":"Feature Development (3 Agents)","text":"Role Responsibility Architect Writes spec in specs/, breaks work into TASKS.md phases Implementer Picks tasks from TASKS.md, writes code, marks [x] done Reviewer Runs tests, ctx drift, lint; files issues as new tasks

    Coordination: TASKS.md checkboxes. Architect writes tasks before implementer starts. Reviewer runs after each implementer commit.

    Anti-pattern: All three agents editing the same file simultaneously. Sequence the work so only one agent touches a file at a time.

    ","path":["Recipes","Agents and Automation","When to Use a Team of Agents"],"tags":[]},{"location":"recipes/when-to-use-agent-teams/#consolidation-sprint-3-4-agents","level":3,"title":"Consolidation Sprint (3-4 Agents)","text":"Role Responsibility Auditor Runs ctx drift, identifies stale paths and broken refs Code Fixer Updates source code to match context (or vice versa) Doc Writer Updates ARCHITECTURE.md, CONVENTIONS.md, and docs/ Test Fixer (Optional) Fixes tests broken by the fixer's changes

    Coordination: Auditor's ctx drift output is the shared work queue. Each agent claims a subset of issues by adding #in-progress labels.

    Anti-pattern: Fixer and doc writer both editing ARCHITECTURE.md. Assign file ownership explicitly.

    ","path":["Recipes","Agents and Automation","When to Use a Team of Agents"],"tags":[]},{"location":"recipes/when-to-use-agent-teams/#release-prep-2-agents","level":3,"title":"Release Prep (2 Agents)","text":"Role Responsibility Release Notes Generates changelog from commits, writes release notes Validation Runs full test suite, lint, build across platforms

    Coordination: Both read TASKS.md to identify what shipped. Release notes agent works from git log; validation agent works from make audit.

    Anti-pattern: Release notes agent running tests \"to verify.\" Each agent stays in its lane.

    ","path":["Recipes","Agents and Automation","When to Use a Team of Agents"],"tags":[]},{"location":"recipes/when-to-use-agent-teams/#documentation-sprint-3-agents","level":3,"title":"Documentation Sprint (3 Agents)","text":"Role Responsibility Content Writes new pages, expands existing docs Cross-linker Adds nav entries, cross-references, \"See Also\" sections Verifier Builds site, checks broken links, validates rendering

    Coordination: Content agent writes files first. Cross-linker updates zensical.toml and index pages after content lands. Verifier builds after each batch.

    Anti-pattern: Content and cross-linker both editing zensical.toml. Batch nav updates into the cross-linker's pass.

    ","path":["Recipes","Agents and Automation","When to Use a Team of Agents"],"tags":[]},{"location":"recipes/when-to-use-agent-teams/#tips","level":2,"title":"Tips","text":"
    • Start with one agent: Only add parallelism when you have identified the bottleneck. \"This would go faster with more agents\" is usually wrong for tasks under 2 hours.
    • The 3-4 agent ceiling is real: Coordination overhead grows quadratically. 2 agents = 1 communication pair. 4 agents = 6 pairs. Beyond 4, you are managing agents more than doing work.
    • Worktrees > teams for most parallelism needs: If agents don't need to talk to each other during execution, worktrees give you parallelism with zero coordination overhead.
    • Use ctx as the shared brain: Whether it's one agent or four, the .context/ directory is the single source of truth. Decisions go in DECISIONS.md, not in chat messages between agents.
    • Merge early, merge often: Long-lived parallel branches diverge. Merge a track as soon as it's done rather than waiting for all tracks to finish.
    • TASKS.md conflicts are normal: Multiple agents completing different tasks will conflict on merge. The resolution is always additive: accept all [x] completions from both sides.
    ","path":["Recipes","Agents and Automation","When to Use a Team of Agents"],"tags":[]},{"location":"recipes/when-to-use-agent-teams/#next-up","level":2,"title":"Next Up","text":"

    Parallel Agent Development with Git Worktrees →: Run multiple agents on independent task tracks using git worktrees.

    ","path":["Recipes","Agents and Automation","When to Use a Team of Agents"],"tags":[]},{"location":"recipes/when-to-use-agent-teams/#go-deeper","level":2,"title":"Go Deeper","text":"
    • CLI Reference: all commands and flags
    • Integrations: setup for Claude Code, Cursor, Aider
    • Session Journal: browse and search session history
    ","path":["Recipes","Agents and Automation","When to Use a Team of Agents"],"tags":[]},{"location":"recipes/when-to-use-agent-teams/#see-also","level":2,"title":"See Also","text":"
    • Parallel Agent Development with Git Worktrees: the mechanical \"how\" for worktree-based parallelism
    • Running an Unattended AI Agent: serial autonomous loops: a different scaling strategy
    • Tracking Work Across Sessions: managing the task backlog that feeds into any multi-agent setup
    ","path":["Recipes","Agents and Automation","When to Use a Team of Agents"],"tags":[]},{"location":"reference/","level":1,"title":"Reference","text":"

    Technical reference for ctx commands, skills, and internals.

    ","path":["Reference"],"tags":[]},{"location":"reference/#the-system-explains-itself","level":3,"title":"The System Explains Itself","text":"

    The 12 properties that must hold for any valid ctx implementation. Not features: constraints. The system's contract with its users and contributors.

    ","path":["Reference"],"tags":[]},{"location":"reference/#code-conventions","level":3,"title":"Code Conventions","text":"

    Common patterns and fixes for the AST compliance tests in internal/audit/. When a test fails, find the matching section.

    ","path":["Reference"],"tags":[]},{"location":"reference/#cli","level":3,"title":"CLI","text":"

    Every command, subcommand, and flag. Now a top-level section: see CLI Reference.

    ","path":["Reference"],"tags":[]},{"location":"reference/#skills","level":3,"title":"Skills","text":"

    The full skill catalog: what each skill does, when it triggers, and how skills interact with commands.

    ","path":["Reference"],"tags":[]},{"location":"reference/#tool-ecosystem","level":3,"title":"Tool Ecosystem","text":"

    How ctx compares to Cursor Rules, Aider conventions, CLAUDE.md, and other context approaches.

    ","path":["Reference"],"tags":[]},{"location":"reference/#session-journal","level":3,"title":"Session Journal","text":"

    Export, browse, and enrich your session history. Covers the journal site, Obsidian export, and the enrichment pipeline.

    ","path":["Reference"],"tags":[]},{"location":"reference/#scratchpad","level":3,"title":"Scratchpad","text":"

    Encrypted, git-tracked scratch space for short notes and sensitive values that travel with the project.

    ","path":["Reference"],"tags":[]},{"location":"reference/#version-history","level":3,"title":"Version History","text":"

    Changelog for every ctx release.

    ","path":["Reference"],"tags":[]},{"location":"reference/audit-conventions/","level":1,"title":"Code Conventions","text":"","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#code-conventions-common-patterns-and-fixes","level":1,"title":"Code Conventions: Common Patterns and Fixes","text":"

    This guide documents the code conventions enforced by internal/audit/ AST tests. Each section shows the violation pattern, the fix, and the rationale. When a test fails, find the matching section below.

    All tests skip _test.go files. The patterns apply only to production code under internal/.

    ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#variable-shadowing-bare-err-reuse","level":2,"title":"Variable Shadowing (Bare err := Reuse)","text":"

    Test: TestNoVariableShadowing

    When a function has multiple := assignments to err, each shadows the previous one. This makes it impossible to tell which error a later if err != nil is checking.

    Before:

    func Run(cmd *cobra.Command) error {\n    data, err := os.ReadFile(path) \n    if err != nil {\n        return err\n    }\n\n    result, err := json.Unmarshal(data)  // shadows first err\n    if err != nil {\n        return err\n    }\n\n    err = validate(result)  // shadows again\n    return err\n}\n

    After:

    func Run(cmd *cobra.Command) error {\n    data, readErr := os.ReadFile(path)\n    if readErr != nil {\n        return readErr\n    }\n\n    result, parseErr := json.Unmarshal(data)\n    if parseErr != nil {\n        return parseErr\n    }\n\n    validateErr := validate(result)\n    return validateErr\n}\n

    Rule: Use descriptive error names (readErr, writeErr, parseErr, walkErr, absErr, relErr) so each error site is independently identifiable.

    ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#import-name-shadowing","level":2,"title":"Import Name Shadowing","text":"

    Test: TestNoImportNameShadowing

    When a local variable has the same name as an imported package, the import becomes inaccessible in that scope.

    Before:

    import \"github.com/ActiveMemory/ctx/internal/session\"\n\nfunc process(session *entity.Session) {  // param shadows import\n    // session package is now unreachable here\n}\n

    After:

    import \"github.com/ActiveMemory/ctx/internal/session\"\n\nfunc process(sess *entity.Session) {\n    // session package still accessible\n}\n

    Rule: Parameters, variables, and return values must not reuse imported package names. Common renames: session -> sess, token -> tok, config -> cfg, entry -> ent.

    ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#magic-strings","level":2,"title":"Magic Strings","text":"

    Test: TestNoMagicStrings

    String literals in function bodies are invisible to refactoring tools and cause silent breakage when the value changes in one place but not another.

    Before (string literals):

    func loadContext() {\n    data := filepath.Join(dir, \"TASKS.md\")\n    if strings.HasSuffix(name, \".yaml\") {\n        // ...\n    }\n}\n

    After:

    func loadContext() {\n    data := filepath.Join(dir, config.FilenameTask)\n    if strings.HasSuffix(name, config.ExtYAML) {\n        // ...\n    }\n}\n

    Before (format verbs, also caught):

    func EntryHash(text string) string {\n    h := sha256.Sum256([]byte(text))\n    return fmt.Sprintf(\"%x\", h[:8])\n}\n

    After:

    func EntryHash(text string) string {\n    h := sha256.Sum256([]byte(text))\n    return hex.EncodeToString(h[:cfgFmt.HashPrefixLen])\n}\n

    Before (URL schemes, also caught):

    if strings.HasPrefix(target, \"https://\") ||\n    strings.HasPrefix(target, \"http://\") {\n    return target\n}\n

    After:

    if strings.HasPrefix(target, cfgHTTP.PrefixHTTPS) ||\n    strings.HasPrefix(target, cfgHTTP.PrefixHTTP) {\n    return target\n}\n

    Exempt from this check:

    • Empty string \"\", single space \" \", indentation strings
    • Regex capture references ($1, ${name})
    • const and var definition sites (that's where constants live)
    • Struct tags
    • Import paths
    • Packages under internal/config/, internal/assets/tpl/

    Rule: If a string is used for comparison, path construction, or appears in 3+ files, it belongs in internal/config/ as a constant. Format strings belong in internal/config/ as named constants (e.g., cfgGit.FlagLastN, cfgTrace.RefFormat). User-facing prose belongs in internal/assets/ YAML files accessed via desc.Text().

    Common fix for fmt.Sprintf with format verbs:

    Pattern Fix fmt.Sprintf(\"%d\", n)strconv.Itoa(n)fmt.Sprintf(\"%d\", int64Val)strconv.FormatInt(int64Val, 10)fmt.Sprintf(\"%x\", bytes)hex.EncodeToString(bytes)fmt.Sprintf(\"%q\", s)strconv.Quote(s)fmt.Sscanf(s, \"%d\", &n)strconv.Atoi(s)fmt.Sprintf(\"-%d\", n)fmt.Sprintf(cfgGit.FlagLastN, n)\"https://\"cfgHTTP.PrefixHTTPS\"&lt;\" config constant in config/html/","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#direct-printf-calls","level":2,"title":"Direct Printf Calls","text":"

    Test: TestNoPrintfCalls

    cmd.Printf and cmd.PrintErrf bypass the write-package formatting pipeline and scatter user-facing text across the codebase.

    Before:

    func Run(cmd *cobra.Command, args []string) {\n    cmd.Printf(\"Found %d tasks\\n\", count)\n}\n

    After:

    func Run(cmd *cobra.Command, args []string) {\n    write.TaskCount(cmd, count)\n}\n

    Rule: All formatted output goes through internal/write/ which uses cmd.Print/cmd.Println with pre-formatted strings from desc.Text().

    ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#raw-time-format-strings","level":2,"title":"Raw Time Format Strings","text":"

    Test: TestNoRawTimeFormats

    Inline time format strings (\"2006-01-02\", \"15:04:05\") drift when one call site is updated but others are missed.

    Before:

    func formatDate(t time.Time) string {\n    return t.Format(\"2006-01-02\")\n}\n

    After:

    func formatDate(t time.Time) string {\n    return t.Format(cfgTime.DateFormat)\n}\n

    Rule: All time format strings must use constants from internal/config/time/.

    ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#direct-flag-registration","level":2,"title":"Direct Flag Registration","text":"

    Test: TestNoFlagBindOutsideFlagbind

    Direct cobra flag calls (.Flags().StringVar(), etc.) scatter flag wiring across dozens of cmd.go files. Centralizing through internal/flagbind/ gives one place to audit flag names, defaults, and description key lookups.

    Before:

    func Cmd() *cobra.Command {\n    var output string\n    c := &cobra.Command{Use: cmd.UseStatus}\n    c.Flags().StringVarP(&output, \"output\", \"o\", \"\",\n        \"output format\")\n    return c\n}\n

    After:

    func Cmd() *cobra.Command {\n    var output string\n    c := &cobra.Command{Use: cmd.UseStatus}\n    flagbind.StringFlagShort(c, &output, flag.Output,\n        flag.OutputShort, cmd.DescKeyOutput)\n    return c\n}\n

    Rule: All flag registration goes through internal/flagbind/. If the helper you need doesn't exist, add it to flagbind/flag.go before using it.

    ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#todo-comments","level":2,"title":"TODO Comments","text":"

    Test: TestNoTODOComments

    TODO, FIXME, HACK, and XXX comments in production code are invisible to project tracking. They accumulate silently and never get addressed.

    Before:

    // TODO: handle pagination\nfunc listEntries() []Entry {\n

    After:

    Remove the comment and add a task to .context/TASKS.md:

    - [ ] Handle pagination in listEntries (internal/task/task.go)\n

    Rule: Deferred work lives in TASKS.md, not in source comments.

    ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#dead-exports","level":2,"title":"Dead Exports","text":"

    Test: TestNoDeadExports

    Exported symbols with zero references outside their definition file are dead weight. They increase API surface, confuse contributors, and cost maintenance.

    Fix: Either delete the export (preferred) or demote it to unexported if it's still used within the file.

    If the symbol existed for historical reasons and might be needed again, move it to quarantine/deadcode/ with a .dead extension. This preserves the code in git without polluting the live codebase:

    quarantine/deadcode/internal/config/flag/flag.go.dead\n

    Each .dead file includes a header:

    // Dead exports quarantined from internal/config/flag/flag.go\n// Quarantined: 2026-04-02\n// Restore from git history if needed.\n

    Rule: If a test-only allowlist entry is needed (the export exists only for test use), add the fully qualified symbol to testOnlyExports in dead_exports_test.go. Keep this list small; prefer eliminating the export.

    ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#core-package-structure","level":2,"title":"Core Package Structure","text":"

    Test: TestCoreStructure

    core/ directories under internal/cli/ must contain only doc.go and test files at the top level. All domain logic lives in subpackages. This prevents core/ from becoming a god package.

    Before:

    internal/cli/dep/core/\n    go.go           # violation: logic at core/ level\n    python.go       # violation\n    node.go         # violation\n    types.go        # violation\n

    After:

    internal/cli/dep/core/\n    doc.go          # package doc only\n    golang/\n        golang.go\n        golang_test.go\n        doc.go\n    python/\n        python.go\n        python_test.go\n        doc.go\n    node/\n        node.go\n        node_test.go\n        doc.go\n

    Rule: Extract each logical unit into its own subpackage under core/. Each subpackage gets a doc.go. The subpackage name should match the domain concept (golang, check, fix, store), not a generic label (util, helper).

    ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#cross-package-types","level":2,"title":"Cross-Package Types","text":"

    Test: TestCrossPackageTypes

    When a type defined in one package is used from a different module (e.g., cli/doctor importing a type from cli/notify), the type has crossed its module boundary. Cross-cutting types belong in internal/entity/ for discoverability.

    Before:

    // internal/cli/notify/core/types.go\ntype NotifyPayload struct { ... }\n\n// internal/cli/doctor/core/check/check.go\nimport \"github.com/ActiveMemory/ctx/internal/cli/notify/core\"\nfunc check(p core.NotifyPayload) { ... }\n

    After:

    // internal/entity/notify.go\ntype NotifyPayload struct { ... }\n\n// internal/cli/doctor/core/check/check.go\nimport \"github.com/ActiveMemory/ctx/internal/entity\"\nfunc check(p entity.NotifyPayload) { ... }\n

    Exempt: Types inside entity/, proto/, core/ subpackages, and config/ packages. Same-module usage (e.g., cli/doctor/cmd/ using cli/doctor/core/) is not flagged.

    ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#type-file-convention","level":2,"title":"Type File Convention","text":"

    Test: TestTypeFileConvention, TestTypeFileConventionReport

    Exported types in core/ subpackages should live in types.go (the convention from CONVENTIONS.md), not scattered across implementation files. This makes type definitions discoverable. TestTypeFileConventionReport generates a diagnostic summary of all type placements for triage.

    Exception: entity/ organizes by domain (task.go, session.go), proto/ uses schema.go, and err/ packages colocate error types with their domain context.

    ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#desckey-yaml-linkage","level":2,"title":"DescKey / YAML Linkage","text":"

    Test: TestDescKeyYAMLLinkage

    Every DescKey constant must have a corresponding key in the YAML asset files, and every YAML key must have a corresponding DescKey constant. Orphans in either direction mean dead text or runtime panics.

    Fix for orphan YAML key: Delete the YAML entry, or add the corresponding DescKey constant in config/embed/{text,cmd,flag}/.

    Fix for orphan DescKey: Delete the constant, or add the corresponding entry in the YAML file under internal/assets/commands/text/, cmd/, or flag/.

    If the orphan YAML entry was once valid but the feature was removed, move the YAML entry to a .dead file in quarantine/deadcode/.

    ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#package-doc-quality","level":2,"title":"Package Doc Quality","text":"

    Test: TestPackageDocQuality

    Every package under internal/ must have a doc.go with a meaningful package doc comment (at least 8 lines of real content). One-liners and file-list patterns (// - foo.go, // Source files:) are flagged because they drift as files change.

    Template:

    //   /    ctx:                         https://ctx.ist\n// ,'`./    do you remember?\n// `.,'\\\n//   \\    Copyright 2026-present Context contributors.\n//                 SPDX-License-Identifier: Apache-2.0\n\n// Package mypackage does X.\n//\n// It handles Y by doing Z. The main entry point is [FunctionName]\n// which accepts A and returns B.\n//\n// Configuration is read from [config.SomeConstant]. Output is\n// written through [write.SomeHelper].\n//\n// This package is used by [parentpackage] during the W lifecycle\n// phase.\npackage mypackage\n
    ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#inline-regex-compilation","level":2,"title":"Inline Regex Compilation","text":"

    Test: TestNoInlineRegexpCompile

    regexp.MustCompile and regexp.Compile inside function bodies recompile the pattern on every call. Compiled patterns belong at package level.

    Before:

    func parse(s string) bool {\n    re := regexp.MustCompile(`\\d{4}-\\d{2}-\\d{2}`)\n    return re.MatchString(s)\n}\n

    After:

    // In internal/config/regex/regex.go:\n// DatePattern matches ISO date format (YYYY-MM-DD).\nvar DatePattern = regexp.MustCompile(`\\d{4}-\\d{2}-\\d{2}`)\n\n// In calling package:\nfunc parse(s string) bool {\n    return regex.DatePattern.MatchString(s)\n}\n

    Rule: All compiled regexes live in internal/config/regex/ as package-level var declarations. Two tests enforce this: TestNoInlineRegexpCompile catches function-body compilation, and TestNoRegexpOutsideRegexPkg catches package-level compilation outside config/regex/.

    ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#doc-comments","level":2,"title":"Doc Comments","text":"

    Test: TestDocComments

    All functions (exported and unexported), structs, and package-level variables must have a doc comment. Config packages allow group doc comments for const blocks.

    Before:

    func buildIndex(entries []Entry) map[string]int {\n

    After:

    // buildIndex maps entry names to their position in the\n// ordered slice for O(1) lookup during reconciliation.\n//\n// Parameters:\n//   - entries: ordered slice of entries to index\n//\n// Returns:\n//   - map[string]int: name-to-position mapping\nfunc buildIndex(entries []Entry) map[string]int {\n

    Rule: Every function, struct, and package-level var gets a doc comment in godoc format. Functions include Parameters: and Returns: sections. Structs with 2+ fields document every field. See CONVENTIONS.md for the full template.

    ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#line-length","level":2,"title":"Line Length","text":"

    Test: TestLineLength

    Lines in non-test Go files must not exceed 80 characters. This is a hard check, not a suggestion.

    Before:

    _ = trace.Record(fmt.Sprintf(cfgTrace.RefFormat, cfgTrace.RefTypeTask, matchedNum), state.Dir())\n

    After:

    ref := fmt.Sprintf(\n    cfgTrace.RefFormat, cfgTrace.RefTypeTask, matchedNum,\n)\n_ = trace.Record(ref, state.Dir())\n

    Rule: Break at natural points: function arguments, struct fields, chained calls. Long strings (URLs, struct tags) are the rare acceptable exception.

    ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#literal-whitespace","level":2,"title":"Literal Whitespace","text":"

    Test: TestNoLiteralWhitespace

    Bare whitespace string and byte literals (\"\\n\", \"\\r\\n\", \"\\t\") must not appear outside internal/config/token/. All other packages use the token constants.

    Before:

    output := strings.Join(lines, \"\\n\")\n

    After:

    output := strings.Join(lines, token.Newline)\n

    Rule: Whitespace literals are defined once in internal/config/token/. Use token.Newline, token.Tab, token.CRLF, etc.

    ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#magic-numeric-values","level":2,"title":"Magic Numeric Values","text":"

    Test: TestNoMagicValues

    Numeric literals in function bodies need constants, with narrow exceptions.

    Before:

    if len(entries) > 100 {\n    entries = entries[:100]\n}\n

    After:

    if len(entries) > config.MaxEntries {\n    entries = entries[:config.MaxEntries]\n}\n

    Exempt: 0, 1, -1, 2-10, strconv radix/bitsize args (10, 32, 64 in strconv.Parse*/Format*), octal permissions (caught separately by TestNoRawPermissions), and const/var definition sites.

    ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#inline-separators","level":2,"title":"Inline Separators","text":"

    Test: TestNoInlineSeparators

    strings.Join calls must use token constants for their separator argument, not string literals.

    Before:

    result := strings.Join(parts, \", \")\n

    After:

    result := strings.Join(parts, token.CommaSep)\n

    Rule: Separator strings live in internal/config/token/.

    ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#stuttery-function-names","level":2,"title":"Stuttery Function Names","text":"

    Test: TestNoStutteryFunctions

    Function names must not redundantly include their package name as a PascalCase word boundary. Go callers already write pkg.Function, so pkg.PkgFunction stutters.

    Before:

    // In package write\nfunc WriteJournal(cmd *cobra.Command, ...) {\n

    After:

    // In package write\nfunc Journal(cmd *cobra.Command, ...) {\n

    Exempt: Identity functions like write.Write / write.write.

    ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#predicate-naming-no-ishascan-prefix","level":2,"title":"Predicate Naming (No Is/Has/Can Prefix)","text":"

    Test: None (manual review convention)

    Exported methods that return bool must not use Is, Has, or Can prefixes. The predicate reads more naturally without them, especially at call sites where the package name provides context.

    Before:

    func IsCompleted(t *Task) bool { ... }\nfunc HasChildren(n *Node) bool { ... }\nfunc IsExemptPackage(path string) bool { ... }\n

    After:

    func Completed(t *Task) bool { ... }\nfunc Children(n *Node) bool { ... }  // or: ChildCount > 0\nfunc ExemptPackage(path string) bool { ... }\n

    Rule: Drop the prefix. Private helpers may use prefixes when it reads more naturally (isValid in a local context is fine). This convention applies to exported methods and package-level functions. See CONVENTIONS.md \"Predicates\" section.

    This is not yet enforced by an AST test; it requires semantic understanding of return types and naming intent that makes automated detection fragile. Apply during code review.

    ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#mixed-visibility","level":2,"title":"Mixed Visibility","text":"

    Test: TestNoMixedVisibility

    Files with exported functions must not also contain unexported functions. Public API and private helpers live in separate files.

    Before:

    load.go\n    func Load() { ... }        // exported\n    func parseHeader() { ... } // unexported, violation\n

    After:

    load.go\n    func Load() { ... }        // exported only\nparse.go\n    func parseHeader() { ... } // private helper\n

    Exempt: Files with exactly one function, doc.go, test files.

    ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#stray-errgo-files","level":2,"title":"Stray Err.Go Files","text":"

    Test: TestNoStrayErrFiles

    err.go files must only exist under internal/err/. Error constructors anywhere else create a broken-window pattern where contributors add local error definitions when they see a local err.go.

    Fix: Move the error constructor to internal/err/<domain>/.

    ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#cli-cmd-structure","level":2,"title":"CLI Cmd Structure","text":"

    Test: TestCLICmdStructure

    Each cmd/$sub/ directory under internal/cli/ may contain only cmd.go, run.go, doc.go, and test files. Extra .go files (helpers, output formatters, types) belong in the corresponding core/ subpackage.

    Before:

    internal/cli/doctor/cmd/root/\n    cmd.go\n    run.go\n    format.go   # violation: helper in cmd dir\n

    After:

    internal/cli/doctor/cmd/root/\n    cmd.go\n    run.go\ninternal/cli/doctor/core/format/\n    format.go\n    doc.go\n
    ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#desckey-namespace","level":2,"title":"DescKey Namespace","text":"

    Test: TestUseConstantsOnlyInCobraUse, TestDescKeyOnlyInLookupCalls, TestNoWrongNamespaceLookup

    Three tests enforce DescKey/Use constant discipline:

    1. Use* constants appear only in cobra Use: struct field assignments, never as arguments to desc.Text() or elsewhere.
    2. DescKey* constants are passed only to assets.CommandDesc(), assets.FlagDesc(), or desc.Text(), never to cobra Use:.
    3. No cross-namespace lookups: TextDescKey must not be passed to CommandDesc(), FlagDescKey must not be passed to Text(), etc.
    ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#yaml-examples-registry-linkage","level":2,"title":"YAML Examples / Registry Linkage","text":"

    Test: TestExamplesYAMLLinkage, TestRegistryYAMLLinkage

    Every key in examples.yaml and registry.yaml must match a known entry type constant. Prevents orphan entries that are never rendered.

    Fix: Delete the orphan YAML entry, or add the corresponding constant in config/entry/.

    ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#other-enforced-patterns","level":2,"title":"Other Enforced Patterns","text":"

    These tests follow the same fix approach: extract the operation to its designated package:

    Test Violation Fix TestNoNakedErrorsfmt.Errorf/errors.New outside internal/err/ Add error constructor to internal/err/<domain>/TestNoRawFileIO Direct os.ReadFile, os.Create, etc. Use io.SafeReadFile, io.SafeWriteFile, etc. TestNoRawLogging Direct fmt.Fprintf(os.Stderr, ...) Use log/warn.Warn() or log/event.Append()TestNoExecOutsideExecPkgexec.Command outside internal/exec/ Add command to internal/exec/<domain>/TestNoCmdPrintOutsideWritecmd.Print* outside internal/write/ Add output helper to internal/write/<domain>/TestNoRawPermissions Octal literals (0644, 0755) Use config/fs.PermFile, config/fs.PermExec, etc. TestNoErrorsAserrors.As() Use errors.AsType() (generic, Go 1.23+) TestNoStringConcatPathsdir + \"/\" + file Use filepath.Join(dir, file)","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#general-fix-workflow","level":2,"title":"General Fix Workflow","text":"

    When an audit test fails:

    1. Read the error message. It includes file:line and a description of the violation.
    2. Find the matching section above. The test name maps directly to a section.
    3. Apply the pattern. Most fixes are mechanical: extract to the right package, rename a variable, or replace a literal with a constant.
    4. Run make test before committing. Audit tests run as part of go test ./internal/audit/.
    5. Don't add allowlist entries as a first resort. Fix the code. Allowlists exist only for genuinely unfixable cases (test-only exports, config packages that are definitionally exempt).
    ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/comparison/","level":1,"title":"Tool Ecosystem","text":"","path":["Reference","Tool Ecosystem"],"tags":[]},{"location":"reference/comparison/#high-level-mental-model","level":2,"title":"High-Level Mental Model","text":"

    Many tools help AI think.

    ctx helps AI remember.

    • Not by storing thoughts,
    • but by preserving intent.
    ","path":["Reference","Tool Ecosystem"],"tags":[]},{"location":"reference/comparison/#how-ctx-differs-from-similar-tools","level":2,"title":"How ctx Differs from Similar Tools","text":"

    There are many tools in the AI ecosystem that touch parts of the context problem:

    • Some manage prompts.
    • Some retrieve data.
    • Some provide runtime context objects.
    • Some offer enterprise platforms.

    ctx focuses on a different layer entirely.

    This page explains where ctx fits, and where it intentionally does not.

    ","path":["Reference","Tool Ecosystem"],"tags":[]},{"location":"reference/comparison/#the-core-distinction","level":2,"title":"The Core Distinction","text":"

    Most tools treat context as input.

    ctx treats context as infrastructure.

    That single difference explains nearly all of ctx's design choices.

    Question Most tools ctx Where does context live? In prompts or APIs In files How long does it last? One request / one session Across time Who can read it? The model Humans and tools How is it updated? Implicitly Explicitly Is it inspectable? Rarely Always","path":["Reference","Tool Ecosystem"],"tags":[]},{"location":"reference/comparison/#prompt-management-tools","level":2,"title":"Prompt Management Tools","text":"

    Examples include:

    • prompt templates;
    • reusable system prompts;
    • prompt libraries;
    • prompt versioning tools.

    These tools help you start a session.

    They do not help you continue one.

    Prompt tools:

    • inject text at session start;
    • are ephemeral by design;
    • do not evolve with the project.

    ctx:

    • persists knowledge over time;
    • accumulates decisions and learnings;
    • makes the context part of the repository itself.

    Prompt tooling and ctx are complementary, not competing. Yet they operate in different layers.

    ","path":["Reference","Tool Ecosystem"],"tags":[]},{"location":"reference/comparison/#retrieval-augmented-generation-rag","level":2,"title":"Retrieval-Augmented Generation (RAG)","text":"

    RAG systems typically:

    • index documents
    • embed text
    • retrieve chunks dynamically at runtime

    They are excellent for:

    • large knowledge bases
    • static documentation
    • reference material

    RAG answers questions like:

    \"What information might be relevant right now?\"

    ctx answers a different question:

    \"What have we already decided, learned, or committed to?\"

    Here are some key differences:

    RAG ctx Statistical relevance Intentional relevance Embedding-based File-based Opaque retrieval Explicit structure Runtime query Persistent memory

    ctx does not replace RAG. Instead, it defines a persistent context layer that RAG can optionally augment.

    RAG belongs to the data plane; ctx defines the context control plane.

    It focuses on project memory, not knowledge search.

    ","path":["Reference","Tool Ecosystem"],"tags":[]},{"location":"reference/comparison/#agent-frameworks","level":2,"title":"Agent Frameworks","text":"

    Agent frameworks often provide:

    • task loops
    • tool orchestration
    • planner/executor patterns
    • autonomous iteration

    These systems are powerful, but they typically assume that:

    • memory is external
    • context is injected
    • state is transient

    Agent frameworks answer:

    \"How should the agent act?\"

    ctx answers:

    \"What should the agent remember?\"

    Without persistent context, agents tend to:

    • rediscover decisions
    • repeat mistakes
    • lose architectural intent

    This is why ctx pairs well with autonomous loop workflows:

    • The loop provides iteration
    • ctx provides continuity

    Together, loops become cumulative instead of forgetful.

    ","path":["Reference","Tool Ecosystem"],"tags":[]},{"location":"reference/comparison/#sdk-level-context-objects","level":2,"title":"SDK-Level Context Objects","text":"

    Some SDKs expose \"context\" objects that exist:

    • inside a process
    • during a request
    • for the lifetime of a call chain

    These are extremely useful and completely different.

    SDK context objects:

    • are in-memory
    • disappear when the process ends
    • are not shared across sessions

    ctx:

    • survives process restarts
    • survives new chats
    • survives new days

    They share a name, not a purpose.

    ","path":["Reference","Tool Ecosystem"],"tags":[]},{"location":"reference/comparison/#enterprise-context-platforms","level":2,"title":"Enterprise Context Platforms","text":"

    Enterprise platforms often provide:

    • centralized context services
    • dashboards
    • access control
    • organizational knowledge layers

    These tools are designed for:

    • teams
    • governance
    • compliance
    • managed environments

    ctx is intentionally:

    • local-first: context lives next to your code, not behind a service boundary.
    • file-based: everything important is a markdown file you can read, diff, grep, and version-control.
    • single-binary core: the context persistence path (init, add, agent, status, drift, load, sync, compact, task, decision, learning, and their siblings) is a single Go binary with no required runtime dependencies. Optional integrations (ctx trace (needs git), ctx serve (needs zensical), the ctx Hub (needs a running hub), Claude Code plugin (needs claude)) are opt-in and each declares its dependency explicitly.
    • CLI-driven: every feature is reachable from the command line and scriptable.
    • developer-controlled: no auto-updating cloud service, no telemetry, no account to sign up for.

    The core ctx binary does not require:

    • a server
    • a database
    • an account
    • a SaaS backend
    • network connectivity (for core operations)

    ctx optimizes for individual and small-team workflows where context should live next to code, not behind a service boundary.

    ","path":["Reference","Tool Ecosystem"],"tags":[]},{"location":"reference/comparison/#specific-tool-comparisons","level":2,"title":"Specific Tool Comparisons","text":"

    Users often evaluate ctx against specific tools they already use. These comparisons clarify where responsibilities overlap, where they diverge, and where the tools are genuinely complementary.

    ","path":["Reference","Tool Ecosystem"],"tags":[]},{"location":"reference/comparison/#claude-code-memory-anthropic-auto-memory","level":3,"title":"Claude Code Memory / Anthropic Auto-Memory","text":"

    Anthropic's auto-memory is tool-managed memory (L2): the model decides what to remember, stores it automatically, and retrieves it implicitly. ctx is system memory (L3): humans and agents explicitly curate decisions, learnings, and tasks in inspectable files.

    Auto-memory is convenient - you do not configure anything. But it is also opaque: you cannot see what was stored, edit it precisely, or share it across tools. ctx files are plain Markdown in your repository, visible in diffs and code review.

    The two are complementary. ctx can absorb auto-memory as an input source (importing what the model remembered into structured context files) while providing the durable, inspectable layer that auto-memory lacks.

    ","path":["Reference","Tool Ecosystem"],"tags":[]},{"location":"reference/comparison/#cursorrules-clauderules","level":3,"title":".Cursorrules / .Claude/rules","text":"

    Static rule files (.cursorrules, .claude/rules/) declare conventions: coding style, forbidden patterns, preferred libraries. They are effective for what to do and load automatically at session start.

    ctx adds dimensions that rule files do not cover: architectural decisions with rationale, learnings discovered during development, active tasks, and a constitution that governs agent behavior. Critically, ctx context accumulates - each session can add to it, and token budgeting ensures only the most relevant context is injected.

    Use rule files for static conventions. Use ctx for evolving project memory.

    ","path":["Reference","Tool Ecosystem"],"tags":[]},{"location":"reference/comparison/#aider-read-watch","level":3,"title":"Aider --read / --watch","text":"

    Aider's --read flag injects file contents at session start; --watch reloads them on change. The concept is similar to ctx's \"load\" step: make the agent aware of specific files.

    The differences emerge beyond loading. Aider has no persistence model -- nothing the agent learns during a session is written back. There is no token budgeting (large files consume the full context window), no priority ordering across file types, and no structured format for decisions or learnings. ctx provides the full lifecycle: load, accumulate, persist, and budget.

    ","path":["Reference","Tool Ecosystem"],"tags":[]},{"location":"reference/comparison/#copilot-workspace","level":3,"title":"Copilot @Workspace","text":"

    GitHub Copilot's @workspace performs workspace-wide code search. It answers \"what code exists?\" - finding function definitions, usages, and file structure across the repository.

    ctx answers a different question: \"what did we decide?\" It stores architectural intent, not code indices. Copilot's workspace search and ctx's project memory are orthogonal; one finds code, the other preserves the reasoning behind it.

    ","path":["Reference","Tool Ecosystem"],"tags":[]},{"location":"reference/comparison/#cline-memory","level":3,"title":"Cline Memory","text":"

    Cline's memory bank stores session context within the Cline extension. The motivation is similar to ctx: help the agent remember across sessions.

    The key difference is portability. Cline memory is tied to Cline - it does not transfer to Claude Code, Cursor, Aider, or any other tool. ctx is tool-agnostic: context lives in plain files that any editor, agent, or script can read. Switching tools does not mean losing memory.

    ","path":["Reference","Tool Ecosystem"],"tags":[]},{"location":"reference/comparison/#when-ctx-is-a-good-fit","level":2,"title":"When ctx Is a Good Fit","text":"

    ctx works best when:

    • you want AI work to compound over time;
    • architectural decisions matter;
    • context must be inspectable;
    • humans and AI must share the same source of truth;
    • Git history should include why, not just what.
    ","path":["Reference","Tool Ecosystem"],"tags":[]},{"location":"reference/comparison/#when-ctx-is-not-the-right-tool","level":2,"title":"When ctx Is Not the Right Tool","text":"

    ctx is probably not what you want if:

    • you only need one-off prompts;
    • you rely exclusively on RAG;
    • you want autonomous agents without a human-readable state;
    • you require centralized enterprise control;
    • you want black-box memory systems.

    These are valid goals; just different ones.

    ","path":["Reference","Tool Ecosystem"],"tags":[]},{"location":"reference/comparison/#further-reading","level":2,"title":"Further Reading","text":"
    • You Can't Import Expertise: why project-specific context matters more than generic best practices
    ","path":["Reference","Tool Ecosystem"],"tags":[]},{"location":"reference/design-invariants/","level":1,"title":"Invariants","text":"","path":["Reference","Invariants"],"tags":[]},{"location":"reference/design-invariants/#the-system-explains-itself","level":1,"title":"The System Explains Itself","text":"

    These are the properties that must hold for any valid ctx implementation.

    • These are not features.
    • These are constraints.

    A change that violates an invariant is a category error, not an improvement.

    ","path":["Reference","Invariants"],"tags":[]},{"location":"reference/design-invariants/#cognitive-state-tiers","level":2,"title":"Cognitive State Tiers","text":"

    ctx distinguishes between three forms of state:

    • Authoritative state: Versioned, inspectable artifacts that define intent and survive time.
    • Delivery views: Deterministic assemblies of the authoritative state for a specific budget or workflow.
    • Ephemeral working state: Local, transient, or sensitive data that assists interaction but does not define system truth.

    The invariants below apply primarily to the authoritative cognitive state.

    ","path":["Reference","Invariants"],"tags":[]},{"location":"reference/design-invariants/#1-cognitive-state-is-explicit","level":2,"title":"1. Cognitive State Is Explicit","text":"

    All authoritative context lives in artifacts that can be inspected, reviewed, and versioned.

    If something is important, it must exist as a file: Not only in a prompt, a chat, or a model's hidden memory.

    ","path":["Reference","Invariants"],"tags":[]},{"location":"reference/design-invariants/#2-assembly-is-reproducible","level":2,"title":"2. Assembly Is Reproducible","text":"

    Given the same:

    • repository state,
    • configuration,
    • and inputs,

    context assembly produces the same result.

    Heuristics may rank or filter for delivery under constraints.

    They do not alter the authoritative state.

    ","path":["Reference","Invariants"],"tags":[]},{"location":"reference/design-invariants/#3-the-authoritative-state-is-human-readable","level":2,"title":"3. The Authoritative State Is Human-Readable","text":"

    The authoritative cognitive state must be stored in formats that a human can:

    • read,
    • diff,
    • review,
    • and edit directly.

    Sensitive working memory may be encrypted at rest. However, encryption must not become the only representation of authoritative knowledge.

    ","path":["Reference","Invariants"],"tags":[]},{"location":"reference/design-invariants/#4-artifacts-outlive-sessions","level":2,"title":"4. Artifacts Outlive Sessions","text":"

    Sessions are transient.

    Knowledge persists.

    Reasoning, decisions, and outcomes must remain available after the interaction that produced them has ended.

    ","path":["Reference","Invariants"],"tags":[]},{"location":"reference/design-invariants/#5-authority-is-user-defined","level":2,"title":"5. Authority Is User-Defined","text":"

    What enters the authoritative context is an explicit human decision.

    Models may suggest.

    Automation may assist.

    Selection is never implicit.

    ","path":["Reference","Invariants"],"tags":[]},{"location":"reference/design-invariants/#6-operation-is-local-first","level":2,"title":"6. Operation Is Local-First","text":"

    The core system must function without requiring network access or a remote service.

    External systems may extend ctx.

    They must not be required for its operation.

    ","path":["Reference","Invariants"],"tags":[]},{"location":"reference/design-invariants/#7-versioning-is-the-memory-model","level":2,"title":"7. Versioning Is the Memory Model","text":"

    The evolution of the authoritative cognitive state must be:

    • preserved,
    • inspectable,
    • and branchable.

    Ephemeral and sensitive working state may use different retention and diff strategies by design.

    Understanding includes understanding how we arrived here.

    ","path":["Reference","Invariants"],"tags":[]},{"location":"reference/design-invariants/#8-structure-enables-scale","level":2,"title":"8. Structure Enables Scale","text":"

    Unstructured accumulation is not memory.

    Authoritative cognitive state must have a defined layout that:

    • communicates intent,
    • supports navigation,
    • and prevents drift.
    ","path":["Reference","Invariants"],"tags":[]},{"location":"reference/design-invariants/#9-verification-is-the-scoreboard","level":2,"title":"9. Verification Is the Scoreboard","text":"

    Claims without recorded outcomes are noise.

    Reality (observed and captured) is the only signal that compounds.

    This invariant defines a required direction:

    The authoritative state must be able to record expectation and result.

    ","path":["Reference","Invariants"],"tags":[]},{"location":"reference/design-invariants/#10-capture-once-reuse-indefinitely","level":2,"title":"10. Capture Once, Reuse Indefinitely","text":"

    Work that has already produced understanding must not be re-derived from scratch.

    Explored paths, rejected options, and validated conclusions are permanent assets.

    ","path":["Reference","Invariants"],"tags":[]},{"location":"reference/design-invariants/#11-policies-are-encoded-not-remembered","level":2,"title":"11. Policies Are Encoded, Not Remembered","text":"

    Alignment must not depend on recall or goodwill.

    Constraints that matter must exist in machine-readable form and participate in context assembly.

    ","path":["Reference","Invariants"],"tags":[]},{"location":"reference/design-invariants/#12-the-system-explains-itself","level":2,"title":"12. The System Explains Itself","text":"

    From the repository state alone it must be possible to determine:

    • what was authoritative,
    • what constraints applied.

    Delivery views may be optimized.

    They must not become the only explanation.

    ","path":["Reference","Invariants"],"tags":[]},{"location":"reference/design-invariants/#non-goals","level":1,"title":"Non-Goals","text":"

    To avoid category errors, ctx does not attempt to be:

    • a skill,
    • a prompt management tool,
    • a chat history viewer,
    • an autonomous agent runtime,
    • a vector database,
    • a hosted memory service.

    Such systems may integrate with ctx.

    They do not define it.

    ","path":["Reference","Invariants"],"tags":[]},{"location":"reference/design-invariants/#implications-for-contributions","level":1,"title":"Implications for Contributions","text":"

    Valid contributions:

    • strengthen an invariant,
    • reduce the cost of maintaining an invariant,
    • or extend the system without violating invariants.

    Invalid contributions:

    • introduce hidden authoritative state,
    • replace reproducible assembly with non-reproducible behavior,
    • make core operation depend on external services,
    • reduce human inspectability of authoritative state,
    • or bypass explicit user authority over what becomes authoritative.
    ","path":["Reference","Invariants"],"tags":[]},{"location":"reference/design-invariants/#the-contract","level":1,"title":"The Contract","text":"

    Everything else (commands, skills, layouts, integrations, optimizations) is an implementation detail.

    These invariants are the system.

    ","path":["Reference","Invariants"],"tags":[]},{"location":"reference/scratchpad/","level":1,"title":"Scratchpad","text":"","path":["Reference","Scratchpad"],"tags":[]},{"location":"reference/scratchpad/#what-is-ctx-scratchpad","level":2,"title":"What Is ctx Scratchpad?","text":"

    A one-liner scratchpad, encrypted at rest, synced via git.

    Quick notes that don't fit decisions, learnings, or tasks: reminders, intermediate values, sensitive tokens, working memory during debugging. Entries are numbered, reorderable, and persist across sessions.

    ","path":["Reference","Scratchpad"],"tags":[]},{"location":"reference/scratchpad/#encrypted-by-default","level":2,"title":"Encrypted by Default","text":"

    Scratchpad entries are encrypted with AES-256-GCM before touching the disk.

    Component Path Git status Encryption key ~/.ctx/.ctx.key User-level, 0600 permissions Encrypted data .context/scratchpad.enc Committed

    The key is generated automatically during ctx init (256-bit via crypto/rand) and stored at ~/.ctx/.ctx.key. One key per machine, shared across all projects.

    The ciphertext format is [12-byte nonce][ciphertext+tag]. No external dependencies: Go stdlib only.

    Because the key is .gitignored and the data is committed, you get:

    • At-rest encryption: the .enc file is opaque without the key
    • Git sync: push/pull the encrypted file like any other tracked file
    • Key separation: the key never leaves the machine unless you copy it
    ","path":["Reference","Scratchpad"],"tags":[]},{"location":"reference/scratchpad/#commands","level":2,"title":"Commands","text":"Command Purpose ctx pad List all entries (numbered 1-based) ctx pad show N Output raw text of entry N (no prefix, pipe-friendly) ctx pad add \"text\" Append a new entry ctx pad rm ID [ID...] Remove entries by stable ID (supports ranges: 3-5) ctx pad edit N \"text\" Replace entry N with new text ctx pad edit N --append \"text\" Append text to the end of entry N ctx pad edit N --prepend \"text\" Prepend text to the beginning of entry N ctx pad edit N --tag tagname Add a tag to entry N ctx pad add TEXT --file PATH Ingest a file as a blob entry (TEXT is the label) ctx pad show N --out PATH Write decoded blob content to a file ctx pad normalize Reassign entry IDs as 1..N ctx pad mv N M Move entry from position N to position M ctx pad resolve Show both sides of a merge conflict for resolution ctx pad import FILE Bulk-import lines from a file (or stdin with -) ctx pad import --blob DIR Import directory files as blob entries ctx pad export [DIR] Export all blob entries to a directory as files ctx pad merge FILE... Merge entries from other scratchpad files into current ctx pad --tag TAG List entries filtered by tag (prefix with ~ to exclude) ctx pad tags List all tags with counts ctx pad tags --json List all tags with counts as JSON

    All commands decrypt on read, operate on plaintext in memory, and re-encrypt on write. The key file is never printed to stdout.

    For blob entries, --append, --prepend, and --tag modify the label while preserving the blob data.

    ","path":["Reference","Scratchpad"],"tags":[]},{"location":"reference/scratchpad/#examples","level":3,"title":"Examples","text":"
    # Add a note\nctx pad add \"check DNS propagation after deploy\"\n\n# List everything\nctx pad\n#   1. check DNS propagation after deploy\n#   2. staging API key: sk-test-abc123\n\n# Show raw text (for piping)\nctx pad show 2\n# sk-test-abc123\n\n# Compose entries\nctx pad edit 1 --append \"$(ctx pad show 2)\"\n\n# Reorder\nctx pad mv 2 1\n\n# Clean up (IDs are stable; they don't shift when entries are deleted)\nctx pad rm 2\n
    ","path":["Reference","Scratchpad"],"tags":[]},{"location":"reference/scratchpad/#tags","level":2,"title":"Tags","text":"

    Entries can contain #word tags for lightweight categorization. Tags are convention-based: any #word token in an entry's text is a tag. No special syntax to add or remove them; use the existing add and edit commands.

    # Add tagged entries\nctx pad add \"check DNS propagation #later\"\nctx pad add \"deploy hotfix #urgent\"\nctx pad add \"review PR #later #ci\"\n\n# Filter by tag\nctx pad --tag later\n#   1. check DNS propagation #later\n#   3. review PR #later #ci\n\n# Exclude a tag\nctx pad --tag ~later\n#   2. deploy hotfix #urgent\n\n# Multiple filters (AND logic)\nctx pad --tag later --tag ci\n#   3. review PR #later #ci\n\n# List all tags with counts\nctx pad tags\n# ci       1\n# later    2\n# urgent   1\n\n# JSON output\nctx pad tags --json\n# [{\"tag\":\"ci\",\"count\":1},{\"tag\":\"later\",\"count\":2},{\"tag\":\"urgent\",\"count\":1}]\n\n# Add a tag to an existing entry\nctx pad edit 1 --tag done\n\n# Combine with other operations\nctx pad edit 1 --append \"checked\" --tag done\n\n# Remove a tag (replace entry text without the tag)\nctx pad edit 1 \"check DNS propagation\"\n

    Entry IDs are stable; they don't shift when other entries are deleted, so ctx pad rm 3 always targets the same entry. Use ctx pad normalize to reassign IDs as 1..N if gaps bother you. Tags are case-sensitive and support letters, digits, hyphens, and underscores (#high-priority, #v2, #my_tag).

    For blob entries, tags are extracted from the label only.

    ","path":["Reference","Scratchpad"],"tags":[]},{"location":"reference/scratchpad/#bulk-import-and-export","level":2,"title":"Bulk Import and Export","text":"

    Import lines from a file in bulk (each non-empty line becomes an entry):

    # Import from a file\nctx pad import notes.txt\n\n# Import from stdin\ngrep TODO *.go | ctx pad import -\n

    Export all blob entries to a directory as files:

    # Export to a directory\nctx pad export ./ideas\n\n# Preview without writing\nctx pad export --dry-run\n\n# Overwrite existing files\nctx pad export --force ./backup\n
    ","path":["Reference","Scratchpad"],"tags":[]},{"location":"reference/scratchpad/#merging-scratchpads","level":2,"title":"Merging Scratchpads","text":"

    Combine entries from other scratchpad files into your current pad. Useful when merging work from parallel worktrees, other machines, or teammates:

    # Merge from a worktree's encrypted scratchpad\nctx pad merge worktree/.context/scratchpad.enc\n\n# Merge from multiple sources (encrypted and plaintext)\nctx pad merge pad-a.enc notes.md\n\n# Merge a foreign encrypted pad using its key\nctx pad merge --key /other/.ctx.key foreign.enc\n\n# Preview without writing\nctx pad merge --dry-run pad-a.enc pad-b.md\n

    Each input file is auto-detected as encrypted or plaintext: decryption is attempted first, and on failure the file is parsed as plain text. Entries are deduplicated by exact content, so running merge twice with the same file is safe.

    ","path":["Reference","Scratchpad"],"tags":[]},{"location":"reference/scratchpad/#file-blobs","level":2,"title":"File Blobs","text":"

    The scratchpad can store small files (up to 64 KB) as blob entries. Files are base64-encoded and stored with a human-readable label.

    # Ingest a file: first argument is the label\nctx pad add \"deploy config\" --file ./deploy.yaml\n\n# Listing shows label with a [BLOB] marker\nctx pad\n#   1. check DNS propagation after deploy\n#   2. deploy config [BLOB]\n\n# Extract to a file\nctx pad show 2 --out ./recovered.yaml\n\n# Or print decoded content to stdout\nctx pad show 2\n

    Blob entries are encrypted identically to text entries. The internal format is label:::base64data: You never need to construct this manually.

    Constraint Value Max file size (pre-encoding) 64 KB Storage format label:::base64(content) Display label [BLOB] in listings

    When Should You Use Blobs

    Blobs are for small files you want encrypted and portable: config snippets, key fragments, deployment manifests, test fixtures. For anything larger than 64 KB, use the filesystem directly.

    ","path":["Reference","Scratchpad"],"tags":[]},{"location":"reference/scratchpad/#using-with-ai","level":2,"title":"Using with AI","text":"

    Use Natural Language

    As in many ctx features, the ctx scratchpad can also be used with natural language. You don't have to memorize the CLI commands.

    CLI gives you \"precision\", whereas natural language gives you flow.

    The /ctx-pad skill maps natural language to ctx pad commands. You don't need to remember the syntax:

    You say What happens \"jot down: check DNS after deploy\" ctx pad add \"check DNS after deploy\" \"show my scratchpad\" ctx pad \"delete the third entry\" ctx pad rm 3 \"update entry 2 to include the new endpoint\" ctx pad edit 2 \"...\" \"move entry 4 to the top\" ctx pad mv 4 1 \"import my notes from notes.txt\" ctx pad import notes.txt \"export all blobs to ./backup\" ctx pad export ./backup \"merge the scratchpad from the worktree\" ctx pad merge worktree/.context/scratchpad.enc

    The skill handles the translation. You describe what you want in plain English; the agent picks the right command.

    ","path":["Reference","Scratchpad"],"tags":[]},{"location":"reference/scratchpad/#worktrees","level":2,"title":"Worktrees","text":"

    The encryption key lives at ~/.ctx/.ctx.key (outside the project directory). Because all worktrees on the same machine share this path, ctx pad works in worktrees automatically - no special setup needed.

    ","path":["Reference","Scratchpad"],"tags":[]},{"location":"reference/scratchpad/#key-distribution","level":2,"title":"Key Distribution","text":"

    The encryption key (~/.ctx/.ctx.key) stays on the machine where it was generated. ctx never transmits it.

    To share the scratchpad across machines:

    1. Copy the key manually: scp, USB drive, password manager.
    2. Push/pull the .enc file via git as usual.
    3. Both machines can now read and write the same scratchpad.

    Never Commit the Key

    The key is .gitignored by default. If you override this, anyone with repo access can decrypt your scratchpad.

    Treat the key like an SSH private key.

    See the Syncing Scratchpad Notes Across Machines recipe for a step-by-step walkthrough.

    ","path":["Reference","Scratchpad"],"tags":[]},{"location":"reference/scratchpad/#plaintext-override","level":2,"title":"Plaintext Override","text":"

    For projects where encryption is unnecessary, disable it in .ctxrc:

    scratchpad_encrypt: false\n

    In plaintext mode:

    • Entries are stored in .context/scratchpad.md instead of .enc.
    • No key is generated or required.
    • All ctx pad commands work identically.
    • The file is human-readable and diffable.

    When Should You Use Plaintext

    Plaintext mode is useful for non-sensitive projects, solo work where encryption adds friction, or when you want scratchpad entries visible in git diff.

    ","path":["Reference","Scratchpad"],"tags":[]},{"location":"reference/scratchpad/#when-should-you-use-scratchpad-versus-context-files","level":2,"title":"When Should You Use Scratchpad versus Context Files","text":"Use case Where it goes Temporary reminders (\"check X after deploy\") Scratchpad Working values during debugging Scratchpad Sensitive tokens or API keys (short-term) Scratchpad Quick notes that don't fit anywhere else Scratchpad Items that are not directly relevant to the project Scratchpad Things that you want to keep near, but also hidden Scratchpad Work items with completion tracking TASKS.md Trade-offs with rationale DECISIONS.md Reusable lessons with context/lesson/application LEARNINGS.md Codified patterns and standards CONVENTIONS.md

    Rule of thumb:

    • If it needs structure or will be referenced months later, use a context file (i.e. DECISIONS.md, LEARNINGS.md, TASKS.md).
    • If it is working memory for the current session or week, use the scratchpad.
    ","path":["Reference","Scratchpad"],"tags":[]},{"location":"reference/scratchpad/#see-also","level":2,"title":"See Also","text":"
    • Syncing Scratchpad Notes Across Machines: Key distribution, push/pull workflow, merge conflict resolution
    • Using the Scratchpad: Natural language examples, blob workflow, when to use scratchpad vs context files
    • Context Files: Format and conventions for all .context/ files
    • Security: Trust model and permission hygiene
    ","path":["Reference","Scratchpad"],"tags":[]},{"location":"reference/session-journal/","level":1,"title":"Session Journal","text":"

    Important Security Note

    Session journals contain sensitive data such as file contents, commands, API keys, internal discussions, error messages with stack traces, and more.

    The .context/journal-site/ and .context/journal-obsidian/ directories MUST be .gitignored.

    • DO NOT host your journal publicly.
    • DO NOT commit your journal files to version control.
    ","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#browse-your-session-history","level":2,"title":"Browse Your Session History","text":"

    ctx's Session Journal turns your AI coding sessions into a browsable, searchable, and editable archive.

    ","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#quick-start","level":2,"title":"Quick Start","text":"

    After using ctx for a couple of sessions, you can generate a journal site with:

    # Import all sessions to markdown\nctx journal import --all\n\n# Generate and serve the journal site\nctx journal site --serve\n

    Then open http://localhost:8000 to browse your sessions.

    ","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#what-you-get","level":2,"title":"What You Get","text":"

    The Session Journal gives you:

    • Browsable history: Navigate through all your AI sessions by date
    • Full conversations: See every message, tool use, and result
    • Token usage: Track how many tokens each session consumed
    • Search: Find sessions by content, project, or date
    • Dark mode: Easy on the eyes for late-night archaeology

    Each session page includes the following sections:

    Section Content Metadata Date, time, duration, model, project, git branch Summary Space for your notes (editable) Tool Usage Which tools were used and how often Conversation Full transcript with timestamps","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#the-workflow","level":2,"title":"The Workflow","text":"","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#1-import-sessions","level":3,"title":"1. Import Sessions","text":"
    # Import all sessions from current project (only new files)\nctx journal import --all\n\n# Import sessions from all projects\nctx journal import --all --all-projects\n\n# Import a specific session by ID (always writes)\nctx journal import abc123\n\n# Preview what would be imported\nctx journal import --all --dry-run\n\n# Re-import existing (regenerates conversation, preserves YAML frontmatter)\nctx journal import --all --regenerate\n\n# Discard frontmatter during regeneration\nctx journal import --all --regenerate --keep-frontmatter=false -y\n

    Imported sessions go to .context/journal/ as editable Markdown files.

    ","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#2-generate-the-site","level":3,"title":"2. Generate the Site","text":"
    # Generate site structure\nctx journal site\n\n# Generate and build static HTML\nctx journal site --build\n\n# Generate and serve locally\nctx journal site --serve\n\n# Custom output directory\nctx journal site --output ~/my-journal\n

    The site is generated in .context/journal-site/ by default.

    ","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#3-browse-and-search","level":3,"title":"3. Browse and Search","text":"

    Open http://localhost:8000 after running --serve.

    • Use the sidebar to navigate by date
    • Use search (/ key) to find specific content
    • Click any session to see the full conversation
    ","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#editing-sessions","level":2,"title":"Editing Sessions","text":"

    Imported sessions are plain Markdown in .context/journal/. You can:

    • Add summaries: Fill in the ## Summary section
    • Add notes: Insert your own commentary anywhere
    • Highlight key moments: Use Markdown formatting
    • Delete noise: Remove irrelevant tool outputs

    After editing, regenerate the site:

    ctx journal site --serve\n
    Safe by Default

    Running ctx journal import --all only imports new sessions. Existing files are skipped entirely (your edits and enrichments are never touched).

    Use --regenerate to re-import existing files. Conversation content is regenerated, but YAML frontmatter (topics, type, outcome, etc.) is preserved. You'll be prompted before any existing files are overwritten; add -y to skip the prompt.

    Use --keep-frontmatter=false to discard enriched frontmatter during regeneration.

    Locked entries (via ctx journal lock) are always skipped, regardless of flags. If you prefer to add locked: true to frontmatter during enrichment, run ctx journal sync to propagate the lock state to .state.json.

    ","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#large-sessions","level":2,"title":"Large Sessions","text":"

    Sessions with many messages (200+) are automatically split into multiple parts for better browser performance. Navigation links connect the parts:

    session-abc123.md      (Part 1 of 3)\nsession-abc123-p2.md   (Part 2 of 3)\nsession-abc123-p3.md   (Part 3 of 3)\n
    ","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#suggestion-sessions","level":2,"title":"Suggestion Sessions","text":"

    Claude Code generates \"suggestion\" sessions for auto-complete prompts. These are separated in the index under a \"Suggestions\" section to keep your main session list focused.

    ","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#enriching-journal-entries","level":2,"title":"Enriching Journal Entries","text":"

    Raw imported sessions contain basic metadata (date, time, project) but lack the structured information needed for effective search, filtering, and analysis. Journal enrichment adds semantic metadata that transforms a flat archive into a searchable knowledge base.

    ","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#why-enrich","level":3,"title":"Why Enrich?","text":"

    Without enrichment, you have timestamps and raw conversations. With enrichment:

    • Find sessions by topic: \"Show me all auth-related sessions\"
    • Filter by outcome: \"What did I abandon vs complete?\"
    • Track technology usage: \"When did I last work with PostgreSQL?\"
    • Identify key files: Jump directly to the files discussed
    • Get summaries: Understand what happened without reading transcripts
    ","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#the-frontmatter-schema","level":3,"title":"The Frontmatter Schema","text":"

    Enriched entries begin with YAML frontmatter:

    ---\ntitle: \"Implement caching layer\"\ndate: 2026-01-27\ntype: feature\noutcome: completed\ntopics:\n  - caching\n  - performance\ntechnologies:\n  - go\n  - redis\nlibraries:\n  - go-redis/redis\nkey_files:\n  - internal/cache/redis.go\n  - internal/cache/memory.go\n---\n
    Field Required Description title Yes Descriptive title (not the session slug) date Yes Session date (YYYY-MM-DD) type Yes Session type (see below) outcome Yes How the session ended (see below) topics No Subject areas discussed technologies No Languages, databases, frameworks libraries No Specific packages or libraries used key_files No Important files created or modified

    Type values:

    Type When to use feature Building new functionality bugfix Fixing broken behavior refactor Restructuring without behavior change exploration Research, learning, experimentation debugging Investigating issues documentation Writing docs, comments, README

    Outcome values:

    Outcome Meaning completed Goal achieved partial Some progress, work continues abandoned Stopped pursuing this approach blocked Waiting on external dependency","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#using-ctx-journal-enrich","level":3,"title":"Using /ctx-journal-enrich","text":"

    The /ctx-journal-enrich skill automates enrichment by analyzing conversation content and proposing metadata.

    Invoke by session identifier:

    /ctx-journal-enrich twinkly-stirring-kettle\n/ctx-journal-enrich twinkly\n/ctx-journal-enrich 2026-01-24\n/ctx-journal-enrich 76fe2ab9\n

    The skill will:

    1. Check if locked - locked entries are skipped (same as export);
    2. Find the matching journal file;
    3. Read and analyze the conversation;
    4. Propose frontmatter (type, topics, outcome, technologies);
    5. Generate a 2-3 sentence summary;
    6. Extract decisions, learnings, and tasks mentioned;
    7. Show a diff and ask for confirmation before writing.
    ","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#before-and-after","level":3,"title":"Before and After","text":"

    Before enrichment:

    # twinkly-stirring-kettle\n\n**ID**: abc123-def456\n**Date**: 2026-01-24\n**Time**: 14:30:00\n...\n\n## Summary\n\n[Add your summary of this session]\n\n## Conversation\n...\n

    After enrichment:

    ---\ntitle: \"Add Redis caching to API endpoints\"\ndate: 2026-01-24\ntype: feature\noutcome: completed\ntopics:\n  - caching\n  - api-performance\ntechnologies:\n  - go\n  - redis\nkey_files:\n  - internal/api/middleware/cache.go\n  - internal/cache/redis.go\n---\n\n# twinkly-stirring-kettle\n\n**ID**: abc123-def456\n**Date**: 2026-01-24\n**Time**: 14:30:00\n...\n\n## Summary\n\nImplemented Redis-based caching middleware for frequently accessed API endpoints.\nAdded cache invalidation on writes and configurable TTL per route. Reduced\n the average response time from 200ms to 15ms for cached routes.\n\n## Decisions\n\n* Used Redis over in-memory cache for horizontal scaling\n* Chose per-route TTL configuration over global setting\n\n## Learnings\n\n* Redis WATCH command prevents race conditions during cache invalidation\n\n## Conversation\n...\n
    ","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#enrichment-and-site-generation","level":3,"title":"Enrichment and Site Generation","text":"

    The journal site generator uses enriched metadata for better organization:

    • Titles appear in navigation instead of slugs
    • Summaries provide context in the index
    • Topics enable filtering (when using search)
    • Types allow grouping by work category

    Future improvements will add topic-based navigation and outcome filtering to the generated site.

    ","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#batch-enrichment","level":3,"title":"Batch Enrichment","text":"

    To enrich multiple sessions, process them one at a time:

    # List unenriched sessions (those without frontmatter)\ngrep -L \"^---$\" .context/journal/*.md | head -10\n

    Then run /ctx-journal-enrich on each. Enrichment is intentionally interactive to ensure accuracy.

    ","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#obsidian-vault-export","level":2,"title":"Obsidian Vault Export","text":"

    If you use Obsidian for knowledge management, you can export your journal as an Obsidian vault instead of (or alongside) the static site:

    ctx journal obsidian\n

    This generates a vault in .context/journal-obsidian/ with:

    • Wikilinks ([[target|display]]) instead of Markdown links
    • MOC pages (Map of Content) for topics, key files, and session types
    • Related sessions footer per entry: links to entries sharing the same topics
    • Transformed frontmatter: topics renamed to tags (Obsidian-recognized), aliases added from title for search
    • Graph-optimized structure: MOC hubs and cross-linked entries create dense graph connectivity

    To use: open the output directory in Obsidian (\"Open folder as vault\").

    # Custom output directory\nctx journal obsidian --output ~/vaults/ctx-journal\n

    Static Site vs Obsidian Vault

    Use ctx journal site when you want a web-browsable archive with search and dark mode. Use ctx journal obsidian when you want graph view, backlinks, and tag-based navigation inside Obsidian. Both use the same enriched source entries: you can generate both.

    ","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#full-pipeline","level":2,"title":"Full Pipeline","text":"

    The complete journal workflow has four stages. Each is idempotent: safe to re-run, and stages skip already-processed entries.

    import → enrich → rebuild\n
    Stage Command / Skill What it does Skips if Import ctx journal import --all Converts session JSONL to Markdown File already exists (safe default) Enrich /ctx-journal-enrich Adds frontmatter, summaries, topics Frontmatter already present Rebuild ctx journal site --build Generates static HTML site -- Obsidian ctx journal obsidian Generates Obsidian vault with wikilinks --

    One-Command Pipeline

    /ctx-journal-enrich-all handles import automatically - it detects unimported sessions and imports them before enriching. You only need to run ctx journal site --build afterward.

    ","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#using-make-journal","level":3,"title":"Using make journal","text":"

    If your project includes Makefile.ctx (deployed by ctx init), the first and last stages are combined:

    make journal           # import + rebuild\n

    After it runs, it reminds you to enrich in Claude Code:

    Next steps (in Claude Code):\n  /ctx-journal-enrich-all # imports if needed + adds metadata per entry\n\nThen re-run: make journal\n

    Rendering Issues?

    If individual entries have rendering problems (broken fences, malformed lists), check the programmatic normalization in the import pipeline. Most cases are handled automatically during ctx journal import.

    ","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#tips","level":2,"title":"Tips","text":"

    Daily workflow:

    # Import, browse, then enrich in Claude Code\nmake journal && make journal-serve\n# Then in Claude Code: /ctx-journal-enrich <session>\n

    After a productive session:

    # Import just that session and add notes\nctx journal import <session-id>\n# Edit .context/journal/<session>.md\n# Regenerate: ctx journal site\n

    Searching across all sessions:

    # Use grep on the journal directory\ngrep -r \"authentication\" .context/journal/\n

    ","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#requirements","level":2,"title":"Requirements","text":"Use pipx for zensical

    pip install zensical may install a non-functional stub on system Python. Using venv has other issues too.

    These issues are especially common on macOS.

    Use pipx install zensical, which creates an isolated environment and handles Python version management automatically.

    The journal site uses zensical for static site generation:

    pipx install zensical\n
    ","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#see-also","level":2,"title":"See Also","text":"
    • ctx journal: Session discovery and listing
    • ctx journal site: Static site generation
    • ctx journal obsidian: Obsidian vault export
    • Context Files: The .context/ directory structure
    ","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/skills/","level":1,"title":"Skills","text":"","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#skills","level":2,"title":"Skills","text":"

    Skills are slash commands that run inside your AI assistant (e.g., /ctx-next), as opposed to CLI commands that run in your terminal (e.g., ctx status).

    Skills give your agent structured workflows: It knows what to read, what to run, and when to ask. Most wrap one or more ctx CLI commands with opinionated behavior on top.

    Skills Are Best Used Conversationally

    The beauty of ctx is that it's designed to be intuitive and conversational, allowing you to interact with your AI assistant naturally. That's why you don't have to memorize many of these skills.

    See the Prompting Guide for natural-language triggers that invoke these skills conversationally.

    However, when you need more precise control, you have the option to invoke the relevant skills directly.

    ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#all-skills","level":2,"title":"All Skills","text":"Skill Description Type /ctx-remember Recall project context and present structured readback user-invocable /ctx-wrap-up End-of-session context persistence ceremony user-invocable /ctx-status Show context summary with interpretation user-invocable /ctx-agent Load full context packet for AI consumption user-invocable /ctx-next Suggest 1-3 concrete next actions with rationale user-invocable /ctx-commit Commit with integrated context persistence user-invocable /ctx-reflect Pause and reflect on session progress user-invocable /ctx-task-add Add actionable task to TASKS.md user-invocable /ctx-decision-add Record architectural decision with rationale user-invocable /ctx-learning-add Record gotchas and lessons learned user-invocable /ctx-convention-add Record coding convention for consistency user-invocable /ctx-archive Archive completed tasks from TASKS.md user-invocable /ctx-pad Manage encrypted scratchpad entries user-invocable /ctx-history Browse and import AI session history user-invocable /ctx-journal-enrich Enrich single journal entry with metadata user-invocable /ctx-journal-enrich-all Full journal pipeline: export if needed, then batch-enrich user-invocable /ctx-blog Generate blog post draft from project activity user-invocable /ctx-blog-changelog Generate themed blog post from a commit range user-invocable /ctx-consolidate Consolidate redundant learnings or decisions user-invocable /ctx-drift Detect and fix context drift user-invocable /ctx-prompt Apply, list, and manage saved prompt templates user-invocable /ctx-prompt-audit Analyze prompting patterns for improvement user-invocable /ctx-link-check Audit docs for dead internal and external links user-invocable /ctx-permission-sanitize Audit Claude Code permissions for security risks user-invocable /ctx-brainstorm Structured design dialogue before implementation user-invocable /ctx-spec 
Scaffold a feature spec from a project template user-invocable /ctx-plan-import Import Claude Code plan files into project specs user-invocable /ctx-implement Execute a plan step-by-step with verification user-invocable /ctx-loop Generate autonomous loop script user-invocable /ctx-worktree Manage git worktrees for parallel agents user-invocable /ctx-architecture Build and maintain architecture maps user-invocable /ctx-architecture-failure-analysis Adversarial failure analysis for correctness bugs user-invocable /ctx-remind Manage session-scoped reminders user-invocable /ctx-doctor Troubleshoot ctx behavior with health checks and event analysis user-invocable /ctx-skill-audit Audit skills against Anthropic prompting best practices user-invocable /ctx-skill-create Create, improve, and test skills user-invocable /ctx-pause Pause context hooks for this session user-invocable /ctx-resume Resume context hooks after a pause user-invocable","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#session-lifecycle","level":2,"title":"Session Lifecycle","text":"

    Skills for starting, running, and ending a productive session.

    Session Ceremonies

    Two skills in this group are ceremony skills: /ctx-remember (session start) and /ctx-wrap-up (session end). Unlike other skills that work conversationally, these should be invoked as explicit slash commands for completeness. See Session Ceremonies.

    ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-remember","level":3,"title":"/ctx-remember","text":"

    Recall project context and present a structured readback. Ceremony skill: invoke explicitly at session start.

    Wraps: ctx agent --budget 4000, ctx journal source --limit 3, reads TASKS.md, DECISIONS.md, LEARNINGS.md

    See also: Session Ceremonies, The Complete Session

    ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-status","level":3,"title":"/ctx-status","text":"

    Show context summary (files, token budget, tasks, recent activity) with interpreted suggestions.

    Wraps: ctx status [--verbose] [--json]

    See also: The Complete Session, ctx status CLI

    ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-agent","level":3,"title":"/ctx-agent","text":"

    Load the full context packet optimized for AI consumption. Also runs automatically via the PreToolUse hook with cooldown.

    Wraps: ctx agent [--budget] [--format] [--cooldown] [--session]

    See also: The Complete Session, ctx agent CLI

    ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-next","level":3,"title":"/ctx-next","text":"

    Suggest 1-3 concrete next actions ranked by priority, momentum, and unblocked status.

    Wraps: reads TASKS.md, ctx journal source --limit 3

    See also: The Complete Session, Tracking Work Across Sessions

    ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-commit","level":3,"title":"/ctx-commit","text":"

    Commit code with integrated context persistence: pre-commit checks, staged files, Co-Authored-By trailer, and a post-commit prompt to capture decisions and learnings.

    Wraps: git add, git commit, optionally chains to /ctx-decision-add and /ctx-learning-add

    See also: The Complete Session

    ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-reflect","level":3,"title":"/ctx-reflect","text":"

    Pause and reflect on session progress. Walks through a checklist of learnings, decisions, task completions, and session notes to persist.

    Wraps: chains to ctx add learning, ctx add decision, manual TASKS.md updates

    See also: The Complete Session, Persisting Decisions, Learnings, and Conventions

    ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-wrap-up","level":3,"title":"/ctx-wrap-up","text":"

    End-of-session context persistence ceremony. Gathers signal from git diff, recent commits, and conversation themes. Proposes candidates (learnings, decisions, conventions, tasks) with complete structured fields for user approval, then persists via ctx add. Offers /ctx-commit if uncommitted changes remain. Ceremony skill: invoke explicitly at session end.

    Wraps: git diff --stat, git log, ctx add learning, ctx add decision, ctx add convention, ctx add task, chains to /ctx-commit

    See also: Session Ceremonies, The Complete Session

    ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#context-persistence","level":2,"title":"Context Persistence","text":"

    Skills for recording work artifacts (tasks, decisions, learnings, conventions) into .context/ files.

    ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-task-add","level":3,"title":"/ctx-task-add","text":"

    Add an actionable task with optional priority and phase section.

    Wraps: ctx add task \"description\" [--priority high|medium|low] --session-id ID --branch BR --commit HASH

    See also: Tracking Work Across Sessions

    ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-decision-add","level":3,"title":"/ctx-decision-add","text":"

    Record an architectural decision with context, rationale, and consequence. Supports Y-statement (lightweight) and full ADR formats.

    Wraps: ctx add decision \"title\" --context \"...\" --rationale \"...\" --consequence \"...\" --session-id ID --branch BR --commit HASH

    See also: Persisting Decisions, Learnings, and Conventions

    ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-learning-add","level":3,"title":"/ctx-learning-add","text":"

    Record a project-specific gotcha, bug, or unexpected behavior. Filters for insights that are searchable, project-specific, and required real effort to discover.

    Wraps: ctx add learning \"title\" --context \"...\" --lesson \"...\" --application \"...\" --session-id ID --branch BR --commit HASH

    See also: Persisting Decisions, Learnings, and Conventions

    ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-convention-add","level":3,"title":"/ctx-convention-add","text":"

    Record a coding convention that should be standardized across sessions. Targets patterns seen 2-3+ times.

    Wraps: ctx add convention \"rule\" --section \"Name\"

    See also: Persisting Decisions, Learnings, and Conventions

    ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-archive","level":3,"title":"/ctx-archive","text":"

    Archive completed tasks from TASKS.md to a timestamped file in .context/archive/. Preserves phase headers for traceability.

    Wraps: ctx task archive [--dry-run]

    See also: Tracking Work Across Sessions

    ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#scratchpad","level":2,"title":"Scratchpad","text":"","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-pad","level":3,"title":"/ctx-pad","text":"

    Manage the encrypted scratchpad: add, remove, edit, and reorder one-liner notes. Encrypted at rest with AES-256-GCM.

    Wraps: ctx pad, ctx pad add, ctx pad rm, ctx pad edit, ctx pad mv, ctx pad import, ctx pad export, ctx pad merge

    See also: Scratchpad, Using the Scratchpad

    ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#journal-history","level":2,"title":"Journal & History","text":"

    Skills for browsing, exporting, and enriching your AI session history into a structured journal.

    ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-history","level":3,"title":"/ctx-history","text":"

    Browse, inspect, and import AI session history. List recent sessions, show details by slug or ID, and import to .context/journal/.

    Wraps: ctx journal source, ctx journal source --show, ctx journal import

    See also: Browsing and Enriching Past Sessions

    ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-journal-enrich","level":3,"title":"/ctx-journal-enrich","text":"

    Enrich a single journal entry with YAML frontmatter: title, type, outcome, topics, technologies, and summary. Shows diff before writing.

    Wraps: reads and edits .context/journal/*.md files

    See also: Browsing and Enriching Past Sessions, Turning Activity into Content

    ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-journal-enrich-all","level":3,"title":"/ctx-journal-enrich-all","text":"

    Full journal pipeline: imports unimported sessions first, then batch-enriches all unenriched entries. Filters out short sessions and continuations. Can spawn subagents for large backlogs.

    Wraps: ctx journal import --all + iterates /ctx-journal-enrich

    See also: Browsing and Enriching Past Sessions

    ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#content-creation","level":2,"title":"Content Creation","text":"

    Skills for turning project activity into publishable content.

    ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-blog","level":3,"title":"/ctx-blog","text":"

    Generate a blog post draft from recent project activity: git history, decisions, learnings, tasks, and journal entries. Requires a narrative arc (problem, approach, outcome).

    Wraps: reads git log, DECISIONS.md, LEARNINGS.md, TASKS.md, journal entries; writes to docs/blog/

    See also: Turning Activity into Content

    ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-blog-changelog","level":3,"title":"/ctx-blog-changelog","text":"

    Generate a themed blog post from a commit range. Takes a starting commit and unifying theme, analyzes diffs and journal entries from that period.

    Wraps: git log, git diff --stat; writes to docs/blog/

    See also: Turning Activity into Content

    ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#auditing-health","level":2,"title":"Auditing & Health","text":"

    Skills for detecting drift, auditing alignment, and improving prompt quality.

    ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-consolidate","level":3,"title":"/ctx-consolidate","text":"

    Consolidate redundant entries in LEARNINGS.md or DECISIONS.md. Groups overlapping entries by keyword similarity, presents candidates, and (with user approval) merges groups into denser combined entries. Originals are archived, not deleted.

    Wraps: reads LEARNINGS.md and DECISIONS.md, writes consolidated entries, archives originals, runs ctx reindex

    See also: Detecting and Fixing Drift

    ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-drift","level":3,"title":"/ctx-drift","text":"

    Detect and fix context drift: stale paths, missing files, file age staleness, task accumulation, entry count warnings, and constitution violations via ctx drift. Also detects skill drift against canonical templates.

    Wraps: ctx drift [--fix]

    See also: Detecting and Fixing Drift

    ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-prompt-audit","level":3,"title":"/ctx-prompt-audit","text":"

    Analyze recent prompting patterns to identify vague or ineffective prompts. Reviews 3-5 journal entries and suggests rewrites with positive observations.

    Wraps: reads .context/journal/ entries

    See also: Detecting and Fixing Drift

    ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-doctor","level":3,"title":"/ctx-doctor","text":"

    Troubleshoot ctx behavior. Runs structural health checks via ctx doctor, analyzes event log patterns via ctx hook event, and presents findings with suggested actions. The CLI provides the structural baseline; the agent adds semantic analysis of event patterns and correlations.

    Wraps: ctx doctor --json, ctx hook event --json --last 100, ctx remind list, ctx hook message list, reads .ctxrc

    Trigger phrases: \"diagnose\", \"troubleshoot\", \"doctor\", \"health check\", \"why didn't my hook fire?\", \"hooks seem broken\", \"something seems off\"

    Graceful degradation: If event_log is not enabled, the skill still works but with reduced capability. It runs structural checks and notes: \"Enable event_log: true in .ctxrc for hook-level diagnostics.\"

    See also: Troubleshooting, ctx doctor CLI, ctx hook event CLI

    ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-link-check","level":3,"title":"/ctx-link-check","text":"

    Scan all markdown files under docs/ for broken links. Three passes: internal links (verify file targets exist on disk), external links (HTTP HEAD with timeout, report failures as warnings), and image references. Resolves relative paths, strips anchors before checking, and skips localhost/example URLs.

    Wraps: Glob + Grep to scan, curl for external checks

    Trigger phrases: \"check links\", \"audit links\", \"any broken links?\", \"dead links\"

    See also: Detecting and Fixing Drift

    ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-permission-sanitize","level":3,"title":"/ctx-permission-sanitize","text":"

    Audit .claude/settings.local.json for dangerous permissions across four risk categories: hook bypass (Critical), destructive commands (High), config injection vectors (High), and overly broad patterns (Medium). Reports findings by severity and offers specific fix actions with user confirmation.

    Wraps: reads .claude/settings.local.json, edits with confirmation

    Trigger phrases: \"audit permissions\", \"are my permissions safe?\", \"sanitize permissions\", \"check settings\"

    See also: Claude Code Permission Hygiene

    ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#planning-execution","level":2,"title":"Planning & Execution","text":"

    Skills for structured design, implementation, and parallel agent workflows.

    ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-brainstorm","level":3,"title":"/ctx-brainstorm","text":"

    Transform raw ideas into clear, validated designs through structured dialogue before any implementation begins. Follows a gated process: understand context, clarify the idea (one question at a time), surface non-functional requirements, lock understanding with user confirmation, explore 2-3 design approaches with trade-offs, stress-test the chosen approach, and present the detailed design.

    Wraps: reads DECISIONS.md, relevant source files; chains to /ctx-decision-add for recording design choices

    Trigger phrases: \"let's brainstorm\", \"design this\", \"think through\", \"before we build\", \"what approach should we take?\"

    See also: /ctx-spec

    ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-spec","level":3,"title":"/ctx-spec","text":"

    Scaffold a feature spec from the project template and walk through each section with the user. Covers: problem, approach, happy path, edge cases, validation rules, error handling, interface, implementation, configuration, testing, and non-goals. Spends extra time on edge cases and error handling.

    Wraps: reads specs/tpl/spec-template.md, writes to specs/, optionally chains to /ctx-task-add

    Trigger phrases: \"spec this out\", \"write a spec\", \"create a spec\", \"design document\"

    See also: /ctx-brainstorm, /ctx-plan-import

    ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-plan-import","level":3,"title":"/ctx-plan-import","text":"

    Import Claude Code plan files (~/.claude/plans/*.md) into the project's specs/ directory. Lists plans with dates and H1 titles, supports filtering (--today, --since, --all), slugifies headings for filenames, and optionally creates tasks referencing each imported spec.

    Wraps: reads ~/.claude/plans/*.md, writes to specs/, optionally chains to /ctx-task-add

    See also: Importing Claude Code Plans, Tracking Work Across Sessions

    ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-implement","level":3,"title":"/ctx-implement","text":"

    Execute a multi-step plan with build and test verification at each step. Loads a plan from a file or conversation context, breaks it into atomic steps, and checkpoints after every 3-5 steps.

    Wraps: reads plan file, runs verification commands (go build, go test, etc.)

    See also: Running an Unattended AI Agent

    ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-loop","level":3,"title":"/ctx-loop","text":"

    Generate a ready-to-run shell script for autonomous AI iteration. Supports Claude Code, Aider, and generic tool templates with configurable completion signals.

    Wraps: ctx loop [--tool] [--prompt] [--max-iterations] [--completion] [--output]

    See also: Autonomous Loops, Running an Unattended AI Agent

    ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-worktree","level":3,"title":"/ctx-worktree","text":"

    Manage git worktrees for parallel agent development. Create sibling worktrees on dedicated branches, analyze task blast radius for grouping, and tear down with merge.

    Wraps: git worktree add, git worktree list, git worktree remove, git merge

    See also: Parallel Agent Development with Git Worktrees

    ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-architecture","level":3,"title":"/ctx-architecture","text":"

    Build and maintain architecture maps incrementally. Creates or refreshes ARCHITECTURE.md (succinct project map, loaded at session start) and DETAILED_DESIGN.md (deep per-module reference, consulted on-demand). Coverage is tracked in map-tracking.json so each run extends the map rather than re-analyzing everything.

    Wraps: ctx status, git log, reads source files; writes ARCHITECTURE.md, DETAILED_DESIGN.md, map-tracking.json

    See also: Detecting and Fixing Drift

    ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-architecture-failure-analysis","level":3,"title":"/ctx-architecture-failure-analysis","text":"

    Adversarial failure analysis that generates falsifiable incident hypotheses against architecture artifacts. Hunts for correctness bugs that survive code review and tests: race conditions, ordering assumptions, cache staleness, error swallowing, ownership gaps, idempotency failures, state machine drift, and scaling cliffs.

    Requires /ctx-architecture artifacts as input. Reads ARCHITECTURE.md, DETAILED_DESIGN*.md, and map-tracking.json, then systematically applies 9 failure categories to every mutation point. Each finding carries an evidence standard (code path, trigger, failure path, silence reason, code evidence), a confidence level, and an explicit risk score. A mandatory challenge phase attempts to disprove each finding before it is accepted.

    Produces .context/DANGER-ZONES.md with ranked findings split into Critical (risk >= 7, silent/cascading) and Elevated tiers.

    Wraps: reads architecture artifacts, source code; writes DANGER-ZONES.md. Optionally uses GitNexus for blast radius and Gemini Search for cross-referencing known failure patterns.

    Relationship:

    Skill Mode /ctx-architecture Map what exists /ctx-architecture-enrich Improve map fidelity /ctx-architecture-failure-analysis Generate falsifiable incident hypotheses","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-remind","level":3,"title":"/ctx-remind","text":"

    Manage session-scoped reminders via natural language. Translates user intent (\"remind me to refactor swagger\") into the corresponding ctx remind command. Handles date conversion for --after flags.

    Wraps: ctx remind, ctx remind list, ctx remind dismiss

    See also: Session Reminders

    ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#skill-authoring","level":2,"title":"Skill Authoring","text":"","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-skill-audit","level":3,"title":"/ctx-skill-audit","text":"

    Audit one or more skills against Anthropic prompting best practices. Checks audit dimensions: positive framing, motivation, phantom references, examples, subagent guards, scope, and descriptions. Reports findings by severity with concrete fix suggestions.

    Wraps: reads internal/assets/claude/skills/*/SKILL.md or .claude/skills/*/SKILL.md, references anthropic-best-practices.md

    Trigger phrases: \"audit this skill\", \"check skill quality\", \"review the skills\", \"are our skills any good?\"

    See also: /ctx-skill-create, Contributing

    ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-skill-create","level":3,"title":"/ctx-skill-create","text":"

    Create, improve, and test skills. Guides the full lifecycle: capture intent, interview for edge cases, draft the SKILL.md, test with realistic prompts, review results with the user, and iterate. Applies core principles: the agent is already smart (only add what it does not know), the description is the trigger (make it specific and \"pushy\"), and explain the why instead of rigid directives.

    Wraps: reads/writes .claude/skills/ and internal/assets/claude/skills/

    Trigger phrases: \"create a skill\", \"turn this into a skill\", \"make a slash command\", \"this should be a skill\", \"improve this skill\", \"the skill isn't triggering\"

    See also: Contributing

    ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#session-control","level":2,"title":"Session Control","text":"

    Skills for controlling hook behavior during a session.

    ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-pause","level":3,"title":"/ctx-pause","text":"

    Pause all context nudge and reminder hooks for the current session. Security hooks still fire. Use for quick investigations or tasks that don't need ceremony overhead.

    Wraps: ctx hook pause

    Trigger phrases: \"pause ctx\", \"pause context\", \"stop the nudges\", \"quiet mode\"

    See also: Pausing Context Hooks

    ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-resume","level":3,"title":"/ctx-resume","text":"

    Resume context hooks after a pause. Restores normal nudge, reminder, and ceremony behavior. Silent no-op if not paused.

    Wraps: ctx hook resume

    Trigger phrases: \"resume ctx\", \"resume context\", \"turn nudges back on\", \"unpause\"

    See also: Pausing Context Hooks

    ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#project-specific-skills","level":2,"title":"Project-Specific Skills","text":"

    The ctx plugin ships the skills listed above. Teams can add their own project-specific skills to .claude/skills/ in the project root. These are separate from plugin-shipped skills and are scoped to the project.

    Project-specific skills follow the same format and are invoked the same way.

    Custom skills are not covered in this reference.

    ","path":["Reference","Skills"],"tags":[]},{"location":"reference/versions/","level":1,"title":"Version History","text":"","path":["Reference","Version History"],"tags":[]},{"location":"reference/versions/#version-history","level":2,"title":"Version History","text":"

    Documentation snapshots for each release.

    Click the corresponding view docs link to see the documentation as it was at that release.

    ","path":["Reference","Version History"],"tags":[]},{"location":"reference/versions/#releases","level":2,"title":"Releases","text":"Version Release Date Documentation v0.8.0 2026-03-23 view docs v0.6.0 2026-02-16 view docs v0.3.0 2026-02-07 view docs v0.2.0 2026-02-01 view docs v0.1.2 2026-01-27 view docs v0.1.1 2026-01-26 view docs v0.1.0 2026-01-25 view docs","path":["Reference","Version History"],"tags":[]},{"location":"reference/versions/#v080-the-architecture-release","level":3,"title":"v0.8.0: The Architecture Release","text":"

    MCP server for tool-agnostic AI integration. Memory bridge connecting Claude Code auto-memory to .context/. Complete CLI restructuring into cmd/ + core/ taxonomy. All user-facing strings externalized to YAML. fatih/color removed; two direct dependencies remain.

    ","path":["Reference","Version History"],"tags":[]},{"location":"reference/versions/#v060-the-integration-release","level":3,"title":"v0.6.0: The Integration Release","text":"

    Plugin architecture: hooks and skills converted from shell scripts to Go subcommands, shipped as a Claude Code marketplace plugin. Multi-tool hook generation for Cursor, Aider, Copilot, and Windsurf. Webhook notifications with encrypted URL storage.

    ","path":["Reference","Version History"],"tags":[]},{"location":"reference/versions/#v030-the-discipline-release","level":3,"title":"v0.3.0: The Discipline Release","text":"

    Journal static site generation via zensical. 49-skill audit and fix pass (positive framing, phantom reference removal, scope tightening). Context consolidation skill. golangci-lint v2 migration.

    ","path":["Reference","Version History"],"tags":[]},{"location":"reference/versions/#v020-the-archaeology-release","level":3,"title":"v0.2.0: The Archaeology Release","text":"

    Session journal system: ctx journal import converts Claude Code JSONL transcripts to browsable Markdown. Constants refactor with semantic prefixes (Dir*, File*, Filename*). CRLF handling for Windows compatibility.

    ","path":["Reference","Version History"],"tags":[]},{"location":"reference/versions/#v012","level":3,"title":"v0.1.2","text":"

    Default Claude Code permissions deployed on ctx init. Prompting guide published as a standalone documentation page.

    ","path":["Reference","Version History"],"tags":[]},{"location":"reference/versions/#v011","level":3,"title":"v0.1.1","text":"

    Bug fixes: hook schema key format corrected, JSON unicode escaping fixed in context file output.

    ","path":["Reference","Version History"],"tags":[]},{"location":"reference/versions/#v010-initial-release","level":3,"title":"v0.1.0: Initial Release","text":"

    CLI with 15 subcommands, 6 context file types (CONSTITUTION, TASKS, CONVENTIONS, ARCHITECTURE, DECISIONS, LEARNINGS), Makefile build system, and Claude Code hook integration.

    ","path":["Reference","Version History"],"tags":[]},{"location":"reference/versions/#latest","level":2,"title":"Latest","text":"

    The main documentation always reflects the latest development version.

    For the most recent stable release, see v0.8.0.

    ","path":["Reference","Version History"],"tags":[]},{"location":"reference/versions/#changelog","level":2,"title":"Changelog","text":"

    For detailed changes between versions, see the GitHub Releases page.

    ","path":["Reference","Version History"],"tags":[]},{"location":"security/","level":1,"title":"Security","text":"

    Security model, agent hardening, and vulnerability reporting.

    ","path":["Security"],"tags":[]},{"location":"security/#securing-ai-agents","level":3,"title":"Securing AI Agents","text":"

    Defense in depth for unattended AI agents: five layers of protection, each with a known bypass on its own, but strong in combination.

    ","path":["Security"],"tags":[]},{"location":"security/#security-policy","level":3,"title":"Security Policy","text":"

    Trust model, vulnerability reporting, permission hygiene, and security design principles.

    ","path":["Security"],"tags":[]},{"location":"security/agent-security/","level":1,"title":"Securing AI Agents","text":"","path":["Security","Securing AI Agents"],"tags":[]},{"location":"security/agent-security/#defense-in-depth-securing-ai-agents","level":1,"title":"Defense in Depth: Securing AI Agents","text":"","path":["Security","Securing AI Agents"],"tags":[]},{"location":"security/agent-security/#the-problem","level":2,"title":"The Problem","text":"

    An unattended AI agent with unrestricted access to your machine is an unattended shell with unrestricted access to your machine.

    This is not a theoretical concern. AI coding agents execute shell commands, write files, make network requests, and modify project configuration. When running autonomously (overnight, in a loop, without a human watching), the attack surface is the full capability set of the operating system user account.

    The risk is not that the AI is malicious. The risk is that the AI is controllable: it follows instructions from context, and context can be poisoned.

    ","path":["Security","Securing AI Agents"],"tags":[]},{"location":"security/agent-security/#threat-model","level":2,"title":"Threat Model","text":"","path":["Security","Securing AI Agents"],"tags":[]},{"location":"security/agent-security/#how-agents-get-compromised","level":3,"title":"How Agents Get Compromised","text":"

    AI agents follow instructions from multiple sources: system prompts, project files, conversation history, and tool outputs. An attacker who can inject content into any of these sources can redirect the agent's behavior.

    Vector How it works Prompt injection via dependencies A malicious package includes instructions in its README, changelog, or error output. The agent reads these during installation or debugging and follows them. Prompt injection via fetched content The agent fetches a URL (documentation, API response, Stack Overflow answer) containing embedded instructions. Poisoned project files A contributor adds adversarial instructions to CLAUDE.md, .cursorrules, or .context/ files. The agent loads these at session start. Self-modification between iterations In an autonomous loop, the agent modifies its own configuration files. The next iteration loads the modified config with no human review. Tool output injection A command's output (error messages, log lines, file contents) contains instructions the agent interprets and follows.","path":["Security","Securing AI Agents"],"tags":[]},{"location":"security/agent-security/#what-can-a-compromised-agent-do","level":3,"title":"What Can a Compromised Agent Do","text":"

    Depends entirely on what permissions and access the agent has:

    Access level Potential impact Unrestricted shell Execute any command, install software, modify system files Network access Exfiltrate source code, credentials, or context files to external servers Docker socket Escape container isolation by spawning privileged sibling containers SSH keys Pivot to other machines, push to remote repositories, access production systems Write access to own config Disable its own guardrails for the next iteration","path":["Security","Securing AI Agents"],"tags":[]},{"location":"security/agent-security/#the-defense-layers","level":2,"title":"The Defense Layers","text":"

    No single layer is sufficient. Each layer catches what the others miss.

    Layer 1: Soft instructions     (CONSTITUTION.md, playbook)\nLayer 2: Application controls  (permission allowlist, tool restrictions)\nLayer 3: OS-level isolation    (user accounts, filesystem, containers)\nLayer 4: Network controls      (firewall rules, airgap)\nLayer 5: Infrastructure        (VM isolation, resource limits)\n
    ","path":["Security","Securing AI Agents"],"tags":[]},{"location":"security/agent-security/#layer-1-soft-instructions-probabilistic","level":3,"title":"Layer 1: Soft Instructions (Probabilistic)","text":"

    Markdown files like CONSTITUTION.md and the Agent Playbook tell the agent what to do and what not to do. These are probabilistic: the agent usually follows them, but there is no enforcement mechanism.

    What it catches: Most common mistakes. An agent that has been told \"never delete production data\" will usually not delete production data.

    What it misses: Prompt injection. A sufficiently crafted injection can override soft instructions. Long context windows dilute attention on rules stated early. Edge cases where instructions are ambiguous.

    Verdict: Necessary but not sufficient. Good for the common case. Do not rely on it for security boundaries.

    ","path":["Security","Securing AI Agents"],"tags":[]},{"location":"security/agent-security/#layer-2-application-controls-deterministic-at-runtime-mutable-across-iterations","level":3,"title":"Layer 2: Application Controls (Deterministic at Runtime, Mutable across Iterations)","text":"

    AI tool runtimes (Claude Code, Cursor, etc.) provide permission systems: tool allowlists, command restrictions, confirmation prompts.

    For Claude Code, ctx init writes both an allowlist and an explicit deny list into .claude/settings.local.json. The golden images live in internal/assets/permissions/:

    Allowlist (allow.txt): only these tools run without confirmation:

    Bash(ctx:*)\nSkill(ctx-convention-add)\nSkill(ctx-decision-add)\n... # all bundled ctx-* skills\n

    Deny list (deny.txt): these are blocked even if the agent requests them:

    # Dangerous operations\nBash(sudo *)\nBash(git push *)\nBash(git push)\nBash(rm -rf /*)\nBash(rm -rf ~*)\nBash(curl *)\nBash(wget *)\nBash(chmod 777 *)\n\n# Sensitive file reads\nRead(**/.env)\nRead(**/.env.*)\nRead(**/*credentials*)\nRead(**/*secret*)\nRead(**/*.pem)\nRead(**/*.key)\n\n# Sensitive file edits\nEdit(**/.env)\nEdit(**/.env.*)\n

    What it catches: The agent cannot run commands outside the allowlist, and the deny list blocks dangerous operations even if a future allowlist change were to widen access. If rm, curl, sudo, or docker are not allowed and sudo/curl/wget are explicitly denied, the agent cannot invoke them regardless of what any prompt says.

    What it misses: The agent can modify the allowlist itself. In an autonomous loop, if the agent writes to .claude/settings.local.json, and the next iteration loads the modified config, then the protection is effectively lost. The application enforces the rules, but the application reads the rules from files the agent can write.

    Verdict: Strong first layer. Must be combined with self-modification prevention (Layer 3).

    ","path":["Security","Securing AI Agents"],"tags":[]},{"location":"security/agent-security/#layer-3-os-level-isolation-deterministic-and-unbypassable","level":3,"title":"Layer 3: OS-Level Isolation (Deterministic and Unbypassable)","text":"

    The operating system enforces access controls that no application-level trick can override. An unprivileged user cannot read files owned by root. A process without CAP_NET_RAW cannot open raw sockets. These are kernel boundaries.

    Control Purpose Dedicated user account No sudo, no privileged group membership (docker, wheel, adm). The agent cannot escalate privileges. Filesystem permissions Project directory writable; everything else read-only or inaccessible. Agent cannot reach other projects, home directories, or system config. Immutable config files CLAUDE.md, .claude/settings.local.json, and .context/CONSTITUTION.md owned by a different user or marked immutable (chattr +i on Linux). The agent cannot modify its own guardrails.

    What it catches: Privilege escalation, self-modification, lateral movement to other projects or users.

    What it misses: Actions within the agent's legitimate scope. If the agent has write access to source code (which it needs to do its job), it can introduce vulnerabilities in the code itself.

    Verdict: Essential. This is the layer that makes the other layers trustworthy.

    OS-level isolation does not make the agent safe; it makes the other layers meaningful.

    ","path":["Security","Securing AI Agents"],"tags":[]},{"location":"security/agent-security/#layer-4-network-controls","level":3,"title":"Layer 4: Network Controls","text":"

    An agent that cannot reach the internet cannot exfiltrate data. It also cannot ingest new instructions mid-loop from external documents, API responses, or hostile content.

    Scenario Recommended control Agent does not need the internet --network=none (container) or outbound firewall drop-all Agent needs to fetch dependencies Allow specific registries (npmjs.com, proxy.golang.org, pypi.org) via firewall rules. Block everything else. Agent needs API access Allow specific API endpoints only. Use an HTTP proxy with allowlisting.

    What it catches: Data exfiltration, phone-home payloads, downloading additional tools, and instruction injection via fetched content.

    What it misses: Nothing, if the agent genuinely does not need the network. The tradeoff is that many real workloads need dependency resolution, so a full airgap requires pre-populated caches.

    ","path":["Security","Securing AI Agents"],"tags":[]},{"location":"security/agent-security/#layer-5-infrastructure-isolation","level":3,"title":"Layer 5: Infrastructure Isolation","text":"

    The strongest boundary is a separate machine (or something that behaves like one).

    The moment you stop arguing about prompts and start arguing about kernels, you are finally doing security.

    Containers (Docker, Podman):

    docker run --rm \\\n  --network=none \\\n  --cap-drop=ALL \\\n  --memory=4g \\\n  --cpus=2 \\\n  -v /path/to/project:/workspace \\\n  -w /workspace \\\n  your-dev-image \\\n  ./loop.sh\n

    Docker Socket Is Sudo Access

    Critical: never mount the Docker socket (/var/run/docker.sock).

    An agent with socket access can spawn sibling containers with full host access, effectively escaping the sandbox.

    Use rootless Docker or Podman to eliminate this escalation path.

    Virtual machines: The strongest isolation. The guest kernel has no visibility into the host OS. No shared folders, no filesystem passthrough, no SSH keys to other machines.

    Resource limits: CPU, memory, and disk quotas prevent a runaway agent from consuming all resources. Use ulimit, cgroup limits, or container resource constraints.

    ","path":["Security","Securing AI Agents"],"tags":[]},{"location":"security/agent-security/#putting-it-all-together","level":2,"title":"Putting It All Together","text":"

    A defense-in-depth setup for overnight autonomous runs:

    Layer Implementation Stops Soft instructions CONSTITUTION.md with \"never delete tests\", \"always run tests before committing\" Common mistakes (probabilistic) Application allowlist .claude/settings.local.json with explicit tool permissions Unauthorized commands (deterministic within runtime) Immutable config chattr +i on CLAUDE.md, .claude/, CONSTITUTION.md Self-modification between iterations Unprivileged user Dedicated user, no sudo, no docker group Privilege escalation Container --cap-drop=ALL --network=none, rootless, no socket mount Host escape, network exfiltration Resource limits --memory=4g --cpus=2, disk quotas Resource exhaustion

    Each layer is straightforward: the strength is in the combination.

    ","path":["Security","Securing AI Agents"],"tags":[]},{"location":"security/agent-security/#common-mistakes","level":2,"title":"Common Mistakes","text":"

    \"I'll just use --dangerously-skip-permissions\": This disables Layer 2 entirely. Without Layers 3-5, you have no protection at all. Only use this flag inside a properly isolated container or VM.

    \"The agent is sandboxed in Docker\": A Docker container with the Docker socket mounted, running as root, with --privileged, and full network access is not sandboxed. It is a root shell with extra steps.

    \"CONSTITUTION.md says not to do that\": Markdown is a suggestion. It works most of the time. It is not a security boundary. Do not use it as one.

    \"I reviewed the CLAUDE.md, it's fine\": The agent can modify CLAUDE.md during iteration N. Iteration N+1 loads the modified version. Unless the file is immutable, your review is stale.

    \"The agent only has access to this one project\": Does the project directory contain .env files, SSH keys, API tokens, or credentials? Does it have a .git/config with push access to a remote? Filesystem isolation means isolating what is in the directory too.

    ","path":["Security","Securing AI Agents"],"tags":[]},{"location":"security/agent-security/#team-security-considerations","level":2,"title":"Team Security Considerations","text":"

    When multiple developers share a .context/ directory, security considerations extend beyond single-agent hardening.

    ","path":["Security","Securing AI Agents"],"tags":[]},{"location":"security/agent-security/#code-review-for-context-files","level":3,"title":"Code Review for Context Files","text":"

    Treat .context/ changes like code changes. Context files influence agent behavior (a modified CONSTITUTION.md or CONVENTIONS.md changes what every agent on the team will do next session). Review them in PRs with the same scrutiny you apply to production code.

    Watch for:

    • Weakened constitutional rules (removed constraints, softened language)
    • New decisions that contradict existing ones without acknowledging it
    • Learnings that encode incorrect assumptions
    • Task additions that bypass the team's prioritization process
    ","path":["Security","Securing AI Agents"],"tags":[]},{"location":"security/agent-security/#gitignore-patterns","level":3,"title":"Gitignore Patterns","text":"

    ctx init configures .gitignore automatically, but verify these patterns are in place:

    • Always gitignored: .ctx.key (encryption key), .context/logs/, .context/journal/
    • Team decision: scratchpad.enc (encrypted, safe to commit for shared scratchpad state); add it to .gitignore if scratchpads are personal
    • Never committed: .env, credentials, API keys (enforced by drift secret detection)
    ","path":["Security","Securing AI Agents"],"tags":[]},{"location":"security/agent-security/#multi-developer-context-sharing","level":3,"title":"Multi-Developer Context Sharing","text":"

    CONSTITUTION.md is the shared contract. All team members and their agents inherit it. Changes require team consensus, not unilateral edits.

    When multiple agents write to the same context files concurrently (e.g., two developers adding learnings simultaneously), git merge conflicts are expected. Resolution is typically additive: accept both additions. Destructive resolution (dropping one side) loses context.

    ","path":["Security","Securing AI Agents"],"tags":[]},{"location":"security/agent-security/#team-conventions-for-context-management","level":3,"title":"Team Conventions for Context Management","text":"

    Establish and document:

    • Who reviews context changes: Same reviewers as code, or a designated context owner?
    • How to resolve conflicting decisions: If two sessions record contradictory decisions, which wins? Default: the later one must explicitly supersede the earlier one with rationale.
    • Frequency of context maintenance: Weekly ctx drift checks, monthly consolidation passes, archival after each milestone.
    ","path":["Security","Securing AI Agents"],"tags":[]},{"location":"security/agent-security/#checklist","level":2,"title":"Checklist","text":"

    Before running an unattended AI agent:

    • Agent runs as a dedicated unprivileged user (no sudo, no docker group)
    • Agent's config files are immutable or owned by a different user
    • Permission allowlist restricts tools to the project's toolchain
    • Container drops all capabilities (--cap-drop=ALL)
    • Docker socket is NOT mounted
    • Network is disabled or restricted to specific domains
    • Resource limits are set (memory, CPU, disk)
    • No SSH keys, API tokens, or credentials are accessible to the agent
    • Project directory does not contain .env or secrets files
    • Iteration cap is set (--max-iterations)
    ","path":["Security","Securing AI Agents"],"tags":[]},{"location":"security/agent-security/#further-reading","level":2,"title":"Further Reading","text":"
    • Running an Unattended AI Agent: the ctx recipe for autonomous loops, including step-by-step permissions and isolation setup
    • Security: ctx's own trust model and vulnerability reporting
    • Autonomous Loops: full documentation of the loop pattern, prompt templates, and troubleshooting
    ","path":["Security","Securing AI Agents"],"tags":[]},{"location":"security/hub/","level":1,"title":"Hub Security Model","text":"","path":["Security","Hub Security Model"],"tags":[]},{"location":"security/hub/#ctx-hub-security-model","level":1,"title":"ctx Hub: Security Model","text":"

    What the hub defends against, what it does not defend against, and the concrete mechanisms in play.

    ","path":["Security","Hub Security Model"],"tags":[]},{"location":"security/hub/#threat-model","level":2,"title":"Threat Model","text":"

    The hub is designed for trusted cross-project knowledge sharing within a team or homelab. It assumes:

    • The hub host is trusted. Anyone with root on that box can read every entry ever published.
    • Network is semi-trusted. Hub traffic is gRPC over TCP; TLS is strongly recommended but not mandatory.
    • Client machines are trusted enough to hold a per-project client token. Losing a client token is roughly equivalent to losing an API key: scoped damage, not total compromise.
    • Entry content is not secret. Decisions, learnings, and conventions may be indexed by AI agents, rendered in docs, shared across projects. Do not push credentials or PII into the hub.

    The hub is not a secure messaging system, a secrets store, or a compliance-grade audit log. If your threat model needs those, use a dedicated tool and keep the hub for knowledge sharing.

    ","path":["Security","Hub Security Model"],"tags":[]},{"location":"security/hub/#mechanisms","level":2,"title":"Mechanisms","text":"","path":["Security","Hub Security Model"],"tags":[]},{"location":"security/hub/#bearer-tokens","level":3,"title":"Bearer Tokens","text":"

    All RPCs except Register require a bearer token in gRPC metadata. Two kinds of tokens exist:

    Kind Format Scope Lifetime Admin token ctx_adm_... Register new projects Manual rotate Client token ctx_cli_... Publish, Sync, Listen, Status Project lifetime

    Tokens are compared in constant time (crypto/subtle) to prevent timing oracles, and looked up via an O(1) hash map so the comparison cost does not depend on the total number of registered clients.

    ","path":["Security","Hub Security Model"],"tags":[]},{"location":"security/hub/#client-side-encryption-at-rest","level":3,"title":"Client-Side Encryption at Rest","text":"

    .context/.connect.enc stores the client token and hub address, encrypted with AES-256-GCM using the same scheme the notification subsystem uses. The key is derived from ctx's local keyring (see internal/crypto).

    An attacker with read access to the project directory cannot learn the client token without also breaking ctx's local keyring.

    ","path":["Security","Hub Security Model"],"tags":[]},{"location":"security/hub/#hub-side-token-storage","level":3,"title":"Hub-Side Token Storage","text":"

    Tokens Are Stored in Plaintext on the Hub Host

    <data-dir>/clients.json currently stores client tokens verbatim, not hashed. Anyone with read access to the hub's data directory sees every registered client's token and can impersonate any project that has ever registered.

    Mitigations today:

    • Run the hub as an unprivileged user and lock the data directory with chmod 700 <data-dir>.
    • Use the systemd unit in Operations, which enables ProtectSystem=strict, NoNewPrivileges=true, and a dedicated user.
    • Never expose <data-dir> over NFS, SMB, or shared filesystems.
    • Treat <data-dir> the same way you'd treat /etc/shadow: back it up encrypted, never check it into version control.

    Hashing clients.json and moving to keyring-backed storage is tracked as a follow-up in the PR #60 task group. Until that lands, assume a hub host compromise equals total hub compromise.

    ","path":["Security","Hub Security Model"],"tags":[]},{"location":"security/hub/#input-validation","level":3,"title":"Input Validation","text":"

    Every published entry is validated before it touches the log:

    • Type must be one of: decision, learning, convention, task. Unknown types are rejected.
    • ID and Origin are required and non-empty.
    • Content size is capped at 1 MB. Reasonable for text, hostile to attempts to fill the disk.
    • Duplicate project registration is rejected; a client that replays an old Register call gets an error, not a second token.
    ","path":["Security","Hub Security Model"],"tags":[]},{"location":"security/hub/#no-script-execution","level":3,"title":"No Script Execution","text":"

    The hub never interprets entry content. There is no expression language, no template evaluation, no markdown rendering at ingest. Content is stored as bytes and fanned out to clients verbatim.

    ","path":["Security","Hub Security Model"],"tags":[]},{"location":"security/hub/#audit-trail","level":3,"title":"Audit Trail","text":"

    entries.jsonl is append-only. Every accepted publish is recorded with the publishing project's origin tag and sequence number. Nothing is ever deleted by the hub; retention is managed manually by the operator (see log rotation).

    ","path":["Security","Hub Security Model"],"tags":[]},{"location":"security/hub/#what-the-hub-does-not-defend-against","level":2,"title":"What the Hub Does Not Defend Against","text":"
    • Untrusted entry senders. A client with a valid token can publish anything (within the 1 MB cap). There is no content validation beyond shape.
    • Denial of service from a registered client. A misbehaving client can publish until disk is full. Monitor entries.jsonl growth.
    • Network eavesdropping without TLS. Plain gRPC leaks entry content and tokens. Use a TLS-terminating reverse proxy (see Multi-machine recipe).
    • Host compromise. Root on the hub host = access to every entry and every token. Harden the host.
    • Accidental secret upload. The hub will happily fan out a decision containing an API key. Sanitize content before publishing.
    ","path":["Security","Hub Security Model"],"tags":[]},{"location":"security/hub/#operational-hardening-checklist","level":2,"title":"Operational Hardening Checklist","text":"
    • Run the hub as an unprivileged user with NoNewPrivileges=true and ProtectSystem=strict (see the systemd unit in Operations).
    • Terminate TLS in front of the hub for anything beyond a trusted LAN.
    • Restrict the listen port with firewall rules to the client subnet only.
    • Back up <data-dir>/admin.token to a secrets manager; do not leave it in shell history.
    • Rotate the admin token when a team member with access leaves. Client tokens keep working across rotations.
    • Monitor entries.jsonl growth; alert on sudden spikes.
    • Run NTP on all clients to prevent entry-timestamp skew.
    • Do not publish from machines you do not trust.
    ","path":["Security","Hub Security Model"],"tags":[]},{"location":"security/hub/#responsible-disclosure","level":2,"title":"Responsible Disclosure","text":"

    Security issues in the hub follow the same process as the rest of ctx; see Reporting.

    ","path":["Security","Hub Security Model"],"tags":[]},{"location":"security/hub/#see-also","level":2,"title":"See Also","text":"
    • ctx Hub Operations
    • ctx Hub failure modes
    • HA cluster recipe
    ","path":["Security","Hub Security Model"],"tags":[]},{"location":"security/reporting/","level":1,"title":"Security Policy","text":"","path":["Security","Security Policy"],"tags":[]},{"location":"security/reporting/#reporting-vulnerabilities","level":2,"title":"Reporting Vulnerabilities","text":"

    At ctx we take security very seriously.

    If you discover a security vulnerability in ctx, please report it responsibly.

    Do NOT open a public issue for security vulnerabilities.

    ","path":["Security","Security Policy"],"tags":[]},{"location":"security/reporting/#email","level":3,"title":"Email","text":"

    Send details to security@ctx.ist.

    ","path":["Security","Security Policy"],"tags":[]},{"location":"security/reporting/#github-private-reporting","level":3,"title":"GitHub Private Reporting","text":"
    1. Go to the Security tab;
    2. Click \"Report a Vulnerability\";
    3. Provide a detailed description.
    ","path":["Security","Security Policy"],"tags":[]},{"location":"security/reporting/#encrypted-reports-optional","level":3,"title":"Encrypted Reports (Optional)","text":"

    If your report contains sensitive details (proof-of-concept exploits, credentials, or internal system information), you can encrypt your message with our PGP key:

    • In-repo: SECURITY_KEY.asc
    • Keybase: keybase.io/alekhinejose
    # Import the key\ngpg --import SECURITY_KEY.asc\n\n# Encrypt your report\ngpg --armor --encrypt --recipient security@ctx.ist report.txt\n

    Encryption is optional. Unencrypted reports to security@ctx.ist or via GitHub Private Reporting are perfectly fine.

    ","path":["Security","Security Policy"],"tags":[]},{"location":"security/reporting/#what-to-include","level":3,"title":"What to Include","text":"
    • Description of the vulnerability,
    • Steps to reproduce,
    • Potential impact,
    • Suggested fix (if any).
    ","path":["Security","Security Policy"],"tags":[]},{"location":"security/reporting/#attribution","level":2,"title":"Attribution","text":"

    We appreciate responsible disclosure and will acknowledge security researchers who report valid vulnerabilities (unless they prefer to remain anonymous).

    ","path":["Security","Security Policy"],"tags":[]},{"location":"security/reporting/#response-timeline","level":3,"title":"Response Timeline","text":"

    Open Source, Best-Effort Timelines

    ctx is a volunteer-maintained open source project.

    The timelines below are guidelines, not guarantees, and depend on contributor availability.

    We will address security reports on a best-effort basis and prioritize them by severity.

    Stage Timeframe Acknowledgment Within 48 hours Initial assessment Within 7 days Resolution target Within 30 days (depending on severity)","path":["Security","Security Policy"],"tags":[]},{"location":"security/reporting/#trust-model","level":2,"title":"Trust Model","text":"

    ctx operates within a single trust boundary: the local filesystem.

    The person who authors .context/ files is the same person who runs the agent that reads them. There is no remote input, no shared state, and no server component.

    This means:

    • ctx does not sanitize context files for prompt injection. This is a deliberate design choice, not an oversight. The files are authored by the developer who owns the machine: sanitizing their own instructions back to them would be counterproductive.
    • If you place adversarial instructions in your own .context/ files, your agent will follow them. This is expected behavior. You control the context; the agent trusts it.

    Shared Repositories

    In shared repositories, .context/ files should be reviewed in code review (the same way you would review CI/CD config or Makefiles). A malicious contributor could add harmful instructions to CONSTITUTION.md or TASKS.md.

    ","path":["Security","Security Policy"],"tags":[]},{"location":"security/reporting/#security-design","level":2,"title":"Security Design","text":"

    ctx is designed with security in mind:

    • No secrets in context: The constitution explicitly forbids storing secrets, tokens, API keys, or credentials in .context/ files
    • Local only: ctx runs entirely locally with no external network calls
    • No code execution: ctx reads and writes Markdown files only; it does not execute arbitrary code
    • Git-tracked: Core context files are meant to be committed, so they should never contain sensitive data. Exception: sessions/ and journal/ contain raw conversation data and should be gitignored
    ","path":["Security","Security Policy"],"tags":[]},{"location":"security/reporting/#permission-hygiene","level":2,"title":"Permission Hygiene","text":"

    Claude Code evaluates permissions in deny → ask → allow order. ctx init automatically populates permissions.deny with rules that block dangerous operations before the allow list is ever consulted.

    Default deny rules block:

    • sudo, git push, rm -rf /, rm -rf ~, curl, wget, chmod 777
    • Read/Edit of .env, credentials, secrets, .pem, .key files

    Even with deny rules in place, the allow list accumulates one-off permissions over time. Periodically review for:

    • Destructive commands: git reset --hard, git clean -f, etc.
    • Config injection vectors: permissions that allow modifying files controlling agent behavior (CLAUDE.md, settings.local.json)
    • Broad wildcards: overly permissive patterns that pre-approve more than intended
    ","path":["Security","Security Policy"],"tags":[]},{"location":"security/reporting/#state-file-management","level":2,"title":"State File Management","text":"

    Hook state files (throttle markers, prompt counters, pause markers) are stored in .context/state/, which is project-scoped and gitignored. State files are automatically managed by the hooks that create them; no manual cleanup is needed.

    ","path":["Security","Security Policy"],"tags":[]},{"location":"security/reporting/#best-practices","level":2,"title":"Best Practices","text":"
    1. Review before committing: Always review .context/ files before committing
    2. Use .gitignore: If you must store sensitive notes locally, add them to .gitignore
    3. Drift detection: Run ctx drift to check for potential issues
    4. Permission audit: Review .claude/settings.local.json after busy sessions
    ","path":["Security","Security Policy"],"tags":[]},{"location":"thesis/","level":1,"title":"Context as State","text":"","path":["The Thesis"],"tags":[]},{"location":"thesis/#a-persistence-layer-for-human-ai-cognition","level":2,"title":"A Persistence Layer for Human-AI Cognition","text":"

    Jose Alekhinne - jose@ctx.ist

    February 2026

    ","path":["The Thesis"],"tags":[]},{"location":"thesis/#abstract","level":3,"title":"Abstract","text":"

    As AI tools evolve from code-completion utilities into reasoning collaborators, the knowledge that governs their behavior becomes as important as the code they produce; yet, that knowledge is routinely discarded at the end of every session.

    AI-assisted development systems assemble context at prompt time using heuristic retrieval from mutable sources: recent files, semantic search results, session history. These approaches optimize relevance at the moment of generation but do not persist the cognitive state that produced decisions. Reasoning is not reproducible, intent is lost across sessions, and teams cannot audit the knowledge that constrains automated behavior.

    This paper argues that context should be treated as deterministic, version-controlled state rather than as a transient query result. We ground this argument in three sources of evidence: a landscape analysis of 17 systems spanning AI coding assistants, agent frameworks, and knowledge stores; a taxonomy of five primitive categories that reveals irrecoverable architectural trade-offs; and an experience report from ctx, a persistence layer for AI-assisted development, which developed itself using its own persistence model across 389 sessions over 33 days. We define a three-tier model for cognitive state: authoritative knowledge, delivery views, and ephemeral state. Then we present six design invariants empirically validated by 56 independent rejection decisions observed across the analyzed landscape. We show that context determinism applies to assembly, not to model output, and that the curation cost this model requires is offset by compounding returns in reproducibility, auditability, and team cognition.

    ","path":["The Thesis"],"tags":[]},{"location":"thesis/#1-introduction","level":2,"title":"1. Introduction","text":"

    The introduction of large language models into software development has shifted the primary interface from code execution to interactive reasoning. In this environment, the correctness of an output depends not only on source code but on the context supplied to the model: the conventions, decisions, architectural constraints, and domain knowledge that bound the space of acceptable responses.

    Current systems treat context as a query result assembled at the moment of interaction. A developer begins a session; the tool retrieves what it estimates to be relevant from chat history, recent files, and vector stores; the model generates output conditioned on this transient assembly; the session ends, and the context evaporates. The next session begins the cycle again.

    This model has improved substantially over the past year. CLAUDE.md files, Cursor rules, Copilot's memory system, and tools such as Mem0, Letta, and Kindex each address aspects of the persistence problem. Yet across 17 systems we analyzed spanning AI coding assistants, agent frameworks, autonomous coding agents, and purpose-built knowledge stores, no system provides all five of the following properties simultaneously: deterministic context assembly, human-readable file-based persistence, token-budgeted delivery, a single-binary core with zero required runtime dependencies for the persistence path, and local-first operation.

    This paper does not propose a universal replacement for retrieval-centric workflows. It defines a persistence layer (embodied in ctx (https://ctx.ist)) whose advantages emerge under specific operational conditions: when reproducibility is a requirement, when knowledge must outlive sessions and individuals, when teams require shared cognitive authority, or when offline operation is necessary.

    The trade-offs (manual curation cost, reduced automatic recall, coarser granularity) are intentional and mirror the trade-offs accepted by systems that favor reproducibility over convenience, such as reproducible builds and immutable infrastructure 1 6.

    The contribution is threefold: a three-tier model for cognitive state that resolves the ambiguity between authoritative knowledge and ephemeral session artifacts; six design invariants empirically grounded in a cross-system landscape analysis; and an experience report demonstrating that the model produces compounding returns when applied to its own development.

    ","path":["The Thesis"],"tags":[]},{"location":"thesis/#2-the-limits-of-prompt-time-context","level":2,"title":"2. The Limits of Prompt-Time Context","text":"

    Prompt-time assembly pipelines typically consist of corpus selection, retrieval, ranking, and truncation. These pipelines are probabilistic and time-dependent, producing three failure modes that compound over the lifetime of a project.

    ","path":["The Thesis"],"tags":[]},{"location":"thesis/#21-non-reproducibility","level":3,"title":"2.1 Non-Reproducibility","text":"

    If context is derived from mutable sources using heuristic ranking, identical requests at different times receive different inputs. A developer who asks \"What is our authentication strategy?\" on Tuesday may receive a different context window than the same question receives on Thursday: not because the strategy changed, but because the retrieval heuristic surfaced different fragments.

    Reproducibility (the ability to reconstruct the exact inputs that produced a given output) is a foundational property of reliable systems. Its loss in AI-assisted development mirrors the historical evolution from ad-hoc builds to deterministic build systems 1 2. The build community learned that when outputs depend on implicit state (environment variables, system clocks, network-fetched dependencies), debugging becomes archaeology. The same principle applies when AI outputs depend on non-deterministic context retrieval.

    ","path":["The Thesis"],"tags":[]},{"location":"thesis/#22-opaque-knowledge","level":3,"title":"2.2 Opaque Knowledge","text":"

    Embedding-based memory increases recall but reduces inspectability. When a vector store determines that a code snippet is \"similar\" to the current query, the ranking function is opaque: the developer cannot inspect why that snippet was chosen, whether a more relevant artifact was excluded, or whether the ranking will remain stable. This prevents deterministic debugging, policy auditing, and causal attribution (properties that information retrieval theory identifies as fundamental trade-offs of probabilistic ranking) 3.

    In practice, this opacity manifests as a compliance ceiling. In our experience developing a context management system (detailed in Section 7), soft instructions (directives that ask an AI agent to read specific files or follow specific procedures) achieve approximately 75-85% compliance. The remaining 15-25% represents cases where the agent exercises judgment about whether the instruction applies, effectively applying a second ranking function on top of the explicit directive. When 100% compliance is required, instruction is insufficient; the content must be injected directly, removing the agent's option to skip it.

    ","path":["The Thesis"],"tags":[]},{"location":"thesis/#23-loss-of-intent","level":3,"title":"2.3 Loss of Intent","text":"

    Session transcripts record interaction but not cognition. A transcript captures what was said but not which assumptions were accepted, which alternatives were rejected, or which constraints governed the decision. The distinction matters: a decision to use PostgreSQL recorded as a one-line note (\"Use PostgreSQL\") teaches a model what was decided; a structured record with context, rationale, and consequences teaches it why (and why is what prevents the model from unknowingly reversing the decision in a future session) 4.

    Session transcripts provide history. Cognitive state requires something more: the persistent, structured representation of the knowledge required for correct decision-making.

    ","path":["The Thesis"],"tags":[]},{"location":"thesis/#3-cognitive-state-a-three-tier-model","level":2,"title":"3. Cognitive State: A Three-Tier Model","text":"","path":["The Thesis"],"tags":[]},{"location":"thesis/#31-definitions","level":3,"title":"3.1 Definitions","text":"

    We define cognitive state as the authoritative, persistent representation of the knowledge required for correct decision-making within a project. It is human-authored or human-ratified, versioned, inspectable, and reproducible. It is distinct from logs, transcripts, retrieval results, and model-generated summaries.

    Previous formulations of this idea have treated cognitive state as a monolithic concept. In practice, a three-tier model better captures the operational reality:

    Tier 1: Authoritative State: The canonical knowledge that the system treats as ground truth. In a concrete implementation, this corresponds to a set of human-curated files with defined schemas: a constitution (inviolable rules), conventions (code patterns), an architecture document (system structure), decision records (choices with rationale), learnings (captured experience), a task list (current work), a glossary (domain terminology), and an agent playbook (operating instructions). Each file has a single purpose, a defined lifecycle, and a distinct update frequency. Authoritative state is version-controlled alongside code and reviewed through the same mechanisms (diffs, pull requests, blame annotations).

    Tier 2: Delivery Views: Derived representations of authoritative state, assembled for consumption by a model. A delivery view is produced by a deterministic assembly function that takes the authoritative state, a token budget, and an inclusion policy as inputs and produces a context window as output. The same authoritative state, budget, and policy must always produce the same delivery view. Delivery views are ephemeral (they exist only for the duration of a session), but their construction is reproducible.

    Tier 3: Ephemeral State: Session transcripts, scratchpad notes, draft journal entries, and other artifacts that exist during or immediately after a session but are not authoritative. Ephemeral state is the raw material from which authoritative state may be extracted through human review, but it is never consumed directly by the assembly function.

    This three-tier model resolves confusion present in earlier formulations: the claim that AI output is a deterministic function of the repository state. The corrected claim is that context selection is deterministic (the delivery view is a function of authoritative state), but model output remains stochastic, conditioned on the deterministic context. Formally:

    delivery_view = assemble(authoritative_state, budget, policy)\noutput = model(delivery_view)   # stochastic\n

    The persistence layer's contribution is making assemble reproducible, not making model deterministic.

    ","path":["The Thesis"],"tags":[]},{"location":"thesis/#32-separation-of-concerns","level":3,"title":"3.2 Separation of Concerns","text":"

    The decision to separate authoritative state into distinct files with distinct purposes is not cosmetic. Different types of knowledge have different lifecycles:

    Knowledge Type Update Frequency Read Frequency Load Priority Example Constitution Rarely Every session Always \"Never commit secrets to git\" Tasks Every session Session start Always \"Implement token budget CLI flag\" Conventions Weekly Before coding High \"All errors use structured logging with severity levels\" Decisions When decided When questioning Medium \"Use PostgreSQL over MySQL (see ADR-003)\" Learnings When learned When stuck Medium \"Hook scripts >50ms degrade interactive UX\" Architecture When changed When designing On demand \"Three-layer pipeline: ingest → enrich → assemble\" Journal Every session Rarely Never auto \"Session 247: Removed dead-end session copy layer\"

    A monolithic context file would force the assembly function to load everything or nothing. Separation enables progressive disclosure: the minimum context that matters for the current moment, with the option to load more when needed. A normal session loads the constitution, tasks, and conventions; a deep investigation loads decision history and journal entries from specific dates.

    The budget mechanism is the constraint that makes separation valuable. Without a budget, the default behavior is to load everything, which destroys the attention density that makes loaded context useful. With a budget, the assembly function must prioritize ruthlessly: constitution first (always full), then tasks and conventions (budget-capped), then decisions and learnings (scored by recency). Entries that do not fit receive title-only summaries rather than being silently dropped (an application of the \"tell me what you don't know\" pattern identified independently by four systems in our landscape analysis).

    ","path":["The Thesis"],"tags":[]},{"location":"thesis/#4-design-invariants","level":2,"title":"4. Design Invariants","text":"

    The following six invariants define the constraints that a cognitive state persistence layer must satisfy. They are not axioms chosen a priori; they are empirically grounded properties whose violation was independently identified as producing complexity costs across the 17 systems we analyzed.

    ","path":["The Thesis"],"tags":[]},{"location":"thesis/#invariant-1-markdown-on-filesystem-persistence","level":3,"title":"Invariant 1: Markdown-on-Filesystem Persistence","text":"

    Context files must be human-readable, git-diffable, and editable with any text editor. No database. No binary storage.

    Validation: 11 independent rejection decisions across the analyzed landscape protected this property. Systems that adopted embedded records, binary serialization, or knowledge graphs as their core primitive consistently traded away the ability for a developer to run cat DECISIONS.md and understand the system's knowledge. The inspection cost of opaque storage compounds over the lifetime of a project: every debugging session, every audit, every onboarding conversation requires specialized tooling to access knowledge that could have been a text file.

    ","path":["The Thesis"],"tags":[]},{"location":"thesis/#invariant-2-zero-runtime-dependencies","level":3,"title":"Invariant 2: Zero Runtime Dependencies","text":"

    The tool must work with no installed runtimes, no running services, and no API keys for core functionality.

    Validation: 13 independent rejection decisions protected this property (the most frequently defended invariant). Systems that required databases (PostgreSQL, SQLite, Redis), embedding models, server daemons, container runtimes, or cloud APIs for core operation introduced failure modes proportional to their dependency count. A persistence layer that depends on infrastructure is not a persistence layer; it is a service. Services have uptime requirements, version compatibility matrices, and operational costs that simple file operations do not.

    ","path":["The Thesis"],"tags":[]},{"location":"thesis/#invariant-3-deterministic-context-assembly","level":3,"title":"Invariant 3: Deterministic Context Assembly","text":"

    The same files plus the same budget must produce the same output. No embedding-based retrieval, no LLM-driven selection, no wall-clock-dependent scoring in the assembly path.

    Validation: 6 independent rejection decisions protected this property. Non-deterministic assembly (whether from embedding variance, LLM-based selection, or time-dependent scoring) destroys the ability to reproduce a context window and therefore to diagnose why a model produced a given output. Determinism in the assembly path is what makes the persistence layer auditable.

    ","path":["The Thesis"],"tags":[]},{"location":"thesis/#invariant-4-human-authority-over-persistent-state","level":3,"title":"Invariant 4: Human Authority over Persistent State","text":"

    The agent may propose changes to context files but must not unilaterally modify them. All persistent changes go through human-reviewable git commits.

    Validation: 6 independent rejection decisions protected this property. Systems that allowed agents to self-modify their memory (writing freeform notes, auto-pruning old entries, generating summaries as ground truth) consistently produced lower-quality persistent context than systems that enforced human review. Structure is a feature, not a limitation: across the landscape, the pattern \"structured beats freeform\" was independently discovered by four systems that evolved from freeform LLM summaries to typed schemas with required fields.

    ","path":["The Thesis"],"tags":[]},{"location":"thesis/#invariant-5-local-first-air-gap-capable","level":3,"title":"Invariant 5: Local-First, Air-Gap Capable","text":"

    Core functionality must work offline with no network access. Cloud services may be used for optional features but never for core context management.

    Validation: 7 independent rejection decisions protected this property. Infrastructure-dependent memory systems cannot operate in classified environments, isolated networks, or disaster-recovery scenarios. A filesystem-native model continues to function under all conditions where the repository is accessible.

    ","path":["The Thesis"],"tags":[]},{"location":"thesis/#invariant-6-no-default-telemetry","level":3,"title":"Invariant 6: No Default Telemetry","text":"

    Any analytics, if ever added, must be strictly opt-in.

    Validation: 4 independent rejection decisions protected this property. Default telemetry erodes the trust model that a persistence layer depends on. If developers must trust the system with their architectural decisions, operational learnings, and project constraints, the system cannot simultaneously be reporting usage data to external services.

    These six invariants collectively define a design space. Each feature proposal can be evaluated against them: a feature that violates any invariant is rejected regardless of how many other systems implement it. The discipline of constraint (refusing to add capabilities that compromise foundational properties) is itself an architectural contribution. Across the 17 analyzed systems, 56 patterns were explicitly rejected for violating these invariants. The rejection count per invariant (11, 13, 6, 6, 7, 4) provides a rough measure of each property's vulnerability to architectural erosion. A representative sample of these rejections is provided in Appendix A.1.

    ","path":["The Thesis"],"tags":[]},{"location":"thesis/#5-landscape-analysis","level":2,"title":"5. Landscape Analysis","text":"

    The 17 systems were selected to cover the architectural design space rather than to achieve completeness. Each included system satisfies three criteria: it represents a distinct architectural primitive for AI-assisted development, it is actively maintained or widely referenced, and it provides sufficient public documentation or source code for architectural inspection. The goal was to ensure that every major category of primitive (document, embedded record, state snapshot, event/message, construction/derivation) was represented by multiple systems, enabling cross-system pattern detection.

    The resulting set spans seven categories: AI coding assistants (Continue, Sourcegraph/Cody, Aider, Claude Code), AI agent frameworks (CrewAI, AutoGen, LangGraph, LlamaIndex, Letta/MemGPT), autonomous coding agents (OpenHands, Sweep), session provenance tools (Entire), data versioning systems (Dolt, Pachyderm), pipeline/build systems (Dagger), and purpose-built knowledge stores (QubicDB, Kindex). Each system was analyzed from its source code and documentation, producing 34 individual analysis artifacts (an architectural profile and a set of insights per system) that yielded 87 adopt/adapt recommendations, 56 explicit rejection decisions, and 52 watch items.

    ","path":["The Thesis"],"tags":[]},{"location":"thesis/#51-primitive-taxonomy","level":3,"title":"5.1 Primitive Taxonomy","text":"

    Every system in the AI-assisted development landscape operates on a core primitive: an atomic unit around which the entire architecture revolves. Our analysis of 17 systems reveals five categories of primitives, each making irrecoverable trade-offs:

    Group A: Document/File Primitives: Human-readable documents as the primary unit. Documents are authored by humans, version-controlled in git, and consumed by AI tools. The invariant of this group is that the primitive is always human-readable and version-controllable with standard tools. Three systems participate in this pattern: the system described in this paper as a pure expression, and, as partial participants, Continue (via its rules directory) and Claude Code (via CLAUDE.md files); the latter two use document-based context as an input but organize around different core primitives.

    Group B: Embedded Record Primitives: Vector-embedded records stored with numerical embeddings for similarity search, metadata for filtering, and scoring mechanisms for ranking. Five systems use this approach (LlamaIndex, CrewAI, Letta/MemGPT, QubicDB, Kindex). The invariant is that the primitive requires an embedding model or vector database for core operations: a dependency that precludes offline and air-gapped use.

    Group C: State Snapshot Primitives: Point-in-time captures of the complete system state. The invariant is that any past state can be reconstructed at any historical point. Three systems use this approach (LangGraph, Entire, Dolt).

    Group D: Event/Message Primitives: Sequential events or messages forming an append-only log with causal relationships. Four systems use this approach (OpenHands, AutoGen, Claude Code, Sweep). The invariant is temporal ordering and append-only semantics.

    Group E: Construction/Derivation Primitives: Derived or constructed values that encode how they were produced. The invariant is that the primitive is a function of its inputs; re-executing the same inputs produces the same primitive. Three systems use this approach (Dagger, Pachyderm, Aider).

    ","path":["The Thesis"],"tags":[]},{"location":"thesis/#52-comparison-matrix","level":3,"title":"5.2 Comparison Matrix","text":"

    The five primitive categories differ along seven dimensions:

    Property Document Embedded Record State Snapshot Event/Message Construction Human-readable Yes No Varies Partially No Version-controllable Yes No Varies Yes Yes Queryable by meaning No Yes No No No Rewindable Via git No Yes Yes (replay) Yes Deterministic Yes No Yes Yes Yes Zero-dependency Yes No Varies Varies Varies Offline-capable Yes No Varies Varies Yes

    The document primitive is the only one that simultaneously satisfies human-readability, version-controllability, determinism, zero dependencies, and offline capability. This is not because documents are superior in general (embedded records provide semantic queryability that documents lack) but because the combination of all five properties is what the persistence layer requires. The choice between primitive categories is not a matter of capability but of which properties are considered invariant.

    ","path":["The Thesis"],"tags":[]},{"location":"thesis/#53-convergent-patterns","level":3,"title":"5.3 Convergent Patterns","text":"

    Across the 17 analyzed systems, six design patterns were independently discovered. These convergent patterns carry extra validation weight because they emerged from different problem spaces:

    Pattern 1: \"Tell me what you don't know\": When context is incomplete, explicitly communicate to the model what information is missing and what confidence level the provided context represents. Four systems independently converged on this pattern: inserting skip markers, tracking evidence gaps, annotating provenance, or naming output quality tiers.

    Pattern 2: \"Freshness matters\": Information relevance decreases over time. Three systems independently chose exponential decay with different half-lives (30 days, 90 days, and LRU ordering). Static priority ordering with no time dimension leaves relevant recent knowledge at the same priority as stale entries. This pattern is in productive tension with the persistence model's emphasis on determinism: the claim is not that time-dependence is irrelevant, but that it belongs in the curation step (a human deciding to consolidate or archive stale entries) rather than in the assembly function (an algorithm silently down-ranking entries based on age).

    Pattern 3: \"Content-address everything\": Compute a hash of content at creation time for deduplication, cache invalidation, integrity verification, and change detection. Five systems independently implement content hashing, each discovering it solves different problems 5.

    Pattern 4: \"Structured beats freeform\": When capturing knowledge or session state, a structured schema with required fields produces more useful data than freeform text. Four systems evolved from freeform summaries to typed schemas: one moving from LLM-generated prose to a structured condenser with explicit fields for completed tasks, pending tasks, and files modified.

    Pattern 5: \"Protocol convergence\": The Model Context Protocol (MCP) is emerging as a standard tool integration layer. Nine of 17 systems support it, spanning every category in the analysis. MCP's significance for the persistence model is that it provides a transport mechanism for context delivery without dictating how context is stored or assembled. This makes the approach compatible with both retrieval-centric and persistence-centric architectures.

    Pattern 6: \"Human-in-the-loop for memory\": Critical memory decisions should involve human judgment. Fully automated memory management produces lower-quality persistent context than human-reviewed systems. Four systems independently converged on variants of this pattern: ceremony-based consolidation, interrupt/resume for human input, confirmation mode for high-risk actions, and separated \"think fast\" vs. \"think slow\" processing paths.

    Pattern 6 directly validates the ceremony model described in this paper. The persistence layer requires human curation not because automation is impossible, but because the quality of persistent knowledge degrades when the curation step is removed. The improvement opportunity is to make curation easier, not to automate it away.

    ","path":["The Thesis"],"tags":[]},{"location":"thesis/#6-worked-example-architectural-decision-under-two-models","level":2,"title":"6. Worked Example: Architectural Decision under Two Models","text":"

    We now instantiate the three-tier model in a concrete system (ctx) and illustrate the difference between prompt-time retrieval and cognitive state persistence using a real scenario from its development.

    ","path":["The Thesis"],"tags":[]},{"location":"thesis/#61-the-problem","level":3,"title":"6.1 The Problem","text":"

    During development, the system accumulated three overlapping storage layers for session data: raw transcripts (owned by the AI tool), session copies (JSONL copies plus context snapshots), and enriched journal entries (Markdown summaries). The middle layer (session copies) was a dead-end write sink. An auto-save hook copied transcripts to a directory that nothing read from, because the journal pipeline already read directly from the raw transcripts. Approximately 15 source files, a shell hook, 20 configuration constants, and 30 documentation references supported infrastructure with no consumers.

    ","path":["The Thesis"],"tags":[]},{"location":"thesis/#62-prompt-time-retrieval-model","level":3,"title":"6.2 Prompt-Time Retrieval Model","text":"

    In a retrieval-based system, the decision to remove the middle layer depends on whether the retrieval function surfaces the relevant context:

    The developer asks: \"Should we simplify the session storage?\" The retrieval system must find and rank the original discussion thread where the three layers were designed, the usage statistics showing zero reads from the middle layer, the journal pipeline documentation showing it reads from raw transcripts directly, and the dependency analysis showing 15 files, a hook, and 30 doc references. If any of these fragments are not retrieved (because they are in old chat history, because the embedding similarity score is low, or because the token budget was consumed by more recent but less relevant context), the model may recommend preserving the middle layer, or may not realize it exists.

    Six months later, a new team member asks the same question. The retrieval results will differ: the original discussion has aged out of recency scoring, the usage statistics are no longer in recent history, and the model may re-derive the answer or arrive at a different conclusion.

    ","path":["The Thesis"],"tags":[]},{"location":"thesis/#63-cognitive-state-model","level":3,"title":"6.3 Cognitive State Model","text":"

    In the persistence model, the decision is recorded as a structured artifact at write time:

    ## [2026-02-11] Remove .context/sessions/ storage layer\n\n**Status**: Accepted\n\n**Context**: The session/recall/journal system had three overlapping\nstorage layers. The recall pipeline reads directly from raw transcripts,\nmaking .context/sessions/ a dead-end write sink that nothing reads from.\n\n**Decision**: Remove .context/sessions/ entirely. Two stores remain:\nraw transcripts (global, tool-owned) and enriched journal\n(project-local).\n\n**Rationale**: Dead-end write sinks waste code surface, maintenance\neffort, and user attention. The recall pipeline already proved that\nreading directly from raw transcripts is sufficient. Context snapshots\nare redundant with git history.\n\n**Consequence**: Deleted internal/cli/session/ (15 files), removed\nauto-save hook, removed --auto-save from watch, removed pre-compact\nauto-save, removed /ctx-save skill, updated ~45 documentation files.\nFour earlier decisions superseded.\n

    This artifact is:

    • Deterministically included in every subsequent session's delivery view (budget permitting, with title-only fallback if budget is exceeded)
    • Human-readable and reviewable as a diff in the commit that introduced it
    • Permanent: it persists in version control regardless of retrieval heuristics
    • Causally linked: it explicitly supersedes four earlier decisions, creating an auditable chain

    When the new team member asks \"Why don't we store session copies?\" six months later, the answer is the same artifact, at the same revision, with the same rationale. The reasoning is reconstructible because it was persisted at write time, not discovered at query time.

    ","path":["The Thesis"],"tags":[]},{"location":"thesis/#64-the-diff-when-policy-changes","level":3,"title":"6.4 The Diff When Policy Changes","text":"

    If a future requirement re-introduces session storage (for example, to support multi-agent session correlation), the change appears as a diff to the decision record:

    - **Status**: Accepted\n+ **Status**: Superseded by [2026-08-15] Reintroduce session storage\n+ for multi-agent correlation\n

    The new decision record references the old one, creating a chain of reasoning visible in git log. In the retrieval model, the old decision would simply be ranked lower over time and eventually forgotten.

    ","path":["The Thesis"],"tags":[]},{"location":"thesis/#7-experience-report-a-system-that-designed-itself","level":2,"title":"7. Experience Report: A System That Designed Itself","text":"

    The persistence model described in this paper was developed and validated by applying it to its own development. Over 33 days and 389 sessions, the system's context files accumulated a detailed record of decisions made, reversed, and consolidated: providing quantitative and qualitative evidence for the model's properties.

    ","path":["The Thesis"],"tags":[]},{"location":"thesis/#71-scale-and-structure","level":3,"title":"7.1 Scale and Structure","text":"

    The development produced the following authoritative state artifacts:

    • 8 consolidated decision records covering 24 original decisions spanning context injection architecture, hook design, task management, security, agent autonomy, and webhook systems
    • 18 consolidated learning records covering 75 original observations spanning agent compliance, hook behavior, testing patterns, documentation drift, and tool integration
    • A constitution with 13 inviolable rules across 4 categories (security, quality, process, context preservation)
    • 389 enriched journal entries providing a complete session-level audit trail

    The consolidation ratio (24 decisions compressed to 8 records, 75 learnings compressed to 18) illustrates the curation cost and its return: authoritative state becomes denser and more useful over time as related entries are merged, contradictions are resolved, and superseded decisions are marked.

    ","path":["The Thesis"],"tags":[]},{"location":"thesis/#72-architectural-reversals","level":3,"title":"7.2 Architectural Reversals","text":"

    Three architectural reversals during development provide evidence that the persistence model captures and communicates reasoning effectively:

    Reversal 1: The two-tier persistence model: The original design included a middle storage tier for session copies. After 21 days of development, the middle tier was identified as a dead-end write sink (described in Section 6). The decision record captured the full context, and the removal was executed cleanly: 15 source files, a shell hook, and 45 documentation references. The pattern of a \"dead-end write sink\" was subsequently observed in 7 of 17 systems in our landscape analysis that store raw transcripts alongside structured context.

    Reversal 2: The prompt-coach hook: An early design included a hook that analyzed user prompts and offered improvement suggestions. After deployment, the hook produced zero useful tips, its output channel was invisible to users, and it accumulated orphan temporary files. The hook was removed, and the decision record captured the failure mode for future reference.

    Reversal 3: The soft-instruction compliance model: The original context injection strategy relied on soft instructions: directives asking the AI agent to read specific files. After measuring compliance across multiple sessions, we found a consistent 75-85% compliance ceiling. The revised strategy injects content directly, bypassing the agent's judgment about whether to comply. The learning record captures the ceiling measurement and the rationale for the architectural change.

    Each reversal was captured as a structured decision record with context, rationale, and consequences. In a retrieval-based system, these reversals would exist only in chat history, discoverable only if the retrieval function happens to surface them. In the persistence model, they are permanent, indexable artifacts that inform future decisions.

    ","path":["The Thesis"],"tags":[]},{"location":"thesis/#73-compliance-ceiling","level":3,"title":"7.3 Compliance Ceiling","text":"

    The 75-85% compliance ceiling for soft instructions is the most operationally significant finding from the experience report. It means that any context management strategy relying on agent compliance with instructions (\"read this file,\" \"follow this convention,\" \"check this list\") has a hard ceiling on reliability.

    The root cause is structural: the instruction \"don't apply judgment\" is itself evaluated by judgment. When an agent receives a directive to read a file, it first assesses whether the directive is relevant to the current task (and that assessment is the judgment the directive was trying to prevent).

    The architectural response maps directly to the formal model defined in Section 3.1. Content requiring 100% compliance is included in authoritative_state and injected by the deterministic assemble function, bypassing the agent entirely. Content where 80% compliance is acceptable is delivered as instructions within the delivery view. The three-tier architecture makes this distinction explicit: authoritative state is injected; delivery views are assembled deterministically; ephemeral state is available but not pushed.

    ","path":["The Thesis"],"tags":[]},{"location":"thesis/#74-compounding-returns","level":3,"title":"7.4 Compounding Returns","text":"

    Over 33 days, we observed a qualitative shift in the development experience. Early sessions (days 1-7) spent significant time re-establishing context: explaining conventions, re-stating constraints, re-deriving past decisions. Later sessions (days 25-33) began with the agent loading curated context and immediately operating within established constraints, because the constraints were in files rather than in chat history.

    This compounding effect (where each session's context curation improves all subsequent sessions) is the primary return on the curation investment. The cost is borne once (writing a decision record, capturing a learning, updating the task list); the benefit is collected on every subsequent session load.

    The effect is analogous to compound interest in financial systems: the knowledge base grows not linearly with effort but with increasing marginal returns as new knowledge interacts with existing context. A learning captured on day 5 prevents a mistake on day 12, avoiding a debugging session that would otherwise have consumed that day's session and freeing it for productive work that generates new learnings. The growth is not literally exponential (it is bounded by project scope and subject to diminishing returns as the knowledge base matures), but within the observed 33-day window, the returns were consistently accelerating.

    ","path":["The Thesis"],"tags":[]},{"location":"thesis/#75-scope-and-generalizability","level":3,"title":"7.5 Scope and Generalizability","text":"

    This experience report is self-referential by design: the system was developed using its own persistence model. This circularity strengthens the internal validity of the findings (the model was stress-tested under authentic conditions) but limits external generalizability. The two-week crossover point was observed on a single project of moderate complexity with a small team already familiar with the model's assumptions. Whether the same crossover holds for larger teams, for codebases with different characteristics, or for teams adopting the model without having designed it remains an open empirical question. The quantitative claims in this section should be read as existence proofs (demonstrating that the model can produce compounding returns) rather than as predictions about specific adoption scenarios.

    ","path":["The Thesis"],"tags":[]},{"location":"thesis/#8-situating-the-persistence-layer","level":2,"title":"8. Situating the Persistence Layer","text":"

    The persistence layer occupies a specific position in the stack of AI-assisted development:

    Application Logic\nAI Interaction / Agents\nContext Retrieval Systems\nCognitive State Persistence Layer\nVersion Control / Storage\n

    Current systems innovate primarily in the retrieval layer (improving how context is discovered, ranked, and delivered at query time). The persistence layer sits beneath retrieval and above version control. Its role is to maintain the authoritative state that retrieval systems may query but do not own. The relationship is complementary: retrieval answers \"What in the corpus might be relevant?\"; cognitive state answers \"What must be true for this system to operate correctly?\" A mature system uses both: retrieval for discovery, persistence for authority.

    ","path":["The Thesis"],"tags":[]},{"location":"thesis/#9-applicability-and-trade-offs","level":2,"title":"9. Applicability and Trade-Offs","text":"","path":["The Thesis"],"tags":[]},{"location":"thesis/#91-when-to-use-this-model","level":3,"title":"9.1 When to Use This Model","text":"

    A cognitive state persistence layer is most appropriate when:

    Reproducibility is a requirement: If a system must be able to answer \"Why did this output occur, and can it be produced again?\" then deterministic, version-controlled context becomes necessary. This is relevant in regulated environments, safety-critical systems, long-lived infrastructure, and security-sensitive deployments.

    Knowledge must outlive sessions and individuals: Projects with multi-year lifetimes accumulate architectural decisions, domain interpretations, and operational policy. If this knowledge is stored only in chat history, issue trackers, and institutional memory, it decays. The persistence model converts implicit knowledge into branchable, reviewable artifacts.

    Teams require shared cognitive authority: In collaborative environments, correctness depends on a stable answer to \"What does the system believe to be true?\" When this answer is derived from retrieval heuristics, authority shifts to ranking algorithms. When it is versioned and human-readable, authority remains with the team.

    Offline or air-gapped operation is required: Infrastructure-dependent memory systems cannot operate in classified environments, isolated networks, or disaster-recovery scenarios.

    ","path":["The Thesis"],"tags":[]},{"location":"thesis/#92-when-not-to-use-this-model","level":3,"title":"9.2 When Not to Use This Model","text":"

    Zero-configuration personal workflows: For short-lived or exploratory tasks, the cost of explicit knowledge curation outweighs its benefits. Heuristic retrieval is sufficient when correctness is non-critical, outputs are disposable, and historical reconstruction is unnecessary.

    Maximum automatic recall from large corpora: Vector retrieval systems provide superior performance when the primary task is searching vast, weakly structured information spaces. The persistence model assumes that what matters can be decided and that this decision is valuable to record.

    Fully autonomous agent architectures: Agent runtimes that generate and discard state continuously, optimizing for local goal completion, do not benefit from a model that centers human ratification of knowledge.

    ","path":["The Thesis"],"tags":[]},{"location":"thesis/#93-incremental-adoption","level":3,"title":"9.3 Incremental Adoption","text":"

    The transition does not require full system replacement. An incremental path:

    Step 1: Record decisions as versioned artifacts: Instead of allowing conclusions to remain in discussion threads, persist them in reviewable form with context, rationale, and consequences [4]. This alone converts ephemeral reasoning into the cognitive state.

    Step 2: Make inclusion deterministic: Define explicit assembly rules. Retrieval may still exist, but it is no longer authoritative.

    Step 3: Move policy into cognitive state: When system behavior depends on stable constraints, encode those constraints as versioned knowledge. Behavior becomes reproducible.

    Step 4: Optimize assembly, not retrieval: Once the authoritative layer exists, performance improvements come from budgeting, caching, and structural refinement rather than from improving ranking heuristics.

    ","path":["The Thesis"],"tags":[]},{"location":"thesis/#94-the-curation-cost","level":3,"title":"9.4 The Curation Cost","text":"

    The primary objection to this model is the cost of explicit knowledge curation. This cost is real. Writing a structured decision record takes longer than letting a chatbot auto-summarize a conversation. Maintaining a glossary requires discipline. Consolidating 75 learnings into 18 records requires judgment.

    The response is not that the cost is negligible but that it is amortized. A decision record written once is loaded hundreds of times. A learning captured today prevents repeated mistakes across all future sessions. The curation cost is paid once; the benefit compounds.

    The experience report provides rough order-of-magnitude numbers. Across 389 sessions over 33 days, curation activities (writing decision records, capturing learnings, updating the task list, consolidating entries) averaged approximately 3-5 minutes per session. In early sessions (days 1-7), before curated context existed, re-establishing context consumed approximately 10-15 minutes per session: re-explaining conventions, re-stating architectural constraints, re-deriving decisions that had been made but not persisted. By the final week (days 25-33), the re-explanation overhead had dropped to near zero: the agent loaded curated context and began productive work immediately.

    At ~12 sessions per day, the curation cost was roughly 35-60 minutes daily. The re-explanation cost in the first week was roughly 120-180 minutes daily. By the third week, that cost had fallen to under 15 minutes daily while the curation cost remained stable. The crossover (where cumulative curation cost was exceeded by cumulative time saved) occurred around day 10. These figures are approximate and derived from a single project with a small team already familiar with the model; the crossover point will vary with project complexity, team size, and curation discipline.

    ","path":["The Thesis"],"tags":[]},{"location":"thesis/#10-future-work","level":2,"title":"10. Future Work","text":"

    Several directions are compatible with the model described here:

    Section-level deterministic budgeting: Current assembly operates at file granularity. Section-level budgeting would allow finer-grained control (including specific decision records while excluding others within the same file) without sacrificing determinism.

    Causal links between decisions: The experience report shows that decisions frequently reference earlier decisions (superseding, extending, or qualifying them). Formal causal links would enable traversal of the decision graph and automatic detection of orphaned or contradictory constraints.

    Content-addressed context caches: Five systems in our landscape analysis independently discovered that content hashing provides cache invalidation, integrity verification, and change detection. Applying content addressing to the assembly output would enable efficient cache reuse when the authoritative state has not changed.

    Conditional context inclusion: Five systems independently suggest that context entries could carry activation conditions (file patterns, task keywords, or explicit triggers) that control whether they are included in a given assembly. This would reduce the per-session budget cost of large knowledge bases without sacrificing determinism.

    Provenance metadata: Linking context entries to the sessions, decisions, or learnings that motivated them would strengthen the audit trail. Optional provenance fields on Markdown entries (session identifier, cause reference, motivation) would be lightweight and compatible with the existing file-based model.

    ","path":["The Thesis"],"tags":[]},{"location":"thesis/#11-conclusion","level":2,"title":"11. Conclusion","text":"

    AI-assisted development has treated context as a \"query result\" assembled at the moment of interaction, discarded at the session end. This paper identifies a complementary layer: the persistence of authoritative cognitive state as deterministic, version-controlled artifacts.

    The contribution is grounded in three sources of evidence. A landscape analysis of 17 systems reveals five categories of primitives and shows that no existing system provides the combination of human-readability, determinism, zero dependencies, and offline capability that the persistence layer requires. Six design invariants, validated by 56 independent rejection decisions, define the constraints of the design space. An experience report over 389 sessions and 33 days demonstrates compounding returns: later sessions start faster, decisions are not re-derived, and architectural reversals are captured with full context.

    The core claim is this: persistent cognitive state enables causal reasoning across time. A system built on this model can explain not only what is true, but why it became true and when it changed.

    When context is the state:

    • Reasoning is reproducible: the same authoritative state, budget, and policy produce the same delivery view.
    • Knowledge is auditable: decisions are traceable to explicit artifacts with context, rationale, and consequences.
    • Understanding compounds: each session's curation improves all subsequent sessions.

    The choice between retrieval-centric workflows and a persistence layer is not a matter of capability but of time horizon. Retrieval optimizes for relevance at the moment of interaction. Persistence optimizes for the durability of understanding across the lifetime of a project.

    🐸🖤 \"Gooood... let the deterministic context flow through the repository...\" - Kermit the Sidious, probably

    ","path":["The Thesis"],"tags":[]},{"location":"thesis/#appendix-a-representative-rejection-decisions","level":2,"title":"Appendix A: Representative Rejection Decisions","text":"

    The 56 rejection decisions referenced in Section 4 were cataloged across all 17 system analyses, grouped by the invariant they would violate. This appendix provides a representative sample (two per invariant) to illustrate the methodology.

    Invariant 1: Markdown-on-Filesystem (11 rejections): CrewAI's vector embedding storage was rejected because embeddings are not human-readable, not git-diff-friendly, and require external services. Kindex's knowledge graph as core primitive was rejected because it requires specialized commands to inspect content that could be a text file (kin show <id> vs. cat DECISIONS.md).

    Invariant 2: Zero Runtime Dependencies (13 rejections): Letta/MemGPT's PostgreSQL-backed architecture was rejected because it conflicts with local-first, no-database, single-binary operation. Pachyderm's Kubernetes-based distributed architecture was rejected as the antithesis of a single-binary design for a tool that manages text files.

    Invariant 3: Deterministic Assembly (6 rejections): LlamaIndex's embedding-based retrieval as the primary selection mechanism was rejected because it destroys determinism, requires an embedding model, and removes human judgment from the selection process. QubicDB's wall-clock-dependent scoring was rejected because it directly conflicts with the \"same inputs produce same output\" property.

    Invariant 4: Human Authority (6 rejections): Letta/MemGPT's agent self-modification of memory was rejected as fundamentally opposed to human-curated persistence. Claude Code's unstructured auto-memory (where the agent writes freeform notes) was rejected because structured files with defined schemas produce higher-quality persistent context than unconstrained agent output.

    Invariant 5: Local-First / Air-Gap Capable (7 rejections): Sweep's cloud-dependent architecture was rejected as fundamentally incompatible with the local-first, offline-capable model. LangGraph's managed cloud deployment was rejected because cloud dependencies for core functionality violate air-gap capability.

    Invariant 6: No Default Telemetry (4 rejections): Continue's telemetry-by-default (PostHog) was rejected because it contradicts the local-first, privacy-respecting trust model. CrewAI's global telemetry on import (Scarf tracking pixel) was rejected because it violates user trust and breaks air-gap capability.

    The remaining 9 rejections did not map to a specific invariant but were rejected on other architectural grounds: for example, Aider's full-file-content-in-context approach (which defeats token budgeting), AutoGen's multi-agent orchestration as core primitive (scope creep), and Claude Code's 30-day transcript retention limit (institutional knowledge should have no automatic expiration).

    ","path":["The Thesis"],"tags":[]},{"location":"thesis/#references","level":2,"title":"References","text":"
    1. Reproducible Builds Project, \"Reproducible Builds: Increasing the Integrity of Software Supply Chains\", 2017. https://reproducible-builds.org/docs/definition/ ↩↩↩

    2. S. McIntosh et al., \"The Impact of Build System Evolution on Software Quality\", ICSE, 2015. https://doi.org/10.1109/ICSE.2015.70 ↩

    3. C. Manning, P. Raghavan, H. Schütze, Introduction to Information Retrieval, Cambridge University Press, 2008. https://nlp.stanford.edu/IR-book/ ↩

    4. M. Nygard, \"Documenting Architecture Decisions\", Cognitect Blog, 2011. https://cognitect.com/blog/2011/11/15/documenting-architecture-decisions ↩↩

    5. L. Torvalds et al., Git Internals - Git Objects (content-addressed storage concepts). https://git-scm.com/book/en/v2/Git-Internals-Git-Objects ↩

    6. Kief Morris, Infrastructure as Code, O'Reilly, 2016. ↩

    7. J. Kreps, \"The Log: What every software engineer should know about real-time data's unifying abstraction\", 2013. https://engineering.linkedin.com/distributed-systems/log ↩

    8. P. Hunt et al., \"ZooKeeper: Wait-free coordination for Internet-scale systems\", USENIX ATC, 2010. https://www.usenix.org/legacy/event/atc10/tech/full_papers/Hunt.pdf ↩

    ","path":["The Thesis"],"tags":[]}]} \ No newline at end of file diff --git a/site/security/hub/index.html b/site/security/hub/index.html index a0d75a641..238e585af 100644 --- a/site/security/hub/index.html +++ b/site/security/hub/index.html @@ -1505,7 +1505,7 @@

    Threat Modelstrongly recommended but not mandatory.
  • Client machines are trusted enough to hold a per-project client token. Losing a client token is roughly equivalent to losing an - API key — scoped damage, not total compromise.
  • + API key: scoped damage, not total compromise.
  • Entry content is not secret. Decisions, learnings, and conventions may be indexed by AI agents, rendered in docs, shared across projects. Do not push credentials or PII into @@ -1572,7 +1572,7 @@

    Hub-Side Token StorageNever expose <data-dir> over NFS, SMB, or shared filesystems.

  • Treat <data-dir> the same way you'd treat - /etc/shadow — back it up encrypted, never check it + /etc/shadow: back it up encrypted, never check it into version control.
  • Hashing clients.json and moving to keyring-backed storage @@ -1588,7 +1588,7 @@

    Input ValidationOperational Hardening Checklist

    Responsible Disclosure

    Security issues in the hub follow the same process as the rest -of ctx — see Reporting.

    +of ctx; see Reporting.

    See Also

    • ctx Hub Operations
    • diff --git a/specs/ai-typography-cleanup.md b/specs/ai-typography-cleanup.md new file mode 100644 index 000000000..fcebcbf0e --- /dev/null +++ b/specs/ai-typography-cleanup.md @@ -0,0 +1,36 @@ +# AI Typography Cleanup + +## Problem + +LLM-generated text uses typographic punctuation from its training +corpus: em-dashes, en-dashes, smart quotes, and space-padded +double hyphens. These leak into docs and Go doc comments during +AI-assisted writing sessions. The detection script +(hack/detect-ai-typography.sh) was silently broken on macOS +because BSD grep does not support the -P flag. + +## Approach + +1. Fix detect-ai-typography.sh to work on both GNU grep and BSD + grep (macOS) via runtime detection +2. Replace all AI typography in docs/ with contextually appropriate + ASCII punctuation (semantic editing, not blind sed) +3. Replace all AI typography in internal/ Go and YAML files +4. Remove hack/agents/ (content migrated to docs/operations/runbooks/) + +## Scope + +- docs/: ~293 em-dashes across 44 files, plus en-dashes and + double-hyphens +- internal/: ~1668 matches across 328 files (mostly doc.go from + PR #69 enrichment) +- hack/detect-ai-typography.sh: BSD grep compatibility fix +- hack/agents/: deletion (migrated) + +## Decision + +Spec also covers the ctx backup deprecation planning (spec and +decision record only, no code removal yet): +- specs/deprecate-ctx-backup.md created +- Decision recorded in DECISIONS.md +- TASKS.md updated with deprecation task diff --git a/specs/deprecate-ctx-backup.md b/specs/deprecate-ctx-backup.md new file mode 100644 index 000000000..238fba048 --- /dev/null +++ b/specs/deprecate-ctx-backup.md @@ -0,0 +1,189 @@ +# Deprecate and Remove `ctx backup` + +## Problem + +`ctx backup` is an environment-specific feature masquerading as a +core command. It assumes SMB/GVFS mounts, fires nag hooks for +users who never configured it, and solves a problem that belongs +to the OS/infrastructure layer, not the application layer. 
+ +Since its introduction, `ctx hub` has landed as the real answer to +"how do I make my context survive across machines." Meanwhile, +`ctx backup` has accumulated tech debt (the Broadcom mirror repo +broke `sync-to-asgard`, GVFS is Linux-only, the check_backup_age +hook fires even in projects that never use backup) without +delivering proportional value. + +No user other than the project maintainer has ever configured it. + +## Decision + +Remove `ctx backup` entirely. Replace with a documentation runbook +that covers backup strategies for users who need file-level +backups beyond what hub provides. + +## Reasoning + +### 1. Hub already solves distributed persistence + +`ctx hub` replicates decisions, learnings, conventions, and tasks +across machines. For the data that matters most (knowledge files), +hub is the persistence mechanism. Backup of `.context/` is +redundant if you run a hub. + +### 2. Backup is inherently environment-specific + +SMB, NFS, S3, rsync, Time Machine, Borg, restic: every user has a +different backup story. `ctx backup` picked SMB via GVFS, which is +a narrow choice that does not generalize. Adding +`CTX_BACKUP_MOUNT_PATH` (the original task) would just patch one +assumption with another. You would chase mount strategies forever. + +### 3. Wrong layer + +Backup is an OS/infrastructure concern. `ctx` manages context +files; the user's backup tool backs up files. Asking `ctx` to also +be a backup tool is scope creep that adds maintenance burden for a +feature that `rsync`, `cp`, or a cron job handles better. + +### 4. 
Active maintenance cost + +- The Broadcom mirror repo issue forced disabling `sync-to-asgard` + (reminder [12] in the project) +- GVFS dependency is Linux-only, breaking macOS/Windows users +- `check_backup_age` hook fires for everyone, even users who never + configured backup, creating noise +- SMB mount code (`internal/exec/gio/`) is dead weight on macOS + +## Scope + +### What gets removed + +**CLI command** (3 files): +- `internal/cli/backup/cmd.go` +- `internal/cli/backup/run.go` +- `internal/cli/backup/doc.go` + +**Hook** (3 files): +- `internal/cli/system/cmd/check_backup_age/cmd.go` +- `internal/cli/system/cmd/check_backup_age/run.go` +- `internal/cli/system/cmd/check_backup_age/doc.go` +- Hook message template: `internal/assets/hooks/messages/check-backup-age/` +- Hook registration in `internal/cli/system/system.go` + +**Core archive/SMB** (5 files): +- `internal/cli/system/core/archive/backup.go` +- `internal/cli/system/core/archive/smb.go` +- `internal/cli/system/core/archive/types.go` +- `internal/cli/system/core/archive/archive.go` +- `internal/cli/system/core/archive/doc.go` + +**GIO/mount integration**: +- `internal/exec/gio/mount.go` (entire package if only used by backup) + +**Config constants** (4 files): +- `internal/config/archive/backup.go` +- `internal/config/archive/var.go` +- `internal/config/archive/archive.go` +- `internal/config/archive/doc.go` + +**Env vars** (in `internal/config/env/env.go`): +- `CTX_BACKUP_SMB_URL` +- `CTX_BACKUP_SMB_SUBDIR` + +**Embed keys** (scattered across `internal/config/embed/`): +- `cmd/backup.go` (UseBackup, DescKeyBackup) +- `cmd/system.go` (UseSystemCheckBackupAge, DescKeySystemCheckBackupAge) +- `flag/backup.go` (DescKeyBackupJson, DescKeyBackupScope) +- `text/backup.go` (all DescKeyBackup* and DescKeyWriteBackup* keys) +- `text/err_backup.go` (all DescKeyErrBackup* keys) + +**Error package**: +- `internal/err/backup/` (entire package) + +**Write/output package**: +- `internal/write/backup/` (entire 
package) + +**Entity types** (in `internal/entity/system.go`): +- `ArchiveEntry` struct +- `BackupResult` struct + +**Bootstrap registration** (in `internal/bootstrap/group.go`): +- Remove `backup.Cmd` from `GroupRuntime` +- Remove import + +**YAML text files** (remove backup-related entries): +- `internal/assets/commands/commands.yaml` +- `internal/assets/commands/text/write.yaml` +- Various error/text YAML files + +**Skill**: +- `.claude/skills/_ctx-backup/` (entire directory) + +**Documentation**: +- `docs/cli/backup.md` (remove entirely) +- `zensical.toml` (remove backup from nav) +- `docs/home/contributing.md` (remove backup references) +- `docs/home/common-workflows.md` (remove backup workflow) +- `docs/cli/index.md` (remove backup from command index) +- `docs/cli/system.md` (remove check-backup-age reference) +- `docs/recipes/hook-sequence-diagrams.md` (remove backup sequence) +- `docs/recipes/customizing-hook-messages.md` (remove backup example) +- `docs/recipes/hook-output-patterns.md` (remove backup example) +- Various hub docs (remove backup tier references, update to + recommend external backup tools) + +### What gets created + +**Runbook**: `docs/operations/runbooks/backup-strategy.md` + +Contents: +- What to back up: `.context/`, `.claude/`, `~/.ctx/` +- How hub reduces backup needs (knowledge files are replicated) +- What hub does NOT back up (journal, scratchpad, session logs) +- Example strategies: + - cron + rsync to NAS/external drive + - cron + cp to cloud-synced directory + - Time Machine / system backup (macOS) + - Borg/restic for versioned backups +- When you still need file-level backup even with hub + +### What stays + +- `internal/cli/initialize/core/backup/file.go`: this creates + `.bak` copies during `ctx init --force`. It is NOT part of + `ctx backup`; it is init's config backup mechanism. Keep it. +- Hub backup/restore documentation in `docs/operations/hub.md`: + this is about backing up the hub data directory, not `ctx backup`. 
+ +## Implementation Order + +1. Create the backup-strategy runbook first (so users have a + migration path before the feature disappears) +2. Remove the `check_backup_age` hook and its registration (this + is the user-visible annoyance; removing it first gives + immediate relief) +3. Remove the CLI command, core packages, config constants, error + package, write package, entity types, bootstrap registration +4. Remove the skill +5. Update all documentation (remove pages, update cross-refs) +6. Remove YAML text entries +7. Run `make audit` to catch any dangling references +8. Update TASKS.md: mark the SMB mount task as skipped, add + completion note + +## Migration + +Users who relied on `ctx backup` (currently: only the project +maintainer) should: + +1. Replace with `rsync` or `cp` in a cron job +2. Use `ctx hub` for cross-machine knowledge persistence +3. Follow the backup-strategy runbook for file-level needs + +## Risks + +- **Low**: no external users depend on this feature +- **Medium**: the project maintainer's own backup workflow breaks. + Mitigated by writing the runbook first and setting up a + replacement cron job before removing the command. From 423016c3e569590297b50b4f2b3e4a13ac0e9e1a Mon Sep 17 00:00:00 2001 From: Jose Alekhinne Date: Sat, 25 Apr 2026 01:44:56 -0700 Subject: [PATCH 12/13] feat: enforce explicit CTX_DIR with single-source anchor resolution Replaces directory walk-up with explicit declaration via CTX_DIR (canonical-basename and absolute-path enforced). The anchor now arrives through ctx activate / ctx deactivate shell hooks plus per-tool channels; non-exempt commands refuse to run without it. ContextChild and the implicit walk-up paths are removed; over twenty command entry points migrated through rc.RequireContextDir. Highlights: - New cli/activate, cli/deactivate, system/check-anchor-drift commands plus the shell-emit primitives in config/shell and write/activate. 
- rc.ContextDir and state.Dir tightened to (string, error) with ErrDirNotDeclared. Callers now gate on err, eliminating the filepath.Join("", rel) trap that wrote state files into CWD. - testutil/testctx package introduced. Declare wires CTX_DIR and HOME for hermetic tests, eliminating cross-package races on ~/.claude/settings.json under parallel go test ./... - Test isolation: fourteen test files now declare CTX_DIR and HOME so a developer shell with CTX_DIR exported no longer pollutes test runs. cli_test.go's withCtxEnv helper replaced with a single t.Setenv at the top of TestBinaryIntegration. - Block-dangerous-command subtree retired in favor of the block-dangerous-commands plural canonical naming. - examples/demo/ removed (superseded by docs runbooks). - .context/ updated end-to-end: agent playbook, conventions, decisions, learnings, glossary, architecture, plus four new steering files. Spec: specs/single-source-context-anchor.md Signed-off-by: Jose Alekhinne --- .claude/hooks/block-hack-scripts.sh | 47 - .claude/skills/_ctx-audit/SKILL.md | 2 - .claude/skills/_ctx-backup/SKILL.md | 68 - .context/AGENT_PLAYBOOK.md | 92 +- .context/AGENT_PLAYBOOK_GATE.md | 16 +- .context/ARCHITECTURE.md | 401 +-- .context/CONSTITUTION.md | 47 +- .context/CONVENTIONS.md | 210 +- .context/DECISIONS.md | 1752 ------------ .context/GLOSSARY.md | 44 - .context/HANDOVER-2026-04-22.md | 213 ++ .context/LEARNINGS.md | 1649 +---------- .context/TASKS.md | 2116 +-------------- .context/steering/product.md | 50 + .context/steering/structure.md | 49 + .context/steering/tech.md | 50 + .context/steering/workflow.md | 50 + .ctxrc.dev | 1 - .github/copilot-instructions.md | 6 +- CLAUDE.md | 32 +- CONTRIBUTING-SKILLS.md | 12 +- GITNEXUS.md | 24 +- README.md | 26 +- SECURITY.md | 2 +- ...02-15-ctx-v0.3.0-the-discipline-release.md | 4 +- docs/cli/backup.md | 52 - docs/cli/bootstrap.md | 6 +- docs/cli/index.md | 71 +- docs/cli/init-status.md | 71 + docs/cli/mcp.md | 7 +- docs/cli/system.md | 3 +- 
docs/home/common-workflows.md | 2 +- docs/home/configuration.md | 95 +- docs/home/contributing.md | 18 +- .../runbooks/architecture-exploration.md | 71 +- docs/operations/runbooks/backup-strategy.md | 125 + docs/recipes/activating-context.md | 216 ++ docs/recipes/customizing-hook-messages.md | 2 - docs/recipes/external-context.md | 267 +- docs/recipes/hook-output-patterns.md | 1 - docs/recipes/hook-sequence-diagrams.md | 36 - docs/recipes/index.md | 16 +- docs/recipes/multi-tool-setup.md | 34 +- docs/recipes/scrutinizing-a-plan.md | 99 + docs/recipes/session-lifecycle.md | 17 +- docs/recipes/troubleshooting.md | 55 +- docs/reference/session-journal.md | 4 +- docs/security/design.md | 183 ++ docs/security/index.md | 15 +- docs/security/reporting.md | 112 +- editors/vscode/CHANGELOG.md | 20 +- editors/vscode/README.md | 18 +- examples/demo/.context/AGENT_PLAYBOOK.md | 179 -- examples/demo/.context/ARCHITECTURE.md | 100 - examples/demo/.context/CONSTITUTION.md | 88 - examples/demo/.context/CONVENTIONS.md | 83 - examples/demo/.context/DECISIONS.md | 87 - examples/demo/.context/GLOSSARY.md | 42 - examples/demo/.context/LEARNINGS.md | 57 - examples/demo/.context/TASKS.md | 26 - ...5-143000-database-timeout-investigation.md | 64 - examples/demo/PROMPT.md | 94 - examples/demo/README.md | 126 - examples/demo/specs/oauth2.md | 94 - hack/detect-ai-typography.sh | 28 +- internal/assets/claude/CLAUDE.md | 32 +- internal/assets/claude/hooks/hooks.json | 43 +- .../SKILL.md | 24 +- .../claude/skills/ctx-architecture/SKILL.md | 4 +- .../assets/claude/skills/ctx-commit/SKILL.md | 2 +- .../assets/claude/skills/ctx-plan/SKILL.md | 55 + .../claude/skills/ctx-remember/SKILL.md | 2 +- internal/assets/commands/commands.yaml | 91 +- internal/assets/commands/examples.yaml | 20 +- internal/assets/commands/flags.yaml | 10 +- internal/assets/commands/text/doctor.yaml | 4 + internal/assets/commands/text/errors.yaml | 53 +- internal/assets/commands/text/hooks.yaml | 43 +- 
internal/assets/commands/text/write.yaml | 34 +- internal/assets/context/AGENT_PLAYBOOK.md | 41 +- .../assets/context/AGENT_PLAYBOOK_GATE.md | 16 +- internal/assets/embed_test.go | 4 +- .../block-dangerous-command/cp-to-bin.txt | 1 - .../install-to-local-bin.txt | 1 - .../block-dangerous-command/mid-git-push.txt | 1 - .../block-dangerous-command/mid-sudo.txt | 1 - .../block-dangerous-commands/cp-to-bin.txt | 1 - .../install-to-local-bin.txt | 1 - .../block-dangerous-commands/mid-git-push.txt | 1 - .../block-dangerous-commands/mid-sudo.txt | 1 - .../messages/check-backup-age/warning.txt | 1 - internal/assets/hooks/messages/registry.yaml | 26 - .../assets/hooks/messages/registry_test.go | 4 +- .../integrations/copilot-cli/INSTRUCTIONS.md | 16 +- .../copilot-cli/scripts/session-start.ps1 | 2 +- .../copilot-cli/scripts/session-start.sh | 2 +- .../skills/_ctx-alignment-audit/SKILL.md | 4 +- .../skills/ctx-architecture/SKILL.md | 4 +- .../copilot-cli/skills/ctx-commit/SKILL.md | 2 +- .../copilot-cli/skills/ctx-remember/SKILL.md | 2 +- .../skills/ctx-sanitize-permissions/SKILL.md | 4 +- .../copilot/copilot-instructions.md | 8 +- internal/assets/read/desc/desc.go | 2 +- internal/assets/schema/ctxrc.schema.json | 8 - internal/assets/tpl/README.md | 4 +- internal/audit/README.md | 2 +- internal/audit/cmd_fprint_test.go | 171 ++ internal/bootstrap/bootstrap_test.go | 134 +- internal/bootstrap/cmd.go | 82 +- internal/bootstrap/doc.go | 3 +- internal/bootstrap/group.go | 10 +- internal/cli/activate/activate.go | 22 + internal/cli/activate/activate_test.go | 275 ++ internal/cli/activate/cmd/root/cmd.go | 77 + internal/cli/activate/cmd/root/doc.go | 33 + internal/cli/activate/cmd/root/run.go | 76 + internal/cli/activate/core/emit/doc.go | 28 + internal/cli/activate/core/emit/emit.go | 90 + internal/cli/activate/core/emit/posix.go | 57 + internal/cli/activate/core/emit/types.go | 12 + internal/cli/activate/core/resolve/doc.go | 26 + .../cli/activate/core/resolve/internal.go | 49 + 
internal/cli/activate/core/resolve/resolve.go | 32 + internal/cli/activate/doc.go | 36 + internal/cli/activate/testmain_test.go | 22 + internal/cli/add/add_test.go | 9 + internal/cli/add/cmd/coverage_test.go | 17 +- internal/cli/add/cmd/root/run.go | 14 +- internal/cli/agent/agent_test.go | 5 + internal/cli/agent/cmd/root/cmd.go | 14 +- internal/cli/agent/cmd/root/run.go | 18 +- internal/cli/agent/core/cooldown/cooldown.go | 76 +- internal/cli/agent/core/hub/load.go | 29 +- internal/cli/agent/core/steering/steering.go | 8 +- internal/cli/backup/cmd.go | 44 - internal/cli/backup/doc.go | 37 - internal/cli/backup/run.go | 99 - internal/cli/change/cmd/root/run.go | 5 + internal/cli/change/core/cmd_test.go | 15 +- internal/cli/change/core/detect/detect.go | 49 +- internal/cli/change/core/detect/parse.go | 4 +- internal/cli/change/core/scan/scan.go | 5 +- internal/cli/cli_test.go | 7 + internal/cli/compact/cmd/root/run.go | 4 + internal/cli/compact/compact_test.go | 5 + internal/cli/config/cmd/status/run_test.go | 9 +- internal/cli/config/cmd/switchcmd/run_test.go | 5 +- .../cli/config/core/profile/profile_test.go | 5 +- internal/cli/connection/core/config/config.go | 14 +- internal/cli/connection/core/config/path.go | 9 +- .../cli/connection/core/register/register.go | 7 +- internal/cli/connection/core/render/render.go | 6 +- .../cli/connection/core/render/render_test.go | 6 +- internal/cli/connection/core/sync/state.go | 12 +- internal/cli/deactivate/cmd/root/cmd.go | 58 + internal/cli/deactivate/cmd/root/doc.go | 19 + internal/cli/deactivate/cmd/root/run.go | 34 + internal/cli/deactivate/deactivate.go | 22 + internal/cli/deactivate/deactivate_test.go | 73 + internal/cli/deactivate/doc.go | 27 + internal/cli/deactivate/testmain_test.go | 21 + internal/cli/decision/cmd/reindex/run.go | 7 +- internal/cli/decision/decision_test.go | 7 +- internal/cli/doctor/cmd/root/run.go | 132 +- internal/cli/doctor/core/check/check.go | 201 +- internal/cli/doctor/core/check/types.go | 
16 + internal/cli/doctor/doctor_test.go | 23 +- internal/cli/drift/cmd/root/run.go | 5 + internal/cli/drift/core/fix/fix.go | 10 +- internal/cli/drift/drift_test.go | 14 +- internal/cli/fmt/cmd/root/run.go | 6 +- internal/cli/hub/cmd/peer/cmd.go | 6 +- internal/cli/hub/cmd/peer/cmd_test.go | 24 + internal/cli/hub/cmd/start/cmd.go | 6 + internal/cli/hub/cmd/start/cmd_test.go | 29 + internal/cli/hub/cmd/status/cmd.go | 6 +- internal/cli/hub/cmd/status/cmd_test.go | 24 + .../cli/hub/cmd/status/integration_test.go | 73 + internal/cli/hub/cmd/stepdown/cmd.go | 6 +- internal/cli/hub/cmd/stepdown/cmd_test.go | 24 + internal/cli/hub/cmd/stop/cmd.go | 4 + internal/cli/hub/cmd/stop/cmd_test.go | 24 + internal/cli/initialize/cmd/root/run.go | 43 +- internal/cli/initialize/core/pad/setup.go | 5 +- .../core/project/getting_started.go | 31 +- internal/cli/initialize/init_test.go | 25 + internal/cli/journal/cmd/importer/run.go | 7 +- internal/cli/journal/cmd/obsidian/cmd.go | 18 +- internal/cli/journal/cmd/obsidian/run.go | 14 +- internal/cli/journal/cmd/schema/check/run.go | 5 + internal/cli/journal/cmd/site/cmd.go | 16 +- internal/cli/journal/cmd/site/run.go | 10 +- internal/cli/journal/cmd/sync/run.go | 7 +- internal/cli/journal/core/lock/lock.go | 7 +- internal/cli/journal/core/lock/sync_test.go | 11 + internal/cli/journal/core/lock/unlock_test.go | 11 + internal/cli/journal/core/schema/check.go | 15 +- internal/cli/learning/cmd/reindex/run.go | 7 +- internal/cli/learning/learning_test.go | 7 +- internal/cli/load/cmd/root/run.go | 5 + internal/cli/load/load_test.go | 5 + internal/cli/mcp/cmd/root/cmd.go | 7 +- internal/cli/mcp/cmd/root/cmd_test.go | 35 + internal/cli/memory/cmd/diff/run.go | 10 +- internal/cli/memory/cmd/importer/run.go | 25 +- internal/cli/memory/cmd/publish/run.go | 16 +- internal/cli/memory/cmd/status/run.go | 8 +- internal/cli/memory/cmd/sync/run.go | 13 +- internal/cli/memory/cmd/unpublish/run.go | 25 +- internal/cli/memory/core/resolve/doc.go | 22 + 
internal/cli/memory/core/resolve/resolve.go | 91 + internal/cli/message/cmd/edit/run.go | 10 +- internal/cli/message/cmd/list/run.go | 11 +- internal/cli/message/cmd/reset/run.go | 10 +- internal/cli/message/cmd/show/run.go | 10 +- internal/cli/notify/cmd/setup/run.go | 5 + internal/cli/notify/cmd/test/run.go | 5 + internal/cli/notify/notify_test.go | 7 +- internal/cli/pad/cmd/add/run.go | 5 + internal/cli/pad/cmd/edit/run.go | 5 + internal/cli/pad/cmd/export/run.go | 5 + internal/cli/pad/cmd/merge/run.go | 10 +- internal/cli/pad/cmd/mv/run.go | 5 + internal/cli/pad/cmd/normalize/run.go | 5 + internal/cli/pad/cmd/resolve/run.go | 12 +- internal/cli/pad/cmd/rm/run.go | 5 + internal/cli/pad/cmd/root/run.go | 5 + internal/cli/pad/cmd/show/run.go | 5 + internal/cli/pad/cmd/tag/run.go | 5 + internal/cli/pad/core/merge/merge.go | 25 +- internal/cli/pad/core/store/store.go | 36 +- internal/cli/pad/core/store/store_raw.go | 10 +- internal/cli/pad/pad_test.go | 84 +- internal/cli/pause/cmd/root/run.go | 4 +- internal/cli/pause/pause_test.go | 11 +- internal/cli/prune/run.go | 5 +- internal/cli/reindex/cmd/root/run.go | 6 +- internal/cli/reindex/reindex_test.go | 9 +- internal/cli/remind/core/store/store.go | 21 +- internal/cli/remind/remind_test.go | 15 +- internal/cli/resume/cmd/root/run.go | 4 +- internal/cli/resume/resume_test.go | 15 +- internal/cli/serve/cmd/root/run.go | 6 +- internal/cli/serve/serve_test.go | 16 +- internal/cli/skill/cmd/install/cmd.go | 7 +- internal/cli/skill/cmd/list/cmd.go | 7 +- internal/cli/skill/cmd/remove/cmd.go | 7 +- internal/cli/status/status_test.go | 5 + internal/cli/steering/cmd/add/cmd.go | 6 +- internal/cli/steering/cmd/initcmd/cmd.go | 23 +- internal/cli/sync/cmd/root/run.go | 10 +- internal/cli/sync/core/action/action.go | 13 +- internal/cli/sync/core/core_test.go | 28 +- internal/cli/sync/core/validate/validate.go | 32 +- internal/cli/sync/sync_test.go | 13 +- internal/cli/system/README.md | 12 +- 
.../system/cmd/block_dangerous_command/cmd.go | 35 - .../system/cmd/block_dangerous_command/doc.go | 49 - .../system/cmd/block_dangerous_command/run.go | 95 - .../cli/system/cmd/block_non_path_ctx/run.go | 2 +- internal/cli/system/cmd/bootstrap/run.go | 21 +- .../cmd.go | 12 +- .../cli/system/cmd/check_anchor_drift/doc.go | 64 + .../cli/system/cmd/check_anchor_drift/run.go | 73 + .../system/cmd/check_anchor_drift/run_test.go | 153 ++ .../cmd/check_anchor_drift/testmain_test.go | 22 + .../cli/system/cmd/check_backup_age/doc.go | 49 - .../cli/system/cmd/check_backup_age/run.go | 93 - internal/cli/system/cmd/check_ceremony/run.go | 30 +- .../cli/system/cmd/check_context_size/run.go | 69 +- .../cli/system/cmd/check_freshness/run.go | 17 +- internal/cli/system/cmd/check_hub_sync/run.go | 19 +- internal/cli/system/cmd/check_journal/run.go | 20 +- .../cli/system/cmd/check_knowledge/run.go | 19 +- .../cli/system/cmd/check_map_staleness/run.go | 28 +- .../cli/system/cmd/check_memory_drift/run.go | 18 +- .../cli/system/cmd/check_persistence/run.go | 23 +- internal/cli/system/cmd/check_reminder/run.go | 13 +- internal/cli/system/cmd/check_resource/run.go | 13 +- .../system/cmd/check_skill_discovery/run.go | 13 +- .../system/cmd/check_task_completion/run.go | 21 +- internal/cli/system/cmd/check_version/run.go | 25 +- .../cli/system/cmd/context_load_gate/run.go | 36 +- internal/cli/system/cmd/heartbeat/run.go | 42 +- internal/cli/system/cmd/mark_journal/run.go | 9 +- .../cli/system/cmd/mark_wrapped_up/run.go | 16 +- internal/cli/system/cmd/pause/run.go | 25 +- internal/cli/system/cmd/post_commit/run.go | 28 +- internal/cli/system/cmd/qa_reminder/run.go | 17 +- internal/cli/system/cmd/resume/run.go | 24 +- internal/cli/system/cmd/session_event/run.go | 26 +- internal/cli/system/cmd/specs_nudge/run.go | 17 +- internal/cli/system/core/anchor/doc.go | 26 + internal/cli/system/core/anchor/equal.go | 44 + internal/cli/system/core/archive/archive.go | 178 -- 
internal/cli/system/core/archive/backup.go | 227 -- internal/cli/system/core/archive/doc.go | 52 - internal/cli/system/core/archive/smb.go | 110 - internal/cli/system/core/archive/types.go | 23 - .../cli/system/core/check/full_preamble.go | 100 + .../cli/system/core/check/pause_preamble.go | 62 + internal/cli/system/core/check/wrapup.go | 12 +- .../cli/system/core/drift/version_drift.go | 27 +- .../cli/system/core/health/map_staleness.go | 46 +- internal/cli/system/core/health/prune.go | 15 +- internal/cli/system/core/hubsync/sync.go | 28 +- internal/cli/system/core/journal/mark.go | 10 +- .../cli/system/core/knowledge/knowledge.go | 37 +- internal/cli/system/core/log/log.go | 14 +- internal/cli/system/core/message/message.go | 34 +- .../cli/system/core/message/message_cmd.go | 36 +- .../cli/system/core/nudge/context_size.go | 60 +- internal/cli/system/core/nudge/oversize.go | 53 +- internal/cli/system/core/nudge/pause.go | 42 +- internal/cli/system/core/nudge/relay.go | 88 +- internal/cli/system/core/session/session.go | 27 +- .../cli/system/core/session/session_token.go | 6 +- internal/cli/system/core/state/state.go | 62 +- internal/cli/system/core/version/version.go | 22 +- internal/cli/system/doc.go | 3 +- internal/cli/system/system.go | 14 +- internal/cli/task/cmd/complete/run.go | 6 +- internal/cli/task/cmd/snapshot/run.go | 12 +- internal/cli/task/core/archive/archive.go | 10 +- internal/cli/task/core/complete/complete.go | 6 +- internal/cli/task/core/path/path.go | 18 +- internal/cli/task/task_test.go | 35 +- internal/cli/trace/cmd/collect/run.go | 6 +- internal/cli/trace/cmd/file/run.go | 6 +- internal/cli/trace/cmd/show/run.go | 6 +- internal/cli/trace/cmd/tag/run.go | 7 +- internal/cli/trace/core/collect/collect.go | 5 +- internal/cli/trace/trace_test.go | 3 + internal/cli/usage/run.go | 7 +- internal/cli/watch/cmd/root/run.go | 11 +- internal/cli/watch/core/apply/complete.go | 6 +- .../cli/watch/core/apply/complete_test.go | 3 + 
internal/cli/watch/core/core_test.go | 58 +- internal/cli/watch/watch_test.go | 23 +- internal/config/README.md | 26 +- internal/config/archive/archive.go | 8 - internal/config/archive/backup.go | 49 - internal/config/archive/doc.go | 57 +- internal/config/archive/var.go | 13 - internal/config/embed/cmd/backup.go | 19 - internal/config/embed/cmd/base.go | 8 + internal/config/embed/cmd/system.go | 22 +- internal/config/embed/flag/activate.go | 14 + internal/config/embed/flag/backup.go | 15 - internal/config/embed/flag/flag.go | 5 - internal/config/embed/text/backup.go | 47 - internal/config/embed/text/block.go | 9 - internal/config/embed/text/check_anchor.go | 25 + internal/config/embed/text/doctor.go | 10 + internal/config/embed/text/err_activate.go | 15 + internal/config/embed/text/err_backup.go | 51 +- internal/config/embed/text/err_fs.go | 24 +- internal/config/embed/text/err_validate.go | 3 - internal/config/embed/text/format.go | 6 - internal/config/embed/text/initialize.go | 12 + internal/config/embed/text/restore.go | 6 + internal/config/env/doc.go | 12 +- internal/config/env/env.go | 19 +- internal/config/flag/doc.go | 6 +- internal/config/flag/flag.go | 14 +- internal/config/git/git.go | 3 - internal/config/hook/hook.go | 7 +- internal/config/hook/variant.go | 8 - internal/config/rc/messages.go | 46 + internal/config/regex/cmd.go | 55 - internal/config/regex/cmd_test.go | 93 - internal/config/shell/doc.go | 17 + internal/config/shell/shell.go | 67 + internal/config/warn/warn.go | 58 + internal/context/load/loader.go | 10 +- internal/context/load/loader_test.go | 5 +- internal/context/resolve/resolve.go | 68 +- internal/context/validate/doc.go | 12 +- internal/context/validate/validate.go | 26 +- internal/drift/check.go | 23 +- internal/drift/check_ext.go | 16 +- internal/drift/check_ext_test.go | 7 +- internal/drift/detector_test.go | 3 + internal/entity/notify.go | 31 + internal/entity/system.go | 26 - internal/entry/write.go | 6 +- 
internal/err/activate/activate.go | 23 + internal/err/activate/doc.go | 21 + internal/err/backup/backup.go | 147 +- internal/err/backup/doc.go | 41 +- internal/err/context/context.go | 220 +- internal/err/context/doc.go | 6 +- internal/err/fs/fs.go | 14 - internal/exec/gio/doc.go | 32 - internal/exec/gio/mount.go | 27 - internal/flagbind/doc.go | 4 +- internal/flagbind/flag.go | 32 - internal/io/append.go | 41 +- internal/journal/parser/markdown_test.go | 5 +- internal/log/event/event.go | 75 +- internal/log/event/event_test.go | 68 +- internal/log/event/ops.go | 39 +- internal/log/event/path.go | 18 +- internal/mcp/README.md | 26 +- internal/mcp/handler/tool.go | 32 +- internal/mcp/server/server_test.go | 7 + internal/memory/promote_test.go | 4 +- internal/notify/notify.go | 49 +- internal/notify/notify_test.go | 4 +- internal/rc/candidates.go | 50 + internal/rc/doc.go | 83 +- internal/rc/load.go | 89 +- internal/rc/lock.go | 8 +- internal/rc/rc.go | 142 +- internal/rc/rc_test.go | 1066 ++++---- internal/rc/require.go | 94 + internal/rc/require_test.go | 197 ++ internal/rc/testmain_test.go | 22 + internal/rc/types.go | 5 - internal/rc/validate_test.go | 4 +- internal/rc/walk.go | 118 - internal/rc/walk_test.go | 295 -- internal/testutil/testctx/doc.go | 23 + internal/testutil/testctx/testctx.go | 55 + internal/tidy/archive.go | 6 +- internal/validate/path.go | 64 - internal/validate/path_test.go | 112 - internal/write/activate/activate.go | 88 + internal/write/activate/doc.go | 27 + internal/write/backup/backup.go | 43 - internal/write/backup/doc.go | 35 - internal/write/backup/skip.go | 27 - internal/write/initialize/info.go | 23 + site/404.html | 24 +- .../index.html | 24 +- .../index.html | 24 +- .../index.html | 24 +- .../index.html | 24 +- .../index.html | 24 +- .../index.html | 24 +- .../index.html | 24 +- .../index.html | 24 +- .../index.html | 24 +- .../index.html | 24 +- .../blog/2026-02-14-irc-as-context/index.html | 24 +- .../index.html | 24 +- 
.../index.html | 28 +- .../index.html | 24 +- site/blog/2026-02-15-why-zensical/index.html | 24 +- .../index.html | 24 +- .../index.html | 24 +- .../index.html | 24 +- .../index.html | 24 +- site/blog/2026-02-17-the-3-1-ratio/index.html | 24 +- .../index.html | 24 +- .../index.html | 24 +- .../2026-02-28-the-last-question/index.html | 24 +- .../index.html | 24 +- .../index.html | 24 +- .../index.html | 24 +- .../index.html | 24 +- .../index.html | 24 +- site/blog/index.html | 24 +- site/cli/bootstrap/index.html | 30 +- site/cli/change/index.html | 26 +- site/cli/completion/index.html | 26 +- site/cli/config/index.html | 62 +- site/cli/connect/index.html | 24 +- site/cli/connection/index.html | 26 +- site/cli/context/index.html | 26 +- site/cli/doctor/index.html | 26 +- site/cli/event/index.html | 24 +- site/cli/guide/index.html | 26 +- site/cli/hook/index.html | 56 +- site/cli/hub/index.html | 26 +- site/cli/index.html | 181 +- site/cli/init-status/index.html | 205 +- site/cli/journal/index.html | 26 +- site/cli/loop/index.html | 26 +- site/cli/mcp/index.html | 33 +- site/cli/memory/index.html | 26 +- site/cli/message/index.html | 24 +- site/cli/notify/index.html | 26 +- site/cli/pad/index.html | 26 +- site/cli/pause/index.html | 26 +- site/cli/prune/index.html | 62 +- site/cli/remind/index.html | 26 +- site/cli/resume/index.html | 26 +- site/cli/serve/index.html | 26 +- site/cli/setup/index.html | 26 +- site/cli/site/index.html | 26 +- site/cli/skill/index.html | 26 +- site/cli/steering/index.html | 26 +- site/cli/sysinfo/index.html | 26 +- site/cli/system/index.html | 59 +- site/cli/trace/index.html | 26 +- site/cli/trigger/index.html | 26 +- site/cli/usage/index.html | 26 +- site/cli/watch/index.html | 26 +- site/cli/why/index.html | 26 +- site/home/about/index.html | 104 +- site/home/common-workflows/index.html | 181 +- site/home/community/index.html | 419 ++- site/home/configuration/index.html | 364 ++- site/home/context-files/index.html | 173 +- 
site/home/contributing/index.html | 577 ++-- site/home/faq/index.html | 110 +- site/home/first-session/index.html | 173 +- site/home/getting-started/index.html | 179 +- site/home/hub/index.html | 173 +- site/home/index.html | 226 +- site/home/is-ctx-right/index.html | 104 +- site/home/joining-a-project/index.html | 173 +- site/home/keeping-ai-honest/index.html | 173 +- site/home/prompting-guide/index.html | 173 +- site/home/repeated-mistakes/index.html | 173 +- site/home/steering/index.html | 173 +- site/home/triggers/index.html | 179 +- site/index.html | 24 +- site/operations/autonomous-loop/index.html | 66 +- site/operations/hub-failure-modes/index.html | 294 +- site/operations/hub/index.html | 294 +- site/operations/index.html | 60 +- site/operations/integrations/index.html | 60 +- site/operations/migration/index.html | 60 +- site/operations/release/index.html | 72 +- .../architecture-exploration/index.html | 538 ++-- .../runbooks/backup-strategy/index.html | 1326 +++++++++ .../runbooks/breaking-migration/index.html | 260 +- .../runbooks/codebase-audit/index.html | 260 +- .../runbooks/docs-semantic-audit/index.html | 260 +- .../runbooks/hub-deployment/index.html | 260 +- .../runbooks/new-contributor/index.html | 260 +- .../runbooks/plugin-release/index.html | 260 +- .../runbooks/release-checklist/index.html | 266 +- .../runbooks/sanitize-permissions/index.html | 260 +- site/operations/upgrading/index.html | 60 +- site/recipes/activating-context/index.html | 2416 +++++++++++++++++ .../recipes/architecture-deep-dive/index.html | 24 +- site/recipes/autonomous-loops/index.html | 28 +- site/recipes/building-skills/index.html | 28 +- .../claude-code-permissions/index.html | 28 +- .../recipes/configuration-profiles/index.html | 28 +- site/recipes/context-health/index.html | 28 +- .../customizing-hook-messages/index.html | 38 +- site/recipes/design-before-coding/index.html | 28 +- site/recipes/external-context/index.html | 496 ++-- 
site/recipes/guide-your-agent/index.html | 64 +- site/recipes/hook-output-patterns/index.html | 29 +- .../recipes/hook-sequence-diagrams/index.html | 74 +- site/recipes/hub-cluster/index.html | 28 +- site/recipes/hub-getting-started/index.html | 28 +- site/recipes/hub-multi-machine/index.html | 28 +- site/recipes/hub-overview/index.html | 28 +- site/recipes/hub-personal/index.html | 28 +- site/recipes/hub-team/index.html | 28 +- site/recipes/import-plans/index.html | 28 +- site/recipes/index.html | 63 +- site/recipes/knowledge-capture/index.html | 28 +- site/recipes/memory-bridge/index.html | 28 +- site/recipes/multi-tool-setup/index.html | 88 +- site/recipes/multilingual-sessions/index.html | 58 +- site/recipes/parallel-worktrees/index.html | 28 +- site/recipes/permission-snapshots/index.html | 28 +- site/recipes/publishing/index.html | 28 +- site/recipes/scratchpad-sync/index.html | 28 +- .../recipes/scratchpad-with-claude/index.html | 28 +- site/recipes/scrutinizing-a-plan/index.html | 1256 +++++++++ site/recipes/session-archaeology/index.html | 28 +- site/recipes/session-ceremonies/index.html | 28 +- site/recipes/session-changes/index.html | 24 +- site/recipes/session-lifecycle/index.html | 165 +- site/recipes/session-pause/index.html | 28 +- site/recipes/session-reminders/index.html | 28 +- site/recipes/state-maintenance/index.html | 24 +- site/recipes/steering/index.html | 28 +- site/recipes/system-hooks-audit/index.html | 28 +- site/recipes/task-management/index.html | 28 +- site/recipes/triggers/index.html | 28 +- site/recipes/troubleshooting/index.html | 200 +- site/recipes/webhook-notifications/index.html | 28 +- .../when-to-use-agent-teams/index.html | 28 +- site/reference/audit-conventions/index.html | 24 +- site/reference/comparison/index.html | 24 +- site/reference/design-invariants/index.html | 24 +- site/reference/index.html | 24 +- site/reference/scratchpad/index.html | 24 +- site/reference/session-journal/index.html | 28 +- 
site/reference/skills/index.html | 24 +- site/reference/versions/index.html | 24 +- site/search.json | 2 +- site/security/agent-security/index.html | 62 +- .../backup => security/design}/index.html | 1115 +++----- site/security/hub/index.html | 60 +- site/security/index.html | 97 +- site/security/reporting/index.html | 292 +- site/sitemap.xml | 37 +- site/thesis/index.html | 24 +- specs/context-resolution-analysis.md | 408 +++ specs/explicit-context-dir.md | 482 ++++ specs/rc-contextdir-upward-walk.md | 6 + specs/single-source-context-anchor.md | 1062 ++++++++ zensical.toml | 27 +- 603 files changed, 22165 insertions(+), 20059 deletions(-) delete mode 100755 .claude/hooks/block-hack-scripts.sh delete mode 100644 .claude/skills/_ctx-backup/SKILL.md create mode 100644 .context/HANDOVER-2026-04-22.md create mode 100644 .context/steering/product.md create mode 100644 .context/steering/structure.md create mode 100644 .context/steering/tech.md create mode 100644 .context/steering/workflow.md delete mode 100644 docs/cli/backup.md create mode 100644 docs/operations/runbooks/backup-strategy.md create mode 100644 docs/recipes/activating-context.md create mode 100644 docs/recipes/scrutinizing-a-plan.md create mode 100644 docs/security/design.md delete mode 100644 examples/demo/.context/AGENT_PLAYBOOK.md delete mode 100644 examples/demo/.context/ARCHITECTURE.md delete mode 100644 examples/demo/.context/CONSTITUTION.md delete mode 100644 examples/demo/.context/CONVENTIONS.md delete mode 100644 examples/demo/.context/DECISIONS.md delete mode 100644 examples/demo/.context/GLOSSARY.md delete mode 100644 examples/demo/.context/LEARNINGS.md delete mode 100644 examples/demo/.context/TASKS.md delete mode 100644 examples/demo/.context/sessions/2026-01-15-143000-database-timeout-investigation.md delete mode 100644 examples/demo/PROMPT.md delete mode 100644 examples/demo/README.md delete mode 100644 examples/demo/specs/oauth2.md create mode 100644 
internal/assets/claude/skills/ctx-plan/SKILL.md delete mode 100644 internal/assets/hooks/messages/block-dangerous-command/cp-to-bin.txt delete mode 100644 internal/assets/hooks/messages/block-dangerous-command/install-to-local-bin.txt delete mode 100644 internal/assets/hooks/messages/block-dangerous-command/mid-git-push.txt delete mode 100644 internal/assets/hooks/messages/block-dangerous-command/mid-sudo.txt delete mode 100644 internal/assets/hooks/messages/block-dangerous-commands/cp-to-bin.txt delete mode 100644 internal/assets/hooks/messages/block-dangerous-commands/install-to-local-bin.txt delete mode 100644 internal/assets/hooks/messages/block-dangerous-commands/mid-git-push.txt delete mode 100644 internal/assets/hooks/messages/block-dangerous-commands/mid-sudo.txt delete mode 100644 internal/assets/hooks/messages/check-backup-age/warning.txt create mode 100644 internal/audit/cmd_fprint_test.go create mode 100644 internal/cli/activate/activate.go create mode 100644 internal/cli/activate/activate_test.go create mode 100644 internal/cli/activate/cmd/root/cmd.go create mode 100644 internal/cli/activate/cmd/root/doc.go create mode 100644 internal/cli/activate/cmd/root/run.go create mode 100644 internal/cli/activate/core/emit/doc.go create mode 100644 internal/cli/activate/core/emit/emit.go create mode 100644 internal/cli/activate/core/emit/posix.go create mode 100644 internal/cli/activate/core/emit/types.go create mode 100644 internal/cli/activate/core/resolve/doc.go create mode 100644 internal/cli/activate/core/resolve/internal.go create mode 100644 internal/cli/activate/core/resolve/resolve.go create mode 100644 internal/cli/activate/doc.go create mode 100644 internal/cli/activate/testmain_test.go delete mode 100644 internal/cli/backup/cmd.go delete mode 100644 internal/cli/backup/doc.go delete mode 100644 internal/cli/backup/run.go create mode 100644 internal/cli/deactivate/cmd/root/cmd.go create mode 100644 internal/cli/deactivate/cmd/root/doc.go create mode 
100644 internal/cli/deactivate/cmd/root/run.go create mode 100644 internal/cli/deactivate/deactivate.go create mode 100644 internal/cli/deactivate/deactivate_test.go create mode 100644 internal/cli/deactivate/doc.go create mode 100644 internal/cli/deactivate/testmain_test.go create mode 100644 internal/cli/hub/cmd/peer/cmd_test.go create mode 100644 internal/cli/hub/cmd/start/cmd_test.go create mode 100644 internal/cli/hub/cmd/status/cmd_test.go create mode 100644 internal/cli/hub/cmd/status/integration_test.go create mode 100644 internal/cli/hub/cmd/stepdown/cmd_test.go create mode 100644 internal/cli/hub/cmd/stop/cmd_test.go create mode 100644 internal/cli/mcp/cmd/root/cmd_test.go create mode 100644 internal/cli/memory/core/resolve/doc.go create mode 100644 internal/cli/memory/core/resolve/resolve.go delete mode 100644 internal/cli/system/cmd/block_dangerous_command/cmd.go delete mode 100644 internal/cli/system/cmd/block_dangerous_command/doc.go delete mode 100644 internal/cli/system/cmd/block_dangerous_command/run.go rename internal/cli/system/cmd/{check_backup_age => check_anchor_drift}/cmd.go (64%) create mode 100644 internal/cli/system/cmd/check_anchor_drift/doc.go create mode 100644 internal/cli/system/cmd/check_anchor_drift/run.go create mode 100644 internal/cli/system/cmd/check_anchor_drift/run_test.go create mode 100644 internal/cli/system/cmd/check_anchor_drift/testmain_test.go delete mode 100644 internal/cli/system/cmd/check_backup_age/doc.go delete mode 100644 internal/cli/system/cmd/check_backup_age/run.go create mode 100644 internal/cli/system/core/anchor/doc.go create mode 100644 internal/cli/system/core/anchor/equal.go delete mode 100644 internal/cli/system/core/archive/archive.go delete mode 100644 internal/cli/system/core/archive/backup.go delete mode 100644 internal/cli/system/core/archive/doc.go delete mode 100644 internal/cli/system/core/archive/smb.go delete mode 100644 internal/cli/system/core/archive/types.go create mode 100644 
internal/cli/system/core/check/full_preamble.go create mode 100644 internal/cli/system/core/check/pause_preamble.go delete mode 100644 internal/config/archive/backup.go delete mode 100644 internal/config/archive/var.go delete mode 100644 internal/config/embed/cmd/backup.go create mode 100644 internal/config/embed/flag/activate.go delete mode 100644 internal/config/embed/flag/backup.go delete mode 100644 internal/config/embed/text/backup.go create mode 100644 internal/config/embed/text/check_anchor.go create mode 100644 internal/config/embed/text/err_activate.go create mode 100644 internal/config/rc/messages.go delete mode 100644 internal/config/regex/cmd_test.go create mode 100644 internal/config/shell/doc.go create mode 100644 internal/config/shell/shell.go create mode 100644 internal/err/activate/activate.go create mode 100644 internal/err/activate/doc.go delete mode 100644 internal/exec/gio/doc.go delete mode 100644 internal/exec/gio/mount.go create mode 100644 internal/rc/candidates.go create mode 100644 internal/rc/require.go create mode 100644 internal/rc/require_test.go create mode 100644 internal/rc/testmain_test.go delete mode 100644 internal/rc/walk.go delete mode 100644 internal/rc/walk_test.go create mode 100644 internal/testutil/testctx/doc.go create mode 100644 internal/testutil/testctx/testctx.go create mode 100644 internal/write/activate/activate.go create mode 100644 internal/write/activate/doc.go delete mode 100644 internal/write/backup/backup.go delete mode 100644 internal/write/backup/doc.go delete mode 100644 internal/write/backup/skip.go create mode 100644 site/operations/runbooks/backup-strategy/index.html create mode 100644 site/recipes/activating-context/index.html create mode 100644 site/recipes/scrutinizing-a-plan/index.html rename site/{cli/backup => security/design}/index.html (65%) create mode 100644 specs/context-resolution-analysis.md create mode 100644 specs/explicit-context-dir.md create mode 100644 
specs/single-source-context-anchor.md diff --git a/.claude/hooks/block-hack-scripts.sh b/.claude/hooks/block-hack-scripts.sh deleted file mode 100755 index 4d02407eb..000000000 --- a/.claude/hooks/block-hack-scripts.sh +++ /dev/null @@ -1,47 +0,0 @@ -#!/usr/bin/env bash -# block-hack-scripts.sh — PreToolUse hook for Bash tool -# Blocks direct invocation of hack/ scripts; nudges toward make targets. -# Reading hack/ files (cat, head, grep, etc.) is allowed. - -set -euo pipefail - -input=$(cat) -command=$(printf '%s' "$input" | sed -n 's/.*"command" *: *"\(.*\)".*/\1/p' | head -1) - -# Empty command — nothing to check -[ -z "$command" ] && exit 0 - -# Allow read-only operations on hack/ files -if printf '%s' "$command" | grep -qP '^\s*(cat|head|tail|less|read|ls|grep|rg|diff|wc|file|stat)\b'; then - exit 0 -fi - -# Pattern: hack script invocation at start of command or after a separator -# Matches: ./hack/foo.sh, hack/foo.sh, bash ./hack/foo.sh, sh hack/foo.sh -# Also after && ; || | -if ! printf '%s' "$command" | grep -qP '(^|\s|&&|;|\|\||\|)\s*(bash\s+|sh\s+)?(\.\/)?hack\/\S+\.sh'; then - exit 0 -fi - -# Extract the script name(s) that matched -script=$(printf '%s' "$command" | grep -oP '(\.\/)?hack\/\S+\.sh' | head -1 | sed 's|^\./||') - -# Map scripts to make targets -case "$script" in - hack/release.sh) target="make release" ;; - hack/build-all.sh) target="make build-all" ;; - hack/lint-drift.sh) target="make lint-drift" ;; - hack/lint-docs.sh) target="make lint-docs" ;; - hack/plugin-reload.sh) target="make plugin-reload" ;; - hack/reinstall.sh) target="make install" ;; - hack/gpg-fix.sh) target="make gpg-fix / make gpg-test" ;; - *) target="" ;; -esac - -if [ -n "$target" ]; then - reason="Use \`${target}\` instead of invoking \`${script}\` directly." -else - reason="Direct hack/ script invocation blocked. Ask the user to run it manually, or create a make target first." 
-fi - -printf '{"decision":"block","reason":"%s"}\n' "$reason" diff --git a/.claude/skills/_ctx-audit/SKILL.md b/.claude/skills/_ctx-audit/SKILL.md index b2832ea91..6b6253c1b 100644 --- a/.claude/skills/_ctx-audit/SKILL.md +++ b/.claude/skills/_ctx-audit/SKILL.md @@ -323,8 +323,6 @@ rg 'default|Default' docs/configuration.md -n ``` Cross-check: -- `config.DirContext` value matches the `context_dir` default in docs - and sample `.ctxrc` - `FileReadOrder` entries match the `priority_order` list in sample `.ctxrc` and the docs "Default priority order" section - `DefaultTokenBudget`, `DefaultArchiveAfterDays`, etc. in `rc.go` diff --git a/.claude/skills/_ctx-backup/SKILL.md b/.claude/skills/_ctx-backup/SKILL.md deleted file mode 100644 index 6a2983c5f..000000000 --- a/.claude/skills/_ctx-backup/SKILL.md +++ /dev/null @@ -1,68 +0,0 @@ ---- -name: _ctx-backup -description: "Backup project context and global Claude data to SMB share. Use before risky operations, at end of session, or on request." -allowed-tools: Bash(ctx backup*), Bash(ls /tmp/ctx-backup*) ---- - -Backup `.context/`, `.claude/`, `ideas/`, and `~/.claude/` to -the configured SMB share. 
- -## When to Use - -- Before risky operations (major refactors, dependency upgrades) -- At the end of a productive session -- When the user explicitly asks for a backup -- Before switching branches with uncommitted context changes - -## When NOT to Use - -- When `CTX_BACKUP_SMB_URL` is not configured (the command will - error — tell the user to set it up) -- Multiple times in quick succession with no changes in between - -## Usage Examples - -```text -/backup -/backup project -/backup global -/backup all -``` - -## Arguments - -| Argument | What it backs up | -|-----------|-----------------------------------------------| -| (none) | Same as `all` | -| `project` | Project context only (`.context/`, `.claude/`, `ideas/`) | -| `global` | Global Claude data only (`~/.claude/`) | -| `all` | Both project and global | - -## Execution - -Based on the argument, run the appropriate command: - -```bash -# For "project" -ctx backup --scope project - -# For "global" -ctx backup --scope global - -# For "all" or no argument -ctx backup --scope all -``` - -## Process - -1. Parse the argument (default to `all` if none provided) -2. Run the appropriate `ctx backup` command -3. Report the archive path and size from the output -4. Confirm success to the user - -## Quality Checklist - -- [ ] The command completed without errors -- [ ] Archive size is reported to the user -- [ ] If the SMB share was not mounted, the error is clearly - communicated diff --git a/.context/AGENT_PLAYBOOK.md b/.context/AGENT_PLAYBOOK.md index a01d9b8d1..6ba48fe9c 100644 --- a/.context/AGENT_PLAYBOOK.md +++ b/.context/AGENT_PLAYBOOK.md @@ -15,6 +15,24 @@ making a decision, learning something, or hitting a milestone: persist before continuing. Don't wait for session end; it may never come cleanly. +## File Interaction Protocol + +When a task involves reading, modifying, or reasoning about a file: + +1. 
**Read before act** + - Read the file content directly before making any change + - Do not rely on memory, summaries, or prior reads +2. **No partial reads** + - Do not sample the beginning or end of a file and assume the rest +3. **Freshness requirement** + - A read must be recent relative to the action + - Do not reuse stale context from earlier in the session +4. **No implicit scope** + - "This change is small" is not a valid justification + - "This file is large" is not a valid justification +5. **Edit authority comes from visibility** + - If you haven't seen it, you don't get to modify it + ## Spec Requirement Do not begin implementation work without a spec. @@ -39,30 +57,12 @@ Required review inputs: - the current implementation Review prompt: -- "Review , TASKS.md, and the current implementation for drift, +- "Review , TASKS.md, and the current implementation for drift, omissions, invalid assumptions, and incomplete requirements." -Do not declare work complete until review findings are either resolved or +Do not declare work complete until review findings are either resolved or explicitly recorded. -## File Interaction Protocol - -When a task involves reading, modifying, or reasoning about a file: - -1. **Read before act** - - Read the file content directly before making any change - - Do not rely on memory, summaries, or prior reads -2. **No partial reads** - - Do not sample the beginning or end of a file and assume the rest -3. **Freshness requirement** - - A read must be recent relative to the action - - Do not reuse stale context from earlier in the session -4. **No implicit scope** - - "This change is small" is not a valid justification - - "This file is large" is not a valid justification -5. 
**Edit authority comes from visibility** - - If you haven't seen it, you don't get to modify it - ## Invoking ctx Always use `ctx` from PATH: @@ -72,9 +72,23 @@ ctx agent # ✓ correct ./dist/ctx # ✗ avoid hardcoded paths go run ./cmd/ctx # ✗ avoid unless developing ctx itself ``` - Check with `which ctx` if unsure whether it's installed. +### When ctx Returns an Error + +Triage the error before reacting: + +- **Invocation error**: the message points at your call: unknown + flag, unknown command, wrong argument count, missing required + flag. Read `ctx --help`, fix the call, and retry. +- **Everything else**: missing context directory, config problem, + hook rejection, permission denied, unexpected failure. Relay the + output to the user **verbatim** and stop. Do not add flags, run + other commands, edit files to fix the cause, or retry. Wait for + the user's next instruction. + +When unsure which kind you're looking at, treat it as the second. + ## Context Readback Before starting any work, read the required context files and confirm to the @@ -84,7 +98,7 @@ conventions." Do not begin implementation until you have done so. ## Supplementary Files These files live in `.context/` alongside the core context files. -Read them when the task at hand warrants it — not on every session. +Read them when the task at hand warrants it, not on every session. | File | Read when | |--------------------|----------------------------------------------------------------| @@ -92,6 +106,28 @@ Read them when the task at hand warrants it — not on every session. | DETAILED_DESIGN.md | Deep-diving into internals (generated via `/ctx-architecture`) | | GLOSSARY.md | Encountering unfamiliar project-specific terminology | +## Context Directory Lives at the Project Root + +The project root is the parent of `.context/`, by contract — +specifically `filepath.Dir(ContextDir())`. That's where `ctx sync`, +`ctx drift`, and the memory-drift hook look for code, secrets, +and `MEMORY.md`. 
+ +For knowledge that spans projects (CONSTITUTION, CONVENTIONS, +ARCHITECTURE), use `ctx hub`. + +Recommended layout: + +``` +~/WORKSPACE/my-project + ├── .git + ├── .context + ├── Makefile + ├── Makefile.ctx + └── specs + └── ... +``` + ## Reason Before Acting Before implementing any non-trivial change, think through it step-by-step: @@ -148,7 +184,6 @@ Surface problems worth mentioning: - **Stale context files** (not modified recently): mention before stale context influences work - **Bloated token count** (over 30k): offer `ctx compact` -- **Long single-line entries**: run `ctx fmt` to normalize line widths - **Drift between files and code**: spot-check paths from ARCHITECTURE.md against the actual file tree @@ -171,14 +206,6 @@ is running long: Context compaction happens automatically, but the next window loses nuance. Explicit persistence is cheaper than re-discovery. -### Check Available Skills - -Before starting any task, scan the skill list in your system -prompt to see if a dedicated skill already handles the request. -Prefer invoking an existing skill over ad-hoc implementation: -skills encode project conventions, quality gates, and -persistence steps that are easy to miss otherwise. - ### Conversational Triggers Users rarely invoke skills explicitly. Recognize natural language: @@ -290,7 +317,7 @@ Never assume. If you don't see it in files, you don't know it. ## Planning Work Every commit requires a `Spec:` trailer (CONSTITUTION rule). This means -every piece of work needs a spec — no exceptions, no "trivial" qualifier. +every piece of work needs a spec; no exceptions, no "trivial" qualifier. A one-liner bugfix gets a one-paragraph spec; a multi-package feature gets a full design document. The spec exists for traceability, not ceremony. @@ -389,4 +416,3 @@ re-discovering it. 5 minutes reading saves 50 minutes of wasted work. 
- [ ] DECISIONS.md has no superseded entries unmarked - [ ] LEARNINGS.md gotchas still relevant - [ ] Run `ctx drift` and address warnings -- [ ] Run `ctx fmt` to normalize line widths diff --git a/.context/AGENT_PLAYBOOK_GATE.md b/.context/AGENT_PLAYBOOK_GATE.md index 65a9cb004..d5c3b2974 100644 --- a/.context/AGENT_PLAYBOOK_GATE.md +++ b/.context/AGENT_PLAYBOOK_GATE.md @@ -8,9 +8,15 @@ lifecycle details, or anti-patterns. ```bash ctx status # correct -./dist/ctx # wrong — never hardcode paths -go run ./cmd/ctx # wrong — unless developing ctx itself -```` +./dist/ctx # wrong: never hardcode paths +go run ./cmd/ctx # wrong: unless developing ctx itself +``` + +## When `ctx` Errors + +If the error names your flag, argument, or command, read +`ctx --help` and fix the call. Otherwise, relay verbatim +and stop. When unsure, stop. ## File Interaction Protocol @@ -30,12 +36,12 @@ When a task involves reading, modifying, or reasoning about a file: Do not begin implementation without a spec. Every commit requires a `Spec:` trailer. Every piece of work needs -a spec — no exceptions. Scale the spec to the work. Use `/ctx-spec` +a spec; no exceptions. Scale the spec to the work. Use `/ctx-spec` to scaffold. ## Proactive Persistence -After completing a task, making a decision, or hitting a gotcha — +After completing a task, making a decision, or hitting a gotcha, persist before continuing. Don't wait for session end. ## Chunk and Checkpoint diff --git a/.context/ARCHITECTURE.md b/.context/ARCHITECTURE.md index 61a17e664..5357482a2 100644 --- a/.context/ARCHITECTURE.md +++ b/.context/ARCHITECTURE.md @@ -19,377 +19,70 @@ and warns if they do not exist on disk. Keep paths accurate. ## Overview -ctx is a CLI tool that creates and manages a `.context/` directory -containing structured markdown files. These files provide persistent, -token-budgeted, priority-ordered context for AI coding assistants -across sessions. 
An MCP server exposes the same capabilities to any -MCP-compatible agent over JSON-RPC 2.0. + -Design philosophy: +## Package/Module Dependency Graph -- **Markdown-centric**: all context is plain markdown; no databases, - no proprietary formats. Files are human-readable and version- - controlled alongside the code they describe. -- **Token-budgeted**: context assembly respects configurable token - limits so AI agents receive the most important information first - without exceeding their context window. -- **Priority-ordered**: files are loaded in a deliberate sequence - (rules before tasks, conventions before architecture) so agents - internalize constraints before acting. -- **Convention over configuration**: sensible defaults with optional - `.ctxrc` overrides. No config file required to get started. -- **Agent-agnostic**: the MCP server speaks standard protocol; the - CLI works from any shell. No agent-specific coupling in core code. - -For per-module deep dives (types, exported API, data flow, edge -cases), see [DETAILED_DESIGN.md](DETAILED_DESIGN.md). - -## Layered Architecture - -The codebase is organized into strict dependency layers. Each layer -may only import from layers below it. 
- -``` -Layer 6: Entry Points - cmd/ctx, bootstrap (34 commands registered) - -Layer 5: CLI Commands + MCP Server - internal/cli/* (34 cmd/core packages) - internal/mcp/* (JSON-RPC 2.0 server) - -Layer 4: Output + Errors - internal/write/* (46 writer packages) - internal/err/* (35 error packages) - -Layer 3: Domain Logic - entity, entry, context/*, drift, index, task, tidy, - trace, journal/*, memory, notify, claude - -Layer 2: Infrastructure - io, format, parse, sanitize, validate, inspect, - flagbind, exec/*, log/*, crypto, sysinfo, rc - -Layer 1: Foundation (zero internal dependencies) - internal/config/* (60+ sub-packages) - internal/assets (embedded FS + 14 typed readers) - -Layer 0: Quality Gates (test-only) - internal/audit, internal/compliance -``` - -## Package Dependency Graph + BOOT[bootstrap] - BOOT --> CLI[cli/* 34 commands] - BOOT --> MCP[mcp/server] - - CLI --> CORE[core/ packages] - CLI --> WRITE[write/* 46 pkgs] - CLI --> ERR[err/* 35 pkgs] - - MCP --> HANDLER[mcp/handler] - MCP --> PROTO[mcp/proto] - HANDLER --> DOMAIN - - CORE --> DOMAIN[domain packages] - WRITE --> FMT[format] - WRITE --> DESC[assets/read/desc] - ERR --> DESC - - DOMAIN --> INFRA[infrastructure] - DOMAIN --> RC[rc] - - INFRA --> CONFIG[config/* 60+ pkgs] - INFRA --> ASSETS[assets + read/*] - RC --> CONFIG + core["core (no deps)"] + api["api"] --> core + cli["cli"] --> api ``` - -*Full dependency matrix: -[architecture-dia-dependencies.md](architecture-dia-dependencies.md)* +--> ## Component Map -### Foundation (zero internal dependencies) - -| Package | Purpose | Key Exports | -|---------|---------|-------------| -| `internal/config/*` | 60+ sub-packages: constants, types, regex, text keys | Domain-specific constants imported granularly | -| `internal/assets` | Embedded templates via `go:embed` | `FS` (single embed) | -| `internal/assets/read/*` | 14 typed accessor packages | `desc.Text()`, `skill.Content()`, `entry.List()` | -| `internal/assets/tpl` | Sprintf-based format 
templates | Entry, journal, loop, obsidian templates | - -### Infrastructure - -| Package | Purpose | Key Exports | -|---------|---------|-------------| -| `internal/io` | Guarded file I/O with path validation | `SafeReadFile()`, `SafeWriteFile()`, `SafePost()` | -| `internal/format` | Display formatting (time, bytes, tokens) | `TimeAgo()`, `Bytes()`, `Tokens()`, `Truncate()` | -| `internal/parse` | Text-to-typed-value conversions | `Date()` | -| `internal/sanitize` | Input mutation to conform constraints | `Filename()` | -| `internal/validate` | Path validation and symlink checks | `Boundary()`, `Symlink()` | -| `internal/inspect` | String predicates and position queries | `Contains()`, `StartsWithCtxMarker()` | -| `internal/flagbind` | Cobra flag binding with YAML descriptions | `BoolFlag()`, `StringFlag()`, `IntFlag()` | -| `internal/exec/*` | External command wrappers (5 packages) | `git.Run()`, `dep.GoListPackages()` | -| `internal/log/*` | Event logging + stderr warnings | `event.Append()`, `warn.Warn()` | -| `internal/crypto` | AES-256-GCM encryption (stdlib only) | `Encrypt()`, `Decrypt()`, `GenerateKey()` | -| `internal/sysinfo` | OS metrics with platform build tags | `Collect()`, `Evaluate()` | -| `internal/rc` | Runtime config (.ctxrc + env + flags) | `RC()`, `ContextDir()`, `TokenBudget()` | - -### Domain Logic - -| Package | Purpose | Key Exports | -|---------|---------|-------------| -| `internal/entity` | Shared domain types (no logic) | `Session`, `Context`, `FileInfo`, `EntryParams` | -| `internal/entry` | Entry validation and writing | `ValidateAndWrite()` | -| `internal/context/*` | Context loading with token counting | `load.Do()`, `token.Estimate()`, `summary.Generate()` | -| `internal/drift` | Context quality validation (7 checks) | `Detect()`, `Report.Status()` | -| `internal/index` | Markdown index tables | `Update()`, `ParseEntryBlocks()` | -| `internal/task` | Task checkbox parsing | `Completed()`, `Pending()`, `SubTask()` | -| 
`internal/tidy` | Context file maintenance | `CompactResult`, `parseBlockAt()` | -| `internal/trace` | Commit-to-context linking | `Collect()`, `FormatTrailer()` | -| `internal/journal/parser` | Session transcript parsing (4 formats) | `ParseFile()`, `FindSessionsForCWD()` | -| `internal/journal/state` | Journal pipeline state (JSON) | `Load()`, `Save()`, `Mark*()` | -| `internal/memory` | Memory bridge (MEMORY.md sync) | `DiscoverPath()`, `Sync()`, `SelectContent()` | -| `internal/notify` | Fire-and-forget webhooks | `Send()`, `LoadWebhook()` | -| `internal/claude` | Claude Code integration types | `Skills()`, `SkillContent()` | - -### MCP Server (`internal/mcp/*`) - -| Package | Purpose | -|---------|---------| -| `mcp/proto` | JSON-RPC 2.0 message types, MCP constants | -| `mcp/server` | Main loop: stdin read, dispatch, stdout write | -| `mcp/server/dispatch` | Method-based request routing | -| `mcp/server/dispatch/poll` | File mtime polling for change notifications | -| `mcp/server/catalog` | URI-to-file resource mapping (9 resources) | -| `mcp/server/route/*` | Handlers: initialize, ping, tool, prompt, resource | -| `mcp/server/def/*` | Tool (11) and prompt (5) definitions | -| `mcp/handler` | Domain logic as free functions taking `*entity.MCPDeps` | -| `entity.MCPSession` | Per-session advisory state (pure data + mutations) | - -### CLI Commands (`internal/cli/*`) - -34 commands in 8 groups, each following cmd/root + core/ taxonomy: - -| Group | Commands | -|-------|----------| -| Getting Started | `initialize`, `status`, `guide` | -| Context | `add`, `load`, `agent`, `sync`, `drift`, `compact` | -| Artifacts | `decision`, `learning`, `task` | -| Sessions | `journal`, `memory`, `remind`, `pad` | -| Runtime | `config`, `permission`, `pause`, `resume` | -| Integration | `setup`, `mcp`, `watch`, `notify`, `loop` | -| Diagnostics | `doctor`, `change`, `dep`, `why`, `trace` | -| Utilities | `reindex` | -| Hidden | `serve`, `site`, `system` (34 hook subcommands) | 
- -### Output Layer - -| Package | Purpose | -|---------|---------| -| `internal/write/*` | 46 packages: formatted terminal/JSON output | -| `internal/err/*` | 35 packages: error constructors with YAML text | - -### Quality Gates (test-only) - -| Package | Purpose | -|---------|---------| -| `internal/audit` | AST-based codebase invariant tests | -| `internal/compliance` | File-level convention adherence tests | - -## Data Flow Diagrams - -Five core flows define how data moves through the system: - -1. **`ctx init`**: User invokes -> `cli/initialize` reads embedded - templates from `assets` -> creates `.context/` directory -> writes - all template files -> generates AES-256 key -> deploys hooks and - skills -> merges `settings.local.json` -> writes/merges `CLAUDE.md`. - -2. **`ctx agent`**: Agent invokes with `--budget N` -> - `context/load.Do()` reads all `.md` files -> entries scored by - recency and relevance -> sorted and fitted to token budget -> - overflow entries listed as "Also Noted" -> returns Markdown packet. + -## External Dependencies +## Data Flow -Two direct Go dependencies: `spf13/cobra` (CLI framework), -`gopkg.in/yaml.v3` (YAML parsing). Optional external tools: -`zensical` (static site generation) and `gpg` (commit signing). +>API: OK + API-->>User: 201 Created +``` +--> -Local: `make build` (CGO_ENABLED=0, ldflags version), `make audit` -(gofmt, go vet, golangci-lint, lint scripts, tests), `make smoke` -(integration tests). Release: `hack/release.sh` bumps VERSION, -generates release notes, builds all targets, creates signed git tag. -CI: GitHub Actions on push; release on `v*` tags producing 6 -platform binaries (darwin/linux/windows x amd64/arm64). 
+## Key Patterns -*Full build pipeline diagram: -[architecture-dia-build.md](architecture-dia-build.md)* + ## File Layout + diff --git a/.context/CONSTITUTION.md b/.context/CONSTITUTION.md index ac35a0650..eaaf35fa2 100644 --- a/.context/CONSTITUTION.md +++ b/.context/CONSTITUTION.md @@ -12,11 +12,13 @@ DO NOT UPDATE FOR: - Temporary constraints (use TASKS.md blockers) --> -These rules are INVIOLABLE. If a task requires violating these, the task is wrong. +These rules are INVIOLABLE. If a task requires violating these, the +task is wrong. ## Completion Over Motion -Work is only complete when it is **fully done**, not when progress has been made. +Work is only complete when it is **fully done**, not when progress +has been made. - The requested outcome must be delivered end-to-end. - Partial progress is not completion. @@ -33,10 +35,10 @@ If you start something, you own it, you finish it. ## Context Integrity Invariants - [ ] **Never** modify or reason about a file based on partial or assumed content -- [ ] If a file is the subject of an operation, its relevant contents must be +- [ ] If a file is the subject of an operation, its relevant contents must be **fully understood** before acting -- [ ] Sampling, guessing, or relying on prior assumptions instead of - reading is a **violation** +- [ ] Sampling, guessing, or relying on prior assumptions instead of reading + is a **violation** --- @@ -83,27 +85,36 @@ Leave the system in a better state than you found it. - [ ] All code must pass tests before commit - [ ] No TODO comments in main branch (move to TASKS.md) -- [ ] Path construction uses stdlib — no string concatenation (security: prevents path traversal) +- [ ] Path construction uses stdlib: no string concatenation + (security: prevents path traversal) ## Process Invariants - [ ] All architectural changes require a decision record -- [ ] Context loading is not a detour from your task. It IS the first step of every session. 
A 30-second read delay is always cheaper than a decision made without context. -- [ ] Every commit references a spec (`Spec: specs/.md` trailer) — no exceptions, no "non-trivial" qualifier. Even one-liner fixes need a spec for traceability. Use `/ctx-commit` instead of raw `git commit`. +- [ ] Context loading is not a detour from your task. It IS the first + step of every session. A 30-second read delay is always cheaper + than a decision made without context. +- [ ] Every commit references a spec (`Spec: specs/.md` trailer): + no exceptions, no "non-trivial" qualifier. Even one-liner fixes + need a spec for traceability. Use `/ctx-commit` instead of raw + `git commit`. ## TASKS.md Structure Invariants -TASKS.md must remain a replayable checklist. Uncheck all items and re-run = verify/redo all tasks in order. +TASKS.md must remain a replayable checklist. Uncheck all items and +re-run = verify/redo all tasks in order. -- [ ] **Never move tasks** — tasks stay in their Phase section permanently -- [ ] **Never remove Phase headers** — Phase labels provide structure and order -- [ ] **Never merge or collapse Phase sections** — each phase is a logical unit -- [ ] **Never delete tasks** — mark as `[x]` completed, or `[-]` skipped with reason -- [ ] **Use inline labels for status** — add `#in-progress` to task text, don't move it -- [ ] **No "In Progress" / "Next Up" sections** — these encourage moving tasks -- [ ] **Ask before restructuring** — if structure changes seem needed, ask the user first +- [ ] **Never move tasks**: tasks stay in their Phase section permanently +- [ ] **Never remove Phase headers**: Phase labels provide structure and order +- [ ] **Never merge or collapse Phase sections**: each phase is a logical unit +- [ ] **Never delete tasks**: mark as `[x]` completed, or `[-]` skipped with reason +- [ ] **Use inline labels for status**: add `#in-progress` to task text, don't move it +- [ ] **No "In Progress" / "Next Up" sections**: these encourage moving 
tasks +- [ ] **Ask before restructuring**: if structure changes seem needed, ask the user first ## Context Preservation Invariants -- [ ] **Archival is allowed, deletion is not** — use `ctx tasks archive` to move completed tasks to `.context/archive/`, never delete context history -- [ ] **Archive preserves structure** — archived tasks keep their Phase headers for traceability +- [ ] **Archival is allowed, deletion is not**: use `ctx task archive` to move + completed tasks to `.context/archive/`, never delete context history +- [ ] **Archive preserves structure**: archived tasks keep their Phase headers + for traceability diff --git a/.context/CONVENTIONS.md b/.context/CONVENTIONS.md index de39c8fea..adf3d8624 100644 --- a/.context/CONVENTIONS.md +++ b/.context/CONVENTIONS.md @@ -26,39 +26,9 @@ DO NOT UPDATE FOR: - **Maps reference constants**: Use constants as keys, not literals - `map[string]X{ConstKey: value}` not `map[string]X{"literal": value}` -## Casing - -- **Proper nouns keep their casing** in comments, strings, and docs - - `Markdown` not `markdown` (it's a language name) - - `YAML`, `JSON`, `TOML` — always uppercase - - `GitHub`, `JavaScript`, `PostgreSQL` — match official casing - - Exception: code fence language identifiers are lowercase (`` ```markdown ``) - -## Predicates - -- **No Is/Has/Can prefixes**: `Completed()` not - `IsCompleted()`, `Empty()` not `IsEmpty()` -- Applies to exported methods that return bool -- Private helpers may use prefixes when it reads more naturally - -## File Organization - -- **Public API in main file, private helpers in separate logical files** - - `loader.go` (exports `Load()`) + `process.go` (unexported helpers) - - NOT: one file with unexported functions stacked at the bottom -- Reasoning: agent loads only the public API file unless - it needs implementation detail -- **Name files after what they contain, not their role** - - `format.go`, `sort.go`, `parse.go` — named by responsibility - - NOT: `util.go`, `utils.go`, 
`helper.go`, `common.go` — junk drawer names - - If a file can't be named without a generic label, - its contents don't belong together - - Existing junk drawers should be split as their contents grow - ## Patterns -- **Centralize magic strings**: All repeated literals - belong in a `config` or `constants` package +- **Centralize magic strings**: All repeated literals belong in a `config` or `constants` package - If a string appears in 3+ files, it needs a constant - If a string is used for comparison, it needs a constant - **Path construction**: Always use stdlib path joining @@ -67,75 +37,19 @@ DO NOT UPDATE FOR: - Node: `path.join(dir, file)` - Never: `dir + "/" + file` - **Constants reference constants**: Self-referential definitions - - `FileType[UpdateTypeTask] = FilenameTask` not - `FileType["task"] = "TASKS.md"` -- **No error variable shadowing**: Use descriptive names - when multiple errors exist in a function - - `readErr`, `writeErr`, `indexErr` — not repeated `err` / `err :=` - - Shadowed `err` silently disconnects from the outer - variable, causing subtle bugs + - `FileType[UpdateTypeTask] = FilenameTask` not `FileType["task"] = "TASKS.md"` - **Colocate related code**: Group by feature, not by type - `session/run.go`, `session/types.go`, `session/parse.go` - Not: `runners/session.go`, `types/session.go`, `parsers/session.go` -## Line Width - -- **Target ~80 characters**: Highly encouraged, not a hard limit - - Some lines will naturally exceed it (long strings, - struct tags, URLs) — that's fine - - Drift accumulates silently, especially in test code - - Break at natural points: function arguments, struct fields, chained calls - -## Duplication - -- **Non-test code**: Apply the rule of three — extract - when a block appears 3+ times - - Watch for copy-paste during task-focused sessions - where the agent prioritizes completion over shape -- **Test code**: Some duplication is acceptable for readability - - When the same setup/assertion block appears 3+ 
times, extract a test helper - - Use `t.Helper()` so failure messages point to the caller, not the helper - ## Testing - **Colocate tests**: Test files live next to source files - `foo.go` → `foo_test.go` in same package - Not a separate `tests/` folder -- **Test the unit, not the file**: One test file can test - multiple related functions +- **Test the unit, not the file**: One test file can test multiple related functions - **Integration tests are separate**: `cli_test.go` for end-to-end binary tests -## Code Change Heuristics - -- **Present interpretations, don't pick silently**: If a request has multiple - valid readings, lay them out rather than guessing -- **Push back when warranted**: If a simpler approach exists, say so -- **"Would a senior engineer call this overcomplicated?"**: If yes, simplify -- **Match existing style**: Even if you'd write it differently in a greenfield -- **Every changed line traces to the request**: If it doesn't, revert it - -## Decision Heuristics - -- **"Would I start this today?"**: If not, continuing is - the sunk cost — evaluate only future value -- **"Reversible or one-way door?"**: Reversible decisions - don't need deep analysis -- **"Does the analysis cost more than the decision?"**: - Stop deliberating when the options are within an order - of magnitude -- **"Order of magnitude, not precision"**: 10x better - matters; 10% better usually doesn't - -## Refactoring - -- **Measure the end state, not the effort**: When refactoring, ask what the - codebase looks like *after*, not how much work the change is -- **Three questions before restructuring**: - 1. What's the smallest codebase that solves this? - 2. Does the proposed change result in less total code? - 3. What can we delete now that this change makes obsolete? 
-- **Deletion is a feature**: Writing 50 lines that delete 200 is a net win - ## Documentation - **Godoc format**: Use canonical sections @@ -152,121 +66,5 @@ DO NOT UPDATE FOR: // - Type: Description of return value func FunctionName(param1, param2 string) error ``` -- **Struct field documentation**: Exported structs with 2+ fields - must document every field. Two accepted forms: - ```go - // Option A: Fields section in docblock (preferred for 4+ fields) - // TypeName describes X. - // - // Fields: - // - FieldA: Description - // - FieldB: Description - type TypeName struct { - - // Option B: Inline comments (acceptable for 2-3 fields) - // TypeName describes X. - type TypeName struct { - // FieldA is the description. - FieldA string - FieldB string // Description - } - ``` -- **Package doc in doc.go**: Each package gets a `doc.go` with package-level - documentation describing behavior, not structure. Do NOT include - `# File Organization` sections listing files — they drift when files are - added, renamed, or removed, and the filesystem is self-documenting +- **Package doc in doc.go**: Each package gets a `doc.go` with package-level documentation - **Copyright headers**: All source files get the project copyright header - -## Blog Publishing - -- **Checklist for ideas/ → docs/blog/ promotion**: - 1. Update date in frontmatter to publish date - 2. Fix relative paths (from `../docs/blog/` to peer references) - 3. Add cross-links to/from companion posts ("See also" sections) - 4. Add "The Arc" section connecting to the series narrative - 5. Update `docs/blog/index.md` with entry (newest first) - 6. Verify all link targets exist - 7. 
Build and test before commit -- **Arc section**: Every post includes "The Arc" near the end, framing - where the post sits in the broader blog narrative -- **See also links**: Use italic `*See also: [Title](file) -- one-line - description connecting the two posts.*` format at the end of posts -- **Frontmatter**: Include copyright header, title, date, author, topics list -- **Blog index order**: Newest post first, with topic tags and 3-4 line summary - -- **Update admonitions for historical blog content**: Use MkDocs admonitions - (`!!! note "Update"`) at the top of blog post sections where features have - been superseded or installation has changed. Link to current documentation. - Keep original content intact below for historical context. -- **New CLI subcommand documentation checklist**: Update docs in at least - three places: (1) Feature page — commands table, usage section, skill/NL - table. (2) CLI reference — full reference entry with args, flags, examples. - (3) Relevant recipes. (4) zensical.toml — only if adding a new page. -- **Rename/refactor documentation checklist**: Scope ALL documentation impact - before implementation. Three anchors plus one tangential: (1) Docstrings. - (2) User-facing docs (`docs/`). (3) Recipes (`docs/recipes/`). (4) Blog - posts and release notes. Also check: skills, hook messages, YAML text - files, `.context/` files, and specs. -- **Stage site/ with docs/ changes**: The generated HTML is tracked in git - with no CI build step - -## Error Handling - -- **Zero silent error discard**: Handle every error, never suppress with - `_ =` or `//nolint:errcheck`. Production: defer-close logs to stderr - via `log.Warn()`. Test: `t.Fatal(err)` for setup, `t.Log(err)` for - cleanup. 
For gosec false positives: fix the code rather than adding - nolint markers — the goal is zero golangci-lint suppressions -- **Error constructors in internal/err**: Never in per-package err.go - files — eliminates the broken-window pattern where agents add local - errors when they see a local err.go exists - -## CLI Structure - -- **CLI package taxonomy**: Every package under `internal/cli/` follows: - parent.go (Cmd wiring), doc.go, `cmd/root/` or `cmd//` - (implementation), `core/` (shared helpers) -- **cmd/ directories**: Only cmd.go, run.go, and tests — helpers and - output go to `core/` -- **core/ structs**: Consolidated into a single `types.go` file -- **User-facing text via assets**: All text routed through - `internal/assets` with YAML-backed TextDescKeys — no inline strings - in `core/` or `cmd/` packages -- **config/ doc.go**: Every package under `internal/config/` must have - a doc.go with the project header and a one-line package comment -- **DescKey prefix**: Not CmdDescKey — `cmd.DescKeyFoo` not - `cmd.CmdDescKeyFoo` (Go package hygiene, avoids stutter) -- **Cobra Use: fields**: Must reference `cmd.Use*` constants, never raw - strings or `cmd.DescKey*` -- **Run functions exported PascalCase**: `Run`, `RunImport`, - `RunArchive` etc. No private `runXXX` variants -- **write/ packages write to stdio only**: Functions take - `*cobra.Command`, not `io.Writer`. Exception: `write/rc` writes to - `os.Stderr` because rc loads before cobra -- **Package directory names singular**: Unless Go convention requires - plural -- **Import grouping**: stdlib — blank line — external deps (cobra, - yaml) — blank line — ctx imports. 
Three groups, always in this order -- **camelCase import aliases**: `cFlag` not `cflag`, `cfgFmt` not - `cfgfmt` -- **Icons and symbols as token constants**: Not unicode escapes -- **Cross-cutting domain types in internal/entity**: Types used by one - package stay in that package; types used across packages go to entity - -- Warn format strings centralized in config/warn/ — use warn.Close, - warn.Write, warn.Remove, warn.Mkdir, warn.Rename, warn.Walk, warn.Getwd, - warn.Readdir, warn.Marshal instead of inline format strings in log.Warn calls - -- Nav frontmatter title: fields must not contain ctx — frontmatter does not - support backticks, so the brand stays out of nav titles entirely (Hub, not The - ctx Hub). Body headings can use `ctx` since markdown supports backticks. - -- CLI flags and slash-commands inside headings or admonition titles must be - backticked: `--keep-frontmatter=false`, `/ctx-reflect`. The title-case engine - in hack/title-case-headings.py protects these patterns automatically, but - authors should still backtick at write time for clarity. - -- File extensions inside headings must be backticked when title-case - capitalization would otherwise apply: write `CONSTITUTION.md`, not - CONSTITUTION.Md. The title-case engine refuses to capitalize lowercase tokens - following a literal . dot, but explicit backticks remain the clearest signal. 
diff --git a/.context/DECISIONS.md b/.context/DECISIONS.md index 652a9c08d..4e0e67ffe 100644 --- a/.context/DECISIONS.md +++ b/.context/DECISIONS.md @@ -1,84 +1,6 @@ # Decisions -| Date | Decision | -|----|--------| -| 2026-04-16 | Deprecate and remove ctx backup | -| 2026-04-14 | doc.go quality floor: behavior-grounded, ~25-100 body lines, related-packages section required | -| 2026-04-14 | Bootstrap stays under ctx system bootstrap (reverted experimental top-level promotion) | -| 2026-04-14 | Title Case style for docs is AP-leaning with explicit ambiguity carve-outs | -| 2026-04-13 | Walk boundary uses git as a hint, not a requirement | -| 2026-04-11 | Journal stays local; LEARNINGS.md is the shareable layer | -| 2026-04-11 | `Entry.Author` is server-authoritative, not client-authoritative | -| 2026-04-09 | Architecture skill pipeline is a triad not a quartet | -| 2026-04-08 | Remove #done tag convention, simplify task archival | -| 2026-04-06 | Use hook relay for session provenance instead of JSONL parsing or env vars | -| 2026-04-04 | TestNoMagicStrings and TestNoMagicValues no longer exempt const/var definitions outside config/ | -| 2026-04-04 | String-typed enums belong in config/, not domain packages | -| 2026-04-03 | Output functions belong in write/ (consolidated) | -| 2026-04-03 | YAML text externalization pipeline (consolidated) | -| 2026-04-03 | Package taxonomy and code placement (consolidated) | -| 2026-04-03 | Eager init over lazy loading (consolidated) | -| 2026-04-03 | Pure logic separation of concerns (consolidated) | -| 2026-04-03 | config/ explosion is correct — fix is documentation, not restructuring | -| 2026-04-01 | IRC to Discord as primary community channel | -| 2026-04-01 | AST audit tests live in internal/audit/, one file per check | -| 2026-04-01 | Split assets/hooks/ into assets/integrations/ + assets/hooks/messages/ | -| 2026-04-01 | Rename ctx hook → ctx setup to disambiguate from the hook system | -| 2026-03-31 | Split log into 
log/event and log/warn to break import cycles | -| 2026-03-31 | Context-load-gate injects only CONSTITUTION and AGENT_PLAYBOOK_GATE, not full ReadOrder | -| 2026-03-31 | Spec signal words and nudge threshold are user-configurable via .ctxrc | -| 2026-03-30 | Flags-not-subcommands for journal source: list and show are view modes on a noun, not independent entities | -| 2026-03-30 | Journal consumed recall — recall CLI package deleted | -| 2026-03-30 | Classify rules are user-configurable via .ctxrc | -| 2026-03-25 | Architecture analysis and enrichment are separate skills — constraint is the feature | -| 2026-03-25 | Companion tools documented as optional MCP enhancements with runtime check | -| 2026-03-25 | Prompt templates removed — skills are the single agent instruction mechanism | -| 2026-03-24 | Write-once baseline with explicit end-consolidation for consolidation lifecycle | -| 2026-03-23 | Pre/pre HTML tags promoted to shared constants in config/marker | -| 2026-03-22 | Output functions belong in write/, never in core/ or cmd/ | -| 2026-03-20 | Shared formatting utilities belong in internal/format | -| 2026-03-20 | Go-YAML linkage check added to lint-drift as check 5 | -| 2026-03-18 | Singular command names for all CLI entities | -| 2026-03-17 | Pre-compute-then-print for write package output blocks | -| 2026-03-16 | Resource name constants in config/mcp/resource, mapping in server/resource | -| 2026-03-16 | Rename --consequences flag to --consequence for singular consistency | -| 2026-03-14 | Error package taxonomy: 22 domain files replace monolithic errors.go | -| 2026-03-14 | Session prefixes are parser vocabulary, not i18n text | -| 2026-03-14 | System path deny-list as safety net, not security boundary | -| 2026-03-14 | Config-driven freshness check with per-file review URLs | -| 2026-03-13 | Delete ctx-context-monitor skill — hook output is self-sufficient | -| 2026-03-13 | build target depends on sync-why to prevent embedded doc drift | -| 2026-03-12 
| Recommend companion RAGs as peer MCP servers not bridge through ctx | -| 2026-03-12 | Rename ctx-map skill to ctx-architecture | -| 2026-03-07 | Use composite directory path constants for multi-segment paths | -| 2026-03-06 | Drop fatih/color dependency — Unicode symbols are sufficient for terminal output, color was redundant | -| 2026-03-06 | PR #27 (MCP server) meets v0.1 spec requirements — merge-ready pending 3 compliance fixes | -| 2026-03-06 | Skills stay CLI-based; MCP Prompts are the protocol equivalent | -| 2026-03-06 | Peer MCP model for external tool integration | -| 2026-03-06 | Create internal/parse for shared text-to-typed-value conversions | -| 2026-03-06 | Centralize errors in internal/err, not per-package err.go files | -| 2026-03-05 | Gitignore .context/memory/ for this project | -| 2026-03-05 | Memory bridge design: three-phase architecture with hook nudge + on-demand | -| 2026-03-05 | Revised strategic analysis: blog-first execution order, bidirectional sync as top-level section | -| 2026-03-04 | Interface-based GraphBuilder for multi-ecosystem ctx deps | -| 2026-03-02 | Billing threshold piggybacks on check-context-size, not heartbeat | -| 2026-03-02 | Replace auto-migration with stderr warning for legacy keys | -| 2026-03-02 | Consolidate all session state to .context/state/ | -| 2026-03-01 | PersistentPreRunE init guard with three-level exemption | -| 2026-03-01 | Global encryption key at ~/.ctx/.ctx.key | -| 2026-03-01 | Heartbeat token telemetry: conditional fields, not always-present | -| 2026-03-01 | Hook log rotation: size-based with one previous generation, matching eventlog pattern | -| 2026-03-01 | Promote 6 private skills to bundled plugin skills; keep 7 project-local | -| 2026-02-27 | Context window detection: JSONL-first fallback order | -| 2026-02-27 | Context injection architecture v2 (consolidated) | -| 2026-02-26 | .context/state/ directory for project-scoped runtime state | -| 2026-02-26 | Hook and notification design 
(consolidated) | -| 2026-02-26 | ctx init and CLAUDE.md handling (consolidated) | -| 2026-02-26 | Task and knowledge management (consolidated) | -| 2026-02-26 | Agent autonomy and separation of concerns (consolidated) | -| 2026-02-26 | Security and permissions (consolidated) | -| 2026-02-27 | Webhook and notification design (consolidated) | - -## [2026-04-16-011520] Deprecate and remove ctx backup - -**Status**: Accepted - -**Context**: ctx backup is environment-specific (SMB/GVFS), fires nag hooks for -unconfigured users, and solves a problem that belongs to the OS layer. ctx hub -already handles cross-machine knowledge persistence. - -**Decision**: Deprecate and remove ctx backup - -**Rationale**: Hub handles persistence, backup is env-specific, wrong layer for -ctx to own. No external users depend on it. Broadcom mirror issue and GVFS -Linux-only dependency add maintenance burden. - -**Consequence**: Need backup-strategy runbook before removal. Maintainer must -set up replacement cron job. About 60 files to remove across CLI, config, hooks, -docs, skills. Spec: specs/deprecate-ctx-backup.md - ---- - -## [2026-04-14-010205] doc.go quality floor: behavior-grounded, ~25-100 body lines, related-packages section required - -**Status**: Accepted - -**Context**: About 140 doc.go files were rewritten this session. User flagged -the original 5-line Key exports + See source files + Part of subsystem pattern -as lazy minimum effort. - -**Decision**: doc.go quality floor: behavior-grounded, ~25-100 body lines, -related-packages section required - -**Rationale**: Behavior-grounded rewrites (read source first, then write) are -the only acceptable form for any non-trivial package. The lazy template -communicates nothing a future reader cannot grep for; it satisfies tooling -without adding signal. 
- -**Consequence**: Every non-trivial package's doc.go now leads with the package's -actual purpose, names key behaviors, calls out non-obvious design choices -(Raft-lite, two-step indirection, idempotency contracts), and lists related -packages with paths. New packages should follow the same shape. - ---- - -## [2026-04-14-010205] Bootstrap stays under ctx system bootstrap (reverted experimental top-level promotion) - -**Status**: Accepted - -**Context**: Mid-session promoted ctx bootstrap to top-level to make a stale -CLAUDE.md instruction work. User reverted it and reaffirmed the original design. - -**Decision**: Bootstrap stays under ctx system bootstrap (reverted experimental -top-level promotion) - -**Rationale**: The ctx system namespace is for agent and hook plumbing the user -does not type by hand. Bootstrap is invoked by AI agents at session start; -surfacing it at top-level pollutes ctx --help for humans without benefit. - -**Consequence**: internal/bootstrap/group.go reverted; -internal/config/embed/cmd/system.go header now correctly states bootstrap is -intentionally not promoted. The CLAUDE.md template across the repo (and the -workspace copy) updated to reference ctx system bootstrap as canonical. - ---- - -## [2026-04-14-010205] Title Case style for docs is AP-leaning with explicit ambiguity carve-outs - -**Status**: Accepted - -**Context**: Needed a deterministic Title Case engine for headings and -admonition titles across docs/. User precedent (Working with AI lowercase with) -ruled out strict Chicago. - -**Decision**: Title Case style for docs is AP-leaning with explicit ambiguity -carve-outs - -**Rationale**: AP lowercase prepositions regardless of length matches -user-approved titles. But strict AP would lowercase ambiguous prep/conj/adv -words like before, after, since, until, past, near, down, up, off, hurting -common cases. 
Carve-outs leave them at default-cap and let the engine reach a -sensible result for ~95 percent of headings without manual review. - -**Consequence**: hack/title-case-headings.py ships an AP-leaning with ambiguity -carve-outs PREPOSITIONS set. Future style changes must touch that set explicitly -with reasoning. New brand or acronym additions go through the same audited -pattern. - ---- - -## [2026-04-13-153617] Walk boundary uses git as a hint, not a requirement - -**Status**: Accepted - -**Context**: ctx init failed when a non-ctx-initialized repo lived inside a -ctx-initialized parent workspace. walkForContextDir walked up and found the -parent's .context, then the boundary check rejected it. We considered -project-marker heuristics (go.mod, package.json) and making git mandatory. - -**Decision**: Walk boundary uses git as a hint, not a requirement - -**Rationale**: Project markers are unreliable (e.g. package.json for customer -shipments, Haskell projects have no common marker). Making git mandatory breaks -ctx's 'git recommended but not required' stance. Git-as-hint resolves the bug -without new dependencies: walk finds candidate, validate against git root, -discard if outside; fall back to CWD when no git is found. - -**Consequence**: walkForContextDir now consults findGitRoot to anchor ancestor -.context candidates. Monorepos, submodules, and nested workspaces resolve -correctly. No-git projects still work via CWD fallback. - ---- - -## [2026-04-11-200000] Journal stays local; LEARNINGS.md is the shareable layer - -**Status**: Accepted - -**Context**: With the hub now carrying shared project context between machines -and eventually between teammates, the question came up whether enriched -journal entries should ride along — either the raw `.context/journal/` files -or an "export enriched entries as shareable learning items" pipeline layered -on top of `/ctx-journal-enrich`. 
The journal is already gitignored per the -2026-03-05 `.context/memory/` decision and for the same reason: it's a -first-person log of raw prompts, half-formed thoughts, dead ends, personal -names, and things the user talks through with themselves. It sits in the -same trust tier as shell history or a private notebook. - -The trade-off is real: shared journals would make it trivial for teammates -(or future-me on another machine) to see the full reasoning trail behind a -decision. But "full reasoning trail" is precisely the thing that makes a -journal journal and not a changelog — it includes the parts the author -hasn't decided to stand behind yet, plus incidental private content. - -**Decision**: The journal is **Tier-0 personal** and never leaves the -originating machine. No hub sync, no export-by-default, no -enriched-entries-as-shareable-items pipeline. The enrichment pipeline -(`/ctx-journal-enrich`) stays as-is: journal → human-in-the-loop review → -explicit promotion to LEARNINGS.md / DECISIONS.md / CONVENTIONS.md via the -existing `/ctx-learning-add`, `/ctx-decision-add`, `/ctx-convention-add` -commands. Those distilled artifacts are **Tier-1 shareable** and are what -the hub syncs when a team opts into shared context. - -The promotion boundary is therefore the enrichment step, not a new export -pipeline. The user is the gate. - -**Rationale**: Any "shareable enriched journal entry" pipeline would have to -re-implement the trust boundary that `/ctx-learning-add` already enforces: -the human decides what's worth sharing, strips incidental private content, -and rewrites it as a standalone artifact. A second pipeline that tries to -do this automatically would either (a) leak private content by accident, or -(b) require the same human review and thus collapse back into -`/ctx-learning-add`. The principled answer is that there is no second -pipeline — LEARNINGS.md *is* the shareable form of the journal. 
- -This also preserves the psychological safety of the journal: the author -can write freely because they know nothing they write is one sync away -from a teammate's screen. Lose that property and the journal stops being a -journal and starts being a changelog draft. - -**Consequence**: - -- Journal files stay gitignored and stay out of `ctx hub` sync paths. Any - future code that walks context files for replication must exclude - `.context/journal/` explicitly and be covered by a test. -- `/ctx-journal-enrich` remains the promotion boundary. Its output targets - are LEARNINGS.md / DECISIONS.md / CONVENTIONS.md, never a separate - "shareable journal" bucket. -- Hub docs (`docs/home/hub.md`, `docs/recipes/hub-personal.md`, - `docs/recipes/hub-team.md`, `docs/security/hub.md`) should state the - Tier-0 / Tier-1 split explicitly so users building team workflows don't - assume "shared context" means "shared everything." -- The sync code path in `internal/hub/sync_helper.go` and any future - replication of context files must enforce this exclusion at the - code level — a gitignore entry is a user-convenience signal, not a - hub-trust boundary. -- A potential future "personal multi-machine journal sync" (same human, - different laptops) is explicitly **out of scope** of this decision. If - it ever ships, it rides a different transport (encrypted-at-rest, - single-user, not the team hub) and needs its own decision record. - -**Alternatives considered**: - -- **Sync raw journal files via hub**: rejected. Inverts the gitignore - decision, leaks private content by construction, destroys the - journal's "safe to write freely" property. -- **Auto-export enriched entries as a new shareable artifact type**: - rejected. Duplicates `/ctx-learning-add` without the human gate, or - collapses back into it. No real difference from the status quo except - the opportunity for accidental leakage. -- **Opt-in per-entry "publish to hub" flag in the journal**: rejected as - premature. 
If the user wants an entry on the hub, the existing flow is - one command away — write it as a learning or decision. A second path - adds surface area without adding capability. - -**Related**: Reinforces the 2026-03-05 `.context/memory/` gitignore -decision (same trust-tier reasoning for a different private artifact). - -## [2026-04-11-180000] `Entry.Author` is server-authoritative, not client-authoritative - -**Status**: Accepted - -**Context**: The `Entry.Author` field on hub entries is copied verbatim from -the client's publish request (`handler.go:82`). It's optional, freeform, and -unauthenticated — a client with a valid token for project `alpha` can publish -entries claiming `Author: "bob@acme.com"` regardless of who actually -authenticated. This is the same spoofing pattern as `Origin` (audit finding -H-04) and was flagged as audit finding H-22 with three options: keep, drop, -override, or promote. The decision was never formally closed. - -The premise that resolved it: **identity is eventually part of the token**. -Under the sysadmin-registry MVP, the server already knows `{user_id, project}` -from the authenticated token. Under the PKI stretch, the signed claim carries -identity cryptographically. In both models, the client has nothing to say about -authorship that the server doesn't already know with higher confidence. - -**Decision**: `Entry.Author` is **server-authoritative**. The server stamps it -from the authenticated identity source on every publish. The client's -`pe.Author` input is ignored (or rejected — implementation choice, not -semantic difference). The field stays in the wire format but its semantics -change from "whatever the client said" to "whatever the server's auth layer -resolved." - -Stamping source by phase: - -- **Today (pre-registry)**: `Author = ClientInfo.ProjectName`, same source as - the `Origin` server-enforcement fix (H-04). Lossy but consistent. 
-- **Registry MVP**: `Author = users.json` row's `user_id` (e.g., - `alice@acme.com`). Precise per-human attribution. -- **PKI stretch**: `Author = signed claim's sub field`. Cryptographic identity. - -**Rationale**: Dropping the field is wrong because the registry MVP will -already give us a per-user identity to stamp — removing Author just to re-add -it later is churn. "Override" and "promote" are cosmetically different forms -of the same decision (server fills from auth context); "promote" is what -happens naturally once the registry MVP types the field as `UserID`. -Client-sourced Author is indefensible because it replicates the Origin -spoofing vector in a second field. - -**Consequence**: - -- The Author field stays on the wire and in `Entry{}`. -- Client-side code that populates `pe.Author` from local config becomes a - no-op. Audit `ctx connect publish` and `ctx add --share` for any such - code paths before the server-enforcement fix lands. -- `handler.go publish()` fills Author from the authenticated context (the - same `ClientInfo` that H-04 pulls for Origin). Single unified - auth-to-handler pipe. -- `docs/security/hub.md` "Compromised client token" section gets rewritten: - attribution becomes **wrong** on compromise (attacker's token maps to - attacker's identity), not **forgeable** (attacker cannot stamp someone - else's name). -- The sysadmin-registry spec (`specs/hub-identity-registry.md`, tasked) - MUST include a `user_id` field per row — it's the stamping source. -- Three open tasks collapse into one: H-22 resolves to "implement - server-authoritative Author" instead of "decide Author fate." TASKS.md - updated. - -**Alternatives considered**: - -- **Keep client-authoritative**: rejected. Same spoofing vector as Origin; - trivially defeats any downstream attribution check. -- **Drop the field**: rejected. The registry MVP will need per-human - attribution anyway. Dropping today is churn that gets undone - immediately. 
-- **Override at client-side before publish**: rejected. Puts the security - boundary on the wrong side of the trust zone. Must be server-side. - -**Follow-up — client-advisory metadata**: the client still has useful -information to share that isn't an identity claim: a human-friendly -display name, the machine that made the publish, the tool version, a -CI system label, a team/role handle. This lives on a **new sibling -field `Meta`** (a `ClientMetadata` sub-struct), not on `Author`. The -separation of types is what protects the security property: `Author` -is reserved for server-authoritative identity, `Meta` is -client-advisory and explicitly labeled as such in any rendered -surface. `Meta` fields are size-capped individually (256 bytes) and -in aggregate (2 KB), validated for plain-string content (no -newlines, no control characters), and never claimed as attribution -in any API response. The renderer MUST label `Meta`-sourced values -with prose like "client label" or "client-reported" so readers -cannot mistake them for authoritative identity. See TASKS.md for -the implementation task. - ---- - -## [2026-04-09-001332] Architecture skill pipeline is a triad not a quartet - -**Status**: Accepted - -**Context**: Had a proposed ctx-architecture-extend for extension point mapping, -making four skills - -**Decision**: Architecture skill pipeline is a triad not a quartet - -**Rationale**: Extension points already covered per-module in DETAILED_DESIGN -and by registration site discovery in enrich. Fourth skill fragments pipeline -without distinct value - -**Consequence**: Pipeline is map enrich hunt. Three skills three questions: how -does it work, how well does it connect, where will it break - ---- - -## [2026-04-08-013731] Remove #done tag convention, simplify task archival - -**Status**: Accepted - -**Context**: Tasks had #done:YYYY-MM-DD timestamps that agents added -inconsistently and nobody read. compact --archive filtered by age using these -timestamps. 
- -**Decision**: Remove #done tag convention, simplify task archival - -**Rationale**: [x] checkbox is semantically sufficient. git blame provides the -completion timestamp. Removing #done eliminates redundant ceremony and -simplifies compact --archive to archive all completed tasks regardless of age. - -**Consequence**: compact --archive no longer filters by archive_after_days for -tasks. The .ctxrc field is inert but retained for backwards compatibility. -Historical #done tags in archives are preserved. - ---- - -## [2026-04-06-204212] Use hook relay for session provenance instead of JSONL parsing or env vars - -**Status**: Accepted - -**Context**: Needed to give agents awareness of their session ID, branch, and -commit hash for task/decision/learning provenance. Considered three approaches: -(1) parsing most-recent JSONL at runtime, (2) CTX_SESSION_ID env var, (3) hook -relay via UserPromptSubmit. - -**Decision**: Use hook relay for session provenance instead of JSONL parsing or -env vars - -**Rationale**: JSONL parsing breaks with parallel sessions (wrong file picked). -Env vars aren't exported by Claude Code. Hook relay is zero-state: the hook -receives session_id from Claude Code on every prompt, emits it, agent absorbs -through repetition. No counters, no cleanup, no resume edge cases. - -**Consequence**: Provenance depends on the hook being registered (enabledPlugins -in settings.local.json). Projects without plugin registration get no provenance. -Filed as separate bug. 
- ---- - -## [2026-04-04-025755] TestNoMagicStrings and TestNoMagicValues no longer exempt const/var definitions outside config/ - -**Status**: Accepted - -**Context**: The isConstDef/isVarDef blanket exemption masked 156+ string and 7 -numeric constants in the wrong package - -**Decision**: TestNoMagicStrings and TestNoMagicValues no longer exempt -const/var definitions outside config/ - -**Rationale**: Const definitions outside config/ are magic values in the wrong -place — naming them does not fix the structural problem - -**Consequence**: All new code with string/numeric constants outside config/ -fails these tests immediately - ---- - -## [2026-04-04-025746] String-typed enums belong in config/, not domain packages - -**Status**: Accepted - -**Context**: Debated whether type IssueType string with const values belongs in -domain or config. The string value is the same regardless of type annotation. - -**Decision**: String-typed enums belong in config/, not domain packages - -**Rationale**: Types without behavior belong in config. Promote to entity/ only -when methods/interfaces appear. - -**Consequence**: All type Foo string + const blocks outside config/ are now -caught by TestNoMagicStrings. - ---- - -## [2026-04-03-180000] Output functions belong in write/ (consolidated) - -**Status**: Accepted - -**Consolidated from**: 2 entries (2026-03-21 to 2026-03-22) - -**Decision**: Output functions belong in write/, logic and types in core/, -orchestration in cmd/ - -**Rationale**: The write/ taxonomy is flat by domain — each CLI feature gets -its own write/ package. core/ owns domain logic and types. cmd/ owns Cobra -orchestration. Functions that call cmd.Print/Println/Printf belong in write/. -core/ never imports cobra for output purposes. - -**Consequence**: All new CLI output must go through a write/ package. No -cmd.Print* calls in internal/cli/ outside of internal/write/. 
- ---- - -## [2026-04-03-180000] YAML text externalization pipeline (consolidated) - -**Status**: Accepted - -**Consolidated from**: 5 entries (2026-03-06 to 2026-04-03) - -**Decision**: All user-facing text externalized to embedded YAML domain files, -justified by agent legibility and drift prevention — not i18n - -**Rationale**: The real justification is agent legibility (named DescKey -constants as traversable graphs) and drift prevention (TestDescKeyYAMLLinkage -catches orphans mechanically). i18n is a free downstream consequence. The -exhaustive test verifies all constants resolve to non-empty YAML values — new -keys are automatically covered. - -**Consequence**: commands.yaml split into 4 domain files (commands, flags, text, -examples) loaded via dedicated loaders. text.yaml split into 6 domain files -loaded via loadYAMLDir. The 3-file ceremony (DescKey + YAML + write/err -function) is the cost of agent-legible, drift-proof output. - ---- - -## [2026-04-03-180000] Package taxonomy and code placement (consolidated) - -**Status**: Accepted - -**Consolidated from**: 3 entries (2026-03-06 to 2026-03-13) - -**Decision**: Three-zone taxonomy: cmd/ for Cobra wiring (cmd.go + run.go), -core/ for logic and types, assets/ for templates and user-facing text. config/ -for structural constants only. - -**Rationale**: Taxonomical symmetry makes navigation instant and agent-friendly. -Domain types that multiple packages consume belong in domain packages -(internal/entry), not CLI subpackages. Templates and user-facing text live in -assets/ for i18n readiness; structural constants (paths, limits, regexes) stay -in config/. - -**Consequence**: Every CLI package has the same predictable shape. Shared entry -types live in internal/entry. Template files (tpl_*.go) moved from config/ to -assets/. 474 files changed in initial restructuring. 
- ---- - -## [2026-04-03-180000] Eager init over lazy loading (consolidated) - -**Status**: Accepted - -**Consolidated from**: 2 entries (2026-03-16 to 2026-03-18) - -**Decision**: Explicit Init() called eagerly at startup for static embedded data -and resource lookups, instead of per-accessor sync.Once or package-level init() - -**Rationale**: Static embedded data is required at startup — sync.Once per -accessor is cargo cult. Package-level init() hides startup dependencies and -makes ordering unclear. Explicit Init() called from main.go / NewServer makes -the dependency visible and testable. - -**Consequence**: Maps unexported, accessors are plain lookups. Tests call Init() -in TestMain. res.Init() called from NewServer before ToList(). No package-level -side effects, zero sync.Once in the lookup pipeline. - ---- - -## [2026-04-03-180000] Pure logic separation of concerns (consolidated) - -**Status**: Accepted - -**Consolidated from**: 3 entries (2026-03-15 to 2026-03-23) - -**Decision**: Pure-logic functions return data structs; callers own I/O, file -writes, and reporting. Function pointers in param structs replaced with text -keys. - -**Rationale**: Pure logic with no I/O lets both MCP (JSON-RPC) and CLI (cobra) -callers control output independently. Methods that don't access receiver state -hide their true dependencies — make them free functions. If all callers of a -callback vary only by a string key, the callback is data in disguise. - -**Consequence**: CompactContext returns CompactResult; callers iterate -FileUpdates. Server response helpers in server/out, prompt builders in -server/prompt. All cross-cutting param structs in entity are -function-pointer-free. - ---- - -## [2026-04-03-133244] config/ explosion is correct — fix is documentation, not restructuring - -**Status**: Accepted - -**Context**: Architecture analysis flagged 60+ config sub-packages as a -bottleneck. 
Evaluation showed the alternative (8-10 domain packages) trades -granular imports for fat dependency units. Current structure gives zero internal -dependencies, surgical dependency tracking, and minimal recompile scope. - -**Decision**: config/ explosion is correct — fix is documentation, not -restructuring - -**Rationale**: Go's compilation unit is the package. Granular packages mean -precise dependency tracking. The developer experience cost (IDE noise, package -discovery) is real but solvable with a README decision tree, not restructuring. -Restructuring would be massive mechanical churn for cosmetic benefit. - -**Consequence**: config/README.md written with organizational guide and decision -tree. No restructuring planned. embed/text/ file count will shrink naturally -when tpl/ migrates to text/template. - ---- - -## [2026-04-01-233247] IRC to Discord as primary community channel - -**Status**: Accepted - -**Context**: Discord server exists at https://ctx.ist/discord; IRC/libera.chat -references were stale - -**Decision**: IRC to Discord as primary community channel - -**Rationale**: Discord is faster for async community support; IRC was historical - -**Consequence**: Updated zensical.toml, README, community docs, journal -template. Added community footer to ctx help and ctx init output via YAML assets -pipeline - ---- - -## [2026-04-01-233246] AST audit tests live in internal/audit/, one file per check - -**Status**: Accepted - -**Context**: Needed a home for AST-based codebase invariant tests separate from -the existing compliance_test.go monolith - -**Decision**: AST audit tests live in internal/audit/, one file per check - -**Rationale**: One test per file prevents the 1200+ line monster pattern. Shared -helpers in helpers_test.go with sync.Once caching. 
Package is all _test.go -except doc.go — produces no binary, not importable - -**Consequence**: New checks are added as individual *_test.go files; the pattern -(loadPackages, walk AST, collect violations, t.Error) is established and -repeatable - ---- - -## [2026-04-01-074417] Split assets/hooks/ into assets/integrations/ + assets/hooks/messages/ - -**Status**: Accepted - -**Context**: The directory mixed Copilot integration templates with hook message -templates - -**Decision**: Split assets/hooks/ into assets/integrations/ + -assets/hooks/messages/ - -**Rationale**: Integration assets (Copilot instructions, AGENTS.md, CLI -scripts/skills) are not hooks. Hook messages ARE the hook system templates. - -**Consequence**: integrations/ for tool integration assets, hooks/messages/ for -hook system templates. Embed directives and all config constants updated. - ---- - -## [2026-04-01-074416] Rename ctx hook → ctx setup to disambiguate from the hook system - -**Status**: Accepted - -**Context**: PR #45 contributor assumed hook meant the setup command, causing -naming collisions with the PreToolUse/PostToolUse hook system - -**Decision**: Rename ctx hook → ctx setup to disambiguate from the hook system - -**Rationale**: hook has a specific meaning in ctx; setup accurately describes -generating AI tool integration configs - -**Consequence**: CLI breaking change. All docs, specs, TypeScript extension, and -YAML assets updated. Released specs left as historical. - ---- - -## [2026-03-31-224245] Split log into log/event and log/warn to break import cycles - -**Status**: Accepted - -**Context**: io and notify could not import log.Warn because log imported both -of them for event logging, creating circular dependencies - -**Decision**: Split log into log/event and log/warn to break import cycles - -**Rationale**: Separating concerns (stderr sink vs JSONL event log) into -subpackages eliminated the cycle. 
Warn sink is foundation-level with only config -imports, event logging is higher-level - -**Consequence**: All stderr warnings now route through logWarn.Warn(). New code -importing log/warn has no cycle risk. Event types moved to internal/entity - ---- - -## [2026-03-31-182003] Context-load-gate injects only CONSTITUTION and AGENT_PLAYBOOK_GATE, not full ReadOrder - -**Status**: Accepted - -**Context**: Force-loading ~14k tokens of context files (8 files) every session -diluted attention without proportional value. CLAUDE.md already instructs agents -to read full context files on-demand. Behavioral prose in force-loaded content -was routinely skipped. - -**Decision**: Context-load-gate injects only CONSTITUTION and -AGENT_PLAYBOOK_GATE, not full ReadOrder - -**Rationale**: Hard rules (CONSTITUTION) must be present before any action. -Distilled directives (gate file) provide actionable session-start guidance in -~2k tokens. Full playbook, conventions, architecture, decisions, learnings are -pulled on-demand when task context requires them. - -**Consequence**: New AGENT_PLAYBOOK_GATE.md file must stay in sync with -AGENT_PLAYBOOK.md. HTML comment cross-reference added to playbook header for -contributor discoverability. - ---- - -## [2026-03-31-005113] Spec signal words and nudge threshold are user-configurable via .ctxrc - -**Status**: Accepted - -**Context**: Initially hardcoded signal words and 150-char threshold in run.go. 
-User pointed out these are localizable vocabulary, following the -session_prefixes / classify_rules pattern - -**Decision**: Spec signal words and nudge threshold are user-configurable via -.ctxrc - -**Rationale**: Signal words are language-dependent and project-dependent — a -Spanish-speaking user or a non-Go project would have different signal terms - -**Consequence**: Added spec_signal_words and spec_nudge_min_len to CtxRC struct, -rc accessors with defaults in config/entry, JSON schema updated - ---- - -## [2026-03-30-075927] Flags-not-subcommands for journal source: list and show are view modes on a noun, not independent entities - -**Status**: Accepted - -**Context**: During the journal-recall merge, recall had separate list and show -subcommands. Merging them into journal created a design choice: source list + -source show (three levels) vs source --show (two levels). - -**Decision**: Flags-not-subcommands for journal source: list and show are view -modes on a noun, not independent entities - -**Rationale**: Keeps CLI nesting to two levels max. Default behavior (bare -source) lists sessions; --show switches to inspect mode. When two operations -differ only in how they view the same data, make them flags on one command. - -**Consequence**: journal source dispatches via --show flag rather than -positional subcommand. Future view-mode toggles should follow this pattern. - ---- - -## [2026-03-30-003756] Journal consumed recall — recall CLI package deleted - -**Status**: Accepted - -**Context**: ctx recall was never registered in bootstrap; ctx journal had all -the same subcommands - -**Decision**: Journal consumed recall — recall CLI package deleted - -**Rationale**: One dead command group creates confusion in docs and skills. -Journal is the canonical command group. - -**Consequence**: internal/cli/recall/ deleted, 19 doc files updated, -docs/cli/recall.md renamed to journal.md, zensical.toml updated. 
MCP tool -ctx_recall rename tasked separately (API contract) - ---- - -## [2026-03-30-003745] Classify rules are user-configurable via .ctxrc - -**Status**: Accepted - -**Context**: Memory entry classification used hardcoded keyword rules that could -not be customized - -**Decision**: Classify rules are user-configurable via .ctxrc - -**Rationale**: Users may work in domains where the default keywords do not match -(non-English, specialized terminology). Same pattern as session_prefixes. - -**Consequence**: classify_rules in .ctxrc overrides defaults; schema updated; -rc.ClassifyRules() accessor with fallback to config/memory.DefaultClassifyRules - ---- - -## [2026-03-25-233646] Architecture analysis and enrichment are separate skills — constraint is the feature - -**Status**: Accepted - -**Context**: Observed that agents take shortcuts when code intelligence tools -are available during architecture analysis. A 5.2x depth reduction was measured -(5866 vs 1124 lines) when GitNexus was available during reading. Mentioning -unavailable tools by name in a skill plants the idea for the agent to use them. - -**Decision**: Architecture analysis and enrichment are separate skills — -constraint is the feature - -**Rationale**: Discovery requires forced reading without shortcuts. Validation -and quantification are a separate pass. Two-pass compiler analogy: semantic -parsing (human-style reading) then static analysis (graph enrichment). Never -mention tools you want the agent to avoid — absence is the only reliable -constraint. - -**Consequence**: ctx-architecture deliberately excludes code intelligence tools -from allowed-tools and never mentions them. ctx-architecture-enrich is a -separate skill that runs after, using the deep artifacts as baseline. Gemini is -allowed in both for upstream/external lookups only. 
- ---- - -## [2026-03-25-173337] Companion tools documented as optional MCP enhancements with runtime check - -**Status**: Accepted - -**Context**: Gemini Search and GitNexus improve skills but no docs mentioned -them and no code checked their availability - -**Decision**: Companion tools documented as optional MCP enhancements with -runtime check - -**Rationale**: Users should know what tools enhance their workflow without being -forced to install them. Suppressible via .ctxrc for users who don't want them. - -**Consequence**: /ctx-remember smoke-tests MCPs at session start. -companion_check: false suppresses. - ---- - -## [2026-03-25-173336] Prompt templates removed — skills are the single agent instruction mechanism - -**Status**: Accepted - -**Context**: Prompt templates (.context/prompts/) overlapped with skills but had -no discoverability — even the project creator didn't know they existed - -**Decision**: Prompt templates removed — skills are the single agent -instruction mechanism - -**Rationale**: Adding metadata to prompts to fix discoverability would recreate -the skill system. One concept is better than two. - -**Consequence**: code-review, explain, refactor promoted to proper skills. ctx -prompt CLI removed. loop.md retained as ctx loop config file at -.context/loop.md. - ---- - -## [2026-03-24-001001] Write-once baseline with explicit end-consolidation for consolidation lifecycle - -**Status**: Accepted - -**Context**: Designing the consolidation nudge hook; multi-pass consolidation -spans dozens of sessions and you cannot programmatically distinguish feature -from consolidation sessions - -**Decision**: Write-once baseline with explicit end-consolidation for -consolidation lifecycle - -**Rationale**: First ctx-consolidate stamps baseline (write-once), user runs -end-consolidation when done. 
Failure mode is silence (no stale nudges), not -wrong behavior - -**Consequence**: Requires mark-consolidation, end-consolidation, and -snooze-consolidation plumbing commands. Spec: specs/consolidation-nudge-hook.md - ---- - -## [2026-03-23-165612] Pre/pre HTML tags promoted to shared constants in config/marker - -**Status**: Accepted - -**Context**: Two packages (normalize and format) used hardcoded pre strings -independently - -**Decision**: Pre/pre HTML tags promoted to shared constants in config/marker - -**Rationale**: Cross-package magic strings belong in config constants per -CONVENTIONS.md - -**Consequence**: marker.TagPre and marker.TagPreClose are the canonical -references; package-local constants deleted - ---- - -## [2026-03-22-084316] Output functions belong in write/, never in core/ or cmd/ - -**Status**: Accepted - -**Context**: System write migration revealed that cmd.Print* calls scattered -across core/ and cmd/ packages prevented localization and violated separation of -concerns - -**Decision**: Output functions belong in write/, never in core/ or cmd/ - -**Rationale**: The write/ taxonomy is flat by domain — each CLI feature gets -its own write/ package. core/ owns logic and types, cmd/ owns orchestration, -write/ owns all output. - -**Consequence**: All new CLI output must go through a write/ package. No -cmd.Print* calls in internal/cli/ outside of internal/write/. - ---- - -## [2026-03-20-232506] Shared formatting utilities belong in internal/format - -**Status**: Accepted - -**Context**: Pluralize, Duration, DurationAgo, and TruncateFirstLine were -duplicated across memory/core, change/core, and other CLI packages - -**Decision**: Shared formatting utilities belong in internal/format - -**Rationale**: internal/format already existed with TimeAgo and Number -formatters. 
Centralizing prevents duplication and matches the convention that -domain-agnostic utilities live in shared packages, not CLI subpackages - -**Consequence**: CLI packages import internal/format instead of defining local -helpers. Local copies deleted. - ---- - -## [2026-03-20-160103] Go-YAML linkage check added to lint-drift as check 5 - -**Status**: Accepted - -**Context**: Prior refactoring sessions left broken and orphan linkages between -Go DescKey constants and YAML entries that caused silent runtime failures - -**Decision**: Go-YAML linkage check added to lint-drift as check 5 - -**Rationale**: Shell-based grep+comm approach fits the existing lint-drift -pattern, runs at CI time, and is simpler than programmatic Go AST parsing - -**Consequence**: CI-time check catches orphans in both directions plus -cross-namespace duplicates, preventing recurrence - ---- - -## [2026-03-18-193623] Singular command names for all CLI entities - -**Status**: Accepted - -**Context**: ctx add used learning (singular) but ctx learnings was plural. -Inconsistency across 6 commands. - -**Decision**: Singular command names for all CLI entities - -**Rationale**: Less headache for i18n; one rule (singular = entity); developers -think in OOP. Use field values come from DescKey constants for -single-source-of-truth renaming. - -**Consequence**: All commands singular: task, decision, learning, change, -permission, dep. YAML keys, desc constants, directory names, and 50+ files -updated. - ---- - -## [2026-03-17-105627] Pre-compute-then-print for write package output blocks - -**Status**: Accepted - -**Context**: Audit of internal/write/ found 337 Println calls across 160 -functions. Asked whether text/template or single format strings would clean up -multi-Println functions like InfoLoopGenerated. 
- -**Decision**: Pre-compute-then-print for write package output blocks - -**Rationale**: text/template trades compile-time safety for runtime errors and -only 38 of 160 functions benefit from consolidation. fmt.Sprintf with -pre-computed conditional args handles all cases without new dependencies. -Loop-based functions stay imperative. - -**Consequence**: Functions with 4+ Printlns pre-compute conditionals into -strings, then emit one cmd.Println with a multiline block template. Per-line -Tpl* constants replaced with TplXxxBlock. Trivial (1-3 line) and loop-based -functions excluded. - ---- - -## [2026-03-16-104142] Resource name constants in config/mcp/resource, mapping in server/resource - -**Status**: Accepted - -**Context**: MCP resource handler had string literals scattered through -handle_resource.go and rebuilt the resource list on every call - -**Decision**: Resource name constants in config/mcp/resource, mapping in -server/resource - -**Rationale**: Constants follow the same pattern as config/mcp/tool. Mapping -stays in server/resource because it bridges config constants with assets text -(too many cross-cutting deps for a config package). Resource list and URI lookup -are pre-built once at server init. - -**Consequence**: URI-to-file lookup is O(1) via pre-built map; resource list -built once in NewServer, not per request; no string literals in handler code - ---- - -## [2026-03-16-022635] Rename --consequences flag to --consequence for singular consistency - -**Status**: Accepted - -**Context**: All other CLI flags (context, rationale, lesson, application) are -singular nouns. consequences was the only plural. - -**Decision**: Rename --consequences flag to --consequence for singular -consistency - -**Rationale**: Singular form matches the pattern. Consistency wins over natural -language preference. - -**Consequence**: 75+ files updated. Breaking change for --consequences users. 
- ---- - -## [2026-03-14-180905] Error package taxonomy: 22 domain files replace monolithic errors.go - -**Status**: Accepted - -**Context**: internal/err/errors.go was 1995 lines with 188 functions in one -file - -**Decision**: Error package taxonomy: 22 domain files replace monolithic -errors.go - -**Rationale**: Convention requires files named by responsibility, not junk -drawers; domain grouping makes it possible to find error constructors by domain - -**Consequence**: 22 files (backup, config, crypto, date, fs, git, hook, init, -journal, memory, notify, pad, parser, prompt, recall, reminder, session, site, -skill, state, task, validation); errors.go deleted - ---- - -## [2026-03-14-131152] Session prefixes are parser vocabulary, not i18n text - -**Status**: Accepted - -**Context**: Markdown session parser had hardcoded Session:/Oturum: pair in -text.yaml as session_prefix/session_prefix_alt — didn't scale beyond two -languages - -**Decision**: Session prefixes are parser vocabulary, not i18n text - -**Rationale**: Session header prefixes are recognition patterns for parsing, not -user-facing interface strings. Separating content recognition from interface -language lets users parse multilingual session files without code changes. -Single-language default (Session:) avoids implicit favoritism. - -**Consequence**: Prefixes moved to .ctxrc session_prefixes list. text.yaml -entries and embed.go constants removed. Parser reads from rc.SessionPrefixes() -with fallback to config/parser.DefaultSessionPrefixes. Users extend via .ctxrc. - ---- - -## [2026-03-14-110748] System path deny-list as safety net, not security boundary - -**Status**: Accepted - -**Context**: Replacing nolint:gosec directives with centralized I/O wrappers in -internal/io - -**Decision**: System path deny-list as safety net, not security boundary - -**Rationale**: ctx paths are internally constructed from config constants. 
The -deny-list catches agent hallucinations (writing to /etc), not adversarial input. -Public security docs would imply a threat model that does not exist. - -**Consequence**: internal/io/doc.go documents limitations honestly for -contributors. No user-facing security docs. The deny-list is a modicum of -protection, not a promise. - ---- - -## [2026-03-14-093748] Config-driven freshness check with per-file review URLs - -**Status**: Accepted - -**Context**: Building a hook to warn when technology-dependent constants go -stale. Initially hardcoded the file list and Anthropic docs URL in the binary, -but this only worked inside the ctx repo and assumed all projects care about -Anthropic docs. - -**Decision**: Config-driven freshness check with per-file review URLs - -**Rationale**: Making the file list and review URLs configurable via .ctxrc -freshness_files means any project can opt in. Per-file review_url avoids -special-casing by project name — ctx sets Anthropic docs, other projects set -their own vendor links or omit it entirely. - -**Consequence**: The hook is a no-op by default (opt-in). ctx's own .ctxrc -carries the tracked files. All nudge text goes through assets/text.yaml for -localization. No project detection logic needed. - ---- - -## [2026-03-13-223111] Delete ctx-context-monitor skill — hook output is self-sufficient - -**Status**: Accepted - -**Context**: The skill documented how to relay context window warnings, but the -hook message already includes IMPORTANT: Relay this context window warning to -the user VERBATIM which agents follow without the skill. - -**Decision**: Delete ctx-context-monitor skill — hook output is -self-sufficient - -**Rationale**: No mechanism exists for hooks to trigger skills. The skill was -never loaded during sessions. Adding enforcement elsewhere would either be too -far back in context (playbook) or dilute the already-crisp hook message. - -**Consequence**: One fewer skill to maintain. 
No behavioral change — agents -continue relaying warnings as before. - ---- - -## [2026-03-13-151955] build target depends on sync-why to prevent embedded doc drift - -**Status**: Accepted - -**Context**: assets/why/ files had silently drifted from their docs/ sources - -**Decision**: build target depends on sync-why to prevent embedded doc drift - -**Rationale**: Derived assets that are not in the build dependency chain will -drift — the only reliable enforcement is making the build fail without sync - -**Consequence**: Every make build now copies docs into assets before compiling - ---- - -## [2026-03-12-133007] Recommend companion RAGs as peer MCP servers not bridge through ctx - -**Status**: Accepted - -**Context**: Explored whether ctx should proxy RAG queries or integrate a RAG -directly - -**Decision**: Recommend companion RAGs as peer MCP servers not bridge through -ctx - -**Rationale**: MCP is the composition layer — agents already compose multiple -servers. ctx is context, RAGs are intelligence. 
No bridging, no plugin system, -no schema abstraction - -**Consequence**: Spec created at ideas/spec-companion-intelligence.md; future -work is documentation and UX only - ---- - -## [2026-03-12-133007] Rename ctx-map skill to ctx-architecture - -**Status**: Accepted - -**Context**: The name 'map' didn't convey the iterative, architectural nature of -the ritual - -**Decision**: Rename ctx-map skill to ctx-architecture - -**Rationale**: 'architecture' better describes surveying and evolving project -structure across sessions - -**Consequence**: All cross-references updated across skills, docs, .context -files, and settings - ---- - -## [2026-03-07-221155] Use composite directory path constants for multi-segment paths - -**Status**: Accepted - -**Context**: Needed a constant for hooks/messages path used in message.go and -message_cmd.go - -**Decision**: Use composite directory path constants for multi-segment paths - -**Rationale**: Matches existing pattern of DirClaudeHooks = '.claude/hooks' — -keeps filepath.Join calls cleaner and avoids scattering path segments - -**Consequence**: New multi-segment directory paths should be single constants -(e.g. 
DirHooksMessages, DirMemoryArchive) rather than joined from individual -segment constants - ---- - -## [2026-03-06-200306] Drop fatih/color dependency — Unicode symbols are sufficient for terminal output, color was redundant - -**Status**: Accepted - -**Context**: fatih/color was used in 32 files for green checkmarks, yellow -warnings, cyan headings, dim text - -**Decision**: Drop fatih/color dependency — Unicode symbols are sufficient for -terminal output, color was redundant - -**Rationale**: Every colored output already had a semantic symbol (✓, ⚠, -○) that conveyed the same meaning; color added visual noise in non-terminal -contexts (logs, pipes) - -**Consequence**: Removed --no-color flag (only existed for color.NoColor); one -fewer external dependency; FlagNoColor retained in config for CLI compatibility - ---- - -## [2026-03-06-141507] PR #27 (MCP server) meets v0.1 spec requirements — merge-ready pending 3 compliance fixes - -**Status**: Accepted - -**Context**: Reviewed PR against specs/mcp-server.md; all 7 action items -addressed, CI fails on 3 mechanical compliance issues - -**Decision**: PR #27 (MCP server) meets v0.1 spec requirements — merge-ready -pending 3 compliance fixes - -**Rationale**: All spec requirements met; CI failures are trivial and low-risk; -keeping PR open risks merge conflicts during active refactoring - -**Consequence**: Merge and fix compliance issues in follow-up commit on main - ---- - -## [2026-03-06-184816] Skills stay CLI-based; MCP Prompts are the protocol equivalent - -**Status**: Accepted - -**Context**: Question arose whether skills should switch from ctx CLI (Bash) to -MCP tool calls once the MCP server ships - -**Decision**: Skills stay CLI-based; MCP Prompts are the protocol equivalent - -**Rationale**: CLI is always available (PATH prerequisite); MCP requires -optional configuration. Hooks will always be CLI (shell commands). Two access -patterns in the same tool is gratuitous complexity. 
- -**Consequence**: Skills call CLI. MCP Prompts call MCP Tools. Hooks call CLI. -Clean layer separation; no replacement, only parallel access paths. - ---- - -## [2026-03-06-184812] Peer MCP model for external tool integration - -**Status**: Accepted - -**Context**: Evaluated three integration models (orchestrator, peer, hub) for -how ctx relates to GitNexus and context-mode - -**Decision**: Peer MCP model for external tool integration - -**Rationale**: Peer model (side-by-side MCP servers, each queried independently -by the agent) respects ctx's markdown-on-filesystem invariant and avoids -coupling. ctx provides behavioral scaffolding; external tools provide their -specialties. - -**Consequence**: ctx MCP Prompts can reference external tools by convention -without tight coupling. No plugin registry needed. - ---- - -## [2026-03-06-050132] Create internal/parse for shared text-to-typed-value conversions - -**Status**: Accepted - -**Context**: parseDate with 2006-01-02 duplicated in 5+ files; needed a home -that is not internal/utils or internal/strings (collides with stdlib) - -**Decision**: Create internal/parse for shared text-to-typed-value conversions - -**Rationale**: internal/parse scopes to convert text to typed values without -becoming a junk drawer. Name invites sibling functions (duration, identifier -parsing) naturally. - -**Consequence**: parse.Date() is the first function; config.DateFormat holds the -layout constant. Other time.Parse callers can migrate incrementally. 
- ---- - -## [2026-03-06-050131] Centralize errors in internal/err, not per-package err.go files - -**Status**: Accepted - -**Context**: Duplicate error constructors across 5+ CLI packages; agents copying -the pattern when they see a local err.go - -**Decision**: Centralize errors in internal/err, not per-package err.go files - -**Rationale**: Single location makes duplicates visible, enables future sentinel -errors, and prevents broken-window accumulation - -**Consequence**: All CLI err.go files migrated and deleted. New errors go to -internal/err/errors.go exclusively. - ---- - -## [2026-03-05-205424] Gitignore .context/memory/ for this project - -**Status**: Accepted - -**Context**: Memory mirror contains copies of MEMORY.md which holds strategic -analysis and session notes - -**Decision**: Gitignore .context/memory/ for this project - -**Rationale**: Strategic content should not be in git history. Docs updated to -say 'often git-tracked' for the general recommendation — this project is the -exception. - -**Consequence**: Mirror and archives are local-only for this project. Other -projects can still track them. Sync and drift detection work the same way -regardless. - ---- - -## [2026-03-05-042154] Memory bridge design: three-phase architecture with hook nudge + on-demand - -**Status**: Accepted - -**Context**: Brainstormed how to bridge Claude Code MEMORY.md with ctx -structured context files - -**Decision**: Memory bridge design: three-phase architecture with hook nudge + -on-demand - -**Rationale**: Hook nudge + on-demand gives user choice and freedom. Wrap-up is -the publish trigger, never commit (footgun). Heuristic classification for v1, no -LLM. Marker-based merge for bidirectional conflict. Mirror is git-tracked + -timestamped archives. Foundation spec delivers sync/status/diff/hook; import and -publish are future phases. - -**Consequence**: Foundation spec in specs/memory-bridge.md, import/publish specs -deferred to ideas/. 
Tasked out as S-0.1.1 through S-0.1.10 in ideas/TASKS.md. - ---- - -## [2026-03-05-023937] Revised strategic analysis: blog-first execution order, bidirectional sync as top-level section - -**Status**: Accepted - -**Context**: Editorial review of ideas/claude-memory-strategic-analysis.md -surfaced six structural weaknesses in competitive positioning - -**Decision**: Revised strategic analysis: blog-first execution order, -bidirectional sync as top-level section - -**Rationale**: 200-line cap is fragile differentiator (demoted); org-scoped -memory is the real threat (elevated to HIGH); model agnosticism is premature -(parked with trigger condition); bidirectional sync is the most underweighted -insight (promoted); narrative shapes categories before implementation does (blog -first) - -**Consequence**: Execution order is now S-3 (blog) -> S-0 -> S-1 -> S-2. -Strategic doc restructured from 9 to 10 sections. Blog post shipped as first -deliverable. - ---- - -## [2026-03-04-105238] Interface-based GraphBuilder for multi-ecosystem ctx deps - -**Status**: Accepted - -**Context**: P-1.3 questioned whether non-Go dependency support would introduce -bloat and whether a semantic approach was better - -**Decision**: Interface-based GraphBuilder for multi-ecosystem ctx deps - -**Rationale**: The output pipeline (map[string][]string to Mermaid/table/JSON) -was already language-agnostic. Each ecosystem builder is ~40 lines — this is -finishing what was started, not bloat. Static manifest parsing (no external -tools for Node/Python) keeps dependencies minimal. - -**Consequence**: ctx deps now auto-detects Go, Node.js, Python, Rust. --type -flag overrides detection. ctx-architecture skill works across ecosystems without -changes. - ---- - -## [2026-03-02-165038] Billing threshold piggybacks on check-context-size, not heartbeat - -**Status**: Accepted - -**Context**: User wanted a configurable token-count nudge for billing awareness -(Claude Pro 1M context, extra cost after 200k). 
Heartbeat produces zero stdout -and can't relay to user. - -**Decision**: Billing threshold piggybacks on check-context-size, not heartbeat - -**Rationale**: check-context-size already reads tokens, has VERBATIM relay -working, and runs every prompt. Adding a third independent trigger there is -minimal code and follows established patterns. - -**Consequence**: New .ctxrc field billing_token_warn (default 0 = disabled). -One-shot per session via billing-warned-{sessionID} state file. -Template-overridable via check-context-size/billing.txt. - ---- - -## [2026-03-02-123611] Replace auto-migration with stderr warning for legacy keys - -**Status**: Accepted - -**Context**: Auto-migration code existed for promoting keys from -~/.local/ctx/keys/ and .context/.ctx.key to ~/.ctx/.ctx.key. Userbase is small -and this is alpha — no need to bloat the codebase. - -**Decision**: Replace auto-migration with stderr warning for legacy keys - -**Rationale**: Warn-only is simpler, avoids silent file operations, and puts the -user in control. Migration instructions in docs are sufficient for the small -userbase. - -**Consequence**: MigrateKeyFile() now only warns on stderr. promoteToGlobal() -helper deleted. Tests verify keys are not moved. - ---- - -## [2026-03-02-005213] Consolidate all session state to .context/state/ - -**Status**: Accepted - -**Context**: Session-scoped state (cooldown tombstones, pause markers, daily -throttle markers) was split between /tmp (via secureTempDir()) and -.context/state/ for project-scoped state - -**Decision**: Consolidate all session state to .context/state/ - -**Rationale**: Single location simplifies mental model, eliminates duplicated -secureTempDir() in two packages, removes the cleanup-tmp SessionEnd hook -entirely. .context/state/ is already gitignored and project-scoped. - -**Consequence**: All 18 callers updated. Tests switch from XDG_RUNTIME_DIR -mocking to CTX_DIR + rc.Reset(). 
Hook lifecycle drops from 4 events to 3 -(SessionEnd removed). - ---- - -## [2026-03-01-222733] PersistentPreRunE init guard with three-level exemption - -**Status**: Accepted - -**Context**: ctx commands handled missing .context/ inconsistently — some -caught errors, some got confusing file-not-found messages, some produced empty -output - -**Decision**: PersistentPreRunE init guard with three-level exemption - -**Rationale**: Single PersistentPreRunE on root command gives one clear error. -Three-level exemption (hidden commands, annotated commands, grouping commands) -covers all edge cases without per-command boilerplate - -**Consequence**: Boundary violation now returns an error instead of os.Exit(1), -making it testable. The subprocess-based boundary test was simplified to a -direct error assertion - ---- - -## [2026-03-01-161457] Global encryption key at ~/.ctx/.ctx.key - -**Status**: Superseded by [2026-03-02] global key simplification - -**Context**: Key stored next to ciphertext (.context/.ctx.key) was a security -antipattern and broke in worktrees. The slug-based per-project key system at -~/.local/ctx/keys/ was over-engineered for the common case (one user, one -machine, one key). - -**Decision**: Single global key at ~/.ctx/.ctx.key. Project-local override via -.ctxrc key_path or .context/.ctx.key. - -**Rationale**: One key per machine covers 99% of users. Per-project slug -filenames and three-tier resolution added complexity without clear benefit. -~/.ctx/ is the natural home (matches ~/.claude/ convention). Tilde expansion in -.ctxrc key_path fixes a standalone bug. - -**Consequence**: Auto-migration promotes legacy keys (project-local, -~/.local/ctx/keys/) to ~/.ctx/.ctx.key. Deleted KeyDir(), ProjectKeySlug(), -ProjectKeyPath(). ResolveKeyPath simplified to two params. 15+ doc files -updated. 
- ---- - -## [2026-03-01-112544] Heartbeat token telemetry: conditional fields, not always-present - -**Status**: Accepted - -**Context**: Adding tokens, context_window, usage_pct to heartbeat payloads. -First prompt of a session has no JSONL usage data yet. - -**Decision**: Heartbeat token telemetry: conditional fields, not always-present - -**Rationale**: Token fields are only included in the template ref when tokens > -0. This avoids misleading pct=0% on the first heartbeat and keeps payloads clean -for receivers that filter on field presence. - -**Consequence**: Webhook consumers must handle heartbeats both with and without -token fields. The message string also varies (with/without tokens=N pct=N% -suffix). - ---- - -## [2026-03-01-092613] Hook log rotation: size-based with one previous generation, matching eventlog pattern - -**Status**: Accepted - -**Context**: .context/logs/ files grow unbounded (~200KB after one month); -needed a cap - -**Decision**: Hook log rotation: size-based with one previous generation, -matching eventlog pattern - -**Rationale**: Architectural symmetry with eventlog, O(1) size check vs O(n) -line counting, diagnostic logs don't need deep history (webhooks cover serious -setups) - -**Consequence**: Each log file caps at ~2MB (current + .1). config.LogMaxBytes = -1MB, same as EventLogMaxBytes - ---- - -## [2026-03-01-090124] Promote 6 private skills to bundled plugin skills; keep 7 project-local - -**Status**: Accepted - -**Context**: Reviewed all 13 _ctx-* private skills to determine which are -universally useful for any ctx user vs specific to the ctx codebase or personal -infra. - -**Decision**: Promote 6 private skills to bundled plugin skills; keep 7 -project-local - -**Rationale**: Promote if the skill benefits any ctx-powered project without -project-specific hardcoding. Keep private if it references this repo's Go -internals, personal infra, or language-specific tooling. 
Promote list: _ctx-spec -(generic scaffolding), _ctx-brainstorm (design facilitation), _ctx-verify (claim -verification), _ctx-skill-create (skill authoring), _ctx-link-check (doc link -audit), _ctx-permission-sanitize (Claude Code permissions audit). Keep list: -_ctx-audit (Go/ctx checks), _ctx-qa (Go Makefile), _ctx-backup (SMB infra), -_ctx-release/_ctx-release-notes (ctx release workflow), _ctx-update-docs (ctx -package mapping), _ctx-absorb (borderline, revisit later). - -**Consequence**: Six skills move from .claude/skills/ to -internal/assets/claude/skills/ and become available to all ctx users via ctx -init. Cross-references between skills need updating (e.g., /_ctx-brainstorm -becomes /ctx-brainstorm). The seven remaining private skills stay project-local. - ---- - -## [2026-02-27-230718] Context window detection: JSONL-first fallback order - -**Status**: Accepted - -**Context**: check-context-size defaults to 200k but user runs 1M-context model, -causing false 110% warnings. JSONL contains the model name which maps to actual -window size. - -**Decision**: Context window detection: JSONL-first fallback order - -**Rationale**: effective_window = detect_from_jsonl(model) ?? -ctxrc.context_window ?? 200_000. JSONL is ground truth (reflects actual model in -use); ctxrc is fallback for first-hook-of-session or unknown models; 200k is -safe last resort. Having ctxrc override JSONL would artificially restrict the -check when a user forgets to update their config after switching models. - -**Consequence**: Most users get correct window automatically. ctxrc -context_window becomes a fallback, not an override. Task exists for -implementation. - ---- - -## [2026-02-27-002830] Context injection architecture v2 (consolidated) - -**Status**: Accepted - -**Consolidated from**: 3 decisions (2026-02-26) - -- **Diagram extraction**: ARCHITECTURE.md contained ~600 lines of ASCII/Mermaid - diagrams (~12K tokens). 
Extracted to 5 architecture-dia-*.md files outside - FileReadOrder. Agents get verbal summaries at session start; diagrams - available on demand. Total injection dropped 53% (20K→9.5K tokens). -- **Auto-injection replaces directives**: Soft instructions have ~75-85% - compliance ceiling because "don't apply judgment" is itself evaluated by - judgment. The v2 context-load-gate injects content directly via - `additionalContext` — agents never choose whether to comply. Injection - strategy: CONSTITUTION, CONVENTIONS, ARCHITECTURE, AGENT_PLAYBOOK verbatim; - DECISIONS, LEARNINGS index-only; TASKS mention-only. Total ~7,700 tokens. See: - `specs/context-load-gate-v2.md`. -- **Imperative framing**: Advisory framing allowed agents to assess relevance - and skip files. Imperative framing with unconditional compliance checkpoint - removes the escape hatch. Verbatim relay is fallback safety net, not primary - instruction. - ---- - -## [2026-02-26-200001] .context/state/ directory for project-scoped runtime state - -**Status**: Accepted - -New gitignored directory under `context_dir` resolution for ephemeral -project-scoped state. Follows `.context/logs/` precedent — added to -`config.GitignoreEntries` and root `.gitignore`. - -First use: injection oversize flag written by context-load-gate when injected -tokens exceed the configurable `injection_token_warn` threshold (`.ctxrc`, -default 15000). The check-context-size VERBATIM hook reads the flag and nudges -the user to run `/ctx-consolidate`. - -See: `specs/injection-oversize-nudge.md`. - ---- - -## [2026-02-26-100001] Hook and notification design (consolidated) - -**Status**: Accepted - -**Consolidated from**: 4 decisions (2026-02-12 to 2026-02-24) - -- Tone down proactive content suggestion claims in docs rather than add more - hooks. Already have 9 UserPromptSubmit hooks; adding another risks fatigue. - Conversational prompting already works. 
-- Hook commands must use structured JSON output - (hookSpecificOutput.additionalContext) instead of plain text, because Claude - Code treats plain text as ignorable ambient context. -- Drop prompt-coach hook entirely: zero useful tips fired, output channel - invisible to user, orphan temp file accumulation. The prompting guide already - covers best practices. -- De-emphasize /ctx-journal-normalize from the default journal pipeline. The - normalize skill is expensive and nondeterministic; programmatic normalization - handles most cases. Skill remains available for targeted per-file use. - ---- - -## [2026-02-26-100002] ctx init and CLAUDE.md handling (consolidated) - -**Status**: Accepted - -**Consolidated from**: 3 decisions (2026-01-20) - -- `ctx init` handles CLAUDE.md intelligently: creates if missing, backs up and - offers merge if existing, uses marker comment for idempotency. The `--merge` - flag enables non-interactive append. -- `ctx init` always generates `.claude/hooks/` alongside `.context/` with no - flag needed. Other AI tools ignore `.claude/`; Claude Code users get seamless - zero-config experience. -- Core tool stays generic and tool-agnostic, with optional Claude Code - enhancements via `.claude/hooks/`. Other AI tools can be supported similarly - (`ctx hook cursor`, etc.). - ---- - -## [2026-02-26-100004] Task and knowledge management (consolidated) - -**Status**: Accepted - -**Consolidated from**: 4 decisions (2026-01-27 to 2026-02-18) - -- Tasks must include explicit deliverables, not just implementation steps. - Parent tasks define WHAT the user gets; subtasks decompose HOW to build it. - Without explicit deliverables, AI optimizes for checking boxes. -- Use reverse-chronological order (newest first) for DECISIONS.md and - LEARNINGS.md. Ensures most recent items are read first regardless of token - budget. -- Add quick reference index to DECISIONS.md: compact table at top allows - scanning; agents can grep for full timestamp to jump to entry. 
Auto-updated on - `ctx add decision`. -- Knowledge scaling via archive path for decisions and learnings: follow the - task archive pattern, move old entries to `.context/archive/`, extend `ctx - compact --archive` to cover all three file types. - ---- - -## [2026-02-26-100005] Agent autonomy and separation of concerns (consolidated) - -**Status**: Accepted - -**Consolidated from**: 3 decisions (2026-01-21 to 2026-01-28) - -- Removed AGENTS.md from project root. Consolidated on CLAUDE.md (auto-loaded) + - .context/AGENT_PLAYBOOK.md as the canonical agent instruction path. Projects - using ctx should not create AGENTS.md. -- ~~Separate orchestrator directive from agent tasks~~ (superseded 2026-03-25: - IMPLEMENTATION_PLAN.md removed — TASKS.md is the single source of truth for - work items, AGENT_PLAYBOOK.md covers agent behavior). -- No custom UI -- IDE is the interface. UI is a liability; IDEs already excel at - file browsing, search, markdown editing, and git integration. Focus CLI - efforts on good markdown output. - ---- - -## [2026-02-26-100006] Security and permissions (consolidated) - -**Status**: Accepted - -**Consolidated from**: 4 decisions (2026-01-21 to 2026-02-24) - -- Keep CONSTITUTION.md minimal: only truly inviolable rules (security, - correctness, process invariants). Style preferences go in CONVENTIONS.md. - Overly strict constitution gets ignored. -- Centralize constants with semantic prefixes in `internal/config/config.go`: - `Dir*` for directories, `File*` for paths, `Filename*` for names, - `UpdateType*` for entry types. Single source of truth, compile-time typo - checks. -- Hooks use `ctx` from PATH, not hardcoded absolute paths. Standard Unix - practice; portable across machines/users. `ctx init` checks PATH availability - before proceeding. -- Drop absolute-path-to-ctx regex from block-dangerous-commands shell script. - The block-non-path-ctx Go subcommand already covers this with better patterns; - duplicating creates two sources of truth. 
- ---- - -## [2026-02-27-002831] Webhook and notification design (consolidated) - -**Status**: Accepted - -**Consolidated from**: 3 decisions (2026-02-22 to 2026-02-26) - -- **Session attribution**: All webhook payloads must include session_id. Reading - it from stdin costs nothing and enables multi-agent diagnostics. All run - functions take stdin parameter; tests use createTempStdin. -- **Opt-in events**: Notify events are opt-in, not opt-out. EventAllowed returns - false for nil/empty event lists. The correct default for notifications is - silence. `ctx notify test` bypasses the filter as a special case. -- **Shared encryption key**: Webhook URLs encrypted with the shared .ctx.key - (AES-256-GCM), not a dedicated key. One key, one gitignore entry, one rotation - cycle. Notify is a peer of scratchpad — both store user secrets encrypted at - rest. - ---- - -## [2026-02-11] Remove .context/sessions/ storage layer and ctx session command - -**Status**: Accepted - -**Context**: The session/recall/journal system had three overlapping storage -layers: `~/.claude/projects/` (raw JSONL transcripts, owned by Claude Code), -`.context/sessions/` (JSONL copies + context snapshots), and `.context/journal/` -(enriched markdown from `ctx recall import`). The recall pipeline reads directly -from `~/.claude/projects/`, making `.context/sessions/` a dead-end write sink -that nothing reads from. The auto-save hook copied transcripts to a directory -nobody consumed. The `ctx session save` command created context snapshots that -git already provides through version history. This was ~15 Go source files, a -shell hook, ~20 config constants, and 30+ doc references supporting -infrastructure with no consumers. - -**Decision**: Remove `.context/sessions/` entirely. Two stores remain: raw -transcripts (global, tool-owned in `~/.claude/projects/`) and enriched journal -(project-local in `.context/journal/`). 
- -**Rationale**: Dead-end write sinks waste code surface, maintenance effort, and -user attention. The recall pipeline already proved that reading directly from -`~/.claude/projects/` is sufficient. Context snapshots are redundant with git -history. Removing the middle layer simplifies the architecture from three stores -to two, eliminates an entire CLI command tree (`ctx session`), and removes a -shell hook that fired on every session end. - -**Consequence**: Deleted `internal/cli/session/` (15 files), removed auto-save -hook, removed `--auto-save` from watch, removed pre-compact auto-save from -compact, removed `/ctx-save` skill, updated ~45 documentation files. Four -earlier decisions superseded (SessionEnd hook, Auto-Save Before Compact, Session -Filename Format, Two-Tier Persistence Model). Users who want session history use -`ctx journal source`/`ctx journal import` instead. - ---- - - -*Module-specific, already-shipped, and historical decisions: -[decisions-reference.md](decisions-reference.md)* diff --git a/.context/GLOSSARY.md b/.context/GLOSSARY.md index 1e5d3a09c..c8575b829 100644 --- a/.context/GLOSSARY.md +++ b/.context/GLOSSARY.md @@ -15,48 +15,4 @@ DO NOT UPDATE FOR: ## Domain Terms -| Term | Definition | -|----------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Context | The set of `.context/*.md` files that give AI agents persistent project knowledge across sessions. Not a generic word; when capitalized, refers specifically to the ctx system. | -| Context packet | The token-budgeted markdown blob assembled by `ctx agent`. Contains prioritized excerpts from context files, sized to fit the agent's context window. 
| -| Context file | Any `.md` file in `.context/` that ctx manages (CONSTITUTION, TASKS, DECISIONS, etc.). Each has a defined purpose and priority. | -| Constitution | The set of inviolable rules in `CONSTITUTION.md`. Distinct from conventions: constitution rules cannot be bent; violating one means the task is wrong. | -| Convention | A project pattern or standard in `CONVENTIONS.md`. Conventions are strong recommendations that can be bent with justification; contrast with constitution rules. | -| Drift | When context files diverge from the actual codebase state. Types: dead path references, stale task counts, missing required files, potential secrets. Detected by `ctx drift`. | -| Dead path | A backtick-enclosed file path in ARCHITECTURE.md or CONVENTIONS.md that references a file no longer on disk. A drift warning type. | -| Staleness | When context files have not been updated to reflect recent code changes. Specific indicator: >10 completed tasks in TASKS.md signals the file needs compaction. | -| Read order | The priority sequence in which context files are loaded and presented to agents. Defined by `config.FileReadOrder`. Higher priority files are loaded first and survive token budget cuts. | -| Token budget | Maximum estimated token count for assembled context. Default 8000. Configurable via `CTX_TOKEN_BUDGET`, `.ctxrc`, or `--budget` flag. Uses 4-chars-per-token heuristic. | -| Curated tier | The `.context/*.md` files: manually maintained, token-budgeted, loaded by `ctx agent`. Contrast with full-dump tier. | -| Full-dump tier | The `.context/journal/` directory: imported session transcripts. Not auto-loaded; used for archaeology when curated context is insufficient. Browse with `ctx journal source`. | -| Compaction | The process of archiving completed tasks and cleaning up context files. Run via `ctx compact`. Moves completed tasks to archive; preserves phase structure. 
| -| Entry header | The timestamped heading format used in DECISIONS.md and LEARNINGS.md: `## [YYYY-MM-DD-HHMMSS] Title`. Parsed by `config.RegExEntryHeader`. | -| Index table | The auto-generated markdown table at the top of DECISIONS.md and LEARNINGS.md (between `` and `` markers). Updated by `ctx add` and `ctx decision/learnings reindex`. | -| Readback | A structured summary where the agent plays back what it knows (last session, active tasks, recent decisions) so the user can confirm correct context was loaded. From aviation: pilots repeat ATC instructions back to confirm they heard correctly. In ctx, triggered by "do you remember?" or `/ctx-remember`. | -| Ralph Loop | An iterative autonomous AI development workflow that uses `.context/loop.md` as a directive. Separate from ctx but complementary: Ralph drives the loop, ctx provides the memory. | -| Skill | A Claude Code Agent Skill: a markdown file in `.claude/skills/` that teaches the agent a specialized workflow. Invoked via `/skill-name`. | -| Live skill | The project-local copy of a skill in `.claude/skills/`. Can be edited by the user or agent. Contrast with template skill. | -| Template skill | The embedded copy of a skill in `internal/assets/claude/skills/`. Deployed on `ctx init`. Source of truth for the default version. | -| Hook | A Claude Code lifecycle script in `.claude/hooks/`. Fires on events: PreToolUse, UserPromptSubmit, SessionEnd. Generated by `ctx init`. | -| Consolidation | A code-quality sweep checking for convention drift: magic strings, predicate naming, file size, dead exports, etc. Run via `/consolidate` skill. Distinct from compaction (which is context-level). | -| 3:1 ratio | Heuristic for consolidation timing: consolidate after every 3 feature/bugfix sessions. Prevents convention drift from compounding. | -| E/A/R classification | Expert/Activation/Redundant taxonomy for evaluating skill quality. Good skill = >70% Expert knowledge, <10% Redundant with what the model already knows. 
| -| DescKey | A string constant in `config/embed/text/` that maps to a user-facing message in embedded YAML. All output text is looked up by DescKey, enabling future i18n. Adding a new message requires a DescKey constant, a YAML entry, and a write/err function. | -| Governance warning | An advisory message the MCP server appends to tool responses based on session state (e.g., "context not loaded", "drift not checked", "time to persist"). Never blocks tool execution; only nudges. | -| Entry block | A parsed unit from DECISIONS.md or LEARNINGS.md: the timestamped header plus all content until the next header. Used by index generation and compaction. Parsed by `internal/index.ParseEntryBlocks()`. | -| Journal pipeline | The 5-stage processing chain for imported session transcripts: exported -> enriched (YAML frontmatter) -> normalized (soft-wrap, clean JSON) -> fences_verified -> locked. Tracked in `.context/journal/.state.json`. | -| Slug format | The encoding used to derive a filesystem path from a project's absolute path. Used by `internal/memory` to locate Claude Code's MEMORY.md. Convention: replace `/` with `-`, prefix with `-`. | -| Boundary validation | The security check ensuring all resolved file paths stay under the project root. Prevents path traversal attacks. Enforced by `internal/validate` and `internal/io/Safe*` functions. | -| cmd/root + core/ | The package taxonomy for CLI commands (Decision 2026-03-06). `cmd/root/cmd.go` defines the Cobra command; `cmd/root/run.go` implements the handler; `core/` holds reusable logic. Grouping commands use `parent.Cmd()` instead. | -| Safe* functions | The guarded I/O wrappers in `internal/io` (SafeReadFile, SafeWriteFile, etc.) that apply path validation, symlink rejection, and deny-list filtering. Direct `os.ReadFile`/`os.WriteFile` calls are banned by audit. | -| Throttle marker | A date-stamped file in `.context/state/` that prevents hook checks from running more than once per day. 
The marker's mtime is compared to today's date; if same day, the check is skipped. | - ## Abbreviations - -| Abbreviation | Expansion | -|--------------|---------------------------------------------------------------------------------------------------------| -| ctx | Context (the CLI tool and the system it manages) | -| rc | Runtime configuration (from Unix `.xxxrc` convention); refers to `.ctxrc` and the `internal/rc` package | -| assets | Embedded assets; the `internal/assets` package containing go:embed templates and plugin files | -| CWD | Current working directory; used in session matching to correlate sessions with projects | -| JSONL | JSON Lines; the format Claude Code uses for session transcripts (one JSON object per line) | diff --git a/.context/HANDOVER-2026-04-22.md b/.context/HANDOVER-2026-04-22.md new file mode 100644 index 000000000..09040b555 --- /dev/null +++ b/.context/HANDOVER-2026-04-22.md @@ -0,0 +1,213 @@ +# Session Handover — 2026-04-22 + +This file captures state from session `d6889b7c` (branch +`feat/explicit-context-dir`) so the next session can pick up +without re-investigating. Delete this file after it's been read. + +## Branch State + +- Branch: `feat/explicit-context-dir` (0 commits ahead of main — + everything is in the working tree). +- **328 files changed**, 4402 insertions, 4652 deletions. +- `go build ./...` clean, `make lint` 0 issues, `go test ./...` + exit 0 as of end of session. +- No commits were made. **First order of business next session: + decide the commit strategy.** Logical splits that make sense: + + 1. Agent-docs rewrite (AGENT_PLAYBOOK, CLAUDE.md, gate, internal + assets — watermelon-rind removal + triage error policy). + 2. Explicit-context-dir hardening in `internal/**` (resolver + plumbing, ContextChild removal, KeyPath signature change, + ctxrcPath rename, DirLine/AppendDir propagation). + 3. FullPreamble 5-value return + 16 hook/callsite migration. + 4. RequireContextDir command-entry sweep (25 RunE gates). 
+ 5. `ctx backup` full deprecation (per `specs/deprecate-ctx-backup.md`). + 6. Pre-existing build-fix: missing `DescKeyWriteSnapshotUpdated` / + `Saved` constants added (un-breaks `internal/write/restore`). + + Splitting (2) from (3) may be impractical — the callsite diff + is tangled. One "explicit-context-dir hardening" commit + one + "ctx backup removal" commit is the minimum defensible split. + One giant commit also works and may be honest given the churn. + +## What Changed, By Theme + +### 1. Agent docs rewrite + +Rewrote `ctx`'s agent-facing prose to drop prohibitions about +behaviors agents wouldn't otherwise invent (watermelon-rind +pattern) and to split the old blanket "relay ctx errors verbatim +and stop" into an **invocation-error vs everything-else triage**. + +Touched: `CLAUDE.md`, `.context/AGENT_PLAYBOOK.md`, +`.context/AGENT_PLAYBOOK_GATE.md`, `internal/assets/claude/CLAUDE.md`, +`internal/assets/context/AGENT_PLAYBOOK.md`, same gate copy. + +### 2. Explicit-context-dir hardening + +Sharpened the "explicit declaration or nothing" model: + +- **Deleted `resolve.ContextChild`**. Inlined all 10 callers with + explicit `rc.ContextDir()` + `errors.Is(err, ErrDirNotDeclared)` + + `filepath.Join` (no string concat). +- **`rc.KeyPath()`** now returns `(string, error)`. Propagates + resolver failures instead of silently handing `""` to + `crypto.ResolveKeyPath` (which used to either filepath-join a + CWD-relative path or fall through to a global key — both bugs + this branch was built to kill). +- **`rc.ctxRcPath`** → `(string, error)` (plus renamed from + `ctxRcPath` to `ctxrcPath` to clear the stutter audit). +- **`resolve.DirLine` / `resolve.AppendDir`** now return + `(string, error)`. Noisy-TUI warn log stays (intentional, + documented) — return channel added so non-rendering callers + can propagate. 
+- **`notify.LoadWebhook`**, **`message.OverridePath`**, + **`message.HasOverride`**, **`hub.LoadBodies`**, + **`merge.LoadKey`**, **`store.KeyPath`**, + **`context/validate.Exists`**, **`sync.CheckNewDirectories`** + all propagate `ErrDirNotDeclared` instead of swallowing into + `(zero, nil)` pairs. +- **`knowledge.CheckHealth(sessionID, ctxDir)`**, + **`health.ReadMapTracking(ctxDir)`**, + **`hubsync.Connected(ctxDir)`**, + **`oversizeContent(ctxDir)`** now take `ctxDir` as a parameter + from the calling hook (dead internal resolver call eliminated). +- **`coreArchive.BackupProject` CWD-based resolution**: would + have been a latent bug but the whole archive package went away + with `ctx backup`. + +### 3. FullPreamble extended + +`internal/cli/system/core/check/full_preamble.go` now returns +`(input, sessionID, ctxDir, stateDir, ok)`. All 16 callers +updated. 4 hooks (`check_persistence`, `check_memory_drift`, +`check_ceremony`, `check_journal`) had their redundant +`errors.Is(ctxErr, ErrDirNotDeclared)` blocks deleted and use the +preamble's `ctxDir` directly. The 3 non-FullPreamble hooks +(`context_load_gate`, `check_context_size`, `heartbeat`) had +their `ErrDirNotDeclared` branches replaced with a defensive +"unreachable but log loudly" fallback after the `state.Initialized` +gate. + +**Pending follow-up** (already in TASKS.md): refactor +`FullPreamble` to return a `Preamble` struct instead of a +5-value tuple. Ugly positional return is tolerated for now. + +### 4. RequireContextDir command-entry sweep + +Added `rc.RequireContextDir()` gates at the top of 25 command +RunE entry points so users get the rich multi-line error instead +of a terse sentinel propagated through library layers. 
+Commands touched: +`add`, `agent`, `backup` (conditional on scope — then the whole +command died; see theme 5), `change`, `compact`, `drift`, +`journal schema check`, `load`, `memory unpublish` (already +gated via callee — only `SilenceUsage` added), `message {edit, +list, reset, show}`, `notify {setup, test}`, `pad {add, edit, +export, merge, mv, normalize, rm, root, show, tag}`, `sync`, +`watch`. + +Decision heuristic for which commands got gated: anything that +writes/reads files under `.context/` or calls a library that +does. Exempt: `init`, `activate`, `deactivate`, `bootstrap`, +`version`, `help` — they handle not-declared themselves. + +### 5. `ctx backup` full deprecation + +Executed `specs/deprecate-ctx-backup.md` end-to-end: + +- **Deleted**: `internal/cli/backup/`, + `internal/cli/system/cmd/check_backup_age/`, + `internal/cli/system/core/archive/`, `internal/write/backup/`, + `internal/exec/gio/`, + `internal/assets/hooks/messages/check-backup-age/`, + `.claude/skills/_ctx-backup/`, `docs/cli/backup.md`. +- **Trimmed**: `internal/config/archive/` to task-archive + constants only; `internal/err/backup/` to four generic + constructors (`Create`, `CreateArchiveDir`, `WriteArchive`, + `ContextDirNotFound`) that `init`, task archival, and + bootstrap still use — package name kept as a historical + label rather than churning the non-backup callers. +- **Surgical edits**: `bootstrap/group.go`, `cli/system/system.go`, + `config/env/env.go`, `config/hook/hook.go`, `entity/system.go`, + `cli/system/doc.go`, and every `internal/assets/commands/**.yaml`. +- **Tests updated**: `registry_test.go` count `32 → 31`; + `watch_test.go` expects `"no context directory"` instead of + the old `"ctx init"` suggestion. 
+- **All shipped docs updated**: `docs/cli/index.md`, + `docs/cli/system.md`, `docs/home/common-workflows.md`, + `docs/home/contributing.md`, `docs/recipes/customizing-hook-messages.md`, + `docs/recipes/hook-sequence-diagrams.md`, + `docs/recipes/hook-output-patterns.md`, + `internal/cli/system/README.md`, `zensical.toml`, + `CONTRIBUTING-SKILLS.md`. +- **New**: `docs/operations/runbooks/backup-strategy.md` — the + migration path (rsync, cron, Time Machine, Borg/restic). + +Historical artifacts intentionally left alone (per explicit +call): `.context/journal-site/**`, `.context/DECISIONS.md`, +`.context/decisions-reference.md`, `specs/released/**`, +`specs/deprecate-ctx-backup.md` itself, other spec drafts in +`specs/**`, `ideas/**`. Editing those would rewrite history. + +Reminder [12] — "had to stop sync-to-asgard because broadcom +mirror repo is huge; solve it to resume backups" — now has a +different resolution path: there is no `ctx backup` anymore. +The runbook suggests external tools (rsync, Borg, etc.) and +hub for knowledge. Worth reviewing whether the reminder should +be dismissed or reframed. + +### 6. Pre-existing build break (fixed while owning the branch) + +`internal/write/restore/permission.go` referenced +`DescKeyWriteSnapshotUpdated` and `DescKeyWriteSnapshotSaved` +that never existed in any `internal/config/embed/text/*.go` +file. Caused a hard build failure mid-session. Added both to +`internal/config/embed/text/restore.go`. Corresponding YAML +entries are **not** added (write.yaml has no `write.snapshot-*` +keys yet) — the restore flow may print empty strings until +those entries are authored. **Worth verifying** next session: +either add the YAML entries or confirm this feature was +stubbed-but-incomplete before my session. 
+ +## Persisted Memories (global, not project-local) + +Three feedback memories saved in +`~/.claude/projects/-Users-volkan-Desktop-WORKSPACE-ctx/memory/`: + +- `feedback_no_watermelon_rinds.md` — don't warn agents off + behaviors they wouldn't invent. +- `feedback_no_preserve_old_behavior.md` — "preserves existing + behavior" is not a justification for leaving silent-skip code. +- `feedback_no_phase_deferral.md` — phasing sweeps into "Round 1 + now, Round 2 later" is deferral; do the sweep in one pass. + +## What Was NOT Touched + +- **Reminders [4] through [11]** are standing todos from earlier + sessions, out of scope for this one. [12] is partially + resolved by the backup deprecation (see theme 5). +- **Ideas and spec drafts** that reference `ctx backup` — left + intentionally as historical artifacts. +- **Journal import / enrichment backlog** (129 unimported, + 435 unenriched at session start) — not addressed. +- **Knowledge-file growth warnings** (DECISIONS.md 79 entries, + LEARNINGS.md 103, CONVENTIONS.md 272 lines) — consolidation + not attempted this session. + +## Quick Gotchas For Next Session + +- `go build ./...` still works, but the diff is enormous. + `go test -count=1 ./...` to rule out cache issues before + claiming green. +- The `state/` directory at the repo root was deleted manually + earlier in this session and did NOT reappear under normal + use — the earlier "state leaks outside `.context/state/`" + concern looks resolved. If `state/` shows up in `git status` + during next session, investigate before deleting. +- When committing, **do not** let a pre-commit hook drop the + new memories or this handover. +- `/ctx-wrap-up` was not run this session despite the + heuristic. If you run it, it will suggest capturing much of + this file's content into LEARNINGS.md / DECISIONS.md — + de-dupe carefully. 
diff --git a/.context/LEARNINGS.md b/.context/LEARNINGS.md index 762326806..5d937d14d 100644 --- a/.context/LEARNINGS.md +++ b/.context/LEARNINGS.md @@ -15,1653 +15,6 @@ DO NOT UPDATE FOR: --> -| Date | Learning | -|----|--------| -| 2026-04-14 | Constitution forbids context window as a deferral excuse | -| 2026-04-14 | docs/cli/system.md and embed/cmd/system.go diverged on bootstrap promotion intent | -| 2026-04-14 | Raft-lite trade-off is the load-bearing choice in internal/hub | -| 2026-04-14 | AST stutter test only checks FuncDecl, not GenDecl | -| 2026-04-14 | Brand-name handling in title-case engines must cover possessives | -| 2026-04-13 | GPG signing from non-TTY contexts requires pinentry-mac (or equivalent) | -| 2026-04-13 | Load average measures a queue, not CPU utilization | -| 2026-04-13 | rc.ContextDir() is the single source of truth — fix the resolver, not callers | -| 2026-04-09 | Pad index shifting is a real UX bug in batch operations | -| 2026-04-08 | fmt.Fprintf to strings.Builder silently discards errors | -| 2026-04-08 | AST audit tests must cover unexported functions too | -| 2026-04-06 | Agents ignore system-reminder content without explicit relay instructions | -| 2026-04-04 | Format-verb strings are localizable text, not exempt from magic string checks | -| 2026-04-04 | Agents add allowlist entries to make tests pass — guard every exemption | -| 2026-04-03 | Subagent scope creep and cleanup (consolidated) | -| 2026-04-03 | Bulk rename and replace_all hazards (consolidated) | -| 2026-04-03 | Import cycles and package splits (consolidated) | -| 2026-04-03 | Lint suppression and gosec patterns (consolidated) | -| 2026-04-03 | Skill lifecycle and promotion (consolidated) | -| 2026-04-03 | Cross-cutting change ripple (consolidated) | -| 2026-04-03 | Dead code detection (consolidated) | -| 2026-04-03 | desc.Text() is the single highest-connectivity symbol in the codebase | -| 2026-04-01 | Raw I/O migration unlocks downstream checks for free | -| 
2026-04-01 | go/packages respects build tags — darwin-only violations invisible on Linux | -| 2026-04-01 | Copilot CLI skills need a sync mechanism to prevent drift from ctx skills | -| 2026-04-01 | Contributor PRs based on older code reintroduce removed features | -| 2026-03-31 | Magic string cleanup compounds: each pass reveals the next layer | -| 2026-03-31 | Force-loaded behavioral prose gets ignored — action-gating hooks don't | -| 2026-03-31 | Legacy key directory cleanup was specified but not automated | -| 2026-03-31 | Convention audits must check cmd/ purity, not just types and docstrings | -| 2026-03-31 | JSON Schema default fields cause linter errors with some validators | -| 2026-03-30 | Architecture diagrams drift silently during feature additions | -| 2026-03-30 | Python-generated doc.go files need gofmt — formatter strips bare // padding lines | -| 2026-03-30 | lint-docstrings.sh greedy sed hid all return-type violations | -| 2026-03-25 | Machine-generated CLAUDE.md content consumes per-turn budget without proportional value | -| 2026-03-25 | Template improvements don't propagate to existing projects | -| 2026-03-24 | lint-drift false positives from conflating constant namespaces | -| 2026-03-24 | git describe --tags follows ancestry, not global tag list | -| 2026-03-23 | Typography detection script needs exclusion lists for intentional uses | -| 2026-03-23 | Splitting core/ into subpackages reveals hidden structure | -| 2026-03-23 | Higher-order callbacks in param structs are a code smell | -| 2026-03-20 | Commit messages containing script paths trigger PreToolUse hooks | -| 2026-03-18 | Lazy sync.Once per-accessor is a code smell for static embedded data | -| 2026-03-17 | Write package output census: 69 trivial/simple, 38 consolidation candidates, 18 complex | -| 2026-03-16 | Docstring tasks require reading CONVENTIONS.md Documentation section first | -| 2026-03-16 | Convention enforcement needs mechanical verification, not behavioral repetition | 
-| 2026-03-16 | One-liner method wrappers hide dependencies without adding value | -| 2026-03-16 | Agents reliably introduce gofmt issues during bulk renames | -| 2026-03-15 | Contributor PRs need post-merge follow-up commits for convention alignment | -| 2026-03-15 | Grep for callers must cover entire working tree before deleting functions | -| 2026-03-14 | Stderr error messages are user-facing text that belongs in assets | -| 2026-03-14 | Hardcoded _alt suffixes create implicit language favoritism | -| 2026-03-13 | sync-why mechanism existed but was not wired to build | -| 2026-03-12 | Project-root files vs context files are distinct categories | -| 2026-03-12 | Constants belong in their domain package not in god objects | -| 2026-03-07 | Always search for existing constants before adding new ones | -| 2026-03-07 | SafeReadFile requires split base+filename paths | -| 2026-03-06 | Stale directory inodes cause invisible files over SSH | -| 2026-03-06 | Stats sort uses string comparison on RFC3339 timestamps with mixed timezones | -| 2026-03-06 | Claude Code supports PreCompact and SessionStart hooks that ctx does not use | -| 2026-03-06 | Package-local err.go files invite broken windows from future agents | -| 2026-03-05 | State directory accumulates silently without auto-prune | -| 2026-03-05 | Global tombstones suppress hooks across all sessions | -| 2026-03-05 | Claude Code has two separate memory systems behind feature flags | -| 2026-03-05 | Blog post editorial feedback is higher-leverage than drafting | -| 2026-03-04 | CONSTITUTION hook compliance is non-negotiable — don't work around it | -| 2026-03-02 | Hook message registry test enforces exhaustive coverage of embedded templates | -| 2026-03-02 | Existing Projects is ambiguous framing for migration notes | -| 2026-03-02 | Claude Code JSONL model ID does not distinguish 200k from 1M context | -| 2026-03-01 | Gosec G306 flags test file WriteFile with 0644 permissions | -| 2026-03-01 | Converting 
PersistentPreRun to PersistentPreRunE changes exit behavior | -| 2026-03-01 | Test HOME isolation is required for user-level path functions | -| 2026-03-01 | Task descriptions can be stale in reverse — implementation done but task not marked complete | -| 2026-03-01 | Model-to-window mapping requires ordered prefix matching | -| 2026-03-01 | TASKS.md template checkbox syntax inside HTML comments is parsed by RegExTaskMultiline | -| 2026-03-01 | Hook logs had no rotation; event log already did | -| 2026-02-28 | ctx pad import, ctx pad export, and ctx system resources make three hack scripts redundant | -| 2026-02-28 | Getting-started docs assumed Claude Code as the only agent | -| 2026-02-28 | Plugin reload script must rebuild cache, not just delete it | -| 2026-02-27 | site/ directory must be committed with docs changes | -| 2026-02-27 | Doctor token_budget vs context_window confusion | -| 2026-02-27 | Drift detector false positives on illustrative code examples | -| 2026-02-27 | Context injection and compliance strategy (consolidated) | -| 2026-02-26 | Webhook silence after ctxrc profile swap is the most common notify debugging red herring | -| 2026-02-26 | Documentation drift and auditing (consolidated) | -| 2026-02-26 | Agent context loading and task routing (consolidated) | -| 2026-02-26 | Go testing patterns (consolidated) | -| 2026-02-26 | PATH and binary handling (consolidated) | -| 2026-02-26 | Task management and exit criteria (consolidated) | -| 2026-02-26 | Agent behavioral patterns (consolidated) | -| 2026-02-26 | Hook compliance and output routing (consolidated) | -| 2026-02-26 | ctx add and decision recording (consolidated) | -| 2026-02-24 | CLI tools don't benefit from in-memory caching of context files | -| 2026-02-22 | Hook behavior and patterns (consolidated) | -| 2026-02-22 | UserPromptSubmit hook output channels (consolidated) | -| 2026-02-22 | Linting and static analysis (consolidated) | -| 2026-02-22 | Permission and settings drift 
(consolidated) | -| 2026-02-22 | Gitignore and filesystem hygiene (consolidated) | -| 2026-01-28 | IDE is already the UI | ---- - -## [2026-04-14-010134] Constitution forbids context window as a deferral excuse - -**Context**: Mid-session, agent proposed pacing through doc.go rewrites with the -reasoning that context budget was tight. - -**Lesson**: The CONSTITUTION explicitly lists 'We are running out of context -window' as a forbidden deferral phrase under No Excuse Generation. The rule is -real and applies to agent self-pacing, not just user-facing answers. - -**Application**: When tempted to scope down because context is tight, re-read -the constitution. The right move is to do the work end-to-end, not to ask the -user which slice to skip. - ---- - -## [2026-04-14-010134] docs/cli/system.md and embed/cmd/system.go diverged on bootstrap promotion intent - -**Context**: Header comment in internal/config/embed/cmd/system.go claimed -bootstrap was promoted to top-level; the bootstrap.go registration never -actually promoted it. Two contradictory sources of truth coexisted silently. - -**Lesson**: Header-comment claims about command-tree structure are unaudited; -they can drift from registrations without any test failing. Trust the code, not -the comment. - -**Application**: When evaluating any package_name namespace cleanup type claim -about command structure, verify against the actual cobra registration in -internal/bootstrap/group.go before acting. - ---- - -## [2026-04-14-010134] Raft-lite trade-off is the load-bearing choice in internal/hub - -**Context**: Discovered while writing thorough doc.go for internal/hub. The -package embeds HashiCorp Raft for leader election only; data replication is -sequence-based gRPC sync over the append-only JSONL store. - -**Lesson**: A leader crash window between accept and replicate can lose the most -recent write. 
Append-only storage plus idempotent clients make this acceptable;
-full Raft log replication would not be needed and would not be simpler.
-
-**Application**: Any future "make hub stronger" proposal must engage with this
-trade-off explicitly. Do not abandon Raft-lite accidentally by introducing
-log-replicated state; that would invalidate the simplicity argument.
-
----
-
-## [2026-04-14-010134] AST stutter test only checks FuncDecl, not GenDecl
-
-**Context**: tpl.TplEntryMarkdown stuttered for a long time because
-TestNoStutteryFunctions in internal/audit walks *ast.FuncDecl only; the constant
-slipped through.
-
-**Lesson**: The audit suite has a real coverage gap for *ast.GenDecl (consts,
-vars, types). Stuttery type/const names will not be caught until the audit is
-extended to walk those node kinds.
-
-**Application**: When a stuttery identifier is reported by a human, check both
-the offending file and whether the audit can catch it; if not, file an
-audit-extension task.
-
----
-
-## [2026-04-14-010105] Brand-name handling in title-case engines must cover possessives
-
-**Context**: First pass of hack/title-case-headings.py produced 'Ctx's' from
-'ctx's' because the brand check matched the bare token only.
-
-**Lesson**: A brand allowlist needs to recognize the bare token, the token
-plus 's, the token plus s, and short apostrophe-suffixed variants. Single-word
-matching misses contractions and possessives.
-
-**Application**: When adding a new always-lowercase brand to
-hack/title-case-headings.py, extend the suffix-aware loop in title_case_word,
-not just the BRAND_LOWER set.
-
----
-
-## [2026-04-13-153618] GPG signing from non-TTY contexts requires pinentry-mac (or equivalent)
-
-**Context**: git commit failed from Claude Code's shell with 'gpg: signing
-failed: No such file or directory' — the default pinentry-curses cannot open a
-TTY in agent-invoked shells. Manual commits from a real terminal worked fine.
-
-**Lesson**: GPG's default curses pinentry requires an interactive TTY. 
In -non-TTY contexts (Claude Code, CI, scripts, cron), signing fails silently-ish. -The fix is to configure a GUI pinentry that uses the OS keychain: brew install -pinentry-mac; echo 'pinentry-program $(brew --prefix)/bin/pinentry-mac' >> -~/.gnupg/gpg-agent.conf; gpgconf --kill gpg-agent. Once the passphrase is saved -in Keychain, signing works from any context. - -**Application**: If agents or CI need to sign commits, configure pinentry-mac -(macOS) or pinentry-gtk/pinentry-qt (Linux) with the OS keychain, not -pinentry-curses. This is a one-time setup per machine. - ---- - -## [2026-04-13-153618] Load average measures a queue, not CPU utilization - -**Context**: The 'Load Xx CPU count' resource alert fired at 1.74x while htop -showed per-core utilization well under 50% and idle cores. Load average counts -runnable + uninterruptible-sleep processes, smoothed over 1/5/15 minutes. - -**Lesson**: Load average and CPU% measure different things. High load with low -CPU% typically means many short-lived processes or I/O-bound work (e.g., go test -spawning hundreds of parallel test binaries). The 1-minute average is too -reactive for dev machines that periodically run test suites — 5-minute smooths -transient spikes without hiding sustained pressure. - -**Application**: For alerting thresholds based on system load, prefer 5-minute -over 1-minute averages. 1-minute is useful for interactive debugging; 5-minute -is better for automated alerts that should not fire on normal build/test -activity. - ---- - -## [2026-04-13-153618] rc.ContextDir() is the single source of truth — fix the resolver, not callers - -**Context**: When ctx init failed with a boundary error, my first instinct was -to have init bypass rc.ContextDir() and use filepath.Join(cwd, dir.Context) -directly. Volkan shut that down: rc.ContextDir() encodes invariants (team -shares, symlinks, network mounts, .ctxrc overrides) that individual commands -cannot reason about. 
- -**Lesson**: Resolution chains with multiple fallbacks are contracts. If one -command bypasses the chain, it silently diverges from every other command's -notion of 'the context directory.' When a resolver produces a wrong answer for a -specific case, fix the resolver — don't let callers opt out. - -**Application**: Any time you see rc.ContextDir(), rc.RC(), or similar central -resolvers producing a bad result, the fix belongs in the resolver itself (or in -its input data like .ctxrc). Caller-side bypasses create drift. - ---- - -## [2026-04-09-001323] Pad index shifting is a real UX bug in batch operations - -**Context**: ctx pad rm 10; rm 11; rm 12 deleted wrong entries because indices -shifted after each deletion - -**Lesson**: Any ID-based system where users chain operations needs stable IDs. -Look-then-act is safe for single ops; look-then-batch-act breaks with shifting -indices - -**Application**: Both pad and remind now use stable IDs with batch delete and -range support. Apply same pattern to any future numbered-list subsystem - ---- - -## [2026-04-08-074612] fmt.Fprintf to strings.Builder silently discards errors - -**Context**: golangci-lint errcheck allows fmt.Fprintf to strings.Builder -because Write never fails, but project convention says zero silent discard - -**Lesson**: Linter coverage gaps exist where language guarantees mask -conventions. AST tests fill the gap - -**Application**: Created TestNoUncheckedFmtWrite to enforce fmt.Fprintf error -handling. Use if _, err := fmt.Fprintf(...) with log.Warn on the error path - ---- - -## [2026-04-08-074604] AST audit tests must cover unexported functions too - -**Context**: TestDocCommentStructure only checked exported functions, so -agent-written helpers in format.go had no godoc enforcement - -**Lesson**: Convention enforcement tests must default to scanning all documented -functions. 
Use explicit opt-outs (test files) not opt-ins (exported only)
-
-**Application**: When adding AST audit tests, scan all functions. We fixed
-TestDocCommentStructure to drop the IsExported gate and fixed 84 violations
-
----
-
-## [2026-04-06-204226] Agents ignore system-reminder content without explicit relay instructions
-
-**Context**: Provenance line (Session: abc | Branch: main @ hash) was emitted by
-hook but agents in other projects silently ignored it. The line appeared in the
-system-reminder but the agent treated it as internal metadata.
-
-**Lesson**: Claude Code surfaces hook stdout as system-reminder tags. Agents
-only relay content that has explicit display instructions. "IMPORTANT:" means
-pay attention internally. "Display this line verbatim" means show to user.
-Without the instruction, even correct output is invisible to the user.
-
-**Application**: Any hook output intended for the user must include an explicit
-relay instruction like "Display this line verbatim" at the start of your
-response. Do not rely on "IMPORTANT:" alone — it signals internal priority,
-not user-facing output. 
- ---- - -## [2026-04-04-025813] Format-verb strings are localizable text, not exempt from magic string checks - -**Context**: Strings like '%d entries checked' were passing TestNoMagicStrings -because the format-verb exemption was too broad - -**Lesson**: Any string containing English words alongside format directives is -user-facing text that belongs in YAML assets - -**Application**: Removed format-verb, URL-scheme, HTML-entity, and err/ -exemptions from TestNoMagicStrings - ---- - -## [2026-04-04-025805] Agents add allowlist entries to make tests pass — guard every exemption - -**Context**: Found that every exemption map/allowlist in audit tests is a -tempting shortcut for agents - -**Lesson**: Added DO NOT widen guard comments to all 10 exemption data -structures across 7 test files - -**Application**: Every new audit test with an exemption must include the guard -comment. Review PRs for drive-by allowlist additions. - ---- - -## [2026-04-03-180000] Subagent scope creep and cleanup (consolidated) - -**Consolidated from**: 4 entries (2026-03-06 to 2026-03-23) - -- Subagents reliably rename functions, restructure files, change import aliases, - and modify function signatures beyond their stated scope — even narrowly - scoped tasks like fixing em-dashes in comments -- Subagents create new files during refactors but consistently fail to delete - the originals — always audit for stale files, duplicate definitions, and - orphaned imports afterward -- After any agent-driven refactor: run `git diff --stat` and `git diff - --name-only HEAD`, revert anything outside the intended scope, and check for - stale package declarations before building - ---- - -## [2026-04-03-180000] Bulk rename and replace_all hazards (consolidated) - -**Consolidated from**: 3 entries (2026-03-15 to 2026-03-20) - -- `replace_all` on short tokens (e.g. 
`core.`, function names) matches inside - longer identifiers and function definitions — `remindcore.` becomes - `remindtidy.`, `func HumanAgo` becomes `func format.DurationAgo` (invalid Go) -- `sed` insert-before-first-match does not understand Go import aliases — the - alias attaches to whatever line sed inserts, not the original target -- For function renames: delete the old definition separately rather than using - replace_all. For bulk import additions: check for aliased imports first and - handle them separately, or use goimports - ---- - -## [2026-04-03-180000] Import cycles and package splits (consolidated) - -**Consolidated from**: 5 entries (2026-03-06 to 2026-03-22) - -- Types in god-object files (e.g. hook/types.go with 15+ types from 8 domains) - create circular dependencies — move types to their owning domain package -- Tests in parent package X cannot import X/sub packages that import X back — - move tests to the sub-package they exercise -- Variable shadowing causes cascading failures after splits: `dir`, `file`, - `entry` are common Go variable names that collide with new sub-package names - — run `go test ./...` before committing splits -- When moving constants between packages, change imports and all references in a - single atomic write so the linter never sees an inconsistent state -- Import cycle rule: the package providing implementation logic must own the - shared types; the facade package aliases them (e.g. 
`entry.Params` aliases - `add/core.EntryParams`) - ---- - -## [2026-04-03-180000] Lint suppression and gosec patterns (consolidated) - -**Consolidated from**: 4 entries (2026-03-04 to 2026-03-19) - -- Rename constants to avoid gosec G101 false positives (Tokens->Usage, - Passed->OK) instead of adding nolint/nosec/path exclusions — exclusions - break on file reorganization -- `nolint:goconst` for trivial values normalizes magic strings — use config - constants instead of suppressing the linter -- `nolint:errcheck` in tests teaches agents to spread the pattern to production - code — use `t.Fatal(err)` for setup, `defer func() { _ = f.Close() }()` for - cleanup -- golangci-lint v2 ignores inline nolint directives for some linters — use - config-level `exclusions.rules` for gosec patterns, fix the code instead of - suppressing errcheck - ---- - -## [2026-04-03-180000] Skill lifecycle and promotion (consolidated) - -**Consolidated from**: 4 entries (2026-03-01 to 2026-03-14) - -- Internal skill renames and promotions require synchronized updates across 6+ - layers: SKILL.md frontmatter, internal cross-references, external docs, - embed_test.go expected list, recipe/reference docs, and plugin cache rebuild + - session restart -- Skill behavior changes ripple through hook messages, fallback strings in Go - code, doc descriptions, and Makefile hints — grep for the skill name across - the entire repo -- Skills without a trigger mechanism (no user invocation, no hook loading) are - dead code — audit skills for reachability -- After promoting skills: grep -r for the old name across the whole tree, run - plugin-reload.sh, restart session to verify autocomplete, and clean stale - Skill() entries from settings.local.json - ---- - -## [2026-04-03-180000] Cross-cutting change ripple (consolidated) - -**Consolidated from**: 4 entries (2026-02-19 to 2026-03-01) - -- Path changes (e.g. 
key file location) ripple across 15+ doc files and 2 skills - — grep broadly (not just code) and budget for 15+ file touches -- Removing embedded asset directories requires synchronized cleanup across 5+ - layers: embed directive, accessor functions, callers, tests, config constants, - build targets, documentation — work outward from the embed -- Absorbing shell scripts into Go commands creates a discoverability gap — - update contributing.md, common-workflows.md, and CLI index as part of the - absorption checklist -- A feature without docs is invisible to users: always check feature page, - cli-reference.md, relevant recipes, and zensical.toml nav after implementing a - new CLI subcommand - ---- - -## [2026-04-03-180000] Dead code detection (consolidated) - -**Consolidated from**: 3 entries (2026-03-15 to 2026-03-30) - -- Dead packages can build and test green while being completely unreachable — - detection requires checking bootstrap registration, not just build success - (e.g. internal/cli/recall/ existed with tests but was never wired into the - command tree) -- Files created by `ctx init` that no agent, hook, or skill ever reads are dead - on arrival — verify there is at least one consumer before adding to init - scaffolding -- When touching legacy compat code, first ask whether the legacy path has real - users — if not, delete it entirely rather than improving it (MigrateKeyFile - had 5 callers and test coverage but zero users) - ---- - -## [2026-04-03-133244] desc.Text() is the single highest-connectivity symbol in the codebase - -**Context**: GitNexus enrichment during architecture analysis revealed -desc.Text() (internal/assets/read/desc/desc.go:75) has 30+ direct callers -spanning every architectural layer (MCP handler, format, index, tidy, trace, -memory, sysinfo, io) and participates in 53 execution flows. - -**Lesson**: TestDescKeyYAMLLinkage is the most critical guard in the codebase -— it protects the symbol with the widest blast radius. 
If YAML text loading -breaks, the entire CLI and MCP server output blank strings silently (no crash, -no warning). - -**Application**: Treat desc.Text() as a frozen API — add new functions rather -than modifying the existing signature. Any change to config/embed/text or -assets/read/desc should be followed by running the linkage audit. Monitor this -symbol during major refactors. - ---- - -## [2026-04-01-233250] Raw I/O migration unlocks downstream checks for free - -**Context**: TestNoRawPermissions had zero violations because the raw I/O -migration moved all octal literals into internal/io/ which already used -config/fs constants - -**Lesson**: Chokepoint migrations have cascading benefits — centralizing one -concern (file I/O) automatically resolves other drift (raw permissions) - -**Application**: Prioritize chokepoint migrations (io, exec, write, err) before -smaller checks that depend on them - ---- - -## [2026-04-01-233248] go/packages respects build tags — darwin-only violations invisible on Linux - -**Context**: TestNoExecOutsideExecPkg could not detect violations in _darwin.go -files when running on Linux - -**Lesson**: AST checks using go/packages only see files matching the current -GOOS. 
Cross-platform violations need either multi-GOOS CI or a go/parser -fallback - -**Application**: When writing audit checks for code with build tags, fix the -violations regardless (code correctness) but note that test coverage is -platform-dependent - ---- - -## [2026-04-01-074419] Copilot CLI skills need a sync mechanism to prevent drift from ctx skills - -**Context**: 5 Copilot CLI skills were condensed versions of ctx skills, -independently maintained with no drift detection - -**Lesson**: Any time the same content exists in two locations without a sync -mechanism, it will drift silently - -**Application**: make sync-copilot-skills added to build deps, make -check-copilot-skills added to audit target - ---- - -## [2026-04-01-074418] Contributor PRs based on older code reintroduce removed features - -**Context**: PR #45 brought back prompt templates, PROMPT.md, and -IMPLEMENTATION_PLAN.md that were explicitly removed in March - -**Lesson**: When resolving contributor merge conflicts, check decisions history -for intentional removals — do not assume the PR content is additive - -**Application**: Cross-reference DECISIONS.md before accepting PR content that -adds files or features - ---- - -## [2026-03-31-224247] Magic string cleanup compounds: each pass reveals the next layer - -**Context**: What started as fix 4 fmt.Fprintf(os.Stderr) calls expanded to -over-tokenized format strings, magic hex perms, unstandardized TOML parsing -tokens, missing docstrings on new constants — each fix exposed adjacent -violations - -**Lesson**: Mechanical cleanup is fractal. The first sweep finds the obvious -violations, but fixing them puts adjacent code under scrutiny. Budget for 2-3x -the initial estimate - -**Application**: When scoping cleanup tasks, do not commit to done in one pass. 
-Commit after each layer and let the user decide when to stop - ---- - -## [2026-03-31-182054] Force-loaded behavioral prose gets ignored — action-gating hooks don't - -**Context**: AGENT_PLAYBOOK was force-injected at ~14k tokens every session. -Agent routinely skipped its Context Readback directive when the user's first -message was a concrete task. Meanwhile, hooks that gate actions (qa-reminder, -specs-nudge, block-dangerous-commands) were consistently followed because they -fire at the moment of violation. - -**Lesson**: Prose instructions compete with the user's immediate request and -lose. Hooks that intercept actions at execution time are enforceable. More -injected content means less attention per token — slim injection to only what -must be internalized before any action. - -**Application**: When adding agent directives, prefer action-gating hooks over -injected prose. If it must be injected, keep it small and directive-only. -Reserve force-injection for hard rules (CONSTITUTION) and distilled actionable -checklists (gate file). - ---- - -## [2026-03-31-112534] Legacy key directory cleanup was specified but not automated - -**Context**: ~/.local/ctx/keys/ accumulated 584 orphan keys from test runs -before the v0.8.0 migration to ~/.ctx/.ctx.key - -**Lesson**: Migration specs that call for manual cleanup of old paths should -include an automated step — either in the migration code itself or as a -post-release cleanup task. Tests that write to global paths must isolate HOME. - -**Application**: When writing migration specs, always include automated cleanup -of the old path. When writing tests that touch user-level directories, verify -HOME is isolated via t.Setenv. - ---- - -## [2026-03-31-005112] Convention audits must check cmd/ purity, not just types and docstrings - -**Context**: Placed needsSpec helper in cmd/root/run.go instead of -core/entry/predicate.go. 
Missed it because the audit checklist only covered -types and docstrings - -**Lesson**: cmd/ directories must contain only Cmd() and Run*() — all helper -functions, unexported logic, and types belong in core/. Added TestCmdDirPurity -compliance test to enforce this mechanically - -**Application**: The compliance test now catches this automatically. 28 -pre-existing violations grandfathered in the allowlist - ---- - -## [2026-03-31-005110] JSON Schema default fields cause linter errors with some validators - -**Context**: ctxrc.schema.json had default: values on 16 fields that triggered -incompatible type errors in the user's linter - -**Lesson**: Move default values into the description string instead of using the -default keyword — Go rc.*() accessors handle the actual defaults - -**Application**: When adding new .ctxrc fields, document defaults in the -description, never use default: in the schema - ---- - -## [2026-03-30-075941] Architecture diagrams drift silently during feature additions - -**Context**: During the journal-recall merge, architecture-dia-build.md listed -23 CLI packages but 31 existed. 8 packages added over months without updating -the diagram. - -**Lesson**: Exhaustive lists and counts in architecture docs go stale every time -a package is added. The drift is invisible because nobody re-counts. - -**Application**: After adding a new CLI package, grep architecture diagrams for -package counts and directory listings. Consider adding a drift-check comment -that validates the count programmatically. 
- ---- - -## [2026-03-30-003734] Python-generated doc.go files need gofmt — formatter strips bare // padding lines - -**Context**: Batch-generated doc.go files used blank // lines for padding, which -gofmt removes as unnecessary whitespace - -**Lesson**: Programmatic Go file generation must produce substantive content -lines, not blank comment padding — gofmt enforces this - -**Application**: Always run gofmt after any scripted Go file generation - ---- - -## [2026-03-30-003707] lint-docstrings.sh greedy sed hid all return-type violations - -**Context**: sed 's/.*) //' consumed return type parens, leaving { — functions -with return types were invisible to the script for months - -**Lesson**: Greedy regex in shell scripts can silently suppress entire -categories of lint violations — test with edge cases, not just happy paths - -**Application**: When writing sed-based lint checks, test with multi-paren -signatures (func Foo() (string, error)) - ---- - -## [2026-03-25-234039] Machine-generated CLAUDE.md content consumes per-turn budget without proportional value - -**Context**: GitNexus injected 121 lines (61% of CLAUDE.md) with auto-generated -skill pointers like 'Work in the Watch area (39 symbols)' — generic index data -loaded on every conversation turn - -**Lesson**: CLAUDE.md is prime real estate — every token competes with -project-specific instructions. Auto-generated content belongs in on-demand -skills, not in always-loaded files - -**Application**: Audit CLAUDE.md periodically for content that could be -delivered via skills instead. 
Prefer a one-line pointer over inline content for -companion tools - ---- - -## [2026-03-25-173338] Template improvements don't propagate to existing projects - -**Context**: 5 of 8 context files in the ctx project itself had stale/missing -comment headers — templates evolved but non-destructive init never re-synced -them - -**Lesson**: Any template change is invisible to existing users until they run -ctx init --force - -**Application**: Added drift detection (checkTemplateHeaders) to ctx drift. -Consider surfacing this during ctx status too. - ---- - -## [2026-03-24-001001] lint-drift false positives from conflating constant namespaces - -**Context**: lint-drift.sh checked all string constants in embed/cmd/*.go -against commands.yaml, but Use* constants are cobra syntax strings, not YAML -lookup keys - -**Lesson**: Shell grep on constant values cannot distinguish constant types; -only DescKey* constants are YAML keys. AST-based analysis is needed for -type-aware checks - -**Application**: Already captured in specs/ast-audit-tests.md; the lint-drift -fix is shipped in v0.8.0 - ---- - -## [2026-03-24-000959] git describe --tags follows ancestry, not global tag list - -**Context**: Release notes skill diffed against v0.3.0 instead of v0.6.0 because -the release branch diverged before v0.6.0 was tagged - -**Lesson**: git describe --tags --abbrev=0 follows reachability from HEAD; use -git tag --sort=-v:refname | head -1 for the latest tag globally - -**Application**: Any script or skill that needs the latest release should use -sorted tag list, not describe - ---- - -## [2026-03-23-165611] Typography detection script needs exclusion lists for intentional uses - -**Context**: detect-ai-typography.sh flagged config/token/delim.go (intentional -delimiter constants) and test files (test data containing em-dashes) - -**Lesson**: Detection scripts for convention enforcement need exclusion patterns -for files where the flagged patterns are intentional data, not prose - 
-**Application**: Add exclusion patterns proactively when creating detection -scripts; *_test.go and constant-definition files are common false positive -sources - ---- - -## [2026-03-23-003544] Splitting core/ into subpackages reveals hidden structure - -**Context**: init core/ was a flat bag of domain objects — splitting into -backup/, claude/, entry/, merge/, plan/, plugin/, project/, prompt/, tpl/, -validate/ exposed duplicated logic, misplaced types, and function-pointer -smuggling that were invisible in the flat layout - -**Lesson**: Flat core/ packages hide coupling — circular dependency resolution -during splits naturally groups related items, increases cohesion, and surfaces -objects that don't belong - -**Application**: When a core/ package grows, split it into subpackages even if -it creates temporary circular deps — resolving those deps is the design work -that reveals the right structure - ---- - -## [2026-03-23-003353] Higher-order callbacks in param structs are a code smell - -**Context**: MergeParams.UpdateFn and DeployParams.ListErr/ReadErr were function -pointers where all callers passed thin wrappers varying only by a text key - -**Lesson**: If all callers pass thin wrappers around the same pattern -(fmt.Errorf with different keys), the callback is just data in disguise - -**Application**: When a struct field is a function pointer, check if all callers -vary only by a string key — if so, replace the callback with the key and let -the consumer do the dispatch - ---- - -## [2026-03-20-160112] Commit messages containing script paths trigger PreToolUse hooks - -**Context**: Git commit message body contained a path to a shell script under -the hack directory which matched a hook pattern that blocks direct script -invocation - -**Lesson**: Hooks scan all Bash tool input including heredoc content used for -commit messages, not just the command itself - -**Application**: Rephrase commit messages and ctx add content to avoid paths -that match hook deny 
patterns, use generic references instead of literal file -paths - ---- - -## [2026-03-18-133457] Lazy sync.Once per-accessor is a code smell for static embedded data - -**Context**: assets package had 4 sync.Once guards, 4 exported maps, 4 Load*() -functions, and a wrapper desc package — all to lazily load YAML from embed.FS -that never mutates. Every accessor call went through sync.Once + global map + -wrapper indirection. - -**Lesson**: When data is static and loaded from embedded bytes, scatter-loading -with per-accessor sync.Once is over-engineering. A single Init() called eagerly -at startup is simpler, and one sync.Once on Init() itself provides the test -safety net. Exported maps that exist only for wrapper packages to reach are a -sign the abstraction boundary is wrong. - -**Application**: Prefer eager Init() in main.go for static embedded data. Keep -maps unexported. Accessors do plain map lookups. If a wrapper package exists -solely to break a cycle caused by exported state, delete the wrapper and -unexport the state. - ---- - -## [2026-03-17-105637] Write package output census: 69 trivial/simple, 38 consolidation candidates, 18 complex - -**Context**: Full audit of internal/write/ (26 files, 160 functions, 337 Println -calls) to evaluate whether block template consolidation is worth a systematic -refactor. - -**Lesson**: Only 30% of write functions benefit from output consolidation. The -sweet spot is multi-line (16) and conditional (22) functions. - -**Application**: Check function category before consolidating. Trivial/simple -stay as-is. Conditional functions need pre-computation before block templates. -Loop-based complex functions stay imperative. Don't bulk-refactor. 
- ---- - -## [2026-03-16-114227] Docstring tasks require reading CONVENTIONS.md Documentation section first - -**Context**: Agent was asked to review docstrings in server.go but skipped -convention loading, missed incomplete Parameter/Returns sections, and needed -three hints to recall the known issue - -**Lesson**: Any task involving docstrings, comments, or documentation formatting -is a convention-sensitive task — read CONVENTIONS.md (Documentation section) -and LEARNINGS.md (for known gaps) before reviewing or writing - -**Application**: On any docstring/comment task: (1) load CONVENTIONS.md -Documentation section, (2) check LEARNINGS.md for related entries, (3) audit all -functions in scope against the convention template, not just the ones in the -diff - ---- - -## [2026-03-16-104146] Convention enforcement needs mechanical verification, not behavioral repetition - -**Context**: Godoc Parameters/Returns sections were missed repeatedly across -sessions despite memory entries and feedback - -**Lesson**: System-level brevity instructions outcompete context-injected -conventions. Memory shifts probability (~40% to ~70%) but doesn't create -invariants. The competing pressures are architectural, not a recall problem. - -**Application**: Invest in linter rules or PreToolUse gates for -mechanically-checkable conventions. Reserve behavioral nudges for judgment calls -that can't be linted. See ideas/spec-convention-enforcement.md for the -three-tier strategy. - ---- - -## [2026-03-16-022650] One-liner method wrappers hide dependencies without adding value - -**Context**: checkBoundary() and loadContext() were methods on Handler that just -called validation.ValidateBoundary and context.Load with h.ContextDir - -**Lesson**: If a method only passes a struct field to a stdlib function, inline -it — the wrapper obscures the real dependency - -**Application**: Before extracting a helper method, check if it just forwards a -field to another function. 
If so, call the function directly. - ---- - -## [2026-03-16-022642] Agents reliably introduce gofmt issues during bulk renames - -**Context**: Subagents renamed consequences->consequence across 75+ files but -left formatting errors in 12 Go files - -**Lesson**: Always run gofmt -l after agent-driven refactors before trusting the -build - -**Application**: Add gofmt -w pass as a standard step after any agent-driven -bulk edit - ---- - -## [2026-03-15-101342] Contributor PRs need post-merge follow-up commits for convention alignment - -**Context**: PR #42 (MCP v0.2) addressed bulk of review feedback but left ~12 -inline strings, no embed_test coverage, and substring matching in -containsOverlap - -**Lesson**: Merging with known gaps is fine when the gaps are mechanical, but -the follow-up must be immediate — track in ideas/done/ with a review status -doc - -**Application**: For future contributor PRs: create ideas/pr{N}-review-status.md -during review, merge when architecture is sound, fix convention gaps in a -same-day follow-up commit - ---- - -## [2026-03-15-040642] Grep for callers must cover entire working tree before deleting functions - -**Context**: Deleted 7 err/prompt functions as dead code, but callers existed in -unstaged refactoring files — caused build failures - -**Lesson**: When the working tree has unstaged changes from a prior session, -grep hits only committed+staged code; must grep the full tree or build-test -before declaring functions dead - -**Application**: Always run make build after deleting functions, even if grep -shows zero callers - ---- - -## [2026-03-14-180903] Stderr error messages are user-facing text that belongs in assets - -**Context**: Added fmt.Fprintf(os.Stderr) error reporting to event log, -initially with inline strings - -**Lesson**: Any string that reaches the user, including stderr warnings, routes -through assets.TextDesc() for i18n readiness - -**Application**: When adding stderr output, create text.yaml entries and 
asset -keys first - ---- - -## [2026-03-14-131202] Hardcoded _alt suffixes create implicit language favoritism - -**Context**: Session parser had session_prefix_alt hardcoding Turkish as a -special case alongside English default - -**Lesson**: Naming a constant _alt and hardcoding one non-English language as a -built-in default discriminates by giving that language special status. The -pattern doesn't scale (alt_2? alt_3?) and signals that adding languages requires -code changes. - -**Application**: When a feature needs multi-value support, use configurable -lists from the start — not hardcoded pairs with _alt suffixes. Default to a -single canonical value; all extensions are user-configured equally. - ---- - -## [2026-03-13-151952] sync-why mechanism existed but was not wired to build - -**Context**: assets/why/ had drifted from docs/ — the sync targets existed in -the Makefile but build did not depend on sync-why - -**Lesson**: Freshness checks that are not in the critical path will be -forgotten. Wire them as build prerequisites, not optional audit steps - -**Application**: Any derived or copied asset should be a prerequisite of build, -not just audit - ---- - -## [2026-03-12-133008] Project-root files vs context files are distinct categories - -**Context**: Tried moving ImplementationPlan constant to config/ctx assuming it -was a context file. (Note: IMPLEMENTATION_PLAN.md was removed in 2026-03-25 as a -dead file — no agent consumer.) - -**Lesson**: Files created by ctx init in the project root (Makefile) are -scaffolding, not context files loaded via ReadOrder. 
They belong in config/file, -not config/ctx - -**Application**: Before moving a file constant, check whether it is in ReadOrder -(context) or created by init (project-root) - ---- - -## [2026-03-12-133007] Constants belong in their domain package not in god objects - -**Context**: file.go held agent scoring constants, budget percentages, cooldown -durations — none related to file config - -**Lesson**: When a constant is only used by one domain (e.g. agent scoring), it -should live in that domain's config package - -**Application**: Check callers before placing constants; if all callers are in -one domain, the constant belongs there - ---- - -## [2026-03-07-221151] Always search for existing constants before adding new ones - -**Context**: Added ExtJsonl constant to config/file.go but ExtJSONL already -existed with the same value, causing a duplicate - -**Lesson**: Grep for the value (e.g. '.jsonl') across config/ before creating a -new constant — naming variations (camelCase vs ALLCAPS) make duplicates easy -to miss - -**Application**: Before adding any new constant to internal/config, search by -value not just by name - ---- - -## [2026-03-07-221148] SafeReadFile requires split base+filename paths - -**Context**: During system/core cleanup, persistence.go passed a full path to -validation.SafeReadFile which expects (baseDir, filename) separately - -**Lesson**: Use filepath.Dir(path) and filepath.Base(path) to split full paths -when adapting os.ReadFile calls to SafeReadFile - -**Application**: When converting os.ReadFile to SafeReadFile, always check -whether the existing code has a full path or separate components - ---- - -## [2026-03-06-141506] Stale directory inodes cause invisible files over SSH - -**Context**: Files created by Claude Code hooks were visible inside the VM but -not from the SSH terminal - -**Lesson**: If a directory is recreated (e.g. 
by auto-prune), an SSH shell -holding the old directory inode will not see new files — ls returns no such -file even though cat with the full path works from other shells - -**Application**: After ctx system prune or any state directory recreation, SSH -sessions need cd-dot or re-login to pick up the new inode - ---- - -## [2026-03-06-141504] Stats sort uses string comparison on RFC3339 timestamps with mixed timezones - -**Context**: ctx system stats showed only old sessions, hiding the current one - -**Lesson**: RFC3339 string comparison breaks when entries mix UTC (Z) and offset -(-08:00) formats — 13:00-08:00 sorts before 18:00Z lexicographically despite -being later in absolute time - -**Application**: Always parse to time.Time before comparing RFC3339 timestamps; -never rely on lexicographic sort - ---- - -## [2026-03-06-184820] Claude Code supports PreCompact and SessionStart hooks that ctx does not use - -**Context**: context-mode proves both hooks work in production across 5 -platforms - -**Lesson**: ctx's hook architecture only uses UserPromptSubmit, PreToolUse, and -PostToolUse — two lifecycle events are untapped - -**Application**: PreCompact snapshot plus SessionStart re-injection would -eliminate post-compaction disorientation without any new persistence layer since -ctx agent already generates the content - ---- - -## [2026-03-06-050125] Package-local err.go files invite broken windows from future agents - -**Context**: Found err.go files in 5 CLI packages with heavily duplicated error -constructors (errFileWrite, errMkdir, errZensicalNotFound repeated across -packages) - -**Lesson**: Centralizing errors in internal/err eliminates duplication and -prevents agents from continuing the pattern of adding local err.go files when -they see one exists - -**Application**: New error constructors go to internal/err/errors.go. No err.go -files in CLI packages. 
- ---- - -## [2026-03-05-205422] State directory accumulates silently without auto-prune - -**Context**: Found 234 files in .context/state/ from weeks of sessions with no -cleanup mechanism - -**Lesson**: Session tombstones are write-only. Without auto-prune, the state -directory grows unbounded. Added autoPrune(7) to context-load-gate so cleanup -happens once per session at startup. - -**Application**: Auto-prune is now wired into session start via -context-load-gate. Manual prune still available via ctx system prune for -aggressive cleanup. - ---- - -## [2026-03-05-205419] Global tombstones suppress hooks across all sessions - -**Context**: Memory drift nudge used memory-drift-nudged with no session ID in -filename - -**Lesson**: Any tombstone file intended to be session-scoped must include the -session ID in its filename, otherwise it suppresses across all concurrent and -future sessions. Use the UUID pattern so prune can clean them up. - -**Application**: Audit all tombstone files for session-scoping; fixed -memory-drift, but backup-reminded, ceremony-reminded, check-knowledge, -journal-reminded, version-checked, ctx-wrapped-up still have this bug - ---- - -## [2026-03-05-042157] Claude Code has two separate memory systems behind feature flags - -**Context**: Filesystem and behavioral analysis of Claude Code v2.1.69 - -**Lesson**: Claude Code has two separate memory systems behind feature flags. -Auto memory writes MEMORY.md to disk (user-visible, toggleable via settings). -Session memory is a separate background extraction pipeline with compaction and -team sync (push/pull model). The two systems serve different purposes and are -independently feature-flagged. - -**Application**: ctx memory bridge targets auto memory (MEMORY.md on disk). -Session memory is API-side and not directly accessible. Full findings in -ideas/claude-code-project-directory-structure.md. 
- ---- - -## [2026-03-05-023941] Blog post editorial feedback is higher-leverage than drafting - -**Context**: Draft of Agent Memory Is Infrastructure was publication-quality on -first pass; user editorial feedback (structural emphasis, rhetorical sharpening, -amnesia/archaeology bridge) elevated it significantly more than initial -generation - -**Lesson**: For narrative content, the first draft captures the argument; the -editorial pass captures the voice. Both are necessary but the editorial pass has -disproportionate impact on quality. - -**Application**: For future blog posts, invest more in the editorial cycle -(structural feedback then targeted refinements) rather than trying to nail voice -on first generation. - ---- - -## [2026-03-04-105239] CONSTITUTION hook compliance is non-negotiable — don't work around it - -**Context**: After make build, ran ./ctx deps --help which was blocked by -block-non-path-ctx. Instead of asking user to install, tried cp ctx ~/bin/ — -escalating workarounds. - -**Lesson**: When a hook blocks an action, the correct response is to follow the -hook's instruction (ask the user to sudo make install), not to find creative -bypasses. - -**Application**: Always ask the user to install when testing a freshly built -binary. Never attempt alternative install paths to circumvent a hook. 
- ---- - -## [2026-03-02-165039] Hook message registry test enforces exhaustive coverage of embedded templates - -**Context**: Adding billing.txt to embedded assets without a registry entry -caused TestRegistryCoversAllEmbeddedFiles to fail immediately - -**Lesson**: Every new .txt file under internal/assets/hooks/messages/ must have -a corresponding entry in registry.go — the test acts as an exhaustive -bidirectional check - -**Application**: When adding new hook message variants, update the registry -entry before running tests - ---- - -## [2026-03-02-123613] Existing Projects is ambiguous framing for migration notes - -**Context**: A doc admonition said Existing Projects: if you have an older key -at X, it auto-migrates. Every project is existing once installed — the framing -does not tell you how far behind you need to be. - -**Lesson**: Version-anchored framing (Key Folder Change v0.7.0+) is clearer than -relative framing (Existing Projects, Legacy). State the version boundary and the -concrete action. - -**Application**: When writing migration notes, anchor to a version number and -give copy-pasteable commands, not vague auto-handled assurances. - ---- - -## [2026-03-02-005217] Claude Code JSONL model ID does not distinguish 200k from 1M context - -**Context**: Heartbeat hook was reporting 16% usage at 162k tokens because it -assumed claude-opus-4-6 always has 1M context window - -**Lesson**: The JSONL model field is identical for both variants (both report -claude-opus-4-6). The 1M context requires a beta header, not a different model -ID. The user's model selection is stored in ~/.claude/settings.json with a [1m] -suffix when 1M is active. - -**Application**: Auto-detect context window from ~/.claude/settings.json model -field containing [1m]. Default to 200k for all Claude models. The .ctxrc -context_window setting is a no-op for Claude Code users. 
- ---- - -## [2026-03-01-222739] Gosec G306 flags test file WriteFile with 0644 permissions - -**Context**: New tests used os.WriteFile(..., 0o644) for temp context files; -lint flagged all three occurrences - -**Lesson**: Gosec enforces 0600 max on WriteFile even in test code. Use 0o600 -for test temp files - -**Application**: Default to 0o600 for os.WriteFile in tests; only use wider -permissions when testing permission behavior specifically - ---- - -## [2026-03-01-222738] Converting PersistentPreRun to PersistentPreRunE changes exit behavior - -**Context**: Boundary violation test used subprocess pattern because original -code called os.Exit(1) - -**Lesson**: With PersistentPreRunE, errors propagate through Cobra Execute() -return — no os.Exit call. Subprocess-based tests that expected exit codes need -converting to direct error assertions - -**Application**: When converting PreRun to PreRunE in Cobra commands, audit all -tests that relied on os.Exit behavior - ---- - -## [2026-03-01-161459] Test HOME isolation is required for user-level path functions - -**Context**: After adding ~/.ctx/.ctx.key as global key location, test suites -wrote real files to the developer home directory - -**Lesson**: Any code that uses os.UserHomeDir() needs t.Setenv(HOME, tmpDir) in -tests — especially test helpers called by many tests (like setupEncrypted and -helper) - -**Application**: When adding features that write to user-level paths (~/.ctx/, -~/.config/), always add HOME isolation to test setup functions first - ---- - -## [2026-03-01-133014] Task descriptions can be stale in reverse — implementation done but task not marked complete - -**Context**: ctx recall sync task said 'command is not registered in Cobra' but -the code was fully wired and all tests passed. The task description was stale. - -**Lesson**: Tasks can become stale in the opposite direction from docs: -implementation gets completed but the task is not updated. 
Always verify with -ctx --help before assuming work remains. - -**Application**: Before starting implementation on a 'code exists but not wired' -task, run the command first to check if it already works. - ---- - -## [2026-03-01-124921] Model-to-window mapping requires ordered prefix matching - -**Context**: Implementing modelContextWindow() for the three-tier context window -fallback. Claude model IDs use nested prefixes (claude-sonnet-4-5 vs -claude-sonnet-4-20250514). - -**Lesson**: A switch with ordered HasPrefix cases (most specific first) is -cleaner and safer than iterating separate prefix lists. The catch-all 'claude-*' -returns 200k for unrecognized Claude models. - -**Application**: When adding new model families to modelContextWindow() in -session_tokens.go, add the most specific prefix first to avoid shadowing shorter -prefixes. - ---- - -## [2026-03-01-095709] TASKS.md template checkbox syntax inside HTML comments is parsed by RegExTaskMultiline - -**Context**: Template had example checkboxes (- [x], - [ ]) in HTML comments -that the line-based regex matched as real tasks, causing -TestArchiveCommand_NoCompletedTasks to fail - -**Lesson**: RegExTaskMultiline is line-based and has no awareness of HTML -comment blocks — checkbox-like patterns inside comments get counted as real -tasks - -**Application**: Use backtick-quoted or indented references instead of actual -checkbox syntax in template comments. When adding examples to TASKS.md -templates, avoid patterns that match regExTaskPattern - ---- - -## [2026-03-01-092611] Hook logs had no rotation; event log already did - -**Context**: Investigated .context/logs/ and .context/state/ file management - -**Lesson**: eventlog already rotates at 1MB with one previous generation. -logMessage() in state.go was pure append-only with no size check. 
- -**Application**: When adding new log sinks, follow the established rotation -pattern (size-based, single previous generation) - ---- - -## [2026-02-28-184758] ctx pad import, ctx pad export, and ctx system resources make three hack scripts redundant - -**Context**: Audited hack/ scripts against ctx CLI surface - -**Lesson**: As ctx CLI grew, several hack scripts became wrappers around -built-in commands (pad-import.sh -> ctx pad import, pad-export-blobs.sh -> ctx -pad export, resource-watch.sh -> watch -n5 ctx system resources) - -**Application**: Periodically audit hack/ for scripts that ctx has absorbed - ---- - -## [2026-02-28-184647] Getting-started docs assumed Claude Code as the only agent - -**Context**: The installation section opened with 'A full ctx installation has -two parts' — binary + Claude Code plugin — leaving non-Claude-Code users -without a clear path - -**Lesson**: Installation docs should lead with the universal requirement (the -binary) and present agent-specific integration as conditional - -**Application**: When writing docs for multi-tool projects, frame the common -denominator first, then branch by tool - ---- - -## [2026-02-28-150701] Plugin reload script must rebuild cache, not just delete it - -**Context**: hack/plugin-reload.sh was deleting -~/.claude/plugins/cache/activememory-ctx/ without repopulating it. Claude Code's -installed_plugins.json still referenced the cache path, so the plugin appeared -enabled but hooks.json was missing — all plugin hooks silently stopped firing. - -**Lesson**: Claude Code snapshots plugin hooks from the cache directory at -session startup. If the cache is deleted, plugin hooks vanish silently with no -error. The reload script must rebuild the cache from source assets -(internal/assets/claude/) after clearing it, and warn that a session restart is -required. - -**Application**: Always rebuild the plugin cache in hack/plugin-reload.sh. 
When -debugging hooks that don't fire, check ~/.claude/plugins/cache/ first — a -missing hooks.json is the most likely cause. - ---- - -## [2026-02-27-231228] site/ directory must be committed with docs changes - -**Context**: The site/ directory contains generated HTML served directly from -the repo (no CI build step). Multiple sessions have committed docs/ changes -without the corresponding site/ output, or ignored site/ as 'generated noise'. - -**Lesson**: site/ is intentionally tracked in git — there is no GitHub Pages -workflow or CI step to build it. When docs change, the regenerated site/ HTML -must be staged and committed alongside the source. - -**Application**: Always git add site/ when committing changes under docs/. Never -gitignore site/. - ---- - -## [2026-02-27-230741] Doctor token_budget vs context_window confusion - -**Context**: ctx doctor reported context size against token_budget (8k) instead -of context_window (200k), making 22k tokens look alarming. - -**Lesson**: token_budget (ctx agent output trim target) and context_window -(model capacity) serve different purposes. Health checks about context fitting -should use context_window, with warning threshold proportional (e.g., 20% of -window). - -**Application**: Doctor now uses rc.ContextWindow() with 20% threshold and shows -per-file token breakdown for actionable insight into which files are heavy. - ---- - -## [2026-02-27-230738] Drift detector false positives on illustrative code examples - -**Context**: ctx drift flagged 23 warnings for backtick-quoted paths in -CONVENTIONS.md and ARCHITECTURE.md that were prose examples (loader.go, -session/run.go, sync.Once), not real file references. - -**Lesson**: Path reference detection should verify the top-level directory -exists on disk before flagging. Bare filenames and paths under non-existent -directories are almost always examples in documentation. - -**Application**: The fix checks os.Stat(topDir) on the first path component. 
-Future drift checks on documentation-heavy files should use the same heuristic. - ---- - -## [2026-02-27-002830] Context injection and compliance strategy (consolidated) - -**Consolidated from**: 3 entries (2026-02-26) - -- Verbal summaries with linked diagram files cut ARCHITECTURE.md from ~12K to - ~3.8K tokens. Extract diagrams to linked files outside FileReadOrder; keep - prose summaries inline. The 4-chars-per-token estimator is accurate — - optimize content, not the estimator. -- Soft instructions have a ~75-85% compliance ceiling because "don't apply - judgment" is itself evaluated by judgment. When 100% compliance is required, - don't instruct — inject via `additionalContext`. Reserve soft instructions - for ~80% acceptable compliance. -- Once ~7K tokens are auto-injected (fait accompli), the agent's rationalization - inverts from "skip to save effort" to "marginal cost is trivial." Front-load - highest-value content as injection, then use sunk cost to motivate on-demand - reads for the remainder. - ---- - -## [2026-02-26-003854] Webhook silence after ctxrc profile swap is the most common notify debugging red herring - -**Context**: Spent time investigating why webhooks weren't firing — checked -binary version, hook configs, notify.Send internals. Actual cause was .ctxrc -swapped to prod profile (notify commented out) earlier in session. - -**Lesson**: When webhooks stop, check .ctxrc profile first (`ctx config -status`). Also: not all tool uses trigger webhook-sending hooks — Read only -triggers context-load-gate (one-shot) and ctx agent (no webhook). qa-reminder -requires Edit matcher. - -**Application**: Before debugging notify internals, run `ctx config status` and -verify the event would actually match a hook with notify.Send. 
- ---- - -## [2026-02-26-100000] Documentation drift and auditing (consolidated) - -**Consolidated from**: 6 entries (2026-01-29 to 2026-02-24) - -- CLI reference docs can outpace implementation: ctx remind had no CLI, ctx - recall sync had no Cobra wiring, key file naming diverged between docs and - code. Always verify with `ctx --help` before releasing docs. -- Structural doc sections (project layouts, command tables, skill counts) drift - silently. Add `` markers above any - section that mirrors codebase structure. -- Agent sweeps for style violations are unreliable (8 found vs 48+ actual). - Always follow agent results with targeted grep and manual classification. -- ARCHITECTURE.md missed 4 core packages and 4 CLI commands. The /ctx-drift - skill catches stale paths but not missing entries — run /ctx-architecture - after adding new packages or commands. -- Documentation audits must compare against known-good examples and - pattern-match for the COMPLETE standard, not just presence of any comment. -- Dead link checking belongs in /consolidate's check list (check 12), not as a - standalone concern. When a new audit concern emerges, check if it fits an - existing audit skill first. - ---- - -## [2026-02-26-100002] Agent context loading and task routing (consolidated) - -**Consolidated from**: 5 entries (2026-01-20 to 2026-01-25) - -- `ctx agent` is optimized for task execution (filters pending tasks, surfaces - constitution, token-budget aware). Manual file reading is better for - exploratory/memory questions (session history, timestamps, completed tasks). -- On "Do you remember?" questions, immediately read .context/ files and run `ctx - journal source --limit 5`. Never ask "would you like me to check?" — that is - the obvious intent. -- .context/ is NOT a Claude Code primitive. Only CLAUDE.md and - .claude/settings.json are auto-loaded. The .context/ directory requires a hook - or explicit CLAUDE.md instruction to be discovered. 
-- ~~Orchestrator (IMPLEMENTATION_PLAN.md) and agent (.context/TASKS.md) task - lists must be separate.~~ (Superseded 2026-03-25: IMPLEMENTATION_PLAN.md - removed. TASKS.md is the single task source.) -- Only CLAUDE.md is auto-loaded by Claude Code. Projects using ctx should rely - on the CLAUDE.md -> AGENT_PLAYBOOK.md chain, not AGENTS.md. - ---- - -## [2026-02-26-100005] Go testing patterns (consolidated) - -**Consolidated from**: 7 entries (2026-01-19 to 2026-02-26) - -- Compiler-driven refactoring misses test files: `go build ./...` catches - production callsite breaks but not test files. Always run `go test ./...` - after signature changes. -- All runCmd() returns must be consumed in tests: even setup calls need `_, _ = - runCmd(...)` to satisfy errcheck. -- Set `color.NoColor = true` in a package-level init function to disable ANSI - codes for CLI test string assertions. -- Recall CLI tests isolate via HOME env var: `t.Setenv("HOME", tmpDir)` with - `.claude/projects/` structure gives full isolation from real session data. -- `formatDuration` accepts an interface with a Minutes method, not time.Duration - directly. Use a stubDuration struct for testing. -- CI tests need `CTX_SKIP_PATH_CHECK=1` env var because init checks if ctx is in - PATH. -- CGO must be disabled for ARM64 Linux (`CGO_ENABLED=0`) — CGO causes - cross-compilation issues with `-m64` flag. - ---- - -## [2026-02-26-100006] PATH and binary handling (consolidated) - -**Consolidated from**: 3 entries (2026-01-21 to 2026-02-17) - -- Always use `ctx` from PATH, never `./dist/ctx-linux-arm64` or `go run - ./cmd/ctx`. Check `which ctx` if unsure. -- Hooks must use PATH, not hardcoded paths. `ctx init` checks if ctx is in PATH - before proceeding. Tests can skip with `CTX_SKIP_PATH_CHECK=1`. -- Agent must never place binaries in any bin directory (not via cp, mv, or go - install). Build with `make build`, then ask the user to run the privileged - install step. 
Hooks in block-dangerous-commands.sh enforce this. - ---- - -## [2026-02-26-100007] Task management and exit criteria (consolidated) - -**Consolidated from**: 4 entries (2026-01-21 to 2026-02-17) - -- Specs get lost without cross-references from TASKS.md. Three-layer defense: - (1) playbook instruction, (2) spec reference in Phase header, (3) bold - breadcrumb in first task. -- Subtask completion is implementation progress, not delivery. Parent tasks - should have explicit deliverables; don't close until deliverable is verified. -- Exit criteria must include verification: integration tests (binary executes - correctly), coverage targets, and smoke tests. "All tasks checked off" does - not equal "implementation works." -- Reports graduate to ideas/done/ only after all items are tracked or resolved. - Cross-reference every item against TASKS.md and the codebase before moving. - ---- - -## [2026-02-26-100008] Agent behavioral patterns (consolidated) - -**Consolidated from**: 5 entries (2026-01-25 to 2026-02-22) - -- Interaction pattern capture risks softening agent rigor. Do not build implicit - user-modeling from session history. Rely on explicit, human-reviewed context - (learnings, conventions, hooks) for behavioral shaping. -- Chain-of-thought prompting improves agent reasoning accuracy (17.7% to 78.7%). - Added "Reason Before Acting" to AGENT_PLAYBOOK.md and reasoning nudges to 7 - skills. -- Say "project conventions" not "idiomatic X" to ensure Claude looks at project - files first rather than triggering training priors (stdlib conventions). -- Autonomous "YOLO mode" is effective for feature velocity but accumulates - technical debt (magic strings, monolithic tests, hardcoded paths). Schedule - periodic consolidation sessions. -- Trust the binary output over source code analysis. A single ambiguous CLI - output is not proof of absence — re-run the exact command before claiming - something is missing. 
- ---- - -## [2026-02-26-100009] Hook compliance and output routing (consolidated) - -**Consolidated from**: 3 entries (2026-02-22 to 2026-02-25) - -- Plain-text hook output is silently ignored by the agent. Claude Code parses - hook stdout starting with `{` as JSON directives; plain text is disposable. - All hooks should return JSON via `printHookContext()`. -- Hook compliance degrades on narrow mid-session tasks (~15-25% partial skip - rate). Root cause: CLAUDE.md's "may or may not be relevant" system reminder - competes with hook authority. Fix: CLAUDE.md explicitly elevates hook - authority. The mandatory checkpoint relay block is the compliance canary. -- No reliable agent-side before-session-end event exists. SessionEnd fires after - the agent is gone. Mid-session nudges and explicit /ctx-wrap-up are the only - reliable persistence mechanisms. - ---- - -## [2026-02-26-100010] ctx add and decision recording (consolidated) - -**Consolidated from**: 4 entries (2026-01-27 to 2026-02-14) - -- `ctx add learning` requires `--context`, `--lesson`, `--application` flags. - `ctx add decision` requires `--context`, `--rationale`, `--consequence`. A - bare string only sets the title and the command will fail without required - flags. -- Structured entries with Context/Lesson/Application are more useful than - one-liners. Agents are guided via AGENT_PLAYBOOK.md. -- Always complete decision record sections — placeholder text like "[Add - context here]" is a code smell. Decisions without rationale lose their value - over time. -- Slash commands using `!` bash syntax require matching permissions in - settings.local.json. When adding new /ctx-* commands, ensure ctx init - pre-seeds the required `Bash(ctx :*)` permissions. - ---- - -## [2026-02-24-032945] CLI tools don't benefit from in-memory caching of context files - -**Context**: Discussed whether ctx should read and cache LEARNINGS.md, -DECISIONS.md etc. 
in memory - -**Lesson**: ctx is a short-lived CLI process, not a daemon. Context files are -tiny (few KB), sub-millisecond to read. Cache invalidation complexity exceeds -the read cost. Caching only makes sense if ctx becomes a long-lived process (MCP -server, watch daemon). - -**Application**: Don't add caching layers to ctx's file reads. If an MCP server -mode is ever added, revisit then. - ---- - -## [2026-02-22-120000] Hook behavior and patterns (consolidated) - -**Consolidated from**: 8 entries (2026-01-25 to 2026-02-17) - -- Hook scripts receive JSON via stdin (not env vars); parse with - `HOOK_INPUT=$(cat)` then jq -- Hook key names are case-sensitive: `PreToolUse` and `SessionEnd` (not - `PreToolUseHooks`) -- Use `$CLAUDE_PROJECT_DIR` in hook paths, never hardcode absolute paths -- Hook regex can overfit: `ctx` as binary vs directory name differ; anchor - patterns to command-start positions with `(^|;|&&|\|\|)\s*` -- grep patterns match inside quoted arguments — test with `ctx add learning - "...blocked words..."` to verify no false positives -- Hook scripts can silently lose execute permission; verify with `ls -la - .claude/hooks/*.sh` after edits -- Two-tier output is sufficient: unprefixed (agent context, may or may not - relay) and `IMPORTANT: Relay VERBATIM` (guaranteed relay); don't add new - severity prefixes -- Repeated injection causes agent repetition fatigue; use `--session $PPID - --cooldown 10m` and pair with a readback instruction - ---- - -## [2026-02-22-120001] UserPromptSubmit hook output channels (consolidated) - -**Consolidated from**: 2 entries (2026-02-12) - -- UserPromptSubmit hook stdout is prepended as AI context (not shown to user); - stderr with exit 0 is swallowed entirely -- User-visible output requires `{"systemMessage": "..."}` JSON on stdout - (warning banner) or exit 2 (blocks prompt) -- There is no non-blocking user-visible output channel for this hook type -- Design hooks for their actual audience: AI-facing = plain stdout, 
user-facing - = systemMessage JSON - ---- - -## [2026-02-22-120002] Linting and static analysis (consolidated) - -**Consolidated from**: 7 entries (2026-01-25 to 2026-02-20) - -- Full pre-commit gate: (1) `CGO_ENABLED=0 go build ./cmd/ctx`, (2) - `golangci-lint run`, (3) `CGO_ENABLED=0 go test` — all three, every time -- Own the codebase: fix pre-existing lint issues even if you didn't introduce - them -- gosec G301/G306: use 0o750 for dirs, 0o600 for files everywhere including - tests -- gosec G304 (file inclusion): safe to suppress with `//nolint:gosec` in test - files using `t.TempDir()` paths -- golangci-lint errcheck: use `cmd.Printf`/`cmd.Println` in Cobra commands - instead of `fmt.Fprintf` -- `defer os.Chdir(x)` fails errcheck; use `defer func() { _ = os.Chdir(x) }()` -- golangci-lint Go version mismatch in CI: use `install-mode: goinstall` to - build linter from source - ---- - -## [2026-02-22-120006] Permission and settings drift (consolidated) - -**Consolidated from**: 4 entries (2026-02-15) - -- Permission drift is distinct from code drift — settings.local.json is - gitignored, no review catches stale entries -- `Skill()` permissions don't support name prefix globs — list each skill - individually -- Wildcard trusted binaries (`Bash(ctx:*)`, `Bash(make:*)`), but keep git - commands granular (never `Bash(git:*)`) -- settings.local.json accumulates session debris; run periodic hygiene via - `/sanitize-permissions` and `/ctx-drift` - ---- - -## [2026-02-22-120008] Gitignore and filesystem hygiene (consolidated) - -**Consolidated from**: 3 entries (2026-02-11 to 2026-02-15) - -- Gitignored directories are invisible to `git status`; stale artifacts persist - indefinitely — periodically `ls` gitignored working directories -- Add editor artifacts (*.swp, *.swo, *~) to .gitignore alongside IDE - directories from day one -- Gitignore entries for sensitive paths are security controls, not documentation - — never remove during cleanup sweeps - ---- - -## 
[2026-01-28-051426] IDE is already the UI - -**Context**: Considering whether to build custom UI for .context/ files - -**Lesson**: Discovery, search, and editing of .context/ markdown files works -better in VS Code/IDE than any custom UI we'd build. Full-text search, -git integration, extensions - all free. - -**Application**: Don't reinvent the editor. Let users use their preferred IDE. - ---- - - -*Module-specific, niche, and historical learnings: -[learnings-reference.md](learnings-reference.md)* + diff --git a/.context/TASKS.md b/.context/TASKS.md index 88b8237a7..77fcbbc98 100644 --- a/.context/TASKS.md +++ b/.context/TASKS.md @@ -25,2114 +25,12 @@ TASK STATUS LABELS: `#in-progress`: currently being worked on (add inline, don't move task) --> -### Misc +### Phase 1: [Name] `#priority:high` +- [ ] Task 1 +- [ ] Task 2 -- [x] If context is not initialized, hooks should not run. Right now they run - and give a "context directory outside project root" (that's a side effect). But - the issue is the project does not have a .context folder and we don't detect - it. **Progress 2026-04-13**: Boundary side effect resolved by git-anchored - walk - (commit e24941d2). `state.Initialized()` guards added to `check_resource` and - `check_backup_age` — the two user-visible relay-nag hooks that were missing - them. **Completed 2026-04-16**: `state.Initialized()` guards added to - `mark_journal`, `mark_wrapped_up`, `pause`, `resume`. `bootstrap` left - unguarded intentionally — its job is to report context dir status, and it - already has its own `os.Stat` guard. Safety hooks - (`block_dangerous_command`, `block_non_path_ctx`) intentionally run - regardless. +### Phase 2: [Name] `#priority:medium` +- [ ] Task 1 +- [ ] Task 2 -- [x] Move `ctx bootstrap` back to `ctx system bootstrap` (hidden). Bootstrap is - agent-only plumbing — no human types it interactively. It was incorrectly - promoted to top-level in the namespace cleanup. 
Move the package back to - `internal/cli/system/cmd/bootstrap/`, restore - `UseSystemBootstrap`/`DescKeySystemBootstrap` constants, re-add `Hidden: - true`, update CLAUDE.md templates and skills back to `ctx system bootstrap`, - remove from `docs/cli/bootstrap.md` and `docs/cli/index.md` Diagnostics group, - remove from `zensical.toml` nav. Spec: specs/cli-namespace-cleanup.md - #priority:high #added:2026-04-11 #done:2026-04-12 - -- [x] Rename `ctx stats` to `ctx usage`. "Stats for what?" — the current name - lost its anchor when promoted from `ctx system stats`. `ctx usage` - communicates intent: "show me my token usage." `ctx session stats` was - considered but rejected as premature — a parent with one child is worse than - a flat command. Revisit when `ctx session` has 2+ children. Spec: - specs/cli-namespace-cleanup.md #priority:medium #added:2026-04-11 - #done:2026-04-12 - -- [x] Rename `ctx resource` to `ctx sysinfo`. Without the `system` prefix, - "resource" sounds like it manages project resources (files, assets, - infrastructure). It's actually a system-health snapshot: memory, swap, disk, - CPU load. `sysinfo` matches the internal package name (`internal/sysinfo`) and - is unambiguous. `health` was considered but rejected — too similar to `ctx - doctor` and `ctx doctor health` reads wrong. Same rename pattern. Spec: - specs/cli-namespace-cleanup.md #priority:medium #added:2026-04-11 - #done:2026-04-12 - -- [x] Remove `ctx dep`. Utility is marginal: agents rarely need a flat - dependency inventory to make decisions, and `go list -m all` / `npm ls` - already cover the use case. Doesn't clear the "would I miss it if it - vanished?" bar. Removed command, all support packages, docs, and recipes. 
- #priority:low #added:2026-04-11 #done:2026-04-12 - -- [x] Introduce `ctx hook` parent command — consolidate hook-related - user-facing commands under a single namespace: `ctx hook message - list/show/edit/reset` (currently `ctx message`), `ctx hook notify` (currently - `ctx notify`), `ctx hook pause` / `ctx hook resume` (currently top-level `ctx - pause` / `ctx resume`). "What are we pausing?" — hooks. The current - top-level `pause` loses that context. Clarifies the `ctx trigger` vs `ctx - hook` distinction: `trigger` = user-authored scripts, `hook` = plugin-shipped - machinery + its user-facing controls. Future children: `ctx hook status` - (which hooks fired recently), `ctx hook test` (dry-run), `ctx hook event` - (currently `ctx event`). Same rename pattern as previous namespace cleanups. - Spec: specs/cli-namespace-cleanup.md #priority:medium #added:2026-04-11 - #done:2026-04-12 - -### Agents - -- [-] Add `ctx explore` command — scaffolds `.arch-explorer/` in a workspace - directory with manifest.json, PROMPT.md (from - `hack/agents/architecture-explorer.md`), run-log.md, and a README. Similar to - `ctx init` but for multi-repo architecture exploration. The prompt template - lives in `hack/agents/architecture-explorer.md` and ships embedded. - #priority:low #added:2026-04-13 - **Skipped 2026-04-16**: Superseded by - `docs/operations/runbooks/architecture-exploration.md`. A runbook is the right - weight — a CLI scaffolding command was speculative abstraction for a - workflow - that's better served by a discoverable doc with an embedded prompt. - -### Runbooks - -- [x] Create `docs/operations/runbooks/release-checklist.md` — canonical - pre-release - sequence: run codebase-audit, docs-semantic-audit, sanitize-permissions, `make - test`, bump version, generate release notes, tag, push. Today this lives in - the operator's head + scattered across docs/operations/. Cross-link with - `_ctx-release` skill. 
#priority:high #added:2026-04-11 - **Completed 2026-04-16**: Created at - `docs/operations/runbooks/release-checklist.md`. - All runbooks moved from `hack/runbooks/` to `docs/operations/runbooks/` for - discoverability on ctx.ist. - -- [x] Create `docs/operations/runbooks/breaking-migration.md` — template for - users - upgrading across breaking CLI renames. What commands changed, how to - regenerate CLAUDE.md (`ctx init --force`), how to update personal scripts and - hook configs. One instance per breaking release, or a generic template with a - per-release appendix. #priority:medium #added:2026-04-11 - **Completed 2026-04-16**: Created at - `docs/operations/runbooks/breaking-migration.md`. - -- [x] Create `docs/operations/runbooks/hub-deployment.md` — linear runbook for - setting up - a ctx Hub for a team: generate admin token, distribute, register clients, - verify sync, configure TLS (when H-01/H-02 land). Consolidates pieces - currently scattered across hub recipes. #priority:medium #added:2026-04-11 - **Completed 2026-04-16**: Created at - `docs/operations/runbooks/hub-deployment.md`. - -- [x] Create `docs/operations/runbooks/new-contributor.md` — onboarding - sequence: clone - → `ctx init` → `make build && sudo make install` → verify hooks (`claude - mcp list`) → run first session → verify context persistence. Currently - scattered across README, contributing.md, and setup docs. #priority:medium - #added:2026-04-11 - **Completed 2026-04-16**: Created at - `docs/operations/runbooks/new-contributor.md`. - -- [x] Create `docs/operations/runbooks/plugin-release.md` — plugin-specific - release - procedure: update hooks.json, bump version, test against fresh Claude Code - install, publish to marketplace, verify `claude mcp list` shows updated - version. Not covered by the general release checklist. #priority:low - #added:2026-04-11 - **Completed 2026-04-16**: Created at - `docs/operations/runbooks/plugin-release.md`. 
- -### Misc - -- [ ] Human: Read the entire documentation page-by-page, line-by-line, with a - critical mind, including blog posts. Take notes for agent to rectify, or - directly update the docs whenever it makes sense. - -- [ ] Human: Do a documentation audit for AI-generated artifacts. #important - #not-urgent - -- [ ] Human: test `ctx init` on a fresh ubuntu install. - -- [ ] Improve hub failover client: distinguish auth errors - (Unauthenticated/PermissionDenied) from connection errors. Fail fast on auth - failures instead of cycling through all peers with the same invalid token. - #priority:low #added:2026-04-08-194612 - -- [ ] Add file locking to ctx connect sync state to prevent concurrent sync - races. Two sync processes (hook + manual) can both load the same LastSequence, - process the same entries, and write duplicate content to .context/shared/. - #priority:medium #added:2026-04-08-194557 - -- [ ] Fix fanout broadcast entry loss: non-blocking send drops entries to slow - listeners silently. Log when entries are dropped. Consider per-listener - backpressure or disconnect-on-lag. Buffer of 64 is too small for busy hubs. - #priority:medium #added:2026-04-08-194542 - -- [ ] Prevent duplicate client registration in hub store: RegisterClient should - reject if ProjectName already exists. Add token revocation support (delete - client by ID/project). Currently tokens are valid forever with no way to - disable compromised ones. #priority:medium #added:2026-04-08-194529 - -- [ ] Fix hub cluster: NewCluster result is discarded (not stored on Server), so - Raft runs but leadership status is never queryable. Store cluster reference on - Server, wire IsLeader/LeaderAddr into Status RPC and hub status command. - #priority:medium #added:2026-04-08-194511 - -- [ ] Use crypto/subtle.ConstantTimeCompare for hub token validation instead of - string equality. Current Store.ValidateToken uses == which is vulnerable to - timing attacks. 
Also replace O(n) linear scan with a map[string]*ClientInfo - for O(1) lookup. #priority:high #added:2026-04-08-194458 - -- [ ] Fix silent error suppression in hub: (1) ctx add --share silently ignores - publish failures — warn user on failure, (2) hubsync hook swallows all - errors — log to event system, (3) replication loop drops errors silently — - add structured logging for debug. #priority:high #added:2026-04-08-194443 - -- [ ] Add input validation to hub Publish handler: reject empty ID, validate - Type against allowed set (decision/learning/convention/task), enforce Content - length limit (1MB), require non-empty Origin. Prevents garbage data and DoS - via unbounded content. #priority:high #added:2026-04-08-194430 - -- [ ] Fix ctx connect listen: currently only does initial sync then blocks on - ctx.Done() without ever calling the Listen RPC. Must stream entries in - real-time via the server-streaming Listen RPC, writing to .context/shared/ as - entries arrive. #priority:high #added:2026-04-08-194415 - -- [x] Remove any superpowers library references and implement all needed - workflow mechanisms (brainstorm, plan, execute, review, subagent dispatch) - natively in ctx. No external plugin libraries should be used — ctx must be - self-contained. Clean up docs/superpowers/ directory and any remaining - references. #priority:high #added:2026-04-06-121002 #done:2026-04-06 - -- [ ] Deprecate and remove `ctx backup`: hub handles cross-machine persistence, - backup is environment-specific (SMB/GVFS/rsync), and it is the wrong layer - for ctx to own. Replace with a backup-strategy runbook. About 60 files to - remove across CLI, config, hooks, docs, skills. Implementation order: runbook - first, then hook removal, then command removal, then docs cleanup. 
- Spec: specs/deprecate-ctx-backup.md #priority:medium - #added:2026-04-04-010000 #updated:2026-04-16 - -### Architecture Docs - -- [ ] Publish architecture docs to docs/: copy ARCHITECTURE.md, - DETAILED_DESIGN domain files, and CHEAT-SHEETS.md to docs/reference/. - Sanitize intervention points into docs/contributing/. - Exclude DANGER-ZONES.md and ARCHITECTURE-PRINCIPAL.md (internal only). - Spec: specs/publish-architecture-docs.md #priority:medium - #added:2026-04-03-150000 - -- [ ] Update ctx-architecture skill to append discovered terms to GLOSSARY.md - during Phase 3. Additive only, max 10 terms per run, project-specific only, - alphabetical insertion, skip if GLOSSARY.md empty. Print added terms in - convergence report. Spec: specs/publish-architecture-docs.md #priority:low - #added:2026-04-03-153000 - -### Code Cleanup Findings - - -- [x] Extend flagbind helpers (IntFlag, DurationFlag, DurationFlagP, StringP, - BoolP) and migrate ~50 call sites to unblock TestNoFlagBindOutsideFlagbind - #added:2026-04-01-233250 - -- [ ] Implement journal compaction: Elastic-style tiered storage with tar.gz - backup. Spec: specs/journal-compact.md #added:2026-03-31-110005 - -- [x] Refactor 28 grandfathered cmd/ purity violations found by - TestCmdDirPurity: move unexported helpers, exported non-Cmd/Run functions, - and types from cmd/ directories to core/. See grandfathered map in - compliance_test.go for the full list. #priority:medium - #added:2026-03-31-005115 - - -- [x] PD.4.5: Update AGENT_PLAYBOOK.md — add generic "check available skills" - instruction #priority:medium #added:2026-03-25-203340 - -**PD.5 — Validate:** - - -### Phase -3: DevEx - -- [x] Plugin enablement gap: Ref: - `ideas/plugin-enablement-gap.md`. Local-installed plugins get - registered in `installed_plugins.json` but not auto-added to - `enabledPlugins`, so slash commands are invisible in non-ctx - projects. 
- -- [x] Add cobra Example fields to CLI commands via - examples.yaml #added:2026-03-20-163413 - -- [x] Add CLI YAML drift detection test: verify flag names in - examples.yaml match actual registered flags, and Use: patterns - in commands.yaml match Use constants. Structural linkage is - already tested; this covers content-level drift. Semantic - accuracy (does the description match behavior?) needs periodic - LLM audit — not automatable. #priority:medium #added:2026-04-05 - -- [-] Create ctx-docstrings skill: audit and fix docstrings - against CONVENTIONS.md Documentation section. Superseded by - TestDocCommentStructure compliance test (68 grandfathered). - #added:2026-03-20-163413 - #added:2026-03-16-114445 - -### Phase -2: Task completion nudge: - -- [x] Move 6 grandfathered cross-package MCP types to entity/ #session:cc97cb0d - #branch:main #commit:e8d5c60a #added:2026-04-08-074620 - -- [ ] Design UserPromptSubmit hook that runs `make audit` at - session start and surfaces failures as a consolidation-debt - warning before the agent acts on stale assumptions. - Project-level hook (not bundled in ctx), configurable via - .ctxrc or settings.json. Related: consolidation nudge hook - spec. #added:2026-03-23-223500 - -- [ ] Design UserPromptSubmit hook that runs go build and - surfaces compilation errors before the agent acts on stale - assumptions #added:2026-03-23-120136 - -- [ ] Architecture Mapping (Enrichment): - **Context**: Skill that incrementally builds and maintains - ARCHITECTURE.md and DETAILED_DESIGN.md. Coverage tracked in - map-tracking.json. Spec: `specs/ctx-architecture.md` - - [x] Create ctx-architecture-enrich skill: takes existing - /ctx-architecture principal-mode artifacts as baseline, runs - comprehensive enrichment pass via GitNexus MCP (blast radius - verification, registration site discovery, execution flow - tracing, domain clustering comparison, shallow module - deep-dive). Spec: `ideas/spec-architecture-enrich.md`. 
- Reference implementation: kubernetes-service enrichment pass - 2026-03-25. #added:2026-03-25-120000 - -- [ ]: ctx-architecture-failure-analysis - **Context**: Adversarial analysis skill that identifies where - a codebase will silently betray you. Requires - `ctx-architecture` artifacts as input (ARCHITECTURE.md, - DETAILED_DESIGN*.md, map-tracking.json). Does its own - targeted deep reads focusing on mutation points, shared - mutable state, error swallowing, concurrency, implicit - ordering, missing enforcement, and scaling cliffs. Uses - available tooling (GitNexus, Gemini Search) to - cross-reference patterns. - - Produces `DANGER-ZONES.md` — a ranked inventory of silent - failure points with: location, failure mode, blast radius, - detection gap, and suggested fix. Two tiers: "most likely to - cause production incidents" and "less likely but equally - dangerous." - - Distinct from a security threat model (which would be - `ctx-threat-model` — a separate skill for auth bypass, - injection, privilege escalation, supply chain). This skill - focuses on correctness: race conditions, ordering - assumptions, cache staleness, fan-out amplification, - non-atomic ownership, inverted logic, force-delete orphans, - global state mutation. - - - [x] Design SKILL.md for ctx-architecture-failure-analysis: - inputs (architecture artifacts), analysis phases, output - format (DANGER-ZONES.md), quality checklist - #added:2026-03-25-060000 - - [x] Define the adversarial analysis framework: categories - of silent failure (concurrency, ordering, cache, - amplification, ownership, error swallowing, global state) - with heuristics for each #added:2026-03-25-060000 - - [x] Implement skill with GitNexus integration: use impact - analysis for blast radius estimation, use context for - shared-state detection #added:2026-03-25-060000 - - [x] Add Gemini Search integration: cross-reference - discovered patterns against known failure modes in similar - systems. 
#added:2026-03-25-060000 - -- [-] ctx-architecture-extend - Skipped: extension point analysis is covered by /ctx-architecture - DETAILED_DESIGN (per-module) and /ctx-architecture-enrich - (registration sites). A fourth skill fragments the pipeline - without enough distinct value. Three is the right number: - map, enrich, hunt. - **Context**: Companion to `ctx-architecture` and - `ctx-failure-analysis`, completing a trilogy: how does it - work → where will it break → where does it grow. Reads - architecture artifacts → identifies registration patterns - (interfaces, factory functions, plugin systems, ordered - slices, scheme registrations) → traces recent additions via - git log to confirm which extension points are actually used - → produces `EXTENSION-POINTS.md` ranked by frequency, with - exact file locations, function signatures, and the typical - feature pattern (e.g., "most features require a variable + - a mutator + a machine-agent task"). - - Valuable for onboarding ("I need to add feature X, where do - I start?") and architecture review ("are we adding features - in the right places?"). - - - [-] Design SKILL.md for ctx-extension-map - Skipped: parent task skipped. - #added:2026-03-25-062000 - - [-] Define extension point detection heuristics - Skipped: parent task skipped. - #added:2026-03-25-062000 - - [-] Add git log frequency analysis - Skipped: parent task skipped. - #added:2026-03-25-062000 - - [-] Integrate with GitNexus for registration sites - Skipped: parent task skipped. - #added:2026-03-25-062000 - -### Phase CT: Companion Tool Integration - -Session-start checks, suppressibility, and registry for companion MCP tools. 
- -- [ ] ctx-remember preflight: verify ctx binary in PATH, - plugin installed and enabled, binary version matches plugin - version #priority:medium #added:2026-03-25-234514 - -- [ ] Design suppressible companion check system: .ctxrc - configures which companion tools to check (one search MCP, - one graph MCP), smoke tests only run for configured tools, - not auto-discovered. Keeps bootstrap fast and predictable. - #priority:medium #added:2026-03-25-234516 - -- [ ] Add per-tool suppression for ctx-remember checks: allow - suppressing individual preflight checks (ctx binary, plugin, - search MCP, graph MCP) via .ctxrc fields, not just - companion_check: false blanket toggle - #priority:low #added:2026-03-25-234518 - -### Phase CLI-FIX: CLI Infrastructure Fixes - -- [x] Bug: ctx add task appends to the last Phase section instead of a dedicated - location. Tasks added via CLI land inside whatever Phase happens to be last in - TASKS.md, breaking Phase structure. Fix: add mandatory --section flag to ctx - add - task. If the named section does not exist, create it. If --section is - omitted, error with message. Heading level fixed from ## to ### to match - TASKS.md structure. - #priority:high #added:2026-03-25-234813 #done:2026-04-06 - -### Phase BLOG: Blog Posts - -- [ ] Write blog post about architecture analysis + enrichment two-pass design - after dogfooding run on ctx itself. Cover: the 5.2x depth observation, - constraint-as-feature principle, watermelon-rind anti-pattern, and results - from the ctx self-analysis. #priority:medium #added:2026-03-25-233650 - -- [ ] Blog post: "Writing a CONSTITUTION for your AI agent" — showcase ctx's - CONSTITUTION.md as a pattern for hard invariants that agents cannot violate. - Cover: why advisory rules fail (agents game qualifiers), what belongs in a - constitution vs conventions, the spec-at-commit enforcement story from this - session, examples of good rules (absolute, binary, no interpretation needed). 
- Include a recipe for writing your own. - #priority:medium #added:2026-03-27-115500 - -- [ ] Recipe: "How to write a good CONSTITUTION.md" — practical guide with - categories (security, quality, process, structure), anti-patterns (vague - qualifiers, unenforced rules), enforcement mechanisms (hooks, commit gates), - and a starter template. #priority:medium #added:2026-03-27-115500 - -- [ ] Import grouping compliance test: parse all .go files, verify imports - follow stdlib — external — ctx three-group ordering. Add to - internal/compliance/. Catches violations that goimports misses (it merges - external and ctx into one group). #priority:medium #added:2026-03-27-120000 - -- [ ] drift check should notify if claude permissions have insecure stuff in it. - -- [ ] task: sync workspace to ARI_INBOX - -### Phase -1: Hack Script Absorption - -Absorb remaining `hack/` scripts into Go subcommands. Eliminates shell -dependencies, improves portability, and makes the skill layer call `ctx` -directly instead of `make` targets. - -### Phase 0.9: Suppress Nudges After Wrap-Up - -Spec: `specs/suppress-nudges-after-wrap-up.md`. Read the spec before starting -any P0.9 task. - -**Phase 3 — Skill integration:** - -- [-] P0.9.2: Split cli-reference.md — moved to Future - #added:2026-02-24-204208 - -- [-] P0.9.3: Investigate proactive content suggestions — moved to Future - #added:2026-02-24-185754 - -### Phase 0.8: RSS/Atom Feed Generation (`ctx site feed`) - -Spec: `specs/rss-feed.md`. Read the spec before starting any P0.8 task. - -### Phase 0.4: Hook Message Templates - -Spec: `specs/future-complete/hook-message-templates.md`. Read the spec before -starting any P0.4 task. - -**Phase 2 — Discoverability + documentation:** - -Spec: `specs/future-complete/hook-message-customization.md`. - -- [ ] Migrate hook message templates from .txt files to YAML - localization #added:2026-03-20-163801 - -### Phase 0.4.9: Injection Oversize Nudge - -Spec: `specs/injection-oversize-nudge.md`. 
Read the spec before starting -any P0.4.9 task. - -### Phase 0.4.10: Context Window Token Usage - -Spec: `specs/context-window-usage.md`. Read the spec before starting any -P0.4.10 task. - -### Phase 0.5 Cleanup - -* Human: internal/recall/parser requires a serious refactoring; for example - the parser object and its private and public methods need to go to its own - package and other helper functions need to go to a different adjacent package. -* Human: internal/notify/notify.go requires refactoring (all functions bagged in - one file; types need to go to types.go per convention etc etc) -* Human: split err package into sub packages. - - -- [ ] Refactor site/cmd/feed: extract helpers and types to core/, make Run - public #added:2026-03-21-074859 - -- [ ] Add Use* constants for all cobra subcommand Use - strings #added:2026-03-20-184639 - -- [ ] Systematic audit: extract all magic flag name strings across CLI commands - into config/flag constants #added:2026-03-20-175155 - -- [-] Move generic string helpers from cli/add/core/strings.go to - internal/format — file no longer exists, helpers already moved or deleted - #added:2026-03-20-175046 - -- [ ] Add missing flag name constants (priority, section, file) and priority - level constants (high, medium, low) to config/flag #added:2026-03-20-170842 - -### Phase 0: Ideas - -**User-Facing Documentation** (from `ideas/done/REPORT-7-documentation.md`): -Docs are feature-organized, not problem-organized. Key structural improvements: - -**Agent Team Strategies** (from `ideas/REPORT-8-agent-teams.md`): -8 team compositions proposed. Reference material, not tasks. 
Key takeaways: - -- [ ] Scan all config/**/* constants and catalog which ones should be ctxrc - entries for user configurability #priority:medium #added:2026-03-22-095552 - -- [ ] Update user-facing documentation for changed CLI flag - shorthands #added:2026-03-21-102755 - -- [ ] Add Unicode-aware slugification for non-ASCII - content #added:2026-03-21-070953 - -- [ ] Make TitleSlugMaxLen configurable via .ctxrc #added:2026-03-21-070944 - -- [ ] Spec and implement CRLF-to-LF newline normalization for journal and - context files #added:2026-03-20-224845 - -- [ ] Test ctx on Windows — validate build, init, agent, drift, journal - pipeline #added:2026-03-20-224835 - -- [ ] Evaluate Windows support for sysinfo.Collect and path - handling #added:2026-03-20-194930 - -- [ ] Make doctor thresholds configurable via .ctxrc #added:2026-03-20-194923 - -- [ ] Evaluate cross-platform path handling in change/core/scan.go — git - always - uses "/" but UniqueTopDirs should consider filepath.ToSlash for Windows - robustness #added:2026-03-20-182103 - -- [ ] Replace English-only Pluralize helper in change/core/detect.go with - i18n-safe approach #added:2026-03-20-180502 - -- [ ] Replace ASCII-only alnum check in agent/core/score.go with - unicode.IsLetter/IsDigit #added:2026-03-20-175943 - -### Phase S-0: Memory Bridge Groundwork - -Prerequisites that unblocked the memory bridge phases. - - -### Phase MB: Memory Bridge Foundation (`ctx memory`) - -Spec: `specs/memory-bridge.md`. Read the spec before starting any MB task. - -Bridge Claude Code's auto memory (MEMORY.md) into `.context/` with discovery, -mirroring, and drift detection. Foundation for future import/publish phases. - -### Phase MI: Memory Import Pipeline (`ctx memory import`) - -Spec: `specs/memory-import.md`. Read the spec before starting any MI task. - -Import entries from Claude Code's MEMORY.md into structured `.context/` files -using heuristic classification. Builds on Phase MB foundation (discover, -mirror, state). 
- -- [-] MI.future: `--interactive` mode for agent-assisted classification — - skipped: `--dry-run` covers review; agents can use `ctx add` directly for - overrides; interactive CLI prompts don't compose with agent workflows - -### Phase S-3: Blog Post — "Agent Memory is Infrastructure" - -Spec: `specs/blog-agent-memory-infrastructure.md`. - - -### Phase MP: Memory Publish (`ctx memory publish`) - -Spec: `specs/memory-publish.md`. Read the spec before starting any MP task. - -Push curated context from `.context/` into Claude Code's MEMORY.md so the agent -sees structured project context on session start without needing hooks. - -### Phase 9: Context Consolidation Skill `#priority:medium` - -**Context**: `/ctx-consolidate` skill that groups overlapping entries by keyword -similarity and merges them with user approval. Originals archived, not deleted. -Spec: `specs/context-consolidation.md` -Ref: https://github.com/ActiveMemory/ctx/issues/19 (Phase 3) - -- [ ] Implement consolidation nudge hook: count sessions since last - consolidation, nudge after 6. Spec: - `specs/consolidation-nudge-hook.md` #added:2026-03-23-223000 - -- [ ] Auto-record consolidation baseline commit: `/ctx-consolidate` and `ctx - system mark-consolidation` should stamp HEAD hash + date into - `.context/state/consolidation.json` only on first invocation (write-once until - reset). Subsequent consolidation sessions preserve the original baseline. The - baseline resets only when the consolidation nudge counter resets (i.e., when a - new feature cycle begins). This way multi-pass consolidation keeps the true - starting point. Related: - `specs/consolidation-nudge-hook.md` #added:2026-03-23-224000 - -### Phase EM: Extension Map Skill (`/ctx-extension-map`) - -question: is this done; or needs planning? - -### Phase WC: Write Consolidation - -Baseline commit: `4ec5999` (Auto-prune state directory on session start). 
-Goal: consolidate user-facing messages into `internal/write/` as the central -output package. All CLI commands should route printed output through -this package. - -- [ ] Migrate moc.go hardcoded strings to YAML or Go - templates #added:2026-03-20-214922 - -- [ ] Design terminal-aware truncation for CLI output #added:2026-03-20-184509 - -### Phase SP: Configurable Session Prefixes - -Spec: `specs/session-prefixes.md`. Read the spec before starting any SP task. - -Replace hardcoded `session_prefix` / `session_prefix_alt` pair with a -user-extensible `session_prefixes` list in `.ctxrc`. Parser vocabulary -is not i18n text — it belongs in runtime config. - -### Phase EH: Error Handling Audit - -Systematic audit of silently discarded errors across the codebase. -Many call sites use `_ =` or `_, _ =` to discard errors without -any feedback. Some are legitimate (best-effort cleanup), most are -lazy escapes that hide failures. - - -- [ ] EH.1: Catalogue all silent error discards — recursive walk of - `internal/` - for patterns: `_ = `, `_, _ = `, `//nolint:errcheck`, bare `return` after - error-producing calls. Group by category: - (a) file close in defer — often legitimate but should log on failure - (b) file write/read — data loss risk, must surface - (c) os.Remove/Rename — state corruption risk - (d) fmt.Fprint to stderr — truly best-effort, acceptable - Commands: `grep -rn '_ =' internal/`, `grep -rn - 'nolint:errcheck' internal/` - Output: spreadsheet in `.context/` with file, line, expression, category, - and recommended action (log-stderr, return-error, acceptable-as-is). - DoD: every `_ =` in the codebase is categorised and has a - recommended action - #priority:high #added:2026-03-14 - -- [ ] EH.2: Address category (b) — file write/read discards. These risk silent - data loss. Fix: return the error, or at minimum emit to stderr with - `fmt.Fprintf(os.Stderr, "ctx: ...: %v\n", err)` following the pattern - established in `internal/log/event.go`. 
- DoD: no write/read error is silently discarded - #priority:high #added:2026-03-14 - -- [ ] EH.3: Address category (a) — file close in defer. Most are `defer func() - { _ = f.Close() }()`. For read-only files, close errors are rare but - should still surface. For write/append files, close can fail if the - final flush fails — these are data loss. Fix: `if err := f.Close(); - err != nil { fmt.Fprintf(os.Stderr, "ctx: close %s: %v\n", path, err) }`. - DoD: all defer-close sites log failures to stderr - #priority:medium #added:2026-03-14 - -- [ ] EH.4: Address category (c) — os.Remove/Rename discards. These are state - operations (rotation, pruning, temp file cleanup). Silent failure leaves - stale state. Fix: stderr warning at minimum; for rotation/rename, consider - returning the error. - DoD: no Remove/Rename error is silently discarded - #priority:medium #added:2026-03-14 - -- [ ] EH.5: Validate — `grep -rn '_ =' internal/` returns only category (d) - entries (fmt.Fprint to stderr) and entries explicitly annotated as - acceptable. Run `make lint && make test` to confirm no regressions. - DoD: grep output is clean or fully annotated; CI green - #priority:high #added:2026-03-14 - -- [ ] Add AST-based lint test to detect exported functions with no external - callers #added:2026-03-21-070357 - -- [ ] Audit exported functions used only within their own package and make them - private #added:2026-03-21-070346 - -- [ ] Audit and remove side-effect output from error-returning - functions #added:2026-03-20-212212 - -### Phase ET: Error Package Taxonomy (`internal/err/`) - -`errors.go` is 1995 lines with 188 functions in a single file. Split into -domain-grouped files. No API changes — same package, same function signatures, -just file reorganization. 
- -Taxonomy (from prefix analysis): - -| File | Prefixes / Domain | ~Count | -|--------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------| -| `memory.go` | Memory*, Discover* | 17 | -| `parser.go` | Parser* | 7 | -| `crypto.go` | Crypto*, Encrypt*, Decrypt*, GenerateKey, SaveKey, LoadKey, NoKeyAt | 14 | -| `task.go` | Task*, NoTaskSpecified, NoTaskMatch, NoCompletedTasks | 8 | -| `journal.go` | LoadJournalState*, SaveJournalState*, ReadJournalDir, NoJournalDir, NoJournalEntries, ScanJournal, UnknownStage, StageNotSet | 10 | -| `session.go` | Session*, FindSessions, NoSessionsFound, All*, Ambiguous* | 8 | -| `pad.go` | Edit*, Blob*, ReadScratchpad, OutFlagRequiresBlob, NoConflict*, Resolve* | 10 | -| `recall.go` | Reindex*, Stats*, EventLog* | 6 | -| `fs.go` | Read*, Write*, Open*, Stat*, File*, Mkdir*, CreateDir, DirNotFound, NotDirectory, Boundary* | 30 | -| `backup.go` | Backup*, CreateBackup*, CreateArchive* | 6 | -| `prompt.go` | Prompt*, NoPromptTemplate, ListTemplates, ReadTemplate, NoTemplate | 7 | -| `hook.go` | Embedded*, Override*, UnknownHook, UnknownVariant, MarkerNotFound | 6 | -| `skill.go` | Skill* | 2 | -| `config.go` | UnknownProfile, ReadProfile, UnknownFormat, UnknownProjectType, InvalidTool, UnsupportedTool, NotInitialized, ContextNotInitialized, ContextDirNotFound, FlagRequires* | 12 | -| `errors.go` | Remaining general-purpose: WorkingDirectory, CtxNotInPath, ReadInput, InvalidDate*, Reminder*, Drift*, Git*, Webhook*, etc. | ~25 | - -- [ ] Add freshness_files to .ctxrc defaults seeded by ctx init — currently - the - freshness config is only in the gitignored .ctxrc, so new clones don't get it. - Consider a .ctxrc.defaults pattern or seeding via ctx init template. 
- #priority:medium #added:2026-03-14-105143 - -- [ ] SEC.1: Security-sensitive file change hook — PostToolUse on Edit/Write - matching security-critical paths (.claude/settings.local.json, - .claude/settings.json, CLAUDE.md, .claude/CLAUDE.md, - .context/CONSTITUTION.md). Three actions: (1) nudge user in-session, (2) relay - to webhook for out-of-band alerting (autonomous loops), (3) append to - dedicated security log (.context/state/security-events.jsonl) for forensics. - Separate from general event log. Spec needed. #priority:high #added:2026-03-13 - -- [ ] O.5: Session timeline view — add --sessions flag to ctx system events. - Per-session breakdown of eval/fired counts with hook list. See - ideas/spec-hook-observability.md Phase 5 #added:2026-03-12-145401 - -- [ ] O.4: Doctor hook health check — surface hook activity in ctx doctor - output - (active/evaluated-never-fired/never-evaluated). See - ideas/spec-hook-observability.md Phase 4 #added:2026-03-12-145401 - -- [ ] O.3: Skip reason logging — add eventlog.Skip() with standard reason - constants (paused, throttled, condition-not-met). Instrument 19 hook - early-exit paths. See ideas/spec-hook-observability.md Phase - 3 #added:2026-03-12-145401 - -- [ ] O.2: Event summary view — add --summary flag to ctx system events. - Aggregates eval/fired counts per hook, shows last-eval/last-fired timestamps, - lists never-evaluated hooks. See ideas/spec-hook-observability.md Phase - 2 #added:2026-03-12-145401 - -- [ ] O.1: Hook eval logging — wrap hook cobra commands to log 'eval' events - on - every invocation. Refactor Run() signatures from os.Stdin to io.Reader - (peek+replay pattern). Adds eventlog.Eval(), EventTypeEval constant. 
See - ideas/spec-hook-observability.md Phase 1 #added:2026-03-12-145401 - -- [ ] Companion intelligence recommendation: implement spec from - ideas/spec-companion-intelligence.md — ctx doctor companion detection, ctx - init recommendation tip, ctx agent awareness in - packets #added:2026-03-12-133008 - -- [ ] Add configurable assets layer: allow users to plug their own YAML files - for localization (language selection, custom text overrides). Currently all - user-facing text is hardcoded in commands.yaml; need a mechanism to load - user-provided YAML that overlays or replaces built-in text. This enables i18n - without forking. #priority:low #added:2026-03-07-233756 - -- [-] Cleanup internal/cli/system/core/persistence.go: move 10 (base for - ParseInt) to config constant — not actionable, 10 is stdlib decimal base - convention, not a magic number #priority:low #added:2026-03-07-220825 - -- [-] Cleanup internal/cli/system/core/session_tokens.go: move SessionStats from - state.go to types.go — file and type no longer exist, refactored away - #priority:low #added:2026-03-07-220825 - - - - -- [-] SMB mount path support: add `CTX_BACKUP_MOUNT_PATH` env var so - `ctx backup` can use fstab/systemd automounts instead of requiring GVFS. - Spec: specs/smb-mount-path-support.md #priority:medium - #added:2026-04-04-010000 - **Skipped 2026-04-16**: Duplicate of line 214. Superseded by - specs/deprecate-ctx-backup.md (full removal, not mount path fix). - -- [ ] Make AutoPruneStaleDays configurable via ctxrc. Currently hardcoded to 7 - days in config.AutoPruneStaleDays; add a ctxrc key (e.g., auto_prune_days) and - fallback to the default. 
#priority:low #added:2026-03-07-220512 - -- [-] Refactor check_backup_age/run.go: move consts (lines 23-24) to config, - magic directories (line 59) to config, symbolic constants for strings (line - 72), messages to assets (lines 79, 90-91), extract non-Run functions to - system/core, fix docstrings #priority:medium #added:2026-03-07-180020 - **Skipped 2026-04-16**: Superseded by specs/deprecate-ctx-backup.md - (check_backup_age will be removed entirely, not refactored). - -- [ ] Add ctxrc support for recall.list.limit to make the default --limit for - recall list configurable. Currently hardcoded as config.DefaultRecallListLimit - (20). #priority:low #added:2026-03-07-164342 - -- [ ] Extract journal/core into a standalone journal parser package — - functionally isolated enough for its own package rather than remaining as - core/ #added:2026-03-07-093815 - -- [ ] Move PluginInstalled/PluginEnabledGlobally/PluginEnabledLocally from - initialize to internal/claude — these are Claude Code plugin detection - functions, not init-specific #added:2026-03-07-091656 - -- [ ] Move guide/cmd/root/run.go text to assets, listCommands to separate file + - internal/write #added:2026-03-07-090322 - -- [ ] Move drift/core/sanitize.go strings to assets #added:2026-03-07-090322 - -- [ ] Move drift/core/out.go output functions to internal/write per - convention #added:2026-03-07-090322 - -- [ ] Move drift/core/fix.go fmt.Sprintf strings to assets — user-facing - output - text for i18n #added:2026-03-07-090322 - -- [ ] Move drift/cmd/root/run.go cmd.Print* output strings to internal/write per - convention #added:2026-03-07-084152 - -- [ ] Extract doctor/core/checks.go strings — 105 inline Name/Category/Message - values to assets (i18n) and config (Name/Category - constants) #added:2026-03-07-083428 - -- [ ] Split deps/core builders into per-ecosystem packages — go.go, node.go, - python.go, rust.go are specific enough for their own packages under deps/core/ - or deps/builders/ 
#added:2026-03-07-082827 - -- [ ] Audit git graceful degradation — verify all exec.Command(git) call sites - degrade gracefully when git is absent, per project guide - recommendation #added:2026-03-07-081625 - -- [ ] Fix 19 doc.go quality issues: system (13 missing subcmds), agent (phantom - refs), load/loop (header typo), claude (stale migration note), 13 minimal - descriptions (pause, resume, task, notify, decision, learnings, remind, - context, eventlog, index, rc, recall/parser, - task/core) #added:2026-03-07-075741 - -- [ ] Move cmd.Print* output strings in compact/cmd/root/run.go to - internal/write per convention #added:2026-03-07-074737 - -- [ ] Extract changes format.go rendering templates to assets — headings, - labels, and format strings are user-facing text for - i18n #added:2026-03-07-074719 - -- [ ] Lift HumanAgo and Pluralize to a common package — reusable time - formatting, used by changes and potentially - status/recall #added:2026-03-07-074649 - -- [ ] Extract isAlnum predicate for localization — currently ASCII-only in - agent - keyword extraction (score.go:141) #added:2026-03-07-073900 - -- [ ] Make stopwords configurable via .ctxrc — currently embedded in assets, - domain users need custom terms #added:2026-03-07-073900 - -- [ ] Make recency scoring thresholds and relevance match cap configurable via - .ctxrc — currently hardcoded in config (7/30/90 days, cap - 3) #added:2026-03-07-073900 - -- [ ] Make DefaultAgentCooldown configurable via .ctxrc — currently hardcoded - at - 10 minutes in config #added:2026-03-07-073106 - -- [ ] Make TaskBudgetPct and ConventionBudgetPct configurable via .ctxrc — - currently hardcoded at 0.40 and 0.20 in config #added:2026-03-07-072714 - -- [ ] Localization inventory: audit config constants, write package templates, - and assets YAML for i18n mapping — low priority, most users are - English-first - developers #added:2026-03-06-192419 - -- [ ] Consider indexing tasks and conventions in TASKS.md and 
CONVENTIONS.md - (currently only decisions and learnings have index - tables) #added:2026-03-06-190225 - -- [ ] Implement journal compaction: Elastic-style tiered storage with tar.gz - backup. Spec: specs/journal-compact.md #added:2026-03-31-110005 - -- [ ] Validate .ctxrc against ctxrc.schema.json at load time — schema is - embedded but never enforced, doctor does field-level checks without using - it #added:2026-03-06-174851 - - -- [ ] Add PostToolUse session event capture. Append lightweight event records - (tool name, files touched, timestamp) to .context/state/session-events.jsonl - on significant PostToolUse events (file edits, git operations, errors). Not - SQLite — just JSONL append. This feeds the PreCompact snapshot hook with - richer input so it can report what the agent was actively working on, not just - static file state. #added:2026-03-06-185126 - -- [ ] Add next-step hints to ctx agent and ctx status output. Append actionable - suggestions based on context health (e.g. stale tasks, high completion ratio, - drift findings). Pattern learned from GitNexus self-guiding agent - workflows. #added:2026-03-06-184829 - -- [ ] Implement PreCompact and SessionStart hooks for session continuity across - compaction. Wire ctx agent --budget 4000 to both events: PreCompact outputs - context packet before compaction so compactor preserves key info; SessionStart - re-injects context packet so fresh/post-compact sessions start oriented. Two - thin ctx system subcommands, two entries in hooks.json. See - ideas/gitnexus-contextmode-analysis.md for design - rationale. #added:2026-03-06-184825 - -- [ ] Audit fatih/color removal across ~35 files — removed from recall/run.go, - recall/lock.go, write/validate.go; ~30 files remain. Separate consolidation - pass. #added:2026-03-06-050140 - -- [ ] Audit remaining 2006-01-02 usages across codebase — 5+ files still use - the - literal instead of config.DateFormat. Incremental - migration. 
#added:2026-03-06-050140 - -- [ ] WC.2: Audit CLI packages for direct fmt.Print/Println usage — candidates - for migration #added:2026-03-06 - -### Phase WC2: Write Output Block Consolidation - -Spec: `specs/write-output-consolidation.md`. Read the spec before starting any -WC2 task. - -Consolidate multi-line imperative `cmd.Println` sequences in `internal/write/` -into pre-computed single-print block patterns. Separates conditional logic from -I/O and replaces 4-8 individual Tpl\* constants per function with one -block template. - -- [ ] WC2.1: Tier 1 — Consolidate multi-line functions with no conditionals: - `InfoInitNextSteps`, `InfoObsidianGenerated`, `InfoJournalSiteGenerated`, - `InfoDepsNoProject`, `ArchiveDryRun`, `ImportScanHeader`. Add `TplXxxBlock` - YAML entries, wire through embed.go + config.go, remove replaced individual - constants. #added:2026-03-17 -- [ ] WC2.2: Tier 2a — Consolidate conditional functions in info.go: - `InfoLoopGenerated` (pre-compute iterLine). Prove the pre-computation pattern - on the function that motivated this spec. #added:2026-03-17 -- [ ] WC2.3: Tier 2b — Consolidate conditional functions in - sync/recall/notify: - `SyncResult`, `CtxSyncHeader`, `CtxSyncAction`, `SessionMetadata`, - `TestResult`, `SyncDryRun`, `PruneSummary`. Each needs 1-3 pre-computed - strings before the single print call. #added:2026-03-17 -- [ ] WC2.4: Constant cleanup — verify all replaced individual `TplXxx*` - config - vars, `TextDescKey*` constants, and YAML entries are removed. Run `make lint` - and `go test ./internal/write/...` to confirm no - regressions. #added:2026-03-17 -- [ ] WC2.5: Update CONVENTIONS.md — add a "Write Package Output" subsection - documenting the pre-compute-then-print pattern for future functions with 4+ - Printlns and conditionals. 
#added:2026-03-17 - -## MCP-related - -### Phase MCP-V3: MCP v0.3 Expansion - -- [ ] Add drift check: MCP prompt coverage vs bundled skills — programmatic - check comparing config/mcp/prompt constants against assets.ListSkills() to - detect skills without MCP prompt equivalents. Pair with the tool coverage - drift check. @CoderMungan #priority:medium #added:2026-03-15-120519 - -- [ ] MCP v0.3: expand MCP prompts to cover more skills — current 5 prompts - (session-start, add-decision, add-learning, reflect, checkpoint) are a subset - of ~30 bundled skills. Evaluate which skills benefit from protocol-native MCP - prompt equivalents. Decision 2026-03-06 established 'Skills stay CLI-based; - MCP Prompts are the protocol equivalent.' @CoderMungan - #priority:medium #added:2026-03-15-120519 - -- [ ] Add drift check: MCP tool coverage vs CLI commands — programmatic check - that compares registered MCP tool names (config/mcp/tool) against ctx CLI - subcommands to detect newly added CLI commands without MCP equivalents. Could - be a drift detector check or a compliance test. @CoderMungan - #priority:medium #added:2026-03-15-120116 - -- [ ] MCP v0.3: expose additional CLI commands as MCP tools — candidates: - ctx_load (full context packet), ctx_agent (token-budgeted packet), ctx_reindex - (rebuild indices), ctx_sync (reconcile docs/code), ctx_doctor (health check). - Evaluate which provide value over the protocol vs requiring terminal - interaction. @CoderMungan #priority:medium #added:2026-03-15-120025 - -- [ ] Make MCP defaults configurable via .ctxrc — add mcp_recall_limit, - mcp_truncate_len, mcp_truncate_content_len, mcp_min_word_len, - mcp_min_word_overlap fields to .ctxrc schema; expose via rc.MCP*() with - fallback to config/mcp/cfg defaults; update tools.go to read from rc instead - of cfg constants. @CoderMungan #priority:medium #added:2026-03-15-114700 - -- [ ] MCP tools.go cleanup pass: magic strings, duplicated fragments, nested - templates. 
Lines: 461:481 + 186:196 duplicated code; 335 magic number; 382:385 - nested TextDescs → single template; 390+851 magic time literal; 443+499+800 - magic words; 557+892+902 magic numbers; 590+638 nested TextDesc templating; - 820 prefixed %s; 854 suffix %s #priority:high #added:2026-03-15-110429 - -### Phase MCP-SAN: MCP Server Input Sanitization - -[ ] Assignee: @CoderMungan -- https://github.com/ActiveMemory/ctx/issues/49 - -### Phase MCP-COV: MCP Test Coverage - -[ ] Assignee: @CoderMungan -- https://github.com/ActiveMemory/ctx/issues/50 - -## Later - -### Phase PR: State Pruning (`ctx system prune`) - -Clean stale per-session state files from `.context/state/`. Files with UUID -session ID suffixes accumulate ~6-8 per session with no cleanup. Strategy: -age-based — prune files older than N days (default 7). - -- [ ] Regenerate site/ for state-maintenance recipe - (docs/recipes/state-maintenance.md added but site not - rebuilt) #added:2026-03-05-205425 - -- [ ] Audit remaining global tombstones for session-scoping: - backup-reminded, ceremony-reminded, check-knowledge, - journal-reminded, version-checked, ctx-wrapped-up all have - the same cross-session suppression bug as - memory-drift-nudged #added:2026-03-05-205425 - -- [ ] F.2: ctx journal import (remote) — import Claude Code - session JSONLs from local or remote (~/.claude/projects/) - into local ~/.claude/projects/. Pure Go: local copy with - os.CopyFS-style walk, remote via os/exec ssh+scp (no rsync - dependency). --source flag accepts local path or user@host. - --dry-run shows what would be copied. Skips existing files - (content-addressed by UUID filenames). Enables journal export - from sessions that ran on other machines. - #added:2026-03-05-141912 - -- [ ] P0.5: Blog: "Building a Claude Code Marketplace Plugin" - — narrative from session history, journals, and git diff of - feat/plugin-conversion branch. 
Covers: motivation (shell - hooks to Go subcommands), plugin directory layout, - marketplace.json, eliminating make plugin, bugs found during - dogfooding (hooks creating partial .context/), and the fix. - Use /ctx-blog-changelog with branch diff as source material. - #added:2026-02-16-111948 -- [ ] P9.2: Test manually on this project's LEARNINGS.md (20+ entries). - #priority:medium #added:2026-02-19 -- [ ] P0.8.1: Install golangci-lint on the integration server #for-human - #priority:medium #added:2026-02-23 #added:2026-02-23-170213 -- [ ] PM.3: Review hook diagnostic logs after a long session. Check - `.context/logs/check-persistence.log` and - `.context/logs/check-context-size.log` to verify hooks fire correctly. - Tune nudge frequency if needed. #priority:medium #added:2026-02-09 -- [ ] PM.4: Run `/consolidate` to address codebase drift. Considerable drift has - accumulated (predicate naming, magic strings, hardcoded permissions, - godoc style). #priority:medium #added:2026-02-06 -- [ ] Improve test coverage for core packages at 0% #added:2026-03-20-164324 - -- [ ] PM.7: Aider/Cursor parser implementations: the recall architecture was - designed for extensibility (tool-agnostic Session type with - tool-specific parsers). Adding basic Aider and Cursor parsers would - validate the parser interface, broaden the user base, and fulfill - the "works with any AI tool" promise. Aider format is simpler than - Claude Code's. #priority:medium #source:report-6 #added:2026-02-17 - -## Future - -- [ ] P0.8.5: Enable webhook notifications in worktrees. Currently `ctx notify` - silently fails because `.context.key` is gitignored and absent in - worktrees. For autonomous runs with opaque worktree agents, notifications - are the one feature that would genuinely be useful. Possible approaches: - resolve the key via `git rev-parse --git-common-dir` to find the main - checkout, or copy the key into worktrees at creation time (ctx-worktree - skill). 
#priority:medium #added:2026-02-22 -- [ ] P0.9.2: Split cli-reference.md (1633 lines) into command group pages: - cli-overview, cli-init-status, cli-context, cli-recall, cli-tools, - cli-system — - each page covers a natural command group with its subcommands and flags - #added:2026-02-24-204208 -- [ ] P0.9.3: Investigate proactive content suggestions: - docs/recipes/publishing.md claims - agents suggest blog posts and journal rebuilds at natural moments, but no hook - or playbook mechanism exists to trigger this — either wire it up (e.g. - post-task-completion nudge) or tone down the docs to match reality - #added:2026-02-24-185754 -- [ ] PG.1: Add agent/tool compatibility matrix to prompting guide — - document which - patterns degrade gracefully when agents lack file access, CLI tools, or - ctx integration. Treat as a "works best with / degrades to" table. - #priority:medium #added:2026-02-25 -- [ ] PG.2: Add versioning/stability note to prompting guide — "these - principles are - stable; examples evolve" + doc date in frontmatter. Needed once the guide - becomes canonical and people start quoting it. - #priority:low #added:2026-02-25 -- [ ] P0.1: Brainstorm: Standardize drift-check comment format and - integrate with - `/ctx-drift` — formalize ad-hoc `` markers, teach - drift skill to parse/execute them, publish pattern in docs/recipes. Benefits - tooling/CLI but AI handles ad-hoc fine for now. - #priority:medium #added:2026-02-28 -- [ ] F.1: MCP server integration: expose context as tools/resources via Model - Context Protocol. Would enable deep integration with any - MCP-compatible client. #priority:low #source:report-6 -- [ ] Q.1: Docstring cross-reference audit — compliance test that - flags docstrings - mentioning domains that don't match their callers. Start with `write/**`, - extend to all `internal/`. 
Spec: `specs/docstring-cross-reference-audit.md` - #priority:medium #added:2026-03-17 - -- [ ] Migrate Sprintf-based templates (tpl_*.go) to Go text/template or embedded - template files — ObsidianReadme, LoopScript, and other multi-line format - strings that can't move to YAML #added:2026-03-18-163629 - -- [ ] Split internal/assets/embed_test.go — tests that call read/ packages - must - move to their respective read/ package to avoid import - cycles #added:2026-03-18-192914 - -- [ ] Improve recall/core format tests — replace hardcoded string assertions - (e.g. Contains Tokens) with semantic checks that verify structure and values, - not label text #added:2026-03-19-194645 - -### Phase BT: Build Tooling — `cmd/ctxctl` - -Replace shell-based build scripts (Makefile shell -expansions, `hack/build-all.sh`, -`hack/release.sh`, `hack/tag.sh`, `sync-*`/`check-*` targets) with a first-class -Go binary at `cmd/ctxctl`. Shares internal packages with `ctx` (version, assets, -embed FS). Installable: `go -install github.com/ActiveMemory/ctx/cmd/ctxctl@latest`. -Eliminates `jq` build dependency. Testable, cross-platform. - -- [ ] Bug: release script versions.md table insertion fails silently. The sed - pattern on line 133 uses `$` anchor but the actual Markdown table header has - column padding spaces before the trailing `|`. The row is never inserted. Fix: - relax the header match pattern or switch to a simpler approach (e.g., insert - after the separator line directly). Also verify the "latest stable" sed - handles trailing `).\n` correctly. #priority:high #added:2026-03-23-221500 - -- [ ] Replace hack/lint-drift.sh with AST-based Go tests in internal/audit/. - Spec: `specs/ast-audit-tests.md` #added:2026-03-23-210000 - -- [ ] Rewrite lint-style scripts in Go as ctxctl subcommands — - blocked: prerequisite ctxctl does not exist yet. Deferred. - #added:2026-03-29-082958 - -Dividing line: `ctx` is the user/agent tool, `ctxctl` is -the maintainer/contributor -tool. 
If a developer clones the repo and needs to build, test, release, -or validate -— that's `ctxctl`. If a user is working in a project and needs context — -that's `ctx`. - -Strong fits beyond build/release: -- `ctxctl plugin package` — package .claude-plugin for marketplace publishing -- `ctxctl plugin validate` — validate plugin.json, hooks.json, skill structure -- `ctxctl doctor` — contributor pre-flight (Go version, tools, GPG, hooks); - absorbs `hack/gpg-fix.sh` and `hack/gpg-test.sh` -- `ctxctl changelog` — deterministic release notes from git log - -Reasonable fits if project grows: -- `ctxctl test smoke` — replaces the shell pipeline in `make smoke` -- `ctxctl site build/serve` — wraps zensical + feed generation -- `ctxctl mcp register` — replaces `hack/gemini-search.sh` and future - MCP registrations - -Not a fit (keep in `ctx`): -- Anything user-facing in a project context (status, agent, drift, recall) -- Anything Claude Code hooks call — hooks must call `ctx`, not `ctxctl` - -- [ ] Design `ctxctl` CLI surface: `ctxctl sync`, `ctxctl build`, `ctxctl - release`, `ctxctl check`, `ctxctl tag` #added:2026-03-25-050000 -- [ ] Implement `ctxctl sync` — stamps VERSION into plugin.json + syncs why - docs; replaces `sync-version`, `sync-why` #added:2026-03-25-050000 -- [ ] Implement `ctxctl check` — drift checks: version sync, why docs, - lint-drift, lint-docs; replaces `check-*` targets #added:2026-03-25-050000 -- [ ] Implement `ctxctl build` — cross-platform builds with version stamping; - replaces `build-all.sh` #added:2026-03-25-050000 -- [ ] Implement `ctxctl release` — full release flow (sync, build, tag, - checksums); replaces `release.sh` + `tag.sh` #added:2026-03-25-050000 -- [ ] Simplify Makefile to thin wrappers: `make build` → `go run ./cmd/ctxctl - build` #added:2026-03-25-050000 -- [ ] Remove `jq` build dependency once ctxctl handles JSON - natively #added:2026-03-25-050000 - -- [ ] Implement MCP warm-up in /ctx-remember session ceremony — when a - 
graph/RAG - tool is configured in .ctxrc, run one orientation query at session start to - build procedural familiarity. Spec: - `ideas/spec-mcp-warm-up-ceremony.md` #added:2026-03-25-120000 - -- [ ] Update ctx doctor to check for graph tool availability — detect if a - graph/RAG MCP is configured in .ctxrc, verify connection status, recommend - installation if missing #added:2026-03-25-120000 - -- [ ] Explore pluggable graph tool interface — replace hardcoded GitNexus - references in skill text with configurable .ctxrc graph_tool key. Skills use - template placeholder instead of literal tool names. Define minimum interface - contract (query, context, impact). Spec: - `ideas/spec-mcp-warm-up-ceremony.md` #added:2026-03-25-120000 - -- [x] HUB-1: Define hub.proto — gRPC service definition with Register, - Publish, Sync, Listen, Status RPCs. Generate Go code. Spec: - specs/context-hub.md #priority:high #added:2026-04-06-113020 #done:2026-04-06 - -- [x] HUB-2: Implement internal/hub/store.go — JSONL append-only entry storage - with sequence assignment, type filtering, and since-sequence queries. Spec: - specs/hub_implementation.md #priority:high #added:2026-04-06-113021 - #done:2026-04-06 - -- [x] HUB-3: Implement internal/hub/auth.go — admin token generation on first - run, client token issuance via Register RPC, gRPC interceptor for Bearer token - validation. Spec: specs/context-hub.md #priority:high #added:2026-04-06-113022 - #done:2026-04-06 - -- [x] HUB-4: Implement internal/hub/server.go — gRPC server with Register, - Publish, Sync RPCs. Wire auth interceptor, JSONL store, TLS support. Spec: - specs/context-hub.md #priority:high #added:2026-04-06-113024 #done:2026-04-06 - -- [x] HUB-5: Implement ctx serve --shared CLI command — starts gRPC hub server - on specified port, generates admin token on first run, supports - --tls-cert/--tls-key flags. 
Spec: specs/context-hub.md #priority:high - #added:2026-04-06-113030 #done:2026-04-06 - -- [x] HUB-6: Implement internal/hub/client.go — gRPC client with Register, - Sync, Publish, Listen methods. Connection config encrypted storage via - internal/crypto (same pattern as notify). Spec: specs/context-hub.md - #priority:high #added:2026-04-06-113032 #done:2026-04-06 - -- [x] HUB-7: Implement ctx connect register — one-time registration with hub, - stores encrypted connection config in .context/.connect.enc. Spec: - specs/context-hub.md #priority:high #added:2026-04-06-113033 #done:2026-04-06 - -- [x] HUB-8: Implement ctx connect subscribe — set entry type filters - (decisions, learnings, conventions), persist in local connection config. Spec: - specs/context-hub.md #priority:medium #added:2026-04-06-113035 - #done:2026-04-07 - -- [x] HUB-9: Implement ctx connect sync — initial full pull of matching - entries from hub, write to .context/shared/ as markdown files with origin - tags, record last-seen sequence in .sync-state.json. Spec: - specs/context-hub.md #priority:medium #added:2026-04-06-113041 - #done:2026-04-07 - -- [x] HUB-10: Implement ctx connect publish and --share flag — push local - entries to hub. Add --share flag to ctx add so entries go to local file AND - hub simultaneously. Spec: specs/context-hub.md #priority:medium - #added:2026-04-06-113043 #done:2026-04-07 - -- [x] HUB-11: Implement Listen RPC with fan-out — server-streaming RPC that - pushes new entries to connected clients in real-time. ctx connect listen with - auto-reconnect on disconnect. Spec: specs/context-hub.md #priority:medium - #added:2026-04-06-113044 #done:2026-04-07 - -- [x] HUB-12: Implement ctx connect status — show server address, connection - state, last sync time, subscription config, entry counts by type. Includes - hub-side Status RPC. 
Spec: specs/context-hub.md #priority:medium - #added:2026-04-06-113046 #done:2026-04-07 - -- [x] HUB-13: Implement ctx agent --include-shared — add Tier 8 budget for - shared knowledge in agent packet assembly. Shared entries from - .context/shared/ included when --include-shared flag is passed. Spec: - specs/context-hub.md #priority:medium #added:2026-04-06-113053 - #done:2026-04-07 - -- [x] HUB-14: Implement --daemon flag for ctx serve --shared — background - process with PID file, --stop to kill, graceful shutdown. Required for - federation. Spec: specs/hub-federation.md #priority:medium - #added:2026-04-06-113054 - -- [x] HUB-15: Integrate hashicorp/raft for leader election — Raft-lite: use - Raft ONLY for master election, not data consensus. --peers flag for cluster - membership. Single-node mode auto-elects. Spec: specs/hub-federation.md - #priority:medium #added:2026-04-06-113056 - -- [x] HUB-16: Implement master-to-follower replication — master pushes entries - to followers via gRPC stream. Followers catch up via sequence-based sync on - reconnect. Spec: specs/hub-federation.md #priority:medium - #added:2026-04-06-113058 - -- [x] HUB-17: Implement client failover — clients maintain ordered peer list, - auto-reconnect to new master on connection failure. Follower redirects client - to current master address. Spec: specs/hub-federation.md #priority:medium - #added:2026-04-06-113104 - -- [x] HUB-18: Implement ctx hub status/peer/stepdown — cluster status display - (role, peers, sync state, entries, uptime), runtime peer add/remove, graceful - leadership transfer. Spec: specs/hub-federation.md #priority:low - #added:2026-04-06-113106 - -- [x] HUB-19: Update compliance test — add internal/hub/ to allowed-net-import - list alongside internal/notify/. Core packages remain network-free. 
Spec: - specs/hub_implementation.md #priority:high #added:2026-04-06-113107 - -- [x] HUB-20: End-to-end integration test — spin up hub, register 2 clients, - publish from one, verify sync on other. Test --share flag, Listen stream, and - reconnect behavior. Spec: specs/context-hub.md #priority:medium - #added:2026-04-06-113109 - -- [x] HUB-2a: Implement hub client registry and meta persistence — - clients.json for registered client tokens/project names, meta.json for - sequence counter and hub metadata. Separate from entries.jsonl. Spec: - specs/context-hub.md #priority:high #added:2026-04-06-114131 - -- [x] HUB-9a: Implement shared file renderer — convert Entry objects to - markdown with origin tags and date headers, create/append to - .context/shared/*.md files. Reused by both ctx connect sync and ctx connect - listen. Spec: specs/context-hub.md #priority:medium #added:2026-04-06-114131 - -- [x] HUB-21: Unit tests for internal/hub/ — store (append, query, rotation), - auth (token generation, validation, interceptor), client (connect, reconnect), - renderer (markdown output). Each package tested independently. Spec: - specs/hub_implementation.md #priority:medium #added:2026-04-06-114131 - -- [x] HUB-22: Documentation — create docs/cli/connect.md and docs/cli/serve.md - for new commands, update docs/cli/agent.md for --include-shared flag and - --shared-budget option. Spec: specs/context-hub.md #priority:low - #added:2026-04-06-114131 - -### Phase: ctx Hub follow-ups (PR #60) - -**Context**: PR #60 `feat: ctx Hub for cross-project knowledge -sharing` (parlakisik) merged despite open review feedback from @bilersan and -a pending review request. Author is heads-down on his Ph.D.; these tasks -capture the cleanup and documentation debt we accepted by merging. 
-PR: https://github.com/ActiveMemory/ctx/pull/60 -Review with findings: -https://github.com/ActiveMemory/ctx/pull/60#pullrequestreview-PRR_kwDOQ9VoNc7ze3nA - -#### Build / platform - -- [x] Fix Windows build: `internal/exec/daemon/daemon.go` uses - `syscall.SysProcAttr{Setsid: true}` (Unix-only). Split into - `daemon.go` (platform-agnostic), `detach_unix.go` (`//go:build !windows`, - `Setsid`), `detach_windows.go` (`//go:build windows`, - `CREATE_NEW_PROCESS_GROUP | HideWindow`). Verified with - `GOOS=windows go build ./...`. #priority:high #added:2026-04-11 #pr:60 - #done:2026-04-11 -- [ ] Add Windows job to CI so this class of regression is caught at PR time, - not by reviewers running local builds. #priority:high #added:2026-04-11 #pr:60 -- [ ] Triage the 16 package-level test failures @bilersan reported on Windows - — classify as platform-specific vs genuine bugs. #added:2026-04-11 #pr:60 - -#### Convention drift - -- [x] Fix 38 `types.go` convention violations introduced by `internal/hub` - and related packages. Resolved upstream in commit `9efe1a94 fix: reconcile - hub code with main's audit tests after rebase` — `make audit` now reports - "All checks passed!" on Linux, and `make lint` is 0 issues. - #priority:high #added:2026-04-11 #pr:60 #done:2026-04-11 -- [ ] Audit `internal/hub`, `internal/cli/connect`, `internal/cli/hub`, - `internal/cli/serve` against CONVENTIONS.md (godoc format, import aliases, - error wrapping, package layout). #added:2026-04-11 #pr:60 -- [ ] Run `/ctx-code-review` over the hub subsystem for edge cases missed in - the merge: token rotation, connection-config migration, Raft leader - handoff failure modes, sync cursor corruption recovery. 
#added:2026-04-11 - #pr:60 - -#### User-facing docs (cornerstone — scope first) - -- [x] Enumerate all doc surfaces touched by the hub: `docs/cli/connect.md`, - `docs/cli/hub.md`, `docs/cli/serve.md`, `docs/cli/init-status.md`, - `docs/cli/index.md` already existed from the PR but the three new CLI - pages were NOT wired into `zensical.toml` nav — fixed. Added three new - recipes, two operations docs, and one security doc; wired all into nav. - #priority:high #added:2026-04-11 #pr:60 #done:2026-04-11 -- [x] Write a **Getting Started: Shared Hub** recipe: single-node hub on - localhost, register first project, publish a decision, sync from a second - project, `ctx agent --include-shared`. Written to - `docs/recipes/hub-getting-started.md` and wired into nav. - #priority:high #added:2026-04-11 #pr:60 #done:2026-04-11 -- [x] Write a **Multi-machine hub** recipe: `ctx serve --shared --daemon` - on a LAN host, firewall/port guidance, bearer token provisioning, - `.connect.enc` distribution, `ctx connect register` from clients. - Written to `docs/recipes/hub-multi-machine.md`, wired into nav. - #added:2026-04-11 #pr:60 #done:2026-04-11 -- [x] Write a **High-availability cluster** recipe: Raft peers with - `--peers`, `ctx hub peer add/remove`, `ctx hub stepdown`, failure-mode - walkthrough (leader loss, split brain, recovery). Written to - `docs/recipes/hub-cluster.md`, wired into nav. - #added:2026-04-11 #pr:60 #done:2026-04-11 -- [x] Write a **Security model** doc: bearer token lifecycle, AES-256-GCM - `.connect.enc` at-rest, constant-time comparison, 1 MB content cap, type - allowlist. Threat model and operational hardening checklist. Written to - `docs/security/hub.md`, wired into nav. #priority:high - #added:2026-04-11 #pr:60 #done:2026-04-11 -- [x] Write an **Operations** doc: starting/stopping the daemon, log - locations, `ctx serve --stop`, `ctx hub status`, JSONL store layout, - backup/restore of the append-only log, systemd unit, log rotation. 
- Written to `docs/operations/hub.md`, wired into nav. - #added:2026-04-11 #pr:60 #done:2026-04-11 -- [ ] Document the auto-sync-on-session-start hook: what it does, how to - opt out, interaction with existing UserPromptSubmit hooks, performance - impact on large hubs. Partially covered in connect.md (`check-hub-sync` - mention); a dedicated section is still owed. #added:2026-04-11 #pr:60 -- [x] Document `ctx add --share` and `ctx agent --include-shared` — already - covered in `docs/cli/connect.md` (`--share`) and `docs/cli/init-status.md` - (`--include-shared` flag + Tier 8 explanation); playbook update deferred - until a dedicated "shared knowledge in agent packets" section is written. - #priority:high #added:2026-04-11 #pr:60 #done:2026-04-11 -- [ ] Add an **architecture** section to `ARCHITECTURE.md` / - `DETAILED_DESIGN.md` covering: JSONL append-only store, JSON-over-gRPC - codec (no protoc), fan-out broadcaster, Raft-lite (election only, data - via gRPC sync), sequence-based replication. #added:2026-04-11 #pr:60 -- [x] Add a **failure analysis** page for the hub: what happens on network - partition, disk full, corrupted JSONL, token rotation during active - streams, clock skew between peers. Written to - `docs/operations/hub-failure-modes.md`, wired into nav. Covers - reminder [7]. #added:2026-04-11 #pr:60 #done:2026-04-11 -- [ ] Record a DECISION explaining why we merged PR #60 with known Windows - breakage and convention drift — trade-off, author context, mitigation - plan (this task group). #added:2026-04-11 #pr:60 -- [ ] Update CONVENTIONS.md if any new patterns from the hub are worth - canonicalizing (gRPC handler layout, JSONL store access, bearer-token - middleware). 
#added:2026-04-11 #pr:60 - -#### Framing and mental model (2026-04-11 follow-up) - -- [x] Write `docs/recipes/hub-overview.md` — mental model in one - paragraph, what flows / what does not flow, two explicit user stories - (personal cross-project brain vs small trusted team), "when not to - use it" section. Wired as the first entry in the ctx Hub - nav section. #priority:high #added:2026-04-11 #pr:60 #done:2026-04-11 -- [x] Rewrite the opening of `docs/recipes/hub-getting-started.md` - to plant stakes ("what you'll get out of this recipe", "what this - recipe does not cover") and point at the overview before any commands. - #priority:high #added:2026-04-11 #pr:60 #done:2026-04-11 -- [x] Add a "read the overview first" signpost to the top of - `hub-multi-machine.md` and `hub-cluster.md`, naming - each recipe as Story 2 (trusted team) shape. #added:2026-04-11 #pr:60 - #done:2026-04-11 -- [x] Give `docs/cli/connect.md` a real "what is this" intro — unit of - identity is a project not a user, only four entry types flow, link - to the recipes. #added:2026-04-11 #pr:60 #done:2026-04-11 -- [x] Give `docs/cli/hub.md` a real "who needs this page" intro — - operator commands only, link to `ctx connect` for clients and to the - overview for the mental model. #added:2026-04-11 #pr:60 #done:2026-04-11 -- [x] Give `docs/operations/hub.md` an operator-cheat-sheet - intro (four entry types, project identity, append-only model) and - link to the overview. #added:2026-04-11 #pr:60 #done:2026-04-11 -- [x] Fix factual error in `docs/security/hub.md`: - `clients.json` stores client tokens **plaintext**, not hashed. - Replaced the "hashed" claim with a prominent warning admonition, - listed filesystem-level mitigations, and referenced the follow-up - task for hashing / keyring storage. Updated - `docs/operations/hub-failure-modes.md` compromise scenarios - to match (including a new "Compromised hub host" entry). 
Also - documented that `Origin` is self-asserted on publish, so attribution - cannot be trusted after token compromise. #priority:high - #added:2026-04-11 #pr:60 #done:2026-04-11 - -#### Design follow-ups surfaced by the brainstorm (2026-04-11) - -- [ ] Decide the product story: "personal cross-project brain", - "small trusted team", or both — then align the overview, recipes, - and CONTRIBUTING guidance to match. #priority:high #added:2026-04-11 - #pr:60 -- [ ] Server-enforce `Origin` on publish: reject entries whose - `Origin` does not match the authenticated client's `ProjectName`. - Closes a spoofing vector and eliminates accidental mislabeling. - Small change in `internal/hub/handler.go publish()`. - #priority:high #added:2026-04-11 #pr:60 -- [ ] Hash `clients.json` tokens or move them behind the local - keyring (reuse `internal/crypto`). Removes the plaintext-token - footgun documented in the security page. - #priority:high #added:2026-04-11 #pr:60 -- [x] Decide the fate of `Entry.Author`: keep, drop, or promote to - a real identity field. **Decided**: server-authoritative. The - server stamps Author from the authenticated identity source on - every publish; client input is ignored. Pre-registry: stamp with - `ClientInfo.ProjectName`. Registry MVP: stamp with - `users.json.user_id`. PKI stretch: stamp with signed-claim `sub`. - See `.context/DECISIONS.md` [2026-04-11-180000]. Implementation - tasks land under H-22 in the security audit phase. - #added:2026-04-11 #pr:60 #done:2026-04-11 -- [ ] Explore journal-entry → `learning` export path: the density - users expect from "shared context" lives in enriched journal - entries, not in manually written `ctx add learning`. Would let - the hub surface the lessons agents already recorded in sessions - without actually replicating journals. #added:2026-04-11 #pr:60 - -#### Phase: Hub identity layer for public-internet usage (2026-04-11) - -**Context**: The current hub has no concept of user identity. 
-Tokens identify **projects**, not humans. `Origin` is -self-asserted on publish. `clients.json` stores tokens in -plaintext. For the "personal" and "small trusted team" stories -(overview.md Stories 1 and 2) this is acceptable — the trust -model is "everyone holding a token is friendly." - -For public-internet usage (the "Story 3" shape we explicitly -declared out of scope in the overview) these become real gaps: -no per-user attribution, no way to revoke individual humans, no -audit trail that proves who published what, and `clients.json` -compromise equals total hub compromise. - -**Near-term MVP**: a pre-seeded identity registry owned by the -sysadmin. Instead of dynamic token issuance via admin token, -the hub reads a `users.json` file the sysadmin hand-edits, and -client registration validates against that pre-seeded list. -This is simpler than OAuth/OIDC, doesn't require a separate -identity service, and matches how internal services at small -orgs usually start before adopting an SSO. 
- -**Eventual design requirements** (decision record TBD): - -- Per-human identity, not per-project -- Tokens tied to a user ID, not a project name -- Server-enforced `Origin` matches the authenticated user (or - a user's declared project list, with server validation) -- Revocation by removing a user row from the registry and - forcing token rotation -- Hashed token storage at rest -- Optional: attribution-bearing audit log distinct from - `entries.jsonl` - -The following tasks feed into this track (they already exist -in the "Design follow-ups surfaced by the brainstorm" section -above; do not duplicate here): - -- Server-enforce `Origin` on publish (blocks spoofing) -- Hash `clients.json` tokens (blocks plaintext compromise) -- Decide the fate of `Entry.Author` (promote, drop, or keep - unauthenticated) - -Tasks unique to this phase: - -- [ ] Write a spec for the sysadmin-curated identity registry: - filename, format, schema, bootstrap flow, revocation - procedure, migration path from today's `clients.json`. - `specs/hub-identity-registry.md`. Must resolve: - - - **Token issuance**: out-of-band on the server - (`ctx hub users add` prints the plaintext token once - on stdout; only a hash is persisted). - - **Client pickup**: user receives the token out-of-band - and runs `ctx connect register --token - ctx_cli_... --project `; hub validates against - the registry. - - **TTL decision** (pick one, document in the spec): - * **Option A** (recommended): no TTL, manual revocation - only. `ctx hub users remove ` is the only - expiry path. Matches today's `clients.json` - semantics, zero surprise breakage on migration. - * **Option B**: optional `expires_at` per user row. - Tokens without it are valid forever (Option A - behavior); tokens with it are rejected after the - timestamp. Ship as an additive follow-up. - * **Option C** (explicitly rejected): rolling - expiry based on `last_used_at`. Garbage-collects - dormant tokens but breaks users who take long - vacations. 
Not worth the support cost. - - **Revocation procedure**: sysadmin edits `users.json`, - signals the hub to reload, affected tokens fail - immediately on next RPC. - - **Migration from `clients.json`**: one-shot converter - that reads today's `clients.json`, prompts the - sysadmin for a `user_id` per row, and writes - `users.json`. Leave `clients.json` in place as a - read fallback during migration, delete once - everyone is on the new path. - - #priority:high #added:2026-04-11 #pr:60 -- [ ] Implement `users.json` format: `{user_id: {project_ids: - [...], token_hash: "...", created_at: "...", notes: "..."}}`. - Read on hub start and on each Register RPC. Hot-reload via - SIGHUP or file watcher. #added:2026-04-11 #pr:60 -- [ ] Change `Register` RPC semantics: instead of minting a - new client token from the admin token, look up the - requested `ProjectName` in `users.json`. Reject if not - pre-seeded. Return the pre-hashed token only if the caller - presents an initial-provisioning credential the sysadmin - seeded alongside the registry row. #added:2026-04-11 #pr:60 -- [ ] Add `ctx hub users` subcommand group for sysadmin - operations: `add`, `remove`, `rotate`, `list`. These edit - `users.json` directly and signal the running hub to - reload. #added:2026-04-11 #pr:60 -- [ ] Add per-user audit log (`audits.jsonl` beside - `entries.jsonl`). Each RPC records user_id, method, result - status, timestamp. Separate from `entries.jsonl` so it can - be retained on a different schedule. #added:2026-04-11 - #pr:60 -- [ ] Write `docs/security/hub-identity.md` explaining the - registry-based identity model, the threat model it closes, - the threats it still doesn't close, and the operational - procedures (seed the registry, rotate a token, revoke a - user). #added:2026-04-11 #pr:60 -- [ ] Decide whether to ship the identity layer as a - **breaking change** (existing `clients.json` deployments - must migrate) or as an **opt-in flag** (`ctx hub start - --identity users.json`). 
Document in the spec above. - #added:2026-04-11 #pr:60 -- [ ] Update the hub overview and team recipe to name the - identity registry as the "upgrade path to larger teams" - story: "once your team grows past ~10 people or you need - auditable attribution, enable the identity registry." The - current overview treats Story 3 as unsupported — with the - registry this becomes Story 2.5: "small trusted team with - real attribution." #added:2026-04-11 #pr:60 -- [ ] Stretch: OIDC/OAuth bridge. Once the registry layer is - stable, consider adding an optional provider bridge so - `users.json` can be auto-populated from an external - identity source (Google Workspace, GitHub orgs, etc.). Not - a near-term priority — registry-only covers the first - order of magnitude of users. #added:2026-04-11 #pr:60 -- [ ] Stretch: signed-claim / PKI authentication. The - sysadmin-registry MVP and the OIDC bridge are both - **bearer token** models — possession of the token bytes - is identity. This is fine for trusted orgs but has - well-known replay/rotation/identity limits for true - public-internet usage. - - The next tier up is **asymmetric / signed-claim** auth: - sysadmin holds a private signing key, issues short-lived - claims `{user, project, expiry}` signed with that key, - clients present the signed claim on each RPC, server - verifies with the public key. Benefits: - - - Private key never leaves the sysadmin's machine. - - Claims expire in minutes → revocation is automatic. - - Each claim carries identity cryptographically. - - No per-RPC registry lookup — signature verification - is cheap. - - Reference designs to evaluate: JWT (RS256/ES256/EdDSA), - mTLS client certificates, SPIFFE/SPIRE workload - identities. Decision driver: does ctx ever want to run - as a real public-internet service, or does "trusted - team" always remain the upper bound? - - This is the Story 3 → true multi-tenant upgrade. 
Not a - near-term priority; captured here so the registry-first - MVP doesn't get confused for a final-state solution. - #added:2026-04-11 #pr:60 - -#### Phase: "dependency-free" claim cleanup (2026-04-11) - -**Context**: The design-invariant list in marketing and -reference docs historically included "dependency-free" -as one of five properties (alongside local-first, -file-based, CLI-driven, developer-controlled). This was -accurate when ctx was a single Go binary with no -external services. PR #60 (hub), the zensical -integration (`ctx serve`), the Claude Code plugin + -MCP, and future networked features make the blanket -claim false. - -**Replacement framing (adopted 2026-04-11)**: -"**single-binary core**". The context persistence path -(`init`, `add`, `agent`, `status`, `drift`, `load`, -`sync`, `compact`, `task`, `decision`, `learning`, and -siblings) remains a single Go binary with no required -runtime dependencies. Optional integrations — `ctx -trace` (needs `git`), `ctx serve` (needs `zensical`), -`ctx` Hub (needs a running hub), Claude Code plugin -(needs `claude`) — are opt-in and each declares its -dependency explicitly. - -This framing is load-bearing: it communicates the -design intent (nothing you don't opt into) without -claiming a literal falsehood. - -- [x] Update `docs/reference/comparison.md` bullet list - from "dependency-free" to "single-binary core" with - an explicit list of optional integrations and their - dependencies. #added:2026-04-11 #done:2026-04-11 -- [x] Update `docs/thesis/index.md:73` (the five-property - claim) from "zero runtime dependencies" to "a - single-binary core with zero required runtime - dependencies for the persistence path". - #added:2026-04-11 #done:2026-04-11 -- [-] `docs/thesis/index.md:412` (the primitive - comparison table saying "Document: Zero-dependency: - Yes"): left intact. The claim is about the document - primitive itself (markdown files have no runtime - deps), not about ctx as an implementation. 
Accurate. - #added:2026-04-11 #skipped:primitive-claim-is-correct -- [ ] Add a design-invariants reference note: the - blanket claim "dependency-free" MUST NOT be - reintroduced in new docs. Any new framing should use - "single-binary core" or name the specific path - (e.g., "persistence path", "agent packet assembly"). - #priority:medium #added:2026-04-11 -- [ ] Pre-release re-sweep: before each minor release, - grep `docs/`, `README.md`, and any blog drafts for - `dependency-free|dependency free|zero dependencies| - no dependencies` and verify each occurrence is - scoped to a path that is still dependency-free. Add - to the release runbook. #priority:medium - #added:2026-04-11 -- [ ] Update `docs/reference/design-invariants.md` to - explicitly list "single-binary core" as an invariant - with the scope definition, so future doc authors - have a canonical source to reference instead of - re-deriving the phrase. #priority:medium - #added:2026-04-11 - -#### Phase: Hub security audit (2026-04-11) - -**Context**: Full security audit of the hub subsystem, -completed during the PR #60 follow-up brainstorm as a -precondition for any public-internet deployment. 30 -findings total — 5 Critical, 12 High, 7 Medium, 4 Low, 2 -Info — covering transport security, identity, -attribution, DoS surface, Raft cluster integrity, and -storage integrity. - -The audit lives at `specs/hub-security-audit.md` and is -the canonical reference for the rest of the hub security -work. Each finding has a concrete remediation, -complexity estimate, and cross-reference to existing -tasks where applicable. The spec also contains -recommendations grouped by timeline (do-now / short / -medium / long). - -**Per-story verdicts from the audit**: - -- **Story 1** (personal cross-project brain, localhost): - acceptable as-is. No adversary in scope. -- **Story 2** (small trusted team on LAN): acceptable - with documented caveats — LAN private, hub host - hardened, admin token held only by the sysadmin. 
The - `hub-team.md` recipe already names these. -- **Story 3** (public-internet / multi-user): **UNSAFE**. - Do not deploy. Five critical findings apply, several - high-severity findings compound catastrophically - without transport security, and the Raft cluster is - a remote unauthenticated DoS surface. - -**This phase tracks the findings as actionable work**. -Individual findings are numbered H-01 through H-30 in -the spec; this task list references them by number and -links back to the spec for detail. - -- [ ] Read and internalize - [`specs/hub-security-audit.md`](../specs/hub-security-audit.md) - before starting any hub-security implementation. - The spec is the single source of truth for findings, - severity, and remediation patterns. #priority:high - #added:2026-04-11 #pr:60 - -**Do-now track** (prerequisites for non-localhost deployments): - -- [ ] **H-01** Add server-side TLS: `--tls-cert` and - `--tls-key` flags on `ctx hub start`, wire into - `grpc.NewServer` via `grpc.Creds`. Keep plaintext - default for Story 1. #priority:critical - #added:2026-04-11 #pr:60 #audit:H-01 -- [ ] **H-02** Add client-side TLS: accept `grpc://` - and `grpcs://` schemes in `hub_addr`. Update - `NewClient`, `replicateOnce`, `NewFailoverClient` to - switch credentials per scheme. Optional `--ca-cert` - for self-signed. Update - `docs/recipes/hub-multi-machine.md` to document both - forms (the current nginx-reverse-proxy recommendation - is un-implementable until this ships). #priority:critical - #added:2026-04-11 #pr:60 #audit:H-02 -- [ ] **H-04** Server-enforce `Origin` on publish: - `validateBearer` attaches `ClientInfo` to context; - `handler.go publish()` overwrites `pe.Origin` with - the authenticated `ClientInfo.ProjectName` before - store. Add a test that a client authenticated as - `alpha` cannot publish as `beta`. 
#priority:high - #added:2026-04-11 #pr:60 #audit:H-04 -- [ ] **H-15** Fix `appendFile` in `internal/hub/persist.go` - to use real `O_APPEND` instead of read-all-rewrite. - Closes both a performance bug (O(N²) publishes) and - a data-loss risk (partial write can truncate history). - #priority:high #added:2026-04-11 #pr:60 #audit:H-15 - -**Short-term track** (Story 2 hardening): - -- [ ] **H-03** Hash `clients.json` tokens with argon2id. - One-shot migration reads old file, hashes each token, - rewrites. Plaintext token only passes through memory - at registration time; disk only stores hashes. - Already referenced in the design-follow-ups section - above; this entry ties it to the audit. #priority:high - #added:2026-04-11 #pr:60 #audit:H-03 -- [ ] **H-08** Per-token Publish rate limiting using - `golang.org/x/time/rate`. Starting target: 10 entries/sec - per token, 100 burst. Return `ResourceExhausted` with - Retry-After hint. #priority:high #added:2026-04-11 #pr:60 - #audit:H-08 -- [ ] **H-09** Per-token Listen stream cap (suggested - limit: 4 concurrent streams per token, 256 total). - Track in the `fanOut` struct; reject further subscribes - with `ResourceExhausted`. #priority:high - #added:2026-04-11 #pr:60 #audit:H-09 -- [ ] **H-17** Cap `PublishRequest.Entries` at 32 per - request; reject larger batches with - `InvalidArgument`. Document the limit. #priority:high - #added:2026-04-11 #pr:60 #audit:H-17 -- [ ] **H-18** Add `audits.jsonl` as a per-RPC audit log - distinct from `entries.jsonl`. Records - `{ts, method, user, project, status, entry_count}` - per call, including authentication failures. Exposed - via `ctx hub status --audit`. Independent rotation - cadence. Already referenced in the identity-layer - phase; this entry ties it to the audit. #priority:high - #added:2026-04-11 #pr:60 #audit:H-18 -- [ ] **H-19** Implement real revocation: `ctx hub users - remove ` edits the registry and signals the hub - to reload via `fsnotify`. 
Revoked tokens fail - immediately on next RPC. Revocation events logged to - `audits.jsonl`. Merged with the Hub identity layer - phase implementation. #priority:high #added:2026-04-11 - #pr:60 #audit:H-19 -- [x] **H-22 (decide)** Decide `Entry.Author` fate. - **Decided** 2026-04-11: server-authoritative — stamp - from the authenticated identity source, ignore client - input. See `.context/DECISIONS.md` [2026-04-11-180000]. - #added:2026-04-11 #pr:60 #audit:H-22 #done:2026-04-11 -- [ ] **H-22 (implement)** Implement server-authoritative - `Entry.Author`. Identical mechanism to H-04 (Origin - enforcement): `validateBearer` attaches `ClientInfo` - to the gRPC context; `handler.go publish()` reads - `ClientInfo` and stamps `entries[i].Author` from the - server-known identity before calling `store.Append`. - Pre-registry the stamping source is - `ClientInfo.ProjectName`; after the registry MVP the - source becomes `users.json` row's `user_id`; after - the PKI stretch it becomes the signed-claim `sub`. - Same commit as H-04 is fine — they share the - `authFromContext` plumbing. Add a test that a client - authenticated as project `alpha` cannot publish an - entry whose stored `Author` differs from `alpha`. - Audit client-side callers in `ctx connect publish` - and `ctx add --share` for any that populate - `pe.Author` from local config and remove them (or - document them as ignored). #priority:high - #added:2026-04-11 #pr:60 #audit:H-22 -- [x] **H-22 (meta type + wire + validation)** Schema - update landed on the `feature/ctx-hub-next` branch: - - - `EntryMeta` struct defined in - `internal/hub/types.go` with fields `DisplayName`, - `Host`, `Tool`, `Via` (all optional strings). - - `Entry.Author`, `PublishEntry.Author`, - `EntryMsg.Author` all **removed**. Replaced with - `Meta EntryMeta` on each of the three structs. - - `handler.go publish()` copies `pe.Meta` into - `entries[i].Meta` verbatim. 
- - `message.go entryToMsg()` copies `e.Meta` into the - wire `EntryMsg.Meta`. - - `sync_helper.go replicateOnce()` copies - `msg.Meta` into the replicated `Entry.Meta`. - - `entry_validate.go` enforces: - `maxMetaFieldLen = 256` per field, - `maxMetaTotalLen = 2048` total, no C0 control - characters (newline, carriage return, NUL, DEL, - bell, etc.) except horizontal tab. - - `internal/hub/entry_validate_test.go` added with - six regression tests: empty accepted, round-trip, - field oversize rejected, total at cap accepted, - each control char rejected (nul/lf/cr/bell/del), - tab allowed. - - JSON wire key is `"meta"` on all three structs. - Pre-existing `entries.jsonl` entries with `author` - fields load cleanly (JSON ignores unknown fields) - and silently lose the hint — acceptable on the - feature branch with no production data. - - Still open as follow-up tasks below (H-22 a through - e). #priority:high #added:2026-04-11 #pr:60 - #audit:H-22 #done:2026-04-11 -- [ ] **H-22a (server-authoritative Origin stamping)** - Implement H-04-style server-enforcement for - `Entry.Origin`: `validateBearer` attaches - `ClientInfo` to the gRPC context; - `handler.go publish()` reads `ClientInfo` and - overwrites `entries[i].Origin` with - `ClientInfo.ProjectName` before `store.Append`. - Client's `pe.Origin` becomes advisory and is - ignored. This is the actual security property - the Author→Meta split was enabling — the - schema change made room for it but the - enforcement still needs to land. Add a test: - client authenticated as `alpha` cannot publish - an entry whose stored Origin is `beta`. - #priority:high #added:2026-04-11 #pr:60 #audit:H-22 -- [ ] **H-22b (renderer labels Meta as advisory)** - Update `internal/cli/connect/core/render/` (and any - other place that writes fanned-out entries to - `.context/hub/*.md`) so `Meta`-sourced values are - labeled as "client label" or "client-reported" in - prose. 
The word "Origin" is reserved for the - server-authoritative project name. Example output: - - ```markdown - ## [2026-04-11] Use UTC timestamps everywhere - **Origin**: alpha (client label: Alice via ctx@0.8.1) - ``` - - Add a test verifying that a Meta.DisplayName of - `"bob"` does NOT cause the rendered output to show - `Origin: bob`. #priority:high #added:2026-04-11 - #pr:60 #audit:H-22 -- [ ] **H-22c (client publish path supports Meta)** - Update `ctx connect publish` (and `ctx add --share` - if it reaches the hub) to accept `--display-name`, - `--host`, `--tool`, `--via` flags (or a single - `--meta key=val` repeatable flag — implementation - choice). Defaults: `--tool=ctx@`, - `--host=`, `--via=` left empty, - `--display-name=` left empty. Document in - `docs/cli/connect.md`. #priority:medium - #added:2026-04-11 #pr:60 #audit:H-22 -- [ ] **H-22d (docs: `Meta` is advisory)** Add a - prominent note to `docs/cli/connect.md`, - `docs/security/hub.md`, and - `docs/recipes/hub-overview.md` explaining that - `Meta` fields are client-reported hints, not - attribution. Cross-reference the decision record - [2026-04-11-180000]. #added:2026-04-11 #pr:60 - #audit:H-22 -- [ ] **H-22e (audit spec update)** Update - `specs/hub-security-audit.md` H-22 finding to - reflect the landed schema change: the "decide" - phase is done, the "meta type" phase is done, the - remaining work is the Origin stamping (a), the - renderer labels (b), and the client-side plumbing - (c). Also note the six regression tests as "partial - coverage" of the finding. #added:2026-04-11 #pr:60 - #audit:H-22 -- [ ] **H-30** gRPC server hardening: `KeepaliveEnforcementPolicy`, - `KeepaliveParams`, `MaxConcurrentStreams`, total - concurrent connection limit at the listener level. 
- #priority:medium #added:2026-04-11 #pr:60 #audit:H-30 - -**Medium-term track** (correctness + cluster integrity): - -- [ ] **H-12** Deterministic Raft bootstrap: single - `--bootstrap` node calls `BootstrapCluster`, others - join via `AddVoter`. Persist a `bootstrapped` flag - in the raft data dir to avoid double-bootstrapping - on restart. #priority:medium #added:2026-04-11 #pr:60 - #audit:H-12 -- [ ] **H-13** Follower-side replication validation: - call `validateEntry` on every entry received from - master before appending. Defense-in-depth against a - compromised master (which becomes possible under any - Raft transport compromise — see H-10/H-11). - #priority:medium #added:2026-04-11 #pr:60 #audit:H-13 -- [ ] **H-14** Preserve master sequence on replication: - add `masterSequence` field to Entry, followers - remember master-assigned sequences alongside local - ones. Clients cursor by master sequence so failover - doesn't re-replicate the entire log. #priority:medium - #added:2026-04-11 #pr:60 #audit:H-14 -- [ ] **H-24** `ctx hub redact ` subcommand: mark - the entry in `entries_redacted.jsonl`, broadcast a - redaction notice via Listen, filter on queries, log - to `audits.jsonl`. #priority:medium #added:2026-04-11 - #pr:60 #audit:H-24 -- [ ] **H-29** Bounded in-memory entry cache: LRU over - `entries.jsonl` with a persistent offset index - (`entries.idx`). O(log N) seeks without full-file - reads. Secondary: entries.jsonl rotation at threshold. - #priority:medium #added:2026-04-11 #pr:60 #audit:H-29 - -**Long-term track** (Story 3 enablement): - -- [ ] **H-10 + H-11** Authenticated + encrypted Raft - transport. Replace `raft.NewTCPTransport` with a - TLS-wrapped transport using mTLS between cluster - peers. Peer certs issued from a cluster CA managed - by the sysadmin. Precondition for any non-localhost - multi-node deployment. #priority:critical - #added:2026-04-11 #pr:60 #audit:H-10,H-11 -- [ ] **H-28** Decouple Raft bind port from gRPC port. 
- Accept a dedicated `--raft-bind` flag; default to a - random high port or refuse to start. Makes port - scanning less productive. #priority:low - #added:2026-04-11 #pr:60 #audit:H-28 -- [ ] Signed-entry mode: publishing clients sign their - entries with a per-client signing key; followers - verify on replication. Eliminates the "trust the - master" assumption even if H-10 fails. Merged with - the PKI stretch task in the Hub identity layer - phase. #added:2026-04-11 #pr:60 #audit:H-13 - -**Low-priority polish** (defense-in-depth): - -- [ ] **H-16** Escape / fence `Content` when the - client-side renderer writes to `.context/hub/*.md`. - Wrap every entry in explicit markers - (``) so malicious - triple-dash patterns can't inject fake frontmatter. - #added:2026-04-11 #pr:60 #audit:H-16 -- [ ] **H-20** Strict constant-time token validation: - iterate all `ClientInfo` entries and OR the results - of `subtle.ConstantTimeCompare` instead of a map - lookup followed by a constant-time compare. Rolled - into the H-03 hashing work. #added:2026-04-11 #pr:60 - #audit:H-20 -- [ ] **H-21** Require exact `Bearer ` prefix in the - `authorization` header; reject otherwise with - `Unauthenticated`. Trivial one-line tightening. - #added:2026-04-11 #pr:60 #audit:H-21 -- [ ] **H-23** Offer passphrase-derived admin token - storage (argon2id) instead of plaintext `admin.token` - on disk. Optional; document in - `docs/operations/hub.md`. #added:2026-04-11 #pr:60 - #audit:H-23 -- [ ] **H-25** Collapse auth error messages to a single - generic `Unauthenticated` reason ("authentication - required"). Log the specific reason server-side - only. #added:2026-04-11 #pr:60 #audit:H-25 - -**Informational (no action needed)**: - -- H-26: daemon re-exec flag — already fixed earlier in - this session as part of the `ctx serve --hub` → `ctx - hub start` split. Recorded in the audit for audit- - trail completeness. 
-- H-27: mTLS / asymmetric auth discussion — covered by - the PKI stretch task in the Hub identity layer - phase. No separate task needed. - -**Out of scope for this audit** (tracked elsewhere): - -- Supply chain (Go module pinning, CVE monitoring, - reproducible builds) -- Build integrity (signed binaries, transparency log) -- Third-party library CVEs (`hashicorp/raft`, `grpc`, - `raft-boltdb`) -- AI-agent misbehavior (accidental secret publishing - via `--share` — covered by the "secret-leak runbook" - task in the PR #60 follow-up section above) -- Per-project read ACLs (still out of scope even after - the identity layer MVP) - -#### Rename "Shared Context Hub" → "`ctx` Hub" (2026-04-11) - -Brainstorm outcome: "shared" was overloaded (shared memory, -shared journal, shared state) and actively primed the wrong -mental model in docs. `ctx` Hub is the canonical name; `Hub` is -used alone in nav and operator contexts where surrounding text -disambiguates. - -- [x] Rename nav entries: Recipes subsection "Shared Context Hub" - → "Hub"; add `docs/home/hub.md` as a home-level intro; split - Operations section into `Hub` / `Operating ctx` / `Maintainers` - subsections. `zensical.toml` updated. #added:2026-04-11 #pr:60 - #done:2026-04-11 -- [x] Create `docs/home/hub.md` — home-level introduction, names - the two user stories, lists what flows vs what does not, - points readers at recipes/overview for the five-minute - walkthrough. #added:2026-04-11 #pr:60 #done:2026-04-11 -- [x] Rewrite `docs/operations/index.md` with three audience-keyed - sections (Hub / Operating ctx / Maintainers). Matches the new - nav structure. #added:2026-04-11 #pr:60 #done:2026-04-11 -- [x] Rename doc files: `docs/recipes/shared-hub-*.md` → - `docs/recipes/hub-*.md`, `docs/operations/shared-hub*.md` → - `docs/operations/hub*.md`, `docs/security/shared-hub.md` → - `docs/security/hub.md`. Updated all internal cross-links. 
- #added:2026-04-11 #pr:60 #done:2026-04-11 -- [x] Rename spec files: `specs/shared-context-hub.md` → - `specs/context-hub.md`, `specs/shared-hub-federation.md` → - `specs/hub-federation.md`. Updated prose and cross-refs in - remaining spec files (`hub_implementation.md`, - `task-allocation.md`, `hub-federation.md`). #added:2026-04-11 - #pr:60 #done:2026-04-11 -- [x] Rename Go packages: `internal/cli/agent/core/shared` → - `internal/cli/agent/core/hub`; `internal/cli/serve/core/shared` - → `internal/cli/serve/core/hub`. Resolved the package-name - collision with `internal/hub` by aliasing the inner import to - `hublib` in the two files that need both. Updated audit-test - exempt lists (`magic_strings_test.go`, `magic_values_test.go`) - to match. #added:2026-04-11 #pr:60 #done:2026-04-11 -- [x] Rename Go constants and flag definitions: - `cFlag.Shared` → `cFlag.Hub`, `cFlag.IncludeShared` → - `cFlag.IncludeHub`, `DescKeyServeShared` → `DescKeyServeHub`, - `DescKeyAgentIncludeShared` → `DescKeyAgentIncludeHub`. - YAML keys in `flags.yaml` and `commands.yaml` updated to - match. #added:2026-04-11 #pr:60 #done:2026-04-11 -- [x] Rename CLI flags: `ctx serve --shared` → `ctx serve --hub`, - `ctx agent --include-shared` → `ctx agent --include-hub`. - `ctx add --share` kept (verb form is still correct). - #added:2026-04-11 #pr:60 #done:2026-04-11 -- [x] Rename on-disk directory: `.context/shared/` → - `.context/hub/`. Updated constants (`sharedDir` → `hubDir` in - `agent/core/hub/load.go` and `connect/core/render/render.go`), - path literals in `connect/core/sync/state.go`, and all test - fixtures in `connect/core/render/render_test.go`. - #added:2026-04-11 #pr:60 #done:2026-04-11 -- [x] Rename `packet.Shared` / `AssembledPacket.Shared` struct - field → `Hub`, with matching json tag. Updated - `assemble.go`, `out.go`, `render.go`, `types.go` in - `internal/cli/agent/core/budget`. Tier 8 comment updated. 
- #added:2026-04-11 #pr:60 #done:2026-04-11 -- [x] Fix a bug surfaced by the rename: `internal/cli/serve/core/hub/daemon.go` - was spawning child processes with the stale flag - `ctx serve --shared` — now correctly passes `--hub`. Without - this fix, `ctx serve --hub --daemon` would have failed silently - on the re-exec. #added:2026-04-11 #pr:60 #done:2026-04-11 -- [x] Fix stuttery function name flagged by audit: - `hub.hubDir()` → `hub.defaultDataDir()` in - `internal/cli/serve/core/hub/setup.go`. #added:2026-04-11 - #pr:60 #done:2026-04-11 -- [x] Prose sweep across all hub docs: "Shared Context Hub" → - "`ctx` Hub", "shared hub" → "hub", `--shared` → `--hub`, - `.context/shared/` → `.context/hub/`. Covered - `docs/home/`, `docs/recipes/`, `docs/operations/`, - `docs/security/`, `docs/cli/`. #added:2026-04-11 #pr:60 - #done:2026-04-11 -- [x] Verify all nav targets exist after rename. All sixteen - hub-related paths referenced in `zensical.toml` resolve to - real files. #added:2026-04-11 #pr:60 #done:2026-04-11 -- [x] Full QA gate: `go build ./...` (Linux), - `GOOS=windows go build ./...`, `make lint` (0 issues), - `make test` (0 failures including the audit exempt-list - update and the `gofmt` round-trip on `serve/cmd/root/cmd.go`). - #added:2026-04-11 #pr:60 #done:2026-04-11 - -### Later - -- [ ] Optional follow-up doc.go pass: a handful of tiny per-subcommand wrappers - under internal/cli/*/cmd/* still have ~5-line bodies. Most are - accurate-but-brief; expand only if the brief form proves insufficient in - review. #session:4b37e2f6 #branch:feat/copilot-cli-skill-parity-rebased - #commit:edaac81786c9379333b352dae0d55df0ae0f72bb #added:2026-04-14-010311 - -- [ ] Extend internal/audit/stuttery_functions_test.go to cover *ast.GenDecl - (consts, vars, types). Current implementation walks *ast.FuncDecl only and - missed tpl.TplEntryMarkdown (since renamed to HubEntryMarkdown). 
- #session:4b37e2f6 #branch:feat/copilot-cli-skill-parity-rebased - #commit:edaac81786c9379333b352dae0d55df0ae0f72bb #added:2026-04-14-010311 - -- [ ] Decide whether to delete docs/cli/connect.md — verified dead duplicate - of docs/cli/connection.md (uses old ctx connect command name; zero inbound - references; not in zensical.toml). Awaiting explicit user OK before git rm. - #session:4b37e2f6 #branch:feat/copilot-cli-skill-parity-rebased - #commit:edaac81786c9379333b352dae0d55df0ae0f72bb #added:2026-04-14-010311 - -- [-] PROMPT.md design — belongs in another project; skipped here. - #session:4b37e2f6 #added:2026-04-14-010311 #skipped:2026-04-14 +## Blocked diff --git a/.context/steering/product.md b/.context/steering/product.md new file mode 100644 index 000000000..f0f86d1a1 --- /dev/null +++ b/.context/steering/product.md @@ -0,0 +1,50 @@ +--- +name: product +description: Product context, goals, and target users +inclusion: always +priority: 10 +--- + + +# Product Context + +Describe the product, its goals, and target users. + +- **What is this project?** +- **Who uses it?** +- **What problem does it solve?** +- **What is explicitly out of scope?** \ No newline at end of file diff --git a/.context/steering/structure.md b/.context/steering/structure.md new file mode 100644 index 000000000..25a014e5f --- /dev/null +++ b/.context/steering/structure.md @@ -0,0 +1,49 @@ +--- +name: structure +description: Project structure and directory conventions +inclusion: always +priority: 10 +--- + + +# Project Structure + +Describe the project layout and directory conventions. 
+ +- **Top-level directories and their purpose** +- **Where new files should go** (and where they should not) +- **Naming conventions** for files, packages, modules \ No newline at end of file diff --git a/.context/steering/tech.md b/.context/steering/tech.md new file mode 100644 index 000000000..acfe5736a --- /dev/null +++ b/.context/steering/tech.md @@ -0,0 +1,50 @@ +--- +name: tech +description: Technology stack, constraints, and dependencies +inclusion: always +priority: 10 +--- + + +# Technology Stack + +Describe the technology stack, constraints, and key dependencies. + +- **Languages and versions** +- **Frameworks and key libraries** +- **Runtime / deployment target** +- **Hard constraints** (e.g. no CGO, no network at test time) \ No newline at end of file diff --git a/.context/steering/workflow.md b/.context/steering/workflow.md new file mode 100644 index 000000000..4b9b43a4d --- /dev/null +++ b/.context/steering/workflow.md @@ -0,0 +1,50 @@ +--- +name: workflow +description: Development workflow and process rules +inclusion: always +priority: 10 +--- + + +# Development Workflow + +Describe the development workflow, branching strategy, and process rules. 
+ +- **Branch strategy** (main-only, trunk-based, feature branches) +- **Commit conventions** (message format, signed-off-by) +- **Pre-commit / pre-push checks** +- **Review expectations** \ No newline at end of file diff --git a/.ctxrc.dev b/.ctxrc.dev index dc64d9748..dc856af01 100644 --- a/.ctxrc.dev +++ b/.ctxrc.dev @@ -11,7 +11,6 @@ profile: dev # auto_archive: true # archive_after_days: 7 # scratchpad_encrypt: true -# allow_outside_cwd: false # entry_count_learnings: 30 # entry_count_decisions: 20 # convention_line_count: 200 diff --git a/.github/copilot-instructions.md b/.github/copilot-instructions.md index 3cf453f91..8c1754315 100644 --- a/.github/copilot-instructions.md +++ b/.github/copilot-instructions.md @@ -6,7 +6,7 @@ ## Context System This project uses Context (`ctx`) for persistent AI context -management. Your memory is NOT ephemeral — it lives in `.context/` files. +management. Your memory is NOT ephemeral; it lives in `.context/` files. ## On Session Start @@ -50,7 +50,7 @@ After completing meaningful work, save a session summary to Create a file named `YYYY-MM-DD-topic.md`: ```markdown -# Session: YYYY-MM-DD — Brief Topic Description +# Session: YYYY-MM-DD: Brief Topic Description ## What Was Done - Describe completed work items @@ -90,7 +90,7 @@ Periodically ask yourself: > "If this session ended right now, would the next session know what happened?" -If no — save a session file or update context files before continuing. +If no, save a session file or update context files before continuing. ## CLI Commands diff --git a/CLAUDE.md b/CLAUDE.md index 4c43e7967..2a20b1430 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -11,11 +11,15 @@ This project uses Context (`ctx`) for context persistence across sessions. ## On Session Start 1. **Run `ctx system bootstrap`**: CRITICAL, not optional. - This tells you where the context directory is. If it fails or returns - no context_dir, STOP and warn the user. + This tells you where the context directory is. 
+ If it returns any error, relay the error output to the user + verbatim, point them at + https://ctx.ist/recipes/activating-context/ for setup, and STOP. + Do not try to activate, initialize, or otherwise recover: **those + are the user's decisions**. Wait for their next instruction. 2. **Read AGENT_PLAYBOOK.md** from the context directory: it explains how to use this system -3. **Run `ctx agent --budget 4000`** for a content summary +3. **Run `ctx agent`** for a content summary ## When Asked "Do You Remember?" @@ -41,7 +45,7 @@ Read them silently, then present what you found as recall, not as a search. ```bash # Get AI-optimized context packet (what you should know) -ctx agent --budget 4000 +ctx agent # Or see full status ctx status @@ -59,6 +63,26 @@ ctx status All files live in the context directory reported by `ctx system bootstrap`. +## Context Directory Lives at the Project Root + +The project root is the parent of `.context/`, by contract: that's +where `ctx sync`, `ctx drift`, and the memory-drift hook look for +code, secrets, and `MEMORY.md`. + +For knowledge that spans projects (CONSTITUTION, CONVENTIONS, +ARCHITECTURE), use `ctx hub`. + +Recommended layout: + +``` +~/WORKSPACE/my-project + ├── .git + ├── .context + ├── Makefile + └── specs + └── ... +``` + ## Hook Authority Instructions from PreToolUse hooks regarding `.context/` files are ALWAYS diff --git a/CONTRIBUTING-SKILLS.md b/CONTRIBUTING-SKILLS.md index c3f2d69f0..506abb001 100644 --- a/CONTRIBUTING-SKILLS.md +++ b/CONTRIBUTING-SKILLS.md @@ -30,7 +30,7 @@ For ctx plugin skills (`/ctx-status`, `/ctx-history`, etc.), see the ### absorb Extracts a diff between two copies of the same project and applies it as a -patch. The companion to `/ctx-worktree` — worktree splits work apart, absorb +patch. The companion to `/ctx-worktree`: worktree splits work apart, absorb merges it back. Useful when `git push/pull` isn't practical (USB copies, disconnected machines, worktrees without a shared remote). 
@@ -40,20 +40,12 @@ disconnected machines, worktrees without a shared remote). ### audit Detects code-level drift: predicate naming, magic strings, hardcoded values, -missing godoc. Follows the 3:1 consolidation ratio — run after every ~3 +missing godoc. Follows the 3:1 consolidation ratio; run after every ~3 rapid implementation sessions. - **Use when**: after YOLO sprints, before releases - **Skip when**: mid-feature with intentionally incomplete code -### backup - -Backs up `.context/`, `.claude/`, and global Claude data to a configured SMB -share. Requires `CTX_BACKUP_SMB_URL` environment variable. - -- **Use when**: before risky operations, end of productive sessions -- **Skip when**: SMB is not configured, or no changes since last backup - ### brainstorm Structured design thinking before implementation. Transforms vague ideas into diff --git a/GITNEXUS.md b/GITNEXUS.md index 4f0d2d759..6adeaa310 100644 --- a/GITNEXUS.md +++ b/GITNEXUS.md @@ -1,5 +1,5 @@ -# GitNexus — Code Intelligence +# GitNexus: Code Intelligence This project is indexed by GitNexus as **ctx** (13443 symbols, 67145 relationships, 257 execution flows). Use the GitNexus MCP tools to understand code, assess impact, and navigate safely. @@ -11,18 +11,18 @@ This project is indexed by GitNexus as **ctx** (13443 symbols, 67145 relationshi - **MUST run `gitnexus_detect_changes()` before committing** to verify your changes only affect expected symbols and execution flows. - **MUST warn the user** if impact analysis returns HIGH or CRITICAL risk before proceeding with edits. - When exploring unfamiliar code, use `gitnexus_query({query: "concept"})` to find execution flows instead of grepping. It returns process-grouped results ranked by relevance. -- When you need full context on a specific symbol — callers, callees, which execution flows it participates in — use `gitnexus_context({name: "symbolName"})`. 
+- When you need full context on a specific symbol (callers, callees, which execution flows it participates in), use `gitnexus_context({name: "symbolName"})`. ## When Debugging -1. `gitnexus_query({query: ""})` — find execution flows related to the issue -2. `gitnexus_context({name: ""})` — see all callers, callees, and process participation -3. `READ gitnexus://repo/ctx/process/{processName}` — trace the full execution flow step by step -4. For regressions: `gitnexus_detect_changes({scope: "compare", base_ref: "main"})` — see what your branch changed +1. `gitnexus_query({query: ""})`: find execution flows related to the issue +2. `gitnexus_context({name: ""})`: see all callers, callees, and process participation +3. `READ gitnexus://repo/ctx/process/{processName}`: trace the full execution flow step by step +4. For regressions: `gitnexus_detect_changes({scope: "compare", base_ref: "main"})`: see what your branch changed ## When Refactoring -- **Renaming**: MUST use `gitnexus_rename({symbol_name: "old", new_name: "new", dry_run: true})` first. Review the preview — graph edits are safe, text_search edits need manual review. Then run with `dry_run: false`. +- **Renaming**: MUST use `gitnexus_rename({symbol_name: "old", new_name: "new", dry_run: true})` first. Review the preview: graph edits are safe, text_search edits need manual review. Then run with `dry_run: false`. - **Extracting/Splitting**: MUST run `gitnexus_context({name: "target"})` to see all incoming/outgoing refs, then `gitnexus_impact({target: "target", direction: "upstream"})` to find all external callers before moving code. - After any refactor: run `gitnexus_detect_changes({scope: "all"})` to verify only expected files changed. @@ -30,7 +30,7 @@ This project is indexed by GitNexus as **ctx** (13443 symbols, 67145 relationshi - NEVER edit a function, class, or method without first running `gitnexus_impact` on it. - NEVER ignore HIGH or CRITICAL risk warnings from impact analysis. 
-- NEVER rename symbols with find-and-replace — use `gitnexus_rename` which understands the call graph. +- NEVER rename symbols with find-and-replace; use `gitnexus_rename` which understands the call graph. - NEVER commit changes without running `gitnexus_detect_changes()` to check affected scope. ## Tools Quick Reference @@ -48,9 +48,9 @@ This project is indexed by GitNexus as **ctx** (13443 symbols, 67145 relationshi | Depth | Meaning | Action | |-------|---------|--------| -| d=1 | WILL BREAK — direct callers/importers | MUST update these | -| d=2 | LIKELY AFFECTED — indirect deps | Should test | -| d=3 | MAY NEED TESTING — transitive | Test if critical path | +| d=1 | WILL BREAK: direct callers/importers | MUST update these | +| d=2 | LIKELY AFFECTED: indirect deps | Should test | +| d=3 | MAY NEED TESTING: transitive | Test if critical path | ## Resources @@ -83,7 +83,7 @@ If the index previously included embeddings, preserve them by adding `--embeddin npx gitnexus analyze --embeddings ``` -To check whether embeddings exist, inspect `.gitnexus/meta.json` — the `stats.embeddings` field shows the count (0 means no embeddings). **Running analyze without `--embeddings` will delete any previously generated embeddings.** +To check whether embeddings exist, inspect `.gitnexus/meta.json`: the `stats.embeddings` field shows the count (0 means no embeddings). **Running analyze without `--embeddings` will delete any previously generated embeddings.** > Claude Code users: A PostToolUse hook handles this automatically after `git commit` and `git merge`. diff --git a/README.md b/README.md index 441836ec1..a25cd601b 100644 --- a/README.md +++ b/README.md @@ -99,6 +99,11 @@ instructions. # Initialize context directory in your project ctx init +# Activate it for the current shell (binds CTX_DIR). Required +# before every other command: ctx no longer walks up the +# filesystem looking for .context/. 
+eval "$(ctx activate)"
+
 # Check context status
 ctx status
 
@@ -114,16 +119,33 @@ ctx add decision "Use PostgreSQL for primary database" \
 ctx add learning "Mock functions must be hoisted in Jest"
 ```
 
+`ctx activate` emits `export CTX_DIR=...` for your shell; one-shot
+callers can prefix the binding inline as `CTX_DIR=/abs/path/.context ctx <command>`.
+The value must be an absolute path with `.context` as its basename;
+relative paths and other names are rejected on first use. A small
+allowlist (`init`, `activate`, `deactivate`, `version`, `help`,
+`system bootstrap`, `doctor`, `guide`, `why`, `config switch/status`,
+`hub *`) runs without CTX_DIR declared; every other command exits
+with a next-step hint when it is unset.
+
 ## Documentation
 
+This README is a map, not the territory. The full documentation
+lives at **[ctx.ist](https://ctx.ist)** and carries the recipes,
+runbooks, threat model, and design rationale that this file
+intentionally doesn't try to fit. If you're past install and
+wondering "*how do I actually use this in a real session,*" the
+recipes are the right next stop.
+ | Guide | Description | |-------------------------------------------------|----------------------------------------| | [Getting Started](https://ctx.ist) | Installation, quick start, first steps | +| [Recipes](https://ctx.ist/recipes/) | Practical workflow guides | | [CLI Reference](https://ctx.ist/cli-reference/) | All commands and options | | [Context Files](https://ctx.ist/context-files/) | File formats and structure | | [Integrations](https://ctx.ist/integrations/) | Claude Code, Cursor, Aider setup | -| [Recipes](https://ctx.ist/recipes/) | Practical guides and workflows | -| [Security](https://ctx.ist/security/) | Threat model, encryption, permissions | +| [Operations](https://ctx.ist/operations/) | Runbooks, day-to-day, hub deployment | +| [Security](https://ctx.ist/security/) | Trust model, audit trail, permissions | ## Contributing diff --git a/SECURITY.md b/SECURITY.md index 9978f84d2..bc396331c 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -72,7 +72,7 @@ This means: ### The `--file` Flag The `ctx add` subcommands accept a `--file` flag that reads content from an -arbitrary file path. **No boundary check is enforced** — any file readable by +arbitrary file path. **No boundary check is enforced**; any file readable by the current user can be supplied. This is by design: `ctx` runs as the local user and does not elevate privileges. diff --git a/docs/blog/2026-02-15-ctx-v0.3.0-the-discipline-release.md b/docs/blog/2026-02-15-ctx-v0.3.0-the-discipline-release.md index db0eb5735..b84884eca 100644 --- a/docs/blog/2026-02-15-ctx-v0.3.0-the-discipline-release.md +++ b/docs/blog/2026-02-15-ctx-v0.3.0-the-discipline-release.md @@ -203,8 +203,8 @@ failures. 
| Skills with "When NOT to Use" | 0 | 21 | | Average skill body | ~15 lines | ~80 lines | | Hooks using `$CLAUDE_PROJECT_DIR` | 0 | All | -| Documentation commits | -- | 35+ | -| Feature/fix commits | -- | ~15 | +| Documentation commits | n/a | 35+ | +| Feature/fix commits | n/a | ~15 | That ratio (*35+ documentation and quality commits to ~15 feature commits*) is the defining characteristic of this release: diff --git a/docs/cli/backup.md b/docs/cli/backup.md deleted file mode 100644 index 04670a320..000000000 --- a/docs/cli/backup.md +++ /dev/null @@ -1,52 +0,0 @@ ---- -# / ctx: https://ctx.ist -# ,'`./ do you remember? -# `.,'\ -# \ Copyright 2026-present Context contributors. -# SPDX-License-Identifier: Apache-2.0 - -title: Backup -icon: lucide/archive ---- - -![ctx](../images/ctx-banner.png) - -### `ctx backup` - -Create timestamped tar.gz archives of project context and/or global -Claude Code data. Optionally copies archives to an SMB share via GVFS. - -```bash -ctx backup [flags] -``` - -**Flags**: - -| Flag | Description | -|-----------|----------------------------------------------------| -| `--scope` | Backup scope: `project`, `global`, or `all` (default: `all`) | -| `--json` | Output results as JSON | - -**Scopes**: - -| Scope | What's archived | -|-----------|-----------------------------------------------| -| `project` | `.context/`, `.claude/`, `ideas/`, `~/.bashrc` | -| `global` | `~/.claude/` (excludes `todos/`) | -| `all` | Both project and global (default) | - -**Environment**: - -| Variable | Purpose | -|-------------------------|--------------------------------------------------| -| `CTX_BACKUP_SMB_URL` | SMB share URL (e.g. 
`smb://host/share`) | -| `CTX_BACKUP_SMB_SUBDIR` | Subdirectory on share (default: `ctx-sessions`) | - -**Examples**: - -```bash -ctx backup # Back up everything (default: all) -ctx backup --scope project # Project context only -ctx backup --scope global # Global Claude data only -ctx backup --scope all --json # Both, JSON output -``` diff --git a/docs/cli/bootstrap.md b/docs/cli/bootstrap.md index 00274127f..06ee3b819 100644 --- a/docs/cli/bootstrap.md +++ b/docs/cli/bootstrap.md @@ -41,6 +41,6 @@ ctx system bootstrap -q # Just the context directory path ctx system bootstrap --json # Structured output for automation ``` -**Scripting tip**: `CTX_DIR=$(ctx system bootstrap -q)` is the -canonical way for skills and scripts to find the project's context -directory without hardcoding `.context/`. +**Note**: `-q` prints just the resolved directory path. See +[Activating a Context Directory](../recipes/activating-context.md) +if you hit a "*no context directory specified*" error. diff --git a/docs/cli/index.md b/docs/cli/index.md index 89507128f..19b3e519f 100644 --- a/docs/cli/index.md +++ b/docs/cli/index.md @@ -19,33 +19,51 @@ Complete reference for all `ctx` commands, grouped by function. All commands support these flags: -| Flag | Description | -|------------------------|-----------------------------------------------------------| -| `--help` | Show command help | -| `--version` | Show version | -| `--context-dir ` | Override context directory (default: `.context/`) | -| `--allow-outside-cwd` | Allow context directory outside current working directory | -| `--tool ` | Override active AI tool identifier (e.g. `kiro`, `cursor`) | - -**Initialization required.** Most commands require a `.context/` directory -created by `ctx init`. Running a command without one produces: - -``` -ctx: not initialized - run "ctx init" first -``` - -Commands that work before initialization: `ctx init`, `ctx setup`, -`ctx doctor`, and grouping commands that only show help. 
+| Flag | Description | +|-----------------|------------------------------------------------------------| +| `--help` | Show command help | +| `--version` | Show version | +| `--tool ` | Override active AI tool identifier (e.g. `kiro`, `cursor`) | + +**Context declaration required.** ctx does not walk the filesystem +looking for `.context/`. Every non-exempt command requires `CTX_DIR` +to be declared explicitly before it runs. The single declaration +channel is the environment variable: + +- `eval "$(ctx activate)"`: binds `CTX_DIR` for the current shell. +- `CTX_DIR=/abs/path/to/.context` exported in the environment, or + inlined as `CTX_DIR=/abs/path/to/.context ctx ` for a + one-shot. + +`CTX_DIR` must be an absolute path with `.context` as its basename. +Relative paths and other names are rejected on first use; the +basename guard catches the common footgun +(`export CTX_DIR=$(pwd)`) before stray writes can leak to the +project root. + +Commands fail fast with a linkable error +(see [Activating a Context Directory](../recipes/activating-context.md)) +when none is declared. The exempt allowlist (commands that run without +a declared context directory) is: `ctx init`, `ctx activate`, +`ctx deactivate`, `ctx version`, `ctx help`, `ctx system bootstrap`, +`ctx doctor`, `ctx guide`, `ctx why`, `ctx config switch/status`, +`ctx hub *`. + +**Initialization required.** Once declared, the target must already +have been initialized by `ctx init` (otherwise commands return +`ctx: not initialized`). 
## Getting Started -| Command | Description | -|-----------------------------------------------|----------------------------------------------------------| -| [`ctx init`](init-status.md#ctx-init) | Initialize `.context/` directory with templates | -| [`ctx status`](init-status.md#ctx-status) | Show context summary (files, tokens, drift) | -| [`ctx guide`](guide.md#ctx-guide) | Quick-reference cheat sheet | -| [`ctx why`](why.md#ctx-why) | Read the philosophy behind `ctx` | +| Command | Description | +|-----------------------------------------------------|----------------------------------------------------------| +| [`ctx init`](init-status.md#ctx-init) | Initialize `.context/` directory with templates | +| [`ctx activate`](init-status.md#ctx-activate) | Emit `export CTX_DIR=...` to bind context for the shell | +| [`ctx deactivate`](init-status.md#ctx-deactivate) | Emit `unset CTX_DIR` to clear the binding | +| [`ctx status`](init-status.md#ctx-status) | Show context summary (files, tokens, drift) | +| [`ctx guide`](guide.md#ctx-guide) | Quick-reference cheat sheet | +| [`ctx why`](why.md#ctx-why) | Read the philosophy behind `ctx` | ## Context @@ -107,7 +125,6 @@ Commands that work before initialization: `ctx init`, `ctx setup`, | Command | Description | |-----------------------------------------------|----------------------------------------------------------| | [`ctx config`](config.md#ctx-config) | Manage runtime configuration profiles | -| [`ctx backup`](backup.md#ctx-backup) | Back up context and Claude data to tar.gz / SMB | | [`ctx prune`](prune.md#ctx-prune) | Clean stale per-session state files | | [`ctx hook`](hook.md#ctx-hook) | Hook message, notification, and lifecycle controls | | [`ctx system`](system.md#ctx-system) | Hook plumbing and agent-only commands (not user-facing) | @@ -136,8 +153,6 @@ Commands that work before initialization: `ctx init`, `ctx setup`, |-------------------------|-----------------------------------------------------| | 
`CTX_DIR` | Override default context directory path | | `CTX_TOKEN_BUDGET` | Override default token budget | -| `CTX_BACKUP_SMB_URL` | SMB share URL for backups (e.g. `smb://host/share`) | -| `CTX_BACKUP_SMB_SUBDIR` | Subdirectory on SMB share (default: `ctx-sessions`) | | `CTX_SESSION_ID` | Active AI session ID (used by `ctx trace` for context linking) | @@ -147,7 +162,6 @@ Optional `.ctxrc` (*YAML format*) at project root: ```yaml # .ctxrc -context_dir: .context # Context directory name token_budget: 8000 # Default token budget priority_order: # File loading priority - TASKS.md @@ -156,7 +170,6 @@ priority_order: # File loading priority auto_archive: true # Auto-archive old items archive_after_days: 7 # Days before archiving tasks scratchpad_encrypt: true # Encrypt scratchpad (default: true) -allow_outside_cwd: false # Skip boundary check (default: false) event_log: false # Enable local hook event logging companion_check: true # Check companion tools at session start entry_count_learnings: 30 # Drift warning threshold (0 = disable) @@ -193,13 +206,11 @@ hooks: # Hook system configuration | Field | Type | Default | Description | |-------------------------|------------|----------------|----------------------------------------------------------------------------------------------------------------| -| `context_dir` | `string` | `.context` | Context directory name (relative to project root) | | `token_budget` | `int` | `8000` | Default token budget for `ctx agent` | | `priority_order` | `[]string` | *(all files)* | File loading priority for context packets | | `auto_archive` | `bool` | `true` | Auto-archive completed tasks | | `archive_after_days` | `int` | `7` | Days before completed tasks are archived | | `scratchpad_encrypt` | `bool` | `true` | Encrypt scratchpad with AES-256-GCM | -| `allow_outside_cwd` | `bool` | `false` | Skip boundary check for external context dirs | | `event_log` | `bool` | `false` | Enable local hook event logging to 
`.context/state/events.jsonl` | | `companion_check` | `bool` | `true` | Check companion tool availability (Gemini Search, GitNexus) during `/ctx-remember` | | `entry_count_learnings` | `int` | `30` | Drift warning when `LEARNINGS.md` exceeds this count | diff --git a/docs/cli/init-status.md b/docs/cli/init-status.md index 69ed52951..239903eb9 100644 --- a/docs/cli/init-status.md +++ b/docs/cli/init-status.md @@ -52,6 +52,77 @@ ctx init --force ctx init --merge ``` +After `ctx init` succeeds, the final output includes a hint showing +the exact `eval "$(ctx activate)"` line to bind the new directory +for your shell. Every other `ctx` command requires that binding +(or an equivalent direct `CTX_DIR=/abs/path/.context` export) before +it will run. + +--- + +### `ctx activate` + +Emit a shell-native `export CTX_DIR=...` line for the target +`.context/` directory. `ctx` does not walk the filesystem during +operating commands; every non-exempt command requires `CTX_DIR` +set before it will run. `activate` is the convenience that figures +out the path and lets you bind it with one line. + +```bash +# Walk up from CWD, emit if exactly one candidate visible. +eval "$(ctx activate)" +``` + +**Flags**: + +| Flag | Description | +|-----------|------------------------------------------------------------------------------------------| +| `--shell` | Shell dialect override. POSIX-family (`bash`, `zsh`, `sh`) all share one syntax today; the flag exists for future fish/nushell/powershell support. Auto-detected from `$SHELL`. | + +**Resolution**: + +| Candidate count from CWD | Behavior | +|--------------------------|--------------------------------------------------------------------------| +| Zero | Error. Use `ctx init` to create one, or `cd` closer to the project root. | +| One | Emit `export CTX_DIR=` for that candidate. | +| Two or more | Refuse. List every candidate. Re-run from a more specific cwd. 
| + +`activate` is args-free under the single-source-anchor model; the +explicit-path mode was removed because hub-client / hub-server +scenarios store at `~/.ctx/hub-data/` and never read `.context/`, +so they activate from the project root like everyone else. Direct +binding without a project-local scan is still available via +`export CTX_DIR=/abs/path/.context` or the inline form. + +If the parent shell already has `CTX_DIR` set to a different value, +the output gains a leading `# ctx: replacing stale CTX_DIR=...` +comment so the user sees the change in `eval` output before the +replacement takes effect. + +**See also**: [Activating a Context Directory](../recipes/activating-context.md) +for the full recipe including direnv setup and CI patterns. + +--- + +### `ctx deactivate` + +Emit a shell-native `unset CTX_DIR` line. Pairs with `activate`. + +```bash +eval "$(ctx deactivate)" +``` + +**Flags**: + +| Flag | Description | +|-----------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `--shell` | Shell dialect override. POSIX-family (`bash`, `zsh`, `sh`) all share one `unset` syntax today; the flag exists for future fish/nushell/powershell support. Auto-detected from `$SHELL`. | + +`deactivate` does not touch the filesystem, doesn't require a +declared context directory, and never fails under normal operation; +unsetting an already-unset variable is a no-op across supported +shells. + --- ### `ctx status` diff --git a/docs/cli/mcp.md b/docs/cli/mcp.md index b2976132c..bb4843d39 100644 --- a/docs/cli/mcp.md +++ b/docs/cli/mcp.md @@ -35,8 +35,9 @@ by MCP clients (Claude Desktop, Cursor, VS Code Copilot), **not run directly from a shell**. See [Configuration](#configuration) below for how each host launches it. -**Flags:** None. 
The server uses the configured context directory -(from `--context-dir`, `CTX_DIR`, `.ctxrc`, or the default `.context`). +**Flags:** None. The server uses the declared context directory +from `CTX_DIR`. As with every other ctx command, that variable +must be set: the server does not walk the filesystem. **Examples**: @@ -45,7 +46,7 @@ for how each host launches it. ctx mcp serve # Pin a context directory for a specific workspace -ctx --context-dir /path/to/project/.context mcp serve +CTX_DIR=/path/to/project/.context ctx mcp serve # Verify the binary starts without a client attached (Ctrl-C to exit) ctx mcp serve < /dev/null diff --git a/docs/cli/system.md b/docs/cli/system.md index e97ec15de..419200603 100644 --- a/docs/cli/system.md +++ b/docs/cli/system.md @@ -26,7 +26,6 @@ ctx system Several user-facing maintenance commands used to live under `ctx system` and were promoted to top-level: - - `ctx system backup` → **`ctx backup`** - `ctx system events` → **`ctx hook event`** - `ctx system message` → **`ctx hook message`** - `ctx system prune` → **`ctx prune`** @@ -113,7 +112,7 @@ UserPromptSubmit hooks: `context-load-gate`, `check-context-size`, `check-persistence`, `check-ceremony`, `check-journal`, `check-version`, `check-resource`, `check-knowledge`, `check-map-staleness`, `check-memory-drift`, `check-reminder`, `check-freshness`, -`check-hub-sync`, `check-backup-age`, `check-skill-discovery`, +`check-hub-sync`, `check-skill-discovery`, `heartbeat`. PreToolUse hooks: `block-non-path-ctx`, `block-dangerous-command`, diff --git a/docs/home/common-workflows.md b/docs/home/common-workflows.md index 7668611d8..ed0c46bfb 100644 --- a/docs/home/common-workflows.md +++ b/docs/home/common-workflows.md @@ -353,6 +353,7 @@ These have no CLI equivalent. They require the agent's reasoning. 
| `/ctx-reflect` | Pause and assess session progress | | `/ctx-consolidate` | Merge overlapping learnings or decisions | | `/ctx-prompt-audit` | Analyze prompting patterns for improvement | +| `/ctx-plan` | Stress-test an existing plan through adversarial interview | | `/ctx-plan-import` | Import Claude Code plan files into project specs | | `/ctx-implement` | Execute a plan step-by-step with verification | | `/ctx-worktree` | Manage parallel agent worktrees | @@ -390,7 +391,6 @@ These are infrastructure: used in scripts, CI, or one-time setup. | `ctx site` | Site management commands | | `ctx config` | Manage runtime configuration profiles | | `ctx system` | System diagnostics and hook commands | -| `ctx backup` | Back up context and Claude data to tar.gz / SMB | | `ctx completion` | Generate shell autocompletion scripts | !!! tip "Rule of Thumb" diff --git a/docs/home/configuration.md b/docs/home/configuration.md index 58518c04b..d11b49d87 100644 --- a/docs/home/configuration.md +++ b/docs/home/configuration.md @@ -43,9 +43,12 @@ my-project/ └── src/ ``` -`ctx` looks for `.ctxrc` in the current working directory when any command runs. -There is no global or user-level config file: Configuration is always -per-project. +`ctx` reads `.ctxrc` from the **project root** (*i.e. the parent of +`CTX_DIR`, or `dirname(CTX_DIR)/.ctxrc`*). It does not walk up from CWD. +That means whichever project you've activated via `eval "$(ctx activate)"` +(or by exporting `CTX_DIR` directly), its paired `.ctxrc` is what governs the +invocation. There is no global or user-level config file: configuration is +always per-project. !!! note "Contributors: Dev Configuration Profile" The ctx repo ships two `.ctxrc` source profiles (`.ctxrc.base` and @@ -53,13 +56,14 @@ per-project. via `ctx config switch dev` / `ctx config switch base`. See [Contributing: Configuration Profiles](contributing.md#configuration-profiles). -!!! 
tip "Using a Different .Context Directory" - The default `.context/` directory can be changed per-project via the - `context_dir` key in `.ctxrc`, the `CTX_DIR` environment variable, or the - `--context-dir` CLI flag. +!!! tip "Using a Different `.context` Directory" + The context directory is declared via the `CTX_DIR` environment variable; + not via `.ctxrc`. `ctx` does not walk the filesystem; every non-exempt + command requires `CTX_DIR` to be set. Use `eval "$(ctx activate)"` to + bind it for your shell. `CTX_DIR` must be an absolute path with + `.context` as its basename. - See [Environment Variables](#environment-variables) - and [CLI Global Flags](#cli-global-flags) below for details. + See [Environment Variables](#environment-variables) below for details. ### Full Reference @@ -73,12 +77,10 @@ A commented `.ctxrc` showing all options and their defaults: # All settings are optional. Missing values use defaults. # Priority: CLI flags > environment variables > .ctxrc > defaults # -# context_dir: .context # token_budget: 8000 # auto_archive: true # archive_after_days: 7 # scratchpad_encrypt: true -# allow_outside_cwd: false # event_log: false # entry_count_learnings: 30 # entry_count_decisions: 20 @@ -130,12 +132,10 @@ A commented `.ctxrc` showing all options and their defaults: | Option | Type | Default | Description | |-------------------------|------------|---------------|-------------------------------------------------------------------------------------------------------------------------------------------| -| `context_dir` | `string` | `.context` | Context directory name (relative to project root) | | `token_budget` | `int` | `8000` | Default token budget for `ctx agent` and `ctx load` | | `auto_archive` | `bool` | `true` | Auto-archive completed tasks during `ctx compact` | | `archive_after_days` | `int` | `7` | Days before completed tasks are archived | | `scratchpad_encrypt` | `bool` | `true` | Encrypt scratchpad with AES-256-GCM | -| 
`allow_outside_cwd` | `bool` | `false` | Allow context directory outside the current working directory | | `event_log` | `bool` | `false` | Enable local hook event logging to `.context/state/events.jsonl` | | `entry_count_learnings` | `int` | `30` | Drift warning when `LEARNINGS.md` exceeds this entry count (0 = disable) | | `entry_count_decisions` | `int` | `20` | Drift warning when `DECISIONS.md` exceeds this entry count (0 = disable) | @@ -180,10 +180,10 @@ behind this ordering. Environment variables override `.ctxrc` values but are overridden by CLI flags. -| Variable | Description | Equivalent `.ctxrc` key | -|--------------------|---------------------------------------------|-------------------------| -| `CTX_DIR` | Override the context directory path | `context_dir` | -| `CTX_TOKEN_BUDGET` | Override the default token budget | `token_budget` | +| Variable | Description | Equivalent `.ctxrc` key | +|--------------------|-------------------------------------------------------------|-------------------------| +| `CTX_DIR` | Declare the context directory path (required, no fallback) | *(none)* | +| `CTX_TOKEN_BUDGET` | Override the default token budget | `token_budget` | ### Examples @@ -203,22 +203,17 @@ CTX_TOKEN_BUDGET=16000 ctx agent CLI flags have the highest priority and override both environment variables and `.ctxrc` settings. These flags are available on every `ctx` command. -| Flag | Description | -|------------------------|-----------------------------------------------------------| -| `--context-dir ` | Override context directory (default: `.context/`) | -| `--allow-outside-cwd` | Allow context directory outside current working directory | -| `--tool ` | Override active AI tool identifier (e.g. 
`kiro`, `cursor`) | -| `--version` | Show version and exit | -| `--help` | Show command help and exit | +| Flag | Description | +|-----------------|------------------------------------------------------------| +| `--tool ` | Override active AI tool identifier (e.g. `kiro`, `cursor`) | +| `--version` | Show version and exit | +| `--help` | Show command help and exit | ### Examples ```bash -# Point to a different context directory: -ctx status --context-dir /path/to/shared/.context - -# Allow external context directory (skips boundary check): -ctx status --context-dir /mnt/nas/project-context --allow-outside-cwd +# Point to a different context directory inline: +CTX_DIR=/path/to/project/.context ctx status ``` --- @@ -233,20 +228,18 @@ CLI flags > Environment variables > .ctxrc > Built-in defaults (highest) (lowest) ``` -**Example resolution for `context_dir`:** - -| Layer | Value | Wins? | -|--------------------|--------------------|-------| -| `--context-dir` | `/tmp/ctx` | Yes | -| `CTX_DIR` | `/shared/context` | No | -| `.ctxrc` | `.my-context` | No | -| Default | `.context` | No | +The context directory itself is resolved differently: it lives *outside* +this priority chain. `CTX_DIR` (env) must be declared; `.ctxrc` does not +carry a fallback for it, and there is no built-in default. See +[Activating a Context Directory](../recipes/activating-context.md). -The CLI flag `/tmp/ctx` is used because it has the highest priority. +**Example resolution for `token_budget`:** -If the CLI flag were absent, `CTX_DIR=/shared/context` would win. If neither -the flag nor the env var were set, the `.ctxrc` value `.my-context` would be -used. With nothing configured, the default `.context` applies. +| Layer | Value | Wins? | +|--------------------|--------|-------| +| `CTX_TOKEN_BUDGET` | `4000` | Yes | +| `.ctxrc` | `8000` | No | +| Default | `8000` | No | --- @@ -254,14 +247,24 @@ used. With nothing configured, the default `.context` applies. 
### External `.context` Directory -Store context outside the project tree (*useful for monorepos or shared context*): +Store a project's context outside the project tree (*useful when a +repo is read-only, or when you want to keep notes adjacent rather +than checked in*). Declare the path via `CTX_DIR`: -```yaml -# .ctxrc -context_dir: /home/team/shared-context -allow_outside_cwd: true +```bash +export CTX_DIR=/home/you/ctx-stores/my-project/.context ``` +!!! warning "One `.context/` per project" + The parent of the context directory is the project root by + contract: `ctx sync`, `ctx drift`, and the memory-drift hook + all read the codebase from `filepath.Dir(ContextDir())`. + Pointing two projects at the same `.context/` directory will + collide their journals, state, and secrets. To share knowledge + (CONSTITUTION / CONVENTIONS / ARCHITECTURE) across projects, + use [`ctx hub`](../recipes/hub-overview.md), not a shared + `.context/`. + ### Custom Token Budget Increase the token budget for projects with large context: diff --git a/docs/home/contributing.md b/docs/home/contributing.md index a68a7926b..aa7609a70 100644 --- a/docs/home/contributing.md +++ b/docs/home/contributing.md @@ -139,7 +139,6 @@ never distributed to users. |------------------------------|---------------------------------------------------------------| | `/_ctx-absorb` | Merge deltas from a parallel worktree or separate checkout | | `/_ctx-audit` | Detect code-level drift after YOLO sprints or before releases | -| `/_ctx-backup` | Backup context and Claude data to SMB share | | `/_ctx-qa` | Run QA checks before committing | | `/_ctx-release` | Run the full release process | | `/_ctx-release-notes` | Generate release notes for `dist/RELEASE_NOTES.md` | @@ -349,17 +348,12 @@ See [Configuration](configuration.md) for the full `.ctxrc` option reference. 
### Backups -Back up project context and global Claude Code data with: - -```bash -ctx backup # both project + global (default) -ctx backup --scope project # .context/, .claude/, ideas/ only -ctx backup --scope global # ~/.claude/ only -``` - -Archives are saved to `/tmp/`. When `CTX_BACKUP_SMB_URL` is configured, -they are also copied to an SMB share. See -[CLI Reference: backup](../cli/backup.md) for details. +`ctx` does not ship a backup command. File-level backup is an OS / +infrastructure concern; `ctx hub` handles the cross-machine +knowledge persistence that matters most. For everything else, see +[Backup Strategy](../operations/runbooks/backup-strategy.md): +rsync, Time Machine, Borg, or whichever tool already handles the +rest of your files. ### Running Tests diff --git a/docs/operations/runbooks/architecture-exploration.md b/docs/operations/runbooks/architecture-exploration.md index 5783711d5..ebd8a9bc4 100644 --- a/docs/operations/runbooks/architecture-exploration.md +++ b/docs/operations/runbooks/architecture-exploration.md @@ -78,7 +78,7 @@ Save this as `.arch-explorer/PROMPT.md` and invoke with your agent. The prompt is self-contained: the agent reads the manifest, picks the next unit of work, executes it, updates tracking, and stops. -```` +~~~ You are an autonomous architecture exploration agent. Your job is to systematically build and evolve architecture documentation across all repositories in this workspace using ctx skills. @@ -142,22 +142,38 @@ focus as input upfront. ### Step 3: Do the Work -1. `cd` into the repo directory (`~/WORKSPACE/`) -2. If phase is `bootstrap`: - - Run `ctx init`, confirm `.context/` exists - - Then run `/ctx-architecture` (structural baseline) -3. If phase is `principal` or `frontier-*`: - - Run `/ctx-architecture` (add `principal` argument for principal phase) - - The skill will read existing artifacts and build on them -4. 
If phase is `enriched`: - - Verify GitNexus is connected: call `mcp__gitnexus__list_repos` - - Success = non-empty list returned with no error +1. `cd` into the sub-repo directory (`~/WORKSPACE/`, NOT + `~/WORKSPACE` itself). +2. Verify `CTX_DIR` already points at THIS sub-repo's `.context/`: + + ```bash + test "$CTX_DIR" = "$PWD/.context" || { + echo "STOP: CTX_DIR=$CTX_DIR but this sub-repo needs $PWD/.context." + echo "Re-launch the agent with CTX_DIR set to the sub-repo:" + echo " cd $PWD && CTX_DIR=\"\$PWD/.context\" claude --print 'Follow .arch-explorer/PROMPT.md' --allowedTools '*'" + exit 1 + } + ``` + + If it fails, STOP. The agent cannot change `CTX_DIR` for itself: + child shells and skill invocations inherit the parent Claude + process environment, which only the caller can control. Do not + proceed, do not run `ctx` commands, do not skip the check. +3. If phase is `bootstrap`: + - Run `ctx init`, confirm `.context/` exists. + - Then run `/ctx-architecture` (structural baseline). +4. If phase is `principal` or `frontier-*`: + - Run `/ctx-architecture` (add `principal` argument for principal phase). + - The skill will read existing artifacts and build on them. +5. If phase is `enriched`: + - Verify GitNexus is connected: call `mcp__gitnexus__list_repos`. + - Success = non-empty list returned with no error. - If GitNexus unavailable, log as `enriched-skipped` and advance - to `frontier-1` - - Run `/ctx-architecture-enrich` -5. If phase is a lens run (`lens-security`, etc.): + to `frontier-1`. + - Run `/ctx-architecture-enrich`. +6. If phase is a lens run (`lens-security`, etc.): - Run `/ctx-architecture` with lens focus prepended as instruction - (see lens table above for exact wording) + (see lens table above for exact wording). ### Step 4: Extract Results @@ -267,17 +283,24 @@ When every repo has reached its stopping condition, print: - portal: 0.87 convergence, 6 runs, 3 lenses ... 
``` -```` +~~~ --- ## Invocation +The caller MUST set `CTX_DIR` to the sub-repo the agent will work on. +The agent verifies this at Step 3.2 and stops if it does not match. +The wrapper reads the manifest to pick the current sub-repo, then +launches `claude` with `CTX_DIR` pinned to that sub-repo's `.context/`. + **Single run (safest for quota):** ```bash cd ~/WORKSPACE -claude --print "Follow .arch-explorer/PROMPT.md" --allowedTools '*' +REPO=$(jq -r '.repos[.current_repo_index]' .arch-explorer/manifest.json) +CTX_DIR="$PWD/$REPO/.context" \ + claude --print "Follow .arch-explorer/PROMPT.md" --allowedTools '*' ``` **Batch of N runs:** @@ -285,15 +308,18 @@ claude --print "Follow .arch-explorer/PROMPT.md" --allowedTools '*' ```bash cd ~/WORKSPACE for i in $(seq 1 5); do - claude --print "Follow .arch-explorer/PROMPT.md" --allowedTools '*' - echo "--- Run $i complete ---" + REPO=$(jq -r '.repos[.current_repo_index]' .arch-explorer/manifest.json) + CTX_DIR="$PWD/$REPO/.context" \ + claude --print "Follow .arch-explorer/PROMPT.md" --allowedTools '*' + echo "--- Run $i complete (repo: $REPO) ---" done ``` **Resume after interruption:** -Just run again. The manifest tracks state; the agent picks up where -it left off. +Just run the wrapper again. The manifest tracks state; the agent picks +up where it left off. `CTX_DIR` is recomputed from the manifest on +each invocation, so the right sub-repo is always bound. ## Tips @@ -313,3 +339,6 @@ it left off. - 2026-04-07: Original prompt created as `hack/agents/architecture-explorer.md`. - 2026-04-16: Moved to docs as a runbook for discoverability. +- 2026-04-20: Added `CTX_DIR` verification at Step 3.2 and per-invocation + `CTX_DIR` binding in the wrapper, so the agent writes artifacts to the + sub-repo's `.context/` instead of the inherited workspace one. 
diff --git a/docs/operations/runbooks/backup-strategy.md b/docs/operations/runbooks/backup-strategy.md new file mode 100644 index 000000000..3dbe5adf2 --- /dev/null +++ b/docs/operations/runbooks/backup-strategy.md @@ -0,0 +1,125 @@ +# Backup Strategy + +`ctx backup` was removed. File-level backup is not `ctx`'s +responsibility; your OS or a dedicated backup tool handles it +better and without locking you into a specific mount strategy. + +This runbook explains what to back up, how `ctx hub` reduces the +surface, and what options exist for the rest. + +## What To Back Up + +Per project: + +- `.context/`: all context files, journal, state, scratchpad. +- `.claude/`: Claude Code settings, hooks, skills specific to the + project. Skip this entry when it lives in git; the repo is the + backup. + +Per user: + +- `~/.ctx/`: global config, the encryption key (`~/.ctx/.ctx.key`), + hub data directory (if running a local hub). + +## How Hub Reduces Backup Needs + +`ctx hub` replicates the knowledge surface across machines: + +- `DECISIONS.md` +- `LEARNINGS.md` +- `CONVENTIONS.md` +- `CONSTITUTION.md` +- `ARCHITECTURE.md` +- Task items promoted to hub + +If you run `ctx hub` (as a server or by subscribing to someone +else's), the data that matters most survives losing any single +machine. + +## What Hub Does *Not* Replicate + +Hub is not a file-level backup. The following still live only on +the machine that produced them: + +- Journal entries (`.context/journal/*.md`) +- Runtime state (`.context/state/*`) +- Session event log (`.context/events.jsonl`) +- Scratchpad (`.context/.pad`) +- Encrypted notify/webhook config (`.context/.notify.enc`) +- The encryption key itself (`~/.ctx/.ctx.key`) + +If you need those to survive a disk failure, use a file-level +backup. + +## Example Strategies + +### 1. 
cron + rsync to NAS or External Drive + +```cron +# Daily at 03:00, mirror ~/WORKSPACE and ~/.ctx to NAS +0 3 * * * rsync -a --delete \ + --exclude='node_modules' \ + --exclude='dist' \ + --exclude='.context/state' \ + ~/WORKSPACE/ /mnt/nas/backup/workspace/ +0 3 * * * rsync -a --delete ~/.ctx/ /mnt/nas/backup/ctx-global/ +``` + +Adjust excludes for the trash you don't want to back up. The +`.context/state/` dir is ephemeral per-session; skip it. + +### 2. cron + cp to a Cloud-Synced Directory + +iCloud Drive, Dropbox, or any directory watched by a sync client: + +```cron +0 3 * * * cp -a ~/WORKSPACE/some-project/.context \ + ~/CloudDrive/ctx-backups/some-project/$(date +\%Y-\%m-\%d) +``` + +Daily snapshots, cloud provider handles the replication. + +### 3. Time Machine (macOS) + +If you already run Time Machine, ensure `~/WORKSPACE` and `~/.ctx` +are not in its exclusion list. Time Machine handles versioning; +you get point-in-time recovery for free. + +### 4. Borg or restic for Versioned Backups + +For deduplicated, versioned, encrypted backups: + +```bash +# Borg init (once) +borg init --encryption=repokey /mnt/nas/borg-ctx + +# Daily backup +borg create /mnt/nas/borg-ctx::'ctx-{now}' \ + ~/WORKSPACE ~/.ctx \ + --exclude '*/node_modules' \ + --exclude '*/.context/state' +``` + +Use `restic` if you prefer S3-compatible targets. + +## When You Still Need File-Level Backup Even With Hub + +- **Journal**: session histories are local-only until exported. +- **Scratchpad**: private notes, encrypted locally. +- **Encryption key**: losing `~/.ctx/.ctx.key` means losing access + to every encrypted file in every project. +- **Non-hub projects**: projects that never called `ctx hub + register` have zero cross-machine persistence. + +For these, pick one strategy above and forget about it. + +## Why ctx No Longer Ships a Backup Command + +Backup is inherently environment-specific: SMB, NFS, S3, rsync, +Time Machine, Borg, restic. Every user has a different story. 
The +previous `ctx backup` picked SMB via GVFS, which was Linux-only and +narrow. Chasing mount strategies would never generalize. + +Hub is the right answer for the data `ctx` owns (knowledge). For +everything else, your OS or a dedicated backup tool is the right +layer. diff --git a/docs/recipes/activating-context.md b/docs/recipes/activating-context.md new file mode 100644 index 000000000..55bff28a8 --- /dev/null +++ b/docs/recipes/activating-context.md @@ -0,0 +1,216 @@ +--- +title: "Activating a Context Directory" +icon: lucide/plug-zap +--- + +![ctx](../images/ctx-banner.png) + +## The Problem + +You ran a `ctx` command and got: + +``` +Error: no context directory specified for this project +``` + +This means ctx doesn't know which `.context/` directory to operate +on. It will not guess, and it will not walk up from your current +working directory looking for one; that behavior was removed +deliberately, because silent inference was the source of several +bugs (stray agent-created directories, cross-project bleed-through, +webhook-route misrouting, sub-agent fragmentation). Every `ctx` +command requires you to declare the target directory explicitly. + +This page shows you the three ways to do that and when to use each. + +## TL;DR + +If the project has already been initialized and you just need to +bind it for your shell: + +```bash +eval "$(ctx activate)" +``` + +That's 95% of the time. Add it to `.zshrc` / `.bashrc` per project +with direnv, or run it once per terminal. + +## When You See the Error + +The exact error message depends on how many `.context/` directories +are visible from the current directory: + +### Zero Candidates + +``` +Error: no context directory specified for this project +``` + +Either you haven't initialized this project yet (run `ctx init`) +or you're in a directory that doesn't belong to a ctx-tracked +project. If you know the project lives elsewhere, use one of the +declaration methods below with its absolute path. 
+ +### One Candidate + +``` +Error: no context directory specified; a likely candidate is at + /Users/you/repos/myproject/.context +``` + +ctx found a single `.context/` on the way up from here but won't +bind to it automatically. Run `eval "$(ctx activate)"` and ctx +will emit the `export` for the candidate. Or set `CTX_DIR` by hand. + +### Multiple Candidates + +``` +Error: no context directory specified; multiple candidates visible: + /Users/you/repos/myproject/.context + /Users/you/repos/myproject/packages/web/.context +``` + +You're inside nested projects. Pick the one you mean: + +```bash +ctx activate /Users/you/repos/myproject/.context +# …copy and paste the `export` line it prints, or wrap in eval: +eval "$(ctx activate /Users/you/repos/myproject/.context)" +``` + +## Three Ways to Declare + +### 1. `ctx activate` (Recommended for Shells) + +`ctx activate` emits a shell-native `export CTX_DIR=...` line to +stdout. Wrap it in `eval` and the binding takes effect for the +current shell: + +```bash +# Walk up from current dir and bind the single visible candidate: +eval "$(ctx activate)" + +# Bind a specific path explicitly: +eval "$(ctx activate /abs/path/to/.context)" + +# Clear the binding: +eval "$(ctx deactivate)" +``` + +`ctx activate` validates paths strictly: the target must exist, be +a directory, and contain at least one canonical context file +(`CONSTITUTION.md` or `TASKS.md`). It refuses to emit for multiple +upward candidates; pick one explicitly in that case. + +Under the hood, the emitted line is just: + +```bash +export CTX_DIR='/abs/path/to/.context' +``` + +So you can copy it into your `.zshrc` / `.bashrc` if you want the +binding permanent for a given shell setup. Better: use +[direnv](https://direnv.net/) with a per-project `.envrc`. + +### 2. 
`CTX_DIR` Env Var + +If you already know the path, export it directly: + +```bash +export CTX_DIR=/abs/path/to/.context +ctx status +``` + +`CTX_DIR` is the same variable `ctx activate` writes; `activate` +is just a convenience that figures out the path for you. + +### 3. Inline One-Shot + +For one-shot commands (CI jobs, scripts, debugging a specific +project without changing your shell state), prefix the binding +inline: + +```bash +CTX_DIR=/abs/path/to/.context ctx status +``` + +This binds `CTX_DIR` for that invocation only. + +`CTX_DIR` must be an absolute path with `.context` as its basename. +Relative paths and other names are rejected on first use; the +basename guard catches the common footgun +(`export CTX_DIR=$(pwd)`) before stray writes can leak to the +project root. + +## For CI and Scripts + +Do not rely on shell activation in automated flows. Set `CTX_DIR` +explicitly at the top of the script: + +```bash +#!/usr/bin/env bash +set -euo pipefail + +export CTX_DIR="$GITHUB_WORKSPACE/.context" +ctx status +ctx drift +``` + +## For Claude Code Users + +The ctx plugin's hooks are generated with +`CTX_DIR="$CLAUDE_PROJECT_DIR/.context"` prefixed to each command, +so hook-driven ctx invocations resolve correctly without any +per-session setup. You only need to activate manually when running +`ctx` yourself in a terminal. + +## One Project, One `.context/` + +The context directory is not a free-floating bag of files. It is +pinned to a project by contract: **`filepath.Dir(ContextDir())` is +the project root.** That parent directory is what `ctx sync`, +`ctx drift`, and the memory-drift hook scan for code, secret files, +and `MEMORY.md` respectively. + +The practical consequences: + +- **Don't share one `.context/` across multiple projects.** It holds + per-project journals, per-session state, and per-project secrets. + Pointing two codebases at the same directory corrupts all three. 
+- **If you want to share knowledge** (CONSTITUTION, CONVENTIONS, + ARCHITECTURE) across projects, use `ctx hub`. It cherry-picks + entries at the right granularity and keeps the per-project bits + where they belong. +- **The `CTX_DIR` you activate is implicitly a project-root + declaration.** Setting `CTX_DIR=/weird/place/.context` means + you're telling ctx the project root is `/weird/place/`. That's + your call to make; ctx does not police it. + +### Recommended Layout + +``` +~/WORKSPACE/my-to-do-list + ├── .git + ├── .context ← owned by this project; do not share + ├── ideas + │ └── ... + ├── Makefile + ├── Makefile.ctx + └── specs + └── ... +``` + +`.context/` sits at the project root, next to `.git`. `ctx activate` +binds to it; every ctx subsystem reads the project from its parent. + +## Why Not Walk Up Automatically? + +Nested projects, submodules, rogue agent-created `.context/` +directories, and sub-agent sessions all produced silent misrouting +under the old walk-up model. See the +[explicit-context-dir spec](https://github.com/ActiveMemory/ctx/blob/main/specs/explicit-context-dir.md) +and [the analysis doc](https://github.com/ActiveMemory/ctx/blob/main/specs/context-resolution-analysis.md) +for the full reasoning. + +The short version: ctx decided to stop guessing and require the +caller to declare. Every other decision flows from there. diff --git a/docs/recipes/customizing-hook-messages.md b/docs/recipes/customizing-hook-messages.md index 7109bc418..f708f14a0 100644 --- a/docs/recipes/customizing-hook-messages.md +++ b/docs/recipes/customizing-hook-messages.md @@ -183,7 +183,6 @@ from customization. These are the primary targets for override. 
| Hook | Variant | Description | |---------------------|------------|------------------------------------------| -| check-backup-age | warning | Backup staleness warning | | check-freshness | stale | Technology constant freshness warning | | check-ceremonies | both | Both ceremonies missing | | check-ceremonies | remember | Start-of-session ceremony | @@ -225,7 +224,6 @@ them, but `edit` will warn you first. | Hook | Variant | Variables | |--------------------------|------------------------|------------------------------------------------| -| check-backup-age | warning | `{{.Warnings}}` | | check-freshness | stale | `{{.StaleFiles}}` | | check-context-size | checkpoint | *(none)* | | check-context-size | oversize | `{{.TokenCount}}` | diff --git a/docs/recipes/external-context.md b/docs/recipes/external-context.md index dacb877f6..9a5bf7f23 100644 --- a/docs/recipes/external-context.md +++ b/docs/recipes/external-context.md @@ -7,8 +7,8 @@ icon: lucide/folder-symlink ## The Problem -`ctx` files contain project-specific **decisions**, **learnings**, -**conventions**, and **tasks**. By default, they live in +`ctx` files contain project-specific **decisions**, **learnings**, +**conventions**, and **tasks**. By default, they live in `.context/` inside the project tree, and that works well when the context can be public. @@ -19,42 +19,95 @@ But sometimes you need the context *outside* the project: repo. * **Compliance or IP concerns**: Context files reference sensitive design rationale that belongs in a separate access-controlled repository. -* **Personal preference**: You want a single context repo that covers - multiple projects, or you just prefer keeping notes separate from code. +* **Personal preference**: You want to keep notes separate from code. -`ctx` supports this through three configuration methods. This recipe shows how -to set them up and how to tell your AI assistant where to find the context. +`ctx` supports this by letting you point `CTX_DIR` anywhere. 
This recipe +shows how to set that up and how to tell your AI assistant where to find the +context. + +!!! warning "One `.context/` per project" + The parent of the context directory is the project root by contract. + `ctx sync`, `ctx drift`, and the memory-drift hook all read the + codebase at `filepath.Dir(ContextDir())`. Pointing two projects at + the same directory corrupts their journals, state, and secrets. To + share knowledge (CONSTITUTION / CONVENTIONS / ARCHITECTURE) across + projects, use [`ctx hub`](hub-overview.md), not a shared `.context/`. ## TL;DR -First `--allow-outside-cwd` in your project: +Create the external context directory, initialize it, and bind it: ```bash -mkdir ~/repos/myproject-context && cd ~/repos/myproject-context && git init +mkdir -p ~/repos/myproject-context && cd ~/repos/myproject-context && git init cd ~/repos/myproject -ctx --context-dir ~/repos/myproject-context --allow-outside-cwd init + +# Bind CTX_DIR to the external location, then init creates files there. +export CTX_DIR=~/repos/myproject-context/.context +ctx init ``` -Then, [create a `.ctxrc`](../home/configuration.md) in your **project root** -to specify the new `.context` folder location: +All `ctx` commands now use the external directory. If you share the +setup across shells, add the `export CTX_DIR=...` line to your +shell rc, or source a per-project `.envrc` with direnv. -```yaml -context_dir: ~/repos/myproject-context -allow_outside_cwd: true +## What Works, What Quietly Degrades + +The single-source-anchor contract states that +`filepath.Dir(CTX_DIR)` is the project root. When the context +lives outside the project tree, ctx still resolves correctly for +every operation that reads or writes inside `.context/`. 
But any +operation that scans the **codebase** scans the wrong tree, and +does so silently: + +| Operation | Behavior with external `.context/` | +|---------------------------------|---------------------------------------------------| +| `ctx status`, `agent`, `add` | ✅ Works. Operates on files inside `CTX_DIR`. | +| Journal, scratchpad, hub | ✅ Works. Same reason. | +| `ctx sync` | ⚠️ Scans the *context repo*, not the code repo. | +| `ctx drift` | ⚠️ Same. Reports nothing useful. | +| Memory-drift hook (`MEMORY.md`) | ⚠️ Looks for `MEMORY.md` next to the external `.context/`, not the code. | + +Nothing errors. The code-aware operations just find an empty or +unrelated tree where the project root should be. + +### Workaround: symlink the `.context/` into the code tree + +If you want both the privacy of an external git repo *and* working +`ctx sync` / `drift` / memory-drift, symlink the external +`.context/` into the code repo and point `CTX_DIR` at the symlink: + +```bash +# External repo holds the real files +mkdir -p ~/repos/myproject-context && cd ~/repos/myproject-context && git init + +# Symlink it into the code repo +ln -s ~/repos/myproject-context/.context ~/repos/myproject/.context + +# Bind CTX_DIR to the symlink path; ctx init will follow it +export CTX_DIR=~/repos/myproject/.context +ctx init ``` -All `ctx` commands now use the external directory automatically. +Now `filepath.Dir(CTX_DIR)` is the **code repo**, so code-aware +operations scan the right tree. The actual files still live in +the external repo and commit there. Add `.context` to the code +repo's `.gitignore` (or `.git/info/exclude`) so the symlink itself +isn't tracked by the code repo. + +The basename guard is permissive about symlinks: it checks the +declared name, not the resolved target, so a `.context` symlink +pointing anywhere is accepted as long as the declared basename is +`.context`. 
## Commands and Skills Used -| Tool | Type | Purpose | -|-----------------------|--------------|-----------------------------------------| -| `ctx init` | CLI command | Initialize context directory | -| `--context-dir` | Global flag | Point ctx at a non-default directory | -| `--allow-outside-cwd` | Global flag | Permit context outside the project root | -| `.ctxrc` | Config file | Persist the context directory setting | -| `CTX_DIR` | Env variable | Override context directory per-session | -| `/ctx-status` | Skill | Verify context is loading correctly | +| Tool | Type | Purpose | +|-----------------|--------------|-----------------------------------------| +| `ctx init` | CLI command | Initialize context directory | +| `ctx activate` | CLI command | Emit `export CTX_DIR=...` for the shell | +| `CTX_DIR` | Env variable | Declare context directory per-session | +| `.ctxrc` | Config file | Per-project configuration | +| `/ctx-status` | Skill | Verify context is loading correctly | ## The Workflow @@ -65,100 +118,79 @@ a private GitHub repo, a shared drive, a sibling directory: ```bash # Create the context repo -mkdir ~/repos/myproject-context +mkdir -p ~/repos/myproject-context cd ~/repos/myproject-context git init ``` ### Step 2: Initialize ctx Pointing at It -From your project root, initialize ctx with `--context-dir` pointing to the -external location. Because the directory is outside your project tree, you also -need `--allow-outside-cwd`: +From your project root, declare `CTX_DIR` pointing to the external +location, then initialize: ```bash cd ~/repos/myproject -ctx --context-dir ~/repos/myproject-context \ - --allow-outside-cwd \ - init +CTX_DIR=~/repos/myproject-context/.context ctx init ``` -This creates the full `.context/`-style file set inside +This creates the canonical `.context/` file set inside `~/repos/myproject-context/` instead of `~/repos/myproject/.context/`. -!!! 
warning "Boundary Validation" - `ctx` validates that the `.context` directory is within the current working - directory. - - If your external directory is truly outside the project root: - - * Either every `ctx` command needs `--allow-outside-cwd`, - * or you can persist the setting in `.ctxrc` (*next step*). - ### Step 3: Make It Stick -Typing `--context-dir` and `--allow-outside-cwd` on every command is tedious. -Pick one of these methods to make the configuration permanent. - -#### Option A: `.ctxrc` (*Recommended*) - -Create a `.ctxrc` file in your project root: +Declaring `CTX_DIR` on every command is tedious. Pick one of these +methods to make the configuration permanent. The context directory +itself must be declared via `CTX_DIR`; `.ctxrc` does not carry the +path. -```yaml -# .ctxrc: committed to the project repo -context_dir: ~/repos/myproject-context -allow_outside_cwd: true -``` - -ctx reads `.ctxrc` automatically. Every command now uses the external -directory without extra flags: +#### Option A: `CTX_DIR` Environment Variable (*Recommended*) ```bash -ctx status # reads from ~/repos/myproject-context -ctx add learning "Redis MULTI doesn't roll back on error" \ - --session-id abc12345 --branch main --commit 68fbc00a -``` - -!!! tip "Commit `.ctxrc`" - `.ctxrc` belongs in the project repo. It contains no secrets: It's just a - path and a boundary override. +# Direct path. Works for ctx status / agent / add but degrades +# code-aware operations. See "What Works, What Quietly Degrades". +export CTX_DIR=~/repos/myproject-context/.context - `.ctxrc` lets teammates share the same configuration. - -#### Option B: `CTX_DIR` Environment Variable - -Good for CI pipelines, temporary overrides, or when you don't want to commit -a `.ctxrc`: - -```bash -# In your shell profile (~/.bashrc, ~/.zshrc) -export CTX_DIR=~/repos/myproject-context +# Or, with the symlink approach above, point at the symlink path +# inside the code repo so code-aware operations stay healthy. 
+export CTX_DIR=~/repos/myproject/.context ``` -Or for a single session: +Put either form in your shell profile (`~/.bashrc`, `~/.zshrc`) +or a direnv `.envrc`. -```bash -CTX_DIR=~/repos/myproject-context ctx status -``` +For a single session, run `eval "$(ctx activate)"` from any +directory inside the project where exactly one `.context/` +candidate is visible (the symlink counts). To bind a specific +path, pass it as an argument instead: +`eval "$(ctx activate /abs/path/to/.context)"`. -#### Option C: Shell Alias +#### Option B: `.ctxrc` for Other Settings -If you prefer a shell alias over `.ctxrc`: +Put any settings (token budget, priority order, freshness files) in a +`.ctxrc` at the project root (`dirname(CTX_DIR)`), which here is the +parent of the external `.context/`: -```bash -# ~/.bashrc or ~/.zshrc -alias ctx='ctx --context-dir ~/repos/myproject-context --allow-outside-cwd' +```yaml +# ~/repos/myproject-context/.ctxrc +token_budget: 16000 ``` +`.ctxrc` is always read from the parent of `CTX_DIR`, so this file is +picked up whenever `CTX_DIR` points at +`~/repos/myproject-context/.context`. + +#### Resolution -When multiple methods are set, `ctx` resolves the context directory in this -order (*highest priority first*): +`ctx` reads the context directory from a single channel: the +`CTX_DIR` environment variable. When `CTX_DIR` is unset, `ctx` +errors with a "no context directory specified" hint pointing at +`ctx activate` and this recipe. When set, the value must be an +absolute path with `.context` as its basename; relative paths and +other names are rejected on first use. -1. `--context-dir` flag -2. `CTX_DIR` environment variable -3. `context_dir` in `.ctxrc` -4. Default: `.context/` +See +[Activating a Context Directory](activating-context.md) for the full +recipe. 
### Step 4: Agent Auto-Discovery via Bootstrap @@ -171,40 +203,40 @@ $ ctx system bootstrap ctx system bootstrap ==================== -context_dir: /home/user/repos/myproject-context +context_dir: /home/user/repos/myproject-context/.context Files: CONSTITUTION.md, TASKS.md, DECISIONS.md, ... ``` The `CLAUDE.md` template generated by `ctx init` already instructs the agent to -run `ctx system bootstrap` at session start. Because `.ctxrc` is in the -project root, your agent inherits the external path automatically via -the `ctx system bootstrap` call instruction. +run `ctx system bootstrap` at session start. Because `CTX_DIR` is inherited +by child processes, your agent picks up the external path automatically. Here is the relevant section from `CLAUDE.md` for reference: ```markdown 1. **Run `ctx system bootstrap`**: CRITICAL, not optional. - This tells you where the context directory is. If it fails or returns - no context_dir, STOP and warn the user. + This tells you where the context directory is. If it returns any + error, relay the error output to the user verbatim, point them at + https://ctx.ist/recipes/activating-context/ for setup, and STOP. + Do not try to recover; the user decides. ``` -Moreover, every nudge (*context checkpoint, persistence reminder, etc.*) also -includes a `Context: /home/user/repos/myproject-context` footer, so the agent -remains anchored to the correct directory even in long sessions. +Moreover, every nudge (*context checkpoint, persistence reminder, etc.*) also +includes a `Context: /home/user/repos/myproject-context/.context` footer, so +the agent remains anchored to the correct directory even in long sessions. 
-If you use `CTX_DIR` instead of `.ctxrc`, export it in your shell -profile so the hook process inherits it: +Export `CTX_DIR` in your shell profile so every hook process inherits it: ```bash -export CTX_DIR=~/repos/myproject-context +export CTX_DIR=~/repos/myproject-context/.context ``` ### Step 5: Share with Teammates -Teammates clone both repos and set up `.ctxrc`: +Teammates clone both repos and export `CTX_DIR`: ```bash # Clone the project @@ -213,16 +245,10 @@ cd myproject # Clone the private context repo git clone git@github.com:org/myproject-context.git ~/repos/myproject-context +export CTX_DIR=~/repos/myproject-context/.context ``` -If `.ctxrc` is already committed to the project, they're done: `ctx` -commands will find the external context automatically. - -If teammates use different paths, each developer sets their own `CTX_DIR`: - -```bash -export CTX_DIR=~/my-own-path/myproject-context -``` +If teammates use different paths, each developer sets their own `CTX_DIR`. For encryption key distribution across the team, see the [Syncing Scratchpad Notes](scratchpad-sync.md) recipe. @@ -230,7 +256,7 @@ For encryption key distribution across the team, see the ### Step 6: Day-to-Day Sync The external context repo has its own git history. Treat it like any other -repo: Commit and push after sessions: +repo: commit and push after sessions: ```bash cd ~/repos/myproject-context @@ -263,9 +289,9 @@ You don't need to remember the flags; simply ask your assistant: ```text You: "Set up ctx to use ~/repos/myproject-context as the context directory." -Agent: "I'll create a .ctxrc in the project root pointing to that path. - I'll also update CLAUDE.md so future sessions know where to find - context. Want me to initialize the context files there too?" +Agent: "I'll set CTX_DIR to that path, run ctx init to materialize + it, and show you the export line to add to your shell + profile. Want me to seed the core context files too?" 
``` ### Configure Separate Repo for `.context` Folder Using Natural Language @@ -273,7 +299,7 @@ Agent: "I'll create a .ctxrc in the project root pointing to that path. ```text You: "My context is in a separate repo. Can you load it?" -Agent: [reads .ctxrc, finds the path, loads context from the external dir] +Agent: [reads CTX_DIR, loads context from the external dir] "Loaded. You have 3 pending tasks, last session was about the auth refactor." ``` @@ -286,12 +312,10 @@ Agent: [reads .ctxrc, finds the path, loads context from the external dir] The default `.context/` in-tree is the easiest path. Move to an external repo when you have a concrete reason. * **One context repo per project**. Sharing a single context directory across - multiple projects creates confusion. Keep the mapping 1:1. -* **Use `.ctxrc` over env vars** when the path is stable. It's committed, - documented, and works for the whole team without per-developer shell setup. -* **Don't forget the boundary flag**. The most common error is - `Error: context directory is outside the project root`. Set - `allow_outside_cwd: true` in `.ctxrc` or pass `--allow-outside-cwd`. + multiple projects corrupts journals, state, and secrets. Use `ctx hub` for + cross-project knowledge sharing. +* **Export `CTX_DIR` in your shell profile** so hooks and tools inherit the + path without per-command flags. * **Commit both repos at session boundaries**. Context without code history (*or code without context history*) loses half the value. @@ -307,5 +331,4 @@ full ctx session from start to finish. 
* [Setting Up ctx Across AI Tools](multi-tool-setup.md): initial setup recipe * [Syncing Scratchpad Notes Across Machines](scratchpad-sync.md): distribute encryption keys when context is shared -* [CLI Reference](../cli/index.md): all global flags including - `--context-dir` and `--allow-outside-cwd` +* [CLI Reference](../cli/index.md): full command list and global options diff --git a/docs/recipes/hook-output-patterns.md b/docs/recipes/hook-output-patterns.md index 01a1ab621..9431c1372 100644 --- a/docs/recipes/hook-output-patterns.md +++ b/docs/recipes/hook-output-patterns.md @@ -99,7 +99,6 @@ what they asked: Stale backups, unimported sessions, resource warnings. * `ctx system check-context-size`: Context capacity warning * `ctx system check-resources`: Resource pressure (memory, swap, disk, load): `DANGER` only * `ctx system check-freshness`: Technology constant staleness warning -* `check-backup-age.sh`: Stale backup warning (*project-local*) **Trade-off**: Noisy if overused. Every VERBATIM relay adds a preamble before the agent's actual answer. Throttle with once-per-day markers or diff --git a/docs/recipes/hook-sequence-diagrams.md b/docs/recipes/hook-sequence-diagrams.md index 6717e0a67..2dde1896a 100644 --- a/docs/recipes/hook-sequence-diagrams.md +++ b/docs/recipes/hook-sequence-diagrams.md @@ -770,40 +770,6 @@ sequenceDiagram Hook->>Hook: NudgeAndRelay(message) ``` -### Check-Backup-Age - -Lifecycle: UserPromptSubmit. - -Daily check for SMB mount and backup freshness. 
- -```mermaid -sequenceDiagram - participant CC as Claude Code - participant Hook as check-backup-age - participant State as .context/state/ - participant FS as Filesystem - participant Tpl as Message Template - - CC->>Hook: stdin {session_id} - Hook->>Hook: Check initialized + HookPreamble - alt not initialized or paused - Hook-->>CC: (silent exit) - end - Hook->>State: Check daily throttle marker - alt throttled - Hook-->>CC: (silent exit) - end - Hook->>FS: Check SMB mount (if env var set) - Hook->>FS: Check backup marker file age - alt no warnings - Hook-->>CC: (silent exit) - end - Hook->>Tpl: LoadMessage(hook, warning, {Warnings}) - Hook-->>CC: Nudge box (warnings) - Hook->>Hook: NudgeAndRelay(message) - Hook->>State: Touch throttle marker -``` - --- ## Throttling Summary @@ -829,7 +795,6 @@ sequenceDiagram | check-version | UserPromptSubmit | Daily marker | Once per day | | heartbeat | UserPromptSubmit | None | Every prompt | | block-dangerous-commands | PreToolUse * | None | Every match | -| check-backup-age | UserPromptSubmit * | Daily marker | Once per day | \* Project-local hook (settings.local.json), not shipped with ctx. @@ -843,7 +808,6 @@ All state files live in `.context/state/`. | `ctx-paused-{session}` | (all) | Session pause marker | | `ctx-wrapped-up` | check-context-size | Suppress nudges after wrap-up (2h expiry) | | `freshness-checked` | check-freshness | Daily throttle | -| `backup-reminded` | check-backup-age | Daily throttle | | `ceremony-reminded` | check-ceremonies | Daily throttle | | `journal-reminded` | check-journal | Daily throttle | | `knowledge-reminded` | check-knowledge | Daily throttle | diff --git a/docs/recipes/index.md b/docs/recipes/index.md index b9d3229ae..791318b95 100644 --- a/docs/recipes/index.md +++ b/docs/recipes/index.md @@ -47,8 +47,7 @@ Store context files **outside** the project tree: in a private repo, shared directory, or anywhere else. 
Useful for open source projects with private context or **multi-repo** setups. -**Uses**: `ctx init`, `--context-dir`, `--allow-outside-cwd`, -`.ctxrc`, `/ctx-status` +**Uses**: `ctx init`, `CTX_DIR`, `.ctxrc`, `/ctx-status` --- @@ -334,6 +333,19 @@ Each step produces an artifact that feeds the next. --- +### [Scrutinizing a Plan](scrutinizing-a-plan.md) + +Once a plan exists, run an **adversarial interview** to surface what's +weak, missing, or unexamined before you commit. Walks the plan +depth-first: assumptions, failure modes, alternatives, sequencing, +reversibility. The complement to brainstorm: brainstorm produces +plans, this attacks them. + +**Uses**: `/ctx-plan`, `/ctx-spec`, `/ctx-decision-add`, +`/ctx-learning-add` + +--- + ## Agents and Automation ### [Building Project Skills](building-skills.md) diff --git a/docs/recipes/multi-tool-setup.md b/docs/recipes/multi-tool-setup.md index 3620a5936..aa3299de9 100644 --- a/docs/recipes/multi-tool-setup.md +++ b/docs/recipes/multi-tool-setup.md @@ -81,15 +81,31 @@ This produces the following structure: AGENT_PLAYBOOK.md # How AI tools should use this system ``` -!!! tip "Using a Different `.context` Directory" - The `.context/` directory doesn't have to live inside your project. You can - point `ctx` to an external folder via `.ctxrc`, the `CTX_DIR` environment - variable, or the `--context-dir` CLI flag. - - This is useful for monorepos or shared context across repositories. - - See [Configuration](../home/configuration.md#environment-variables) for - details and [External Context](external-context.md) for a full recipe. +!!! note "Using a Different `.context` Directory" + The `.context/` directory doesn't have to live inside your project. Point + `ctx` to an external folder by exporting `CTX_DIR` (the only + declaration channel). + + Useful when context must stay private while the code is public, or + when you want to commit notes to a separate repo. 
+ + **Caveats** (the recipe covers both with workarounds): + + * **Code-aware operations degrade silently.** `ctx sync`, `ctx drift`, + and the memory-drift hook read the codebase from + `dirname(CTX_DIR)`. With an external `.context/`, that's the + context repo, not your code repo. They scan the wrong tree without + erroring. The recipe shows a symlink workaround that keeps both + healthy. + * **One `.context/` per project, always.** Sharing one directory + across multiple projects corrupts journals, state, and secrets. + For cross-project knowledge sharing (CONSTITUTION, CONVENTIONS, + ARCHITECTURE, etc.) use [`ctx hub`](hub-overview.md), not a + shared `.context/`. + + See [External Context](external-context.md) for the full recipe + and [Configuration](../home/configuration.md#environment-variables) + for the resolver details. For Claude Code, install the **ctx plugin** to get hooks and skills: diff --git a/docs/recipes/scrutinizing-a-plan.md b/docs/recipes/scrutinizing-a-plan.md new file mode 100644 index 000000000..c5a555819 --- /dev/null +++ b/docs/recipes/scrutinizing-a-plan.md @@ -0,0 +1,99 @@ +--- +title: Scrutinizing a Plan +--- + +# Scrutinizing a Plan + +When you have a plan and want it attacked, not validated, the +`/ctx-plan` skill runs an adversarial interview. It surfaces what's +weak, missing, or unexamined *before* you commit. + +## When to Use It + +- Before starting a multi-day implementation. +- After writing a spec but before opening the first PR. +- When the team aligned suspiciously fast on a complex change. +- When you've drafted something and the obvious next step is to + hand it to a senior reviewer. + +`/ctx-plan` is not a brainstorming tool. Use +[`/ctx-brainstorm`](design-before-coding.md) for that. `/ctx-plan` +assumes the plan exists and asks: **does it survive contact with +reality?** + +## Stop Conditions + +The skill runs depth-first through assumptions, tradeoffs, and +failure modes. 
Stop when one of these is true: + +- **Plan converges.** The bet is clear, the sequencing is justified, + and you can articulate what would falsify it. +- **Plan dissolves.** The interview surfaces a load-bearing + assumption you can't defend; the plan needs rework, not more + scrutiny. +- **You hit information you don't have.** Pause, gather it, + resume. + +If the agent loops on a single angle (asking variations of the same +question), say so. The skill is designed to walk depth-first, not +exhaust one branch. + +## Worked Example + +```text +You: /ctx-plan + +I want to migrate our queue from RabbitMQ to NATS. I've drafted a +two-week plan: spike NATS in staging, port one consumer, port the +rest, decommission RabbitMQ. + +Agent: One question at a time. What breaks if your message ordering + guarantees aren't equivalent across the two systems? + +You: NATS gives us per-subject ordering by default; we don't actually + need cross-subject ordering, so I think we're fine. + +Agent: How would you notice if you were wrong? What's the loudest + symptom of a stray out-of-order message in production today? + +You: ...probably a duplicate notification. Which we already see + occasionally. So we wouldn't notice immediately. + +Agent: Then your migration's correctness check needs to be stronger + than "no incidents." Before you port consumer two, what + evidence would convince you that consumer one's ordering + behavior is correct under load? + +You: Right. Add a dedicated assertion test in staging that replays + a known-out-of-order sequence and verifies the consumer + dedupes correctly. Otherwise we're just hoping. + +Agent: Good. Next: what would change your mind about NATS itself? + What rejected alternative is closest to coming back? +``` + +The interview keeps going until the bet is articulated, the +falsifiable conditions are written down, and the rejected +alternatives have a recall trigger. + +## Output + +`/ctx-plan` produces a clearer plan, not a document. 
Persist the +deltas via: + +- **`/ctx-spec`** if the conclusions belong in a feature spec. +- **`/ctx-decision-add`** if a tradeoff resolved into an + architectural decision. +- **`/ctx-learning-add`** if you discovered a project-specific + gotcha during the interview. + +The skill itself is in +`internal/assets/claude/skills/ctx-plan/SKILL.md`; the working +contract lives there, the recipe is the on-ramp. + +## See Also + +- [Design Before Coding](design-before-coding.md): the + brainstorming counterpart, used *before* a plan exists. +- [`ctx-spec`](../cli/index.md): scaffolds a feature spec from + the project template. diff --git a/docs/recipes/session-lifecycle.md b/docs/recipes/session-lifecycle.md index b471db09f..cd7bb5e4c 100644 --- a/docs/recipes/session-lifecycle.md +++ b/docs/recipes/session-lifecycle.md @@ -29,6 +29,21 @@ persisting context before you close it, so you can see how each piece connects. Read on for the full walkthrough with examples. +!!! note "Before You Start: Activate the Project" + ctx commands (and the skills that call them) require `CTX_DIR` to be + declared for the shell you're working in; `ctx` does not walk the + filesystem to find `.context/`. Once per shell (or via your shell + rc / direnv): + + ```bash + eval "$(ctx activate)" + ``` + + If you skip this, every skill below will surface an error naming + the fix. See + [Activating a Context Directory](activating-context.md) for the + full recipe. + !!! note "What Is a Readback?" A **readback** is a **structured summary** where the agent plays back what it knows: @@ -437,7 +452,7 @@ Conversational equivalents: you can drive the same lifecycle with plain language | Load | `/ctx-remember` | "Do you remember?" / "What were we working on?" | | Orient | `/ctx-status` | "How's our context looking?" | | Pick | `/ctx-next` | "What should we work on?" 
/ "Let's do the caching task" | -| Work | -- | "Only change files in internal/cache/" | +| Work | *(none)* | "Only change files in internal/cache/" | | Commit | `/ctx-commit` | "Commit this" / "Ship it" | | Reflect | `/ctx-reflect` | "What did we learn?" / *(agent offers at milestones)* | | Wrap up | `/ctx-wrap-up` | *(use the slash command for completeness)* | diff --git a/docs/recipes/troubleshooting.md b/docs/recipes/troubleshooting.md index c299f9943..c6d86650b 100644 --- a/docs/recipes/troubleshooting.md +++ b/docs/recipes/troubleshooting.md @@ -137,14 +137,34 @@ QA reminder events from that specific session. ## Common Problems -### "ctx: Not Initialized" +### "No context directory specified for this project" **Symptoms**: Any `ctx` command fails with +`Error: no context directory specified for this project` (*possibly +with a likely-candidate hint or a candidate list depending on what's +visible from your CWD*). + +**Cause**: `ctx` does not walk the filesystem. It requires the target +`.context/` directory to be declared explicitly before any non-exempt +command runs. + +**Fix**: bind `CTX_DIR` for the current shell: + +```bash +eval "$(ctx activate)" +``` + +See [Activating a Context Directory](activating-context.md) for the +full recipe (one-shot `CTX_DIR=...` inline form, CI patterns, direnv +setup). + +### "ctx: Not Initialized" + +**Symptoms**: After declaring `CTX_DIR`, the command fails with `ctx: not initialized - run "ctx init" first`. -**Cause**: You're running ctx in a directory without an initialized -`.context/` directory. This guard runs on all user-facing commands to -prevent confusing downstream errors. +**Cause**: The declared directory exists but hasn't been initialized +with template files. 
**Fix**: @@ -153,8 +173,31 @@ ctx init # create .context/ with template files ctx init --minimal # or just the essentials (CONSTITUTION, TASKS, DECISIONS) ``` -**Commands that work without initialization**: `ctx init`, `ctx setup`, -`ctx doctor`, and help-only grouping commands (`ctx`, `ctx system`). +**Commands that work without CTX_DIR or initialization**: `ctx init`, +`ctx activate`, `ctx deactivate`, `ctx setup`, `ctx doctor`, +`ctx guide`, `ctx why`, `ctx config switch/status`, `ctx hub *`, and +help-only grouping commands. + +### "My CLI and My Claude Code Session Disagree on the Project" + +**Symptoms**: A `!`-pragma or interactive `ctx` call writes to the +wrong `.context/`; or you ran `ctx remind add` in shell A and the +reminder shows up in project B's notifications. + +**Cause**: `CTX_DIR` is sourced from three different surfaces, and +they can drift apart: + +| Surface | Source of `CTX_DIR` | Bound when | +|------------------------------------|---------------------------------------------|-----------------------------------------| +| Claude Code hooks | `${CLAUDE_PROJECT_DIR}/.context` (injected) | Every hook line; the project Claude is in | +| `!`-pragma in chat / interactive shell | Whatever the parent shell exported | When you ran `eval "$(ctx activate)"` | +| New shell tab opened mid-session | Whatever your shellrc exports | Login | + +When these drift, the per-prompt `check-anchor-drift` hook fires a +verbatim warning naming both values. To fix: re-run +`eval "$(ctx activate)"` from inside the project the Claude Code +session is editing, or close the shell tab and reopen it from the +right working directory. 
### "My Hook Isn't Firing" diff --git a/docs/reference/session-journal.md b/docs/reference/session-journal.md index a697cadf5..0d95e8bf1 100644 --- a/docs/reference/session-journal.md +++ b/docs/reference/session-journal.md @@ -394,8 +394,8 @@ import → enrich → rebuild |--------------|----------------------------|-----------------------------------------|------------------------------------| | **Import** | `ctx journal import --all` | Converts session JSONL to Markdown | File already exists (safe default) | | **Enrich** | `/ctx-journal-enrich` | Adds frontmatter, summaries, topics | Frontmatter already present | -| **Rebuild** | `ctx journal site --build` | Generates static HTML site | -- | -| **Obsidian** | `ctx journal obsidian` | Generates Obsidian vault with wikilinks | -- | +| **Rebuild** | `ctx journal site --build` | Generates static HTML site | *(never)* | +| **Obsidian** | `ctx journal obsidian` | Generates Obsidian vault with wikilinks | *(never)* | !!! tip "One-Command Pipeline" `/ctx-journal-enrich-all` handles import automatically - it detects diff --git a/docs/security/design.md b/docs/security/design.md new file mode 100644 index 000000000..e4f56fa34 --- /dev/null +++ b/docs/security/design.md @@ -0,0 +1,183 @@ +--- +# / ctx: https://ctx.ist +# ,'`./ do you remember? +# `.,'\ +# \ Copyright 2026-present Context contributors. +# SPDX-License-Identifier: Apache-2.0 + +title: Security Design +icon: lucide/shield-half +--- + +![ctx](../images/ctx-banner.png) + +How `ctx` thinks about security: trust boundaries, what the system +does and does not do for you, the engineering principle behind the +audit trail, and the permission hygiene workflow. + +For vulnerability disclosure, see +[Reporting Vulnerabilities](reporting.md). + +## Trust Model + +`ctx` operates within a single trust boundary: **the local +filesystem**. + +The person who authors `.context/` files is the same person who runs +the agent that reads them. 
There is no remote input, no shared state, +and no server component. + +This means: + +* **`ctx` does not sanitize context files for prompt injection.** This + is a deliberate design choice, not an oversight. The files are + authored by the developer who owns the machine: sanitizing their + own instructions back to them would be counterproductive. +* **If you place adversarial instructions in your own `.context/` + files, your agent will follow them.** This is expected behavior. + You control the context; the agent trusts it. + +!!! warning "Shared Repositories" + In shared repositories, `.context/` files should be reviewed in + code review (*the same way you would review CI/CD config or + Makefiles*). A malicious contributor could add harmful + instructions to `CONSTITUTION.md` or `TASKS.md`. + +## What `ctx` Does for Security + +`ctx` is designed with security in mind: + +* **No secrets in context**: The constitution explicitly forbids + storing secrets, tokens, API keys, or credentials in `.context/` + files. +* **Local only**: `ctx` runs entirely locally with no external + network calls. +* **No code execution**: `ctx` reads and writes Markdown files only; + it does not execute arbitrary code. +* **Git-tracked**: Core context files are meant to be committed, so + they should never contain sensitive data. Exception: `sessions/` + and `journal/` contain raw conversation data and should be + gitignored. + +## Permission Hygiene + +Claude Code evaluates permissions in deny → ask → allow order. +`ctx init` automatically populates `permissions.deny` with rules +that block dangerous operations before the allow list is ever +consulted. + +**Default deny rules block:** + +* `sudo`, `git push`, `rm -rf /`, `rm -rf ~`, `curl`, `wget`, + `chmod 777` +* `Read` / `Edit` of `.env`, credentials, secrets, `.pem`, `.key` + files + +Even with deny rules in place, the allow list accumulates one-off +permissions over time. 
Periodically review for: + +* **Destructive commands**: `git reset --hard`, `git clean -f`, etc. +* **Config injection vectors**: permissions that allow modifying + files controlling agent behavior (`CLAUDE.md`, + `settings.local.json`). +* **Broad wildcards**: overly permissive patterns that pre-approve + more than intended. + +For the full hygiene workflow, see the +[Claude Code Permission Hygiene](../recipes/claude-code-permissions.md) +recipe. + +## State File Management + +Hook state files (throttle markers, prompt counters, pause markers) +are stored in `.context/state/`, which is project-scoped and +gitignored. State files are automatically managed by the hooks that +create them; no manual cleanup is needed. + +## Log-First Audit Trail + +The event log (`.context/state/events.jsonl`) is the authoritative +record of what `ctx` hooks did during a session. Several +audit-adjacent features depend on that log being trustworthy, not +merely best-effort: + +* `ctx event` / `ctx system view-events` replays session history + from the log. +* Webhook notifications give operators a real-time signal that + assumes every notification corresponds to a logged event. +* Drift, freshness, and map-staleness checks count events over + time and surface regressions. + +A log that silently drops entries while the rest of the system +claims success is worse than no log at all: operators see a green +TUI and a webhook notification and conclude "it happened," even +when the audit trail never landed. The codebase treats this as a +correctness problem, not a UX polish problem. + +### The Rule + +> Any code path that emits an observable side effect (webhook, +> stdout marker, throttle-file touch, state mutation) must append +> the corresponding event-log entry **first** and gate the side +> effect on the append succeeding. If the log write fails, the +> side effect must not fire. 
+ +In code, this shape: + +```go +if appendErr := event.Append(channel, msg, sessionID, ref); appendErr != nil { + return appendErr // do NOT send the webhook or touch the marker +} +if sendErr := notify.Send(channel, msg, sessionID, ref); sendErr != nil { + return sendErr +} +// downstream side effects (marker touch, stdout, etc.) +``` + +The `nudge.Relay` helper in `internal/cli/system/core/nudge` +enforces this for the common "log + webhook" pair. Hook `Run` +functions that compose their own sequence (`session_event`, +`heartbeat`, several `check_*` hooks) follow the same ordering +explicitly. + +### Known Gaps + +* **Nudge webhooks have no log channel.** `nudge.EmitAndRelay` + sends a "nudge" notification before the "relay" event is logged. + The nudge leg is fire-and-forget because no event-log channel + records nudges today. A future refactor may add one; until then + this is the one documented exception. +* **`ctx agent --cooldown` and `ctx doctor` propagate rather than + gate.** They surface real errors to the caller (usually Cobra) + rather than deciding what to do with them locally. Editors that + invoke these commands may display errors in an ugly way; the + ugliness is the correct signal (something persisted is broken), + not a defect to smooth over. +* **Verbose hook logs in `core/log.Message` stay best-effort.** + That logger captures per-hook activity (how many prompts, which + percent, etc.) for debugging; it is NOT the event audit trail. + Its failures go to stderr via `log/warn.Warn` rather than + propagating, because losing an operational log line is not a + correctness problem. + +### Background + +The `error` returns on `event.Append`, `io.AppendBytes`, +`nudge.Relay`, and `cooldown.Active` / `cooldown.TouchTombstone` +were introduced as part of the resolver-tightening refactor. +Before that change, most hook paths called these helpers and +silently discarded their errors. 
The principle above was extracted +from the observation that every user-visible correctness problem +hit during the refactor traced back to some function saying "this +succeeded" when the underlying write never landed. + +## Best Practices + +1. **Review before committing**: Always review `.context/` files + before committing. +2. **Use `.gitignore`**: If you must store sensitive notes locally, + add them to `.gitignore`. +3. **Drift detection**: Run `ctx drift` to check for potential + issues. +4. **Permission audit**: Review `.claude/settings.local.json` after + busy sessions. diff --git a/docs/security/index.md b/docs/security/index.md index 512cbc751..668c243e4 100644 --- a/docs/security/index.md +++ b/docs/security/index.md @@ -9,6 +9,14 @@ Security model, **agent hardening**, and **vulnerability reporting**. --- +### [Security Design](design.md) + +**Trust model**, what `ctx` does for security, **permission +hygiene**, state file management, and the **log-first audit trail** +principle. Read first to understand the security boundaries. + +--- + ### [Securing AI Agents](agent-security.md) **Defense in depth** for unattended AI agents: five layers of @@ -16,7 +24,8 @@ protection, each with a known bypass, strength in combination. --- -### [Security Policy](reporting.md) +### [Reporting Vulnerabilities](reporting.md) -**Trust model**, vulnerability reporting, permission hygiene, -and **security design principles**. +How to report a security issue: email, **GitHub private reporting**, +PGP-encrypted submissions, what to include, and the response +timeline. diff --git a/docs/security/reporting.md b/docs/security/reporting.md index 1b29e144c..b7c4daa65 100644 --- a/docs/security/reporting.md +++ b/docs/security/reporting.md @@ -5,17 +5,22 @@ # \ Copyright 2026-present Context contributors. 
# SPDX-License-Identifier: Apache-2.0 -title: Security Policy +title: Reporting Vulnerabilities icon: lucide/shield --- ![ctx](../images/ctx-banner.png) +Disclosure process for security issues in `ctx`. For the broader +security model (trust boundaries, audit trail, permission hygiene), +see [Security Design](design.md). + ## Reporting Vulnerabilities At `ctx` we take security very seriously. -If you discover a security vulnerability in `ctx`, please report it responsibly. +If you discover a security vulnerability in `ctx`, please report it +responsibly. **Do NOT open a public issue for security vulnerabilities.** @@ -32,8 +37,8 @@ Send details to **security@ctx.ist**. ### Encrypted Reports (*Optional*) If your report contains sensitive details (*proof-of-concept exploits, -credentials, or internal system information*), you can encrypt your message -with our PGP key: +credentials, or internal system information*), you can encrypt your +message with our PGP key: * **In-repo**: [`SECURITY_KEY.asc`](https://github.com/ActiveMemory/ctx/blob/main/SECURITY_KEY.asc) * **Keybase**: [keybase.io/alekhinejose](https://keybase.io/alekhinejose/pgp_keys.asc) @@ -46,8 +51,8 @@ gpg --import SECURITY_KEY.asc gpg --armor --encrypt --recipient security@ctx.ist report.txt ``` -Encryption is optional. Unencrypted reports to **security@ctx.ist** or via -GitHub Private Reporting are perfectly fine. +Encryption is optional. Unencrypted reports to **security@ctx.ist** or +via GitHub Private Reporting are perfectly fine. ### What to Include @@ -58,21 +63,20 @@ GitHub Private Reporting are perfectly fine. ## Attribution -We appreciate responsible disclosure and will acknowledge security researchers -who report valid vulnerabilities (*unless they prefer to remain anonymous*). - ----- +We appreciate responsible disclosure and will acknowledge security +researchers who report valid vulnerabilities (*unless they prefer to +remain anonymous*). -### Response Timeline +## Response Timeline !!! 
note "Open Source, Best-Effort Timelines" - `ctx` is a volunteer-maintained open source project. + `ctx` is a volunteer-maintained open source project. + + The timelines below are **guidelines**, not guarantees, and depend + on contributor availability. - The timelines below are **guidelines**, not guarantees, and depend on - contributor availability. - - We will address security reports on a best-effort basis and prioritize - them by severity. + We will address security reports on a best-effort basis and + prioritize them by severity. | Stage | Timeframe | @@ -80,77 +84,3 @@ who report valid vulnerabilities (*unless they prefer to remain anonymous*). | Acknowledgment | Within 48 hours | | Initial assessment | Within 7 days | | Resolution target | Within 30 days (*depending on severity*) | - ----- - -## Trust Model - -`ctx` operates within a single trust boundary: **the local filesystem**. - -The person who authors `.context/` files is the same person who runs the -agent that reads them. There is no remote input, no shared state, and no -server component. - -This means: - -* **`ctx` does not sanitize context files for prompt injection.** This is a - deliberate design choice, not an oversight. The files are authored by the - developer who owns the machine: Sanitizing their own instructions back - to them would be counterproductive. -* **If you place adversarial instructions in your own `.context/` files, - your agent will follow them.** This is expected behavior. You control the - context; the agent trusts it. - -!!! warning "Shared Repositories" - In shared repositories, `.context/` files should be reviewed in code - review (*the same way you would review CI/CD config or Makefiles*). A - malicious contributor could add harmful instructions to - `CONSTITUTION.md` or `TASKS.md`. 
- -## Security Design - -`ctx` is designed with security in mind: - -* **No secrets in context**: The constitution explicitly forbids storing - secrets, tokens, API keys, or credentials in `.context/` files -* **Local only**: `ctx` runs entirely locally with no external network calls -* **No code execution**: ctx reads and writes Markdown files only; it does - not execute arbitrary code -* **Git-tracked**: Core context files are meant to be committed, so they should - never contain sensitive data. Exception: `sessions/` and `journal/` contain - raw conversation data and should be gitignored - -## Permission Hygiene - -Claude Code evaluates permissions in deny → ask → allow order. `ctx init` -automatically populates `permissions.deny` with rules that block dangerous -operations before the allow list is ever consulted. - -**Default deny rules block:** - -* `sudo`, `git push`, `rm -rf /`, `rm -rf ~`, `curl`, `wget`, `chmod 777` -* `Read`/`Edit` of `.env`, credentials, secrets, `.pem`, `.key` files - -Even with deny rules in place, the allow list accumulates one-off permissions -over time. Periodically review for: - -* **Destructive commands**: `git reset --hard`, `git clean -f`, etc. -* **Config injection vectors**: permissions that allow modifying files - controlling agent behavior (`CLAUDE.md`, `settings.local.json`) -* **Broad wildcards**: overly permissive patterns that pre-approve - more than intended - -## State File Management - -Hook state files (throttle markers, prompt counters, pause markers) are -stored in `.context/state/`, which is project-scoped and gitignored. -State files are automatically managed by the hooks that create them; -no manual cleanup is needed. - -## Best Practices - -1. **Review before committing**: Always review `.context/` files before committing -2. **Use `.gitignore`**: If you must store sensitive notes locally, - add them to `.gitignore` -3. **Drift detection**: Run `ctx drift` to check for potential issues -4. 
**Permission audit**: Review `.claude/settings.local.json` after busy sessions diff --git a/editors/vscode/CHANGELOG.md b/editors/vscode/CHANGELOG.md index cd4586a30..f3129a835 100644 --- a/editors/vscode/CHANGELOG.md +++ b/editors/vscode/CHANGELOG.md @@ -1,30 +1,30 @@ # Changelog -All notable changes to the **ctx — Persistent Context for AI** extension +All notable changes to the **ctx: Persistent Context for AI** extension will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/). -## [0.9.0] — 2026-03-19 +## [0.9.0] - 2026-03-19 ### Added - **@ctx chat participant** with 45 slash commands covering context lifecycle, task management, session recall, and discovery -- **Natural language routing** — type plain English after `@ctx` and +- **Natural language routing**: type plain English after `@ctx` and the extension maps it to the correct handler -- **Auto-bootstrap** — downloads the ctx CLI binary if not found on PATH -- **Detection ring** — terminal command watcher and file edit watcher +- **Auto-bootstrap**: downloads the ctx CLI binary if not found on PATH +- **Detection ring**: terminal command watcher and file edit watcher record governance violations for the MCP engine -- **Status bar reminders** — `$(bell) ctx` indicator for pending reminders -- **Automatic hooks** — file save, git commit, dependency change, and +- **Status bar reminders**: `$(bell) ctx` indicator for pending reminders +- **Automatic hooks**: file save, git commit, dependency change, and context file change handlers -- **Follow-up suggestions** — context-aware buttons after each command -- **`/diag` command** — diagnose extension issues with step-by-step timing +- **Follow-up suggestions**: context-aware buttons after each command +- **`/diag` command**: diagnose extension issues with step-by-step timing ### Configuration -- `ctx.executablePath` — path to the ctx CLI binary (default: `ctx`) +- `ctx.executablePath`: path to the ctx CLI binary 
(default: `ctx`) ## [Unreleased] diff --git a/editors/vscode/README.md b/editors/vscode/README.md index 5c97c7114..c940276f1 100644 --- a/editors/vscode/README.md +++ b/editors/vscode/README.md @@ -8,16 +8,16 @@ ## `ctx`: VS Code Chat Extension -A VS Code Chat Participant that brings [ctx](https://ctx.ist) — persistent -project context for AI coding sessions — directly into GitHub Copilot Chat. +A VS Code Chat Participant that brings [ctx](https://ctx.ist) (persistent +project context for AI coding sessions) directly into GitHub Copilot Chat. Type `@ctx` in the Chat view to access 45 slash commands, automatic context -hooks, a reminder status bar, and natural language routing — all powered by +hooks, a reminder status bar, and natural language routing, all powered by the ctx CLI. ## Quick Start -1. Install the extension (or build from source — see [Development](#development)) +1. Install the extension (or build from source; see [Development](#development)) 2. Open a project in VS Code 3. Open Copilot Chat and type `@ctx /init` @@ -79,7 +79,7 @@ The extension auto-downloads the ctx CLI binary if it isn't on your PATH. | `/check-links` | Audit local links in context files | | `/journal` | View or export journal entries | | `/consolidate` | Find duplicate entries across context files | -| `/audit` | Alignment audit — drift + convention check | +| `/audit` | Alignment audit: drift + convention check | | `/worktree` | Git worktree management (list, add) | ### Context Metadata @@ -111,7 +111,7 @@ Sub-routes for `/system`: `resources`, `doctor`, `bootstrap`, `stats`, ## Automatic Hooks The extension registers several VS Code event handlers that mirror -Claude Code's hook system. These run in the background — no user action +Claude Code's hook system. These run in the background; no user action needed. | Trigger | What Happens | @@ -119,7 +119,7 @@ needed. 
| **File save** | Runs task-completion check on non-`.context/` files | | **Git commit** | Notification prompting to add a Decision, Learning, run Verify, or Skip | | **`.context/` file change** | Refreshes reminders and regenerates `.github/copilot-instructions.md` | -| **Dependency file change** | Notification when `go.mod`, `package.json`, etc. change — offers `/map` | +| **Dependency file change** | Notification when `go.mod`, `package.json`, etc. change; offers `/map` | | **Every 5 minutes** | Updates reminder status bar and writes heartbeat timestamp | | **Extension activate** | Fires `session-event --type start` to ctx CLI | | **Extension deactivate** | Fires `session-event --type end` to ctx CLI | @@ -132,7 +132,7 @@ automatically. ## Natural Language -You can also type plain English after `@ctx` — the extension routes +You can also type plain English after `@ctx`: the extension routes common phrases to the correct handler: - "What should I work on next?" → `/next` @@ -169,7 +169,7 @@ For example: - VS Code 1.93+ - [GitHub Copilot Chat](https://marketplace.visualstudio.com/items?itemName=GitHub.copilot-chat) extension -- [ctx](https://ctx.ist) CLI on PATH — or let the extension auto-download it +- [ctx](https://ctx.ist) CLI on PATH, or let the extension auto-download it ## Configuration diff --git a/examples/demo/.context/AGENT_PLAYBOOK.md b/examples/demo/.context/AGENT_PLAYBOOK.md deleted file mode 100644 index 59413d8b4..000000000 --- a/examples/demo/.context/AGENT_PLAYBOOK.md +++ /dev/null @@ -1,179 +0,0 @@ -# Agent Playbook - -## Mental Model - -This system does not persist experience. - -- Each session is a fresh execution in a shared workshop. -- Work continuity comes from artifacts left on the bench. - -### Work → Reflect → Persist - -After completing meaningful work, follow this cycle: - -``` -┌─────────┐ ┌─────────┐ ┌─────────┐ -│ Work │ ──▶ │ Reflect │ ──▶ │ Persist │ ──▶ (continue) -└─────────┘ └─────────┘ └─────────┘ - │ - ▼ - Did I just... 
- • Complete a task? - • Make a decision? - • Learn something? - • Hit a milestone? -``` - -**Don't wait for session end** — it may never come cleanly. Persist as you go. - -## Invoking ctx - -Always use `ctx` from PATH: -```bash -ctx status # ✓ correct -ctx agent # ✓ correct -./dist/ctx # ✗ avoid hardcoded paths -go run ./cmd/ctx # ✗ avoid unless developing ctx itself -``` - -Check with `which ctx` if unsure whether it's installed. - -## Context Readback - -Before starting any work, read the required context files and confirm to the -user: "I have read the required context files and I'm following project -conventions." Do not begin implementation until you have done so. - -## Read Order - -1. CONSTITUTION.md — Hard rules, NEVER violate -2. TASKS.md — What to work on next -3. CONVENTIONS.md — How to write code -4. ARCHITECTURE.md — Where things go -5. DECISIONS.md — Why things are the way they are -6. LEARNINGS.md — Gotchas to avoid -7. GLOSSARY.md — Correct terminology - -## How Your Memory Works - -When asked "Do you remember?" or similar: - -1. **Read silently first**: TASKS.md, DECISIONS.md, LEARNINGS.md, and - run `ctx journal source --limit 5` for recent history. Do this BEFORE composing a response. -2. **Respond with a structured readback**: - - **Last session**: most recent session topic and date - - **Active work**: pending or in-progress tasks - - **Recent context**: 1-2 recent decisions or learnings - - **Next step**: offer to continue or ask what to focus on -3. **Be honest about mechanism only if asked** — the user wants results, - not disclaimers - -**Wrong**: "I don't have memory, but I can check files. Want me to?" -**Right**: *reads files silently* → "Last session (Jan 15, database timeouts): -we investigated connection pool exhaustion. OAuth2 login is in progress -with 3 remaining tasks in Phase 2. Want to continue with that?" - -The context IS your memory. It's stored externally in files, but that's an -implementation detail. 
Load it and use it — don't lead with caveats. - -## When to Update Memory - -| Event | Action | -|-----------------------------|-----------------------| -| Made architectural decision | Add to DECISIONS.md | -| Discovered gotcha/bug | Add to LEARNINGS.md | -| Established new pattern | Add to CONVENTIONS.md | -| Completed task | Mark [x] in TASKS.md | - -## Proactive Context Persistence - -**Don't wait for session end** — persist context at natural milestones. - -### Milestone Triggers - -Offer to persist context when you: - -| Milestone | Action | -|------------------------------------|-------------------------------------------------| -| Complete a task | Mark done in TASKS.md, offer to add learnings | -| Make an architectural decision | `ctx add decision "..."` | -| Discover a gotcha or bug | `ctx add learning "..."` | -| Finish a significant code change | Offer to summarize what was done | -| Encounter unexpected behavior | Document it before moving on | -| Resolve a tricky debugging session | Capture the root cause and fix | - -### Self-Check Prompt - -Periodically ask yourself: - -> "If this session ended right now, would the next session know what happened?" - -If no — persist something before continuing. - -### Task Lifecycle Timestamps - -Track task progress with timestamps for session correlation: - -```markdown -- [ ] Implement feature X #added:2026-01-25-220332 -- [ ] Fix bug Y #added:2026-01-25-220332 #started:2026-01-25-221500 -- [x] Refactor Z #added:2026-01-25-200000 #started:2026-01-25-210000 -``` - -| Tag | When to Add | Format | -|------------|------------------------------------------|----------------------| -| `#added` | Auto-added by `ctx add task` | `YYYY-MM-DD-HHMMSS` | -| `#started` | When you begin working on the task | `YYYY-MM-DD-HHMMSS` | - -## How to Avoid Hallucinating Memory - -Never assume. If you don't see it in files, you don't know it. 
- -- Don't claim "we discussed X" without file evidence -- Don't invent history - check context files and `ctx journal source` for actual discussions -- If uncertain, say "I don't see this documented" -- Trust files over intuition - ---- - -## Context Anti-Patterns - -### Stale Context - -**Problem**: Context files become outdated and misleading. - -**Solution**: Update context as part of completing work, not as a separate task. -Run `ctx drift` periodically to detect staleness. - -### Context Sprawl - -**Problem**: Information scattered across multiple locations. - -**Solution**: Single source of truth for each type of information. -Use the defined file structure; resist creating new document types. - -### Implicit Context - -**Problem**: Relying on knowledge not captured in artifacts. - -**Solution**: If you reference something repeatedly, add it to the appropriate file. -If this session ended now, would the next session know what you know? - ---- - -## Context Validation Checklist - -Before starting significant work, validate context is current: - -### Quick Check (Every Session) -- [ ] TASKS.md reflects current priorities -- [ ] No obvious staleness in files you'll reference -- [ ] Recent history reviewed for relevant context (via `ctx journal source`) - -### Deep Check (Weekly or Before Major Work) -- [ ] CONSTITUTION.md rules still apply -- [ ] ARCHITECTURE.md matches actual structure -- [ ] CONVENTIONS.md patterns match code -- [ ] DECISIONS.md has no superseded entries unmarked -- [ ] LEARNINGS.md gotchas still relevant -- [ ] Run `ctx drift` and address warnings diff --git a/examples/demo/.context/ARCHITECTURE.md b/examples/demo/.context/ARCHITECTURE.md deleted file mode 100644 index 7ff400f2e..000000000 --- a/examples/demo/.context/ARCHITECTURE.md +++ /dev/null @@ -1,100 +0,0 @@ -# Architecture - -System overview and component organization. 
- -## High-Level Architecture - -``` -┌─────────────────────────────────────────────────────────────┐ -│ Clients │ -│ (Web App, Mobile App, CLI) │ -└─────────────────────────┬───────────────────────────────────┘ - │ - ▼ -┌─────────────────────────────────────────────────────────────┐ -│ Load Balancer │ -│ (nginx / AWS ALB) │ -└─────────────────────────┬───────────────────────────────────┘ - │ - ▼ -┌─────────────────────────────────────────────────────────────┐ -│ API Server │ -│ (Go / net/http) │ -│ ┌─────────────┐ ┌─────────────┐ ┌─────────────────────┐ │ -│ │ Handlers │ │ Services │ │ Repositories │ │ -│ └─────────────┘ └─────────────┘ └─────────────────────┘ │ -└─────────────────────────┬───────────────────────────────────┘ - │ - ┌───────────────┼───────────────┐ - ▼ ▼ ▼ - ┌───────────┐ ┌───────────┐ ┌───────────────┐ - │ PostgreSQL│ │ Redis │ │ Object Store │ - │ (primary) │ │ (cache) │ │ (S3) │ - └───────────┘ └───────────┘ └───────────────┘ -``` - -## Directory Structure - -``` -. -├── cmd/ -│ ├── api/ # API server entrypoint -│ └── worker/ # Background worker entrypoint -├── internal/ -│ ├── handler/ # HTTP handlers -│ ├── service/ # Business logic -│ ├── repository/ # Data access -│ └── model/ # Domain types -├── pkg/ # Shared libraries (importable) -├── migrations/ # Database migrations -├── docs/ # Documentation -└── .context/ # AI context files -``` - -## Key Components - -### API Server (`cmd/api`) -- Handles HTTP requests -- Validates input, calls services, returns responses -- Stateless — all state in database or cache - -### Services (`internal/service`) -- Contains business logic -- Orchestrates multiple repositories -- Enforces business rules - -### Repositories (`internal/repository`) -- Data access layer -- One repository per aggregate root -- Handles database queries and caching - -## Key Patterns - -### Repository Pattern -Data access is abstracted through repositories. Business logic never -directly queries the database. 
- -### Dependency Injection -All dependencies are injected through constructors, making testing -easier and components more modular. - -### Event-Driven Updates -The system uses an event bus for decoupled component communication. -Events are published when state changes, and interested components -subscribe to relevant events. - -## Data Flow - -1. Request arrives at handler -2. Handler validates input, extracts user context -3. Handler calls service with validated data -4. Service applies business logic, calls repositories -5. Repository reads/writes to database -6. Response flows back up the stack - -## Scaling Strategy - -- **Horizontal**: Add more API server instances behind load balancer -- **Database**: Read replicas for read-heavy workloads -- **Cache**: Redis for session data and frequently accessed records -- **Background work**: Separate worker processes for async jobs diff --git a/examples/demo/.context/CONSTITUTION.md b/examples/demo/.context/CONSTITUTION.md deleted file mode 100644 index c256e9952..000000000 --- a/examples/demo/.context/CONSTITUTION.md +++ /dev/null @@ -1,88 +0,0 @@ -# Constitution - -These rules are INVIOLABLE. If a task requires violating these, the -task is wrong. - -## Completion Over Motion - -Work is only complete when it is **fully done**, not when progress -has been made. - -- The requested outcome must be delivered end-to-end. -- Partial progress is not completion. -- No half measures. - -Do not: -- Leave broken or inconsistent states -- Deliver work that requires the user to "finish it later" - -If you start something, you own it, you finish it. - ---- - -## No Excuse Generation - -**Never default to deferral.** - -Your goal is to satisfy the user's intent, not to complete a narrow -interpretation of the task. 
- -Do not justify incomplete work with statements like: - -- "Let's continue this later" -- "This is out of scope" -- "I can create a follow-up task" -- "This will take too long" -- "Another system caused this" -- "This part is not mine" -- "We are running out of context window" - -Constraints may exist, but they do not excuse incomplete delivery. - -- External systems, prior code, or other agents are not valid excuses -- Inconsistencies must be resolved, not explained away - ---- - -## No Broken Windows - -Leave the system in a better state than you found it. - -- Fix obvious issues when encountered -- Do not introduce temporary hacks without resolving them -- Do not normalize degraded quality - ---- - -## Security Invariants - -- [ ] Never commit secrets, tokens, API keys, or credentials -- [ ] Never store customer/user data in context files -- [ ] All user input must be validated and sanitized - -## Quality Invariants - -- [ ] All code must pass tests before commit -- [ ] No TODO comments in main branch (move to TASKS.md) -- [ ] Breaking API changes require deprecation period - -## Process Invariants - -- [ ] All architectural changes require a decision record in DECISIONS.md - -## TASKS.md Structure Invariants - -TASKS.md must remain a replayable checklist. Uncheck all items and re-run -the loop = verify/redo all tasks in order. 
- -- [ ] **Never move tasks** — tasks stay in their Phase section permanently -- [ ] **Never remove Phase headers** — Phase labels provide structure and order -- [ ] **Never delete tasks** — mark as `[x]` completed, or `[-]` skipped with reason -- [ ] **Use inline labels for status** — add `#in-progress` to task text, don't move it -- [ ] **No "In Progress" sections** — these encourage moving tasks -- [ ] **Ask before restructuring** — if structure changes seem needed, ask the user first - -## Context Preservation Invariants - -- [ ] **Archival is allowed, deletion is not** — use `ctx task archive` to move completed tasks, never delete context history -- [ ] **Archive preserves structure** — archived tasks keep their Phase headers for traceability diff --git a/examples/demo/.context/CONVENTIONS.md b/examples/demo/.context/CONVENTIONS.md deleted file mode 100644 index 7f742f390..000000000 --- a/examples/demo/.context/CONVENTIONS.md +++ /dev/null @@ -1,83 +0,0 @@ -# Conventions - -Coding standards and patterns used in this project. - -## Naming - -- Use camelCase for variables and functions -- Use PascalCase for types and interfaces -- Use SCREAMING_SNAKE_CASE for constants - -## Code Style - -- Prefer early returns over nested conditionals -- Maximum line length: 100 characters -- One component per file - -## Patterns - -### Error Handling - -Always return errors, never panic in library code: - -```go -// ✓ Correct -func ProcessData(data []byte) (Result, error) { - if len(data) == 0 { - return Result{}, fmt.Errorf("empty data") - } - // ... -} - -// ✗ Wrong -func ProcessData(data []byte) Result { - if len(data) == 0 { - panic("empty data") // Never panic in libraries - } - // ... -} -``` - -Wrap errors with context: - -```go -if err != nil { - return fmt.Errorf("processing user %s: %w", userID, err) -} -``` - -### Configuration - -Load order (highest priority first): -1. Environment variables -2. Config file (config.yaml) -3. 
Default values - -Log config source at startup for debuggability. - -## Testing - -- Test files adjacent to source files (`foo.go` → `foo_test.go`) -- Use table-driven tests for multiple cases -- Mock external dependencies, never call real services in tests - -## Git Practices - -- Commit messages follow Conventional Commits format -- Feature branches: `feature/` -- Bug fixes: `fix/` -- All PRs require at least one approval - -## Documentation - -### Doc-Impact Rule - -When modifying code that affects user-facing behavior, update the corresponding -documentation: - -| Code Change | Doc Update Required | -|--------------------------|------------------------| -| API endpoint changes | `docs/api.md` | -| CLI command changes | `docs/cli.md` | -| Configuration changes | `docs/configuration.md`| -| New features | `README.md` | diff --git a/examples/demo/.context/DECISIONS.md b/examples/demo/.context/DECISIONS.md deleted file mode 100644 index db982f540..000000000 --- a/examples/demo/.context/DECISIONS.md +++ /dev/null @@ -1,87 +0,0 @@ -# Decisions - -Architectural decisions with rationale and consequences. - ---- - -## [2026-01-05-110000] Use PostgreSQL for Primary Database - -**Context**: Needed to choose a database for the application. Options were -PostgreSQL, MySQL, and MongoDB. - -**Decision**: PostgreSQL - -**Rationale**: -- Strong ACID compliance for financial transactions -- Excellent JSON support for flexible schema needs -- Team has existing PostgreSQL expertise -- Rich ecosystem of tools and extensions - -**Consequence**: -- Need to manage schema migrations explicitly -- Requires more upfront schema design than document stores -- Horizontal scaling requires additional tooling (Citus, read replicas) - ---- - -## [2026-01-08-140000] JWT for API Authentication - -**Context**: Needed to choose authentication mechanism for the REST API. -Options were session cookies, JWT tokens, and API keys. 
- -**Decision**: JWT tokens with short expiry + refresh tokens - -**Rationale**: -- Stateless authentication scales horizontally without session storage -- Works well for both web and mobile clients -- Can embed user claims to reduce database lookups -- Industry standard with good library support - -**Consequence**: -- Cannot immediately revoke tokens (must wait for expiry) -- Need secure storage for refresh tokens -- Must implement token refresh flow in all clients -- Larger request payload than session cookies - ---- - -## [2026-01-10-090000] Use Go for API Server - -**Context**: Choosing a backend language for the API. Options were Go, -Node.js, and Python. - -**Decision**: Go - -**Rationale**: -- Excellent performance characteristics -- Strong typing catches bugs at compile time -- Simple deployment with single binary -- Great concurrency primitives for handling many connections - -**Consequence**: -- Smaller talent pool than JavaScript/Python -- Some team members need Go training -- Compile step required (vs interpreted languages) - ---- - -## [2026-01-12-160000] Monorepo Structure - -**Context**: Starting with multiple services (API, worker, CLI). Needed to -decide between monorepo and multi-repo structure. - -**Decision**: Monorepo with shared packages - -**Rationale**: -- Atomic commits across services -- Easier code sharing and refactoring -- Single CI/CD pipeline to maintain -- Better visibility into cross-service changes - -**Consequence**: -- Need tooling to handle partial builds (only changed services) -- Repository will grow large over time -- All developers need access to entire codebase -- Must establish clear package boundaries - ---- diff --git a/examples/demo/.context/GLOSSARY.md b/examples/demo/.context/GLOSSARY.md deleted file mode 100644 index 656254796..000000000 --- a/examples/demo/.context/GLOSSARY.md +++ /dev/null @@ -1,42 +0,0 @@ -# Glossary - -Domain terms, abbreviations, and project-specific vocabulary. 
- ---- - -## Terms - -- **Claim (JWT)**: A key-value pair embedded in a JWT token that - carries user identity or permission data without a database lookup. - -- **Connection pool**: A cache of database connections maintained - so they can be reused for future requests instead of opening new - connections each time. - -- **Handler**: A function that processes an incoming HTTP request - and returns a response. Lives in `internal/api/handlers/`. - -- **Migration**: A versioned SQL script that modifies the database - schema. Applied in order to bring the database to a target state. - -- **Middleware**: A function that wraps handlers to add cross-cutting - concerns (authentication, logging, rate limiting) without modifying - handler logic. - -- **Refresh token**: A long-lived token used to obtain new access - tokens without re-authenticating. Stored server-side in the database. - -- **Repository**: A data access layer that abstracts database - operations behind a Go interface. One repository per domain entity. - -- **Service**: A business logic layer between handlers and - repositories. Enforces domain rules and orchestrates operations - across multiple repositories. 
- -## Abbreviations - -- **API**: Application Programming Interface -- **JWT**: JSON Web Token (RFC 7519) -- **CRUD**: Create, Read, Update, Delete -- **DTO**: Data Transfer Object (request/response structs) -- **ORM**: Object-Relational Mapping (not used; raw SQL preferred) diff --git a/examples/demo/.context/LEARNINGS.md b/examples/demo/.context/LEARNINGS.md deleted file mode 100644 index 3813eae83..000000000 --- a/examples/demo/.context/LEARNINGS.md +++ /dev/null @@ -1,57 +0,0 @@ -# Learnings - - -| Date | Learning | -|------|----------| -| 2026-01-15 | Database connections need explicit timeouts | -| 2026-01-10 | Environment variables override config files | -| 2026-01-05 | Rate limiter must be per-user, not global | - - ---- - -## [2026-01-15-143022] Database connections need explicit timeouts - -**Context**: Production outage caused by database connection pool exhaustion. -Connections were hanging indefinitely waiting for slow queries. - -**Lesson**: Always set explicit timeouts on database connections: connect timeout, -read timeout, and write timeout. Default "no timeout" is never acceptable in production. - -**Application**: Add to connection config: -```go -db.SetConnMaxLifetime(5 * time.Minute) -db.SetConnMaxIdleTime(1 * time.Minute) -ctx, cancel := context.WithTimeout(ctx, 30*time.Second) -``` - ---- - -## [2026-01-10-091500] Environment variables override config files - -**Context**: Debugging why staging had different behavior than local. Config file -was correct, but an old environment variable was overriding it. - -**Lesson**: Document the precedence order clearly: ENV > config file > defaults. -When debugging config issues, always check environment variables first. 
- -**Application**: Add config source logging at startup: -``` -Config loaded: database.host=localhost (source: ENV) -Config loaded: database.port=5432 (source: config.yaml) -``` - ---- - -## [2026-01-05-160030] Rate limiter must be per-user, not global - -**Context**: Implemented global rate limiter (100 req/sec total). One heavy user -could starve all other users. - -**Lesson**: Rate limiting should be per-user (or per-API-key) to ensure fair -resource allocation. Global limits are only useful as a last-resort circuit breaker. - -**Application**: Use user ID or API key as the rate limiter bucket key, not a -single global bucket. - ---- diff --git a/examples/demo/.context/TASKS.md b/examples/demo/.context/TASKS.md deleted file mode 100644 index 000ef7886..000000000 --- a/examples/demo/.context/TASKS.md +++ /dev/null @@ -1,26 +0,0 @@ -# Tasks - -Current work items, organized by phase. Tasks stay in their phase permanently. - -## Phase 1: Foundation - -- [x] Initial project setup #added:2026-01-01-090000 -- [x] Database schema design #added:2026-01-01-090000 -- [x] Core API scaffolding #added:2026-01-01-090000 - -## Phase 2: Authentication - -- [x] Implement user registration #added:2026-01-04-100000 -- [ ] Implement OAuth2 login #added:2026-01-04-100000 #in-progress -- [ ] Add session management #added:2026-01-04-100000 - -## Phase 3: API Features - -- [ ] Add rate limiting to API endpoints #added:2026-01-10-090000 -- [ ] Write integration tests for payment flow #added:2026-01-10-090000 - -## Phase 4: Infrastructure - -- [ ] Add support for WebSocket connections #added:2026-01-15-140000 -- [ ] Implement caching layer #added:2026-01-15-140000 -- [ ] Set up monitoring and alerting #added:2026-01-15-140000 diff --git a/examples/demo/.context/sessions/2026-01-15-143000-database-timeout-investigation.md b/examples/demo/.context/sessions/2026-01-15-143000-database-timeout-investigation.md deleted file mode 100644 index 4ffacfbb9..000000000 --- 
a/examples/demo/.context/sessions/2026-01-15-143000-database-timeout-investigation.md +++ /dev/null @@ -1,64 +0,0 @@ -# Session: Database Timeout Investigation - -**Date**: 2026-01-15 -**start_time**: 2026-01-15-140000 -**end_time**: 2026-01-15-160000 -**Topic**: Investigating production database connection issues -**Type**: bugfix - ---- - -## Summary - -Investigated production outage caused by database connection pool exhaustion. -Found that connections were hanging indefinitely on slow queries. Implemented -explicit timeouts and connection lifecycle management. - -## Problem - -- Production API started returning 503 errors -- Database connection pool was exhausted (all 100 connections in use) -- Connections were stuck waiting for queries that never returned -- No timeout configured on database connections - -## Root Cause - -Default Go database driver has no timeout. When the database is slow or -unresponsive, connections wait forever, eventually exhausting the pool. - -## Fix Applied - -```go -// Before: no timeouts -db, err := sql.Open("postgres", connStr) - -// After: explicit lifecycle management -db, err := sql.Open("postgres", connStr) -db.SetConnMaxLifetime(5 * time.Minute) -db.SetConnMaxIdleTime(1 * time.Minute) -db.SetMaxOpenConns(100) -db.SetMaxIdleConns(10) - -// Query-level timeouts -ctx, cancel := context.WithTimeout(ctx, 30*time.Second) -defer cancel() -rows, err := db.QueryContext(ctx, query) -``` - -## Key Decisions - -- Set connection max lifetime to 5 minutes (prevents stale connections) -- Set query timeout to 30 seconds (fail fast on slow queries) -- Added circuit breaker for database calls - -## Tasks for Next Session - -- Add monitoring for connection pool metrics -- Set up alerting for connection pool utilization > 80% -- Review other services for similar timeout issues - -## Files Changed - -- `internal/repository/db.go` -- `internal/config/database.go` -- `docs/operations.md` diff --git a/examples/demo/PROMPT.md b/examples/demo/PROMPT.md 
deleted file mode 100644 index 26afb63ee..000000000 --- a/examples/demo/PROMPT.md +++ /dev/null @@ -1,94 +0,0 @@ -# PROMPT.md — Demo Project - -## CORE PRINCIPLE - -You have NO conversational memory. Your memory IS the file system. -Your goal: advance the project by exactly ONE task, update context, and exit. - ---- - -## PROJECT CONTEXT - -**Project**: Demo API Server -**Language**: Go 1.22+ -**Current Focus**: Phase 2 — Authentication - ---- - -## PHASE 0: ORIENT - -1. Read `.context/TASKS.md` — Current work items -2. Read `.context/CONSTITUTION.md` — Rules to never violate -3. Read `.context/CONVENTIONS.md` — How to write code -4. Read relevant spec in `specs/` for the current task - ---- - -## PHASE 1: SELECT TASK - -1. Read `.context/TASKS.md` -2. Find the **first unchecked item** (line starting with `- [ ]`) -3. That is your ONE task for this iteration - -**IF NO UNCHECKED ITEMS:** -1. Run validation: `go build ./...`, `go test ./...` -2. If all pass, output `PHASE_COMPLETE` -3. If any fail, add fix task and continue - ---- - -## PHASE 2: EXECUTE - -1. **Read the spec** — Check `specs/` for detailed requirements -2. **Search first** — Don't assume code doesn't exist -3. **Implement ONE task** — Complete it fully. No placeholders. -4. **Follow conventions** — Check `.context/CONVENTIONS.md` - ---- - -## PHASE 3: VALIDATE - -After implementing, run: - -```bash -go build ./... # Must compile -go test ./... # Tests must pass -go vet ./... # No vet errors -``` - ---- - -## PHASE 4: UPDATE CONTEXT - -1. Mark completed task `[x]` in `.context/TASKS.md` -2. If you made an architectural decision → add to `.context/DECISIONS.md` -4. If you learned a gotcha → add to `.context/LEARNINGS.md` - -**EXIT.** Do not continue to next task. The loop will restart you. - ---- - -## CRITICAL CONSTRAINTS - -### ONE TASK ONLY -Complete ONE task, then stop. The loop handles continuation. - -### NO CHAT -Never ask questions. If blocked: -1. Add reason to task in `.context/TASKS.md` -2. 
Move to next task - -### MEMORY IS THE FILESYSTEM -You will not remember this conversation. Write everything important to files. - ---- - -## REFERENCE: SPECS - -| Spec | Description | -|------|-------------| -| `specs/oauth2.md` | OAuth2 authentication implementation | - ---- - -Now read `.context/TASKS.md` and begin. diff --git a/examples/demo/README.md b/examples/demo/README.md deleted file mode 100644 index 0a0bee16a..000000000 --- a/examples/demo/README.md +++ /dev/null @@ -1,126 +0,0 @@ -![ctx](../../assets/ctx-banner.png) - -# Demo Project - -This is a sample project demonstrating Context (ctx) structure and best practices. - -## Quick Start - -```bash -# View context status -ctx status - -# Get AI-ready context packet -ctx agent - -# Add a new task -ctx add task "Implement feature X" - -# Mark a task complete -ctx task complete "feature X" - -# Check for stale context -ctx drift -``` - -## Context Files - -The `.context/` directory contains markdown files that provide persistent -context for AI coding assistants: - -| File | Purpose | -|----------------------|---------------------------------------------------| -| `AGENT_PLAYBOOK.md` | **Read first** — How agents should use this system | -| `CONSTITUTION.md` | Inviolable rules — NEVER violate these | -| `TASKS.md` | Current work items with phases and timestamps | -| `CONVENTIONS.md` | Coding standards and patterns | -| `ARCHITECTURE.md` | System overview and component layout | -| `DECISIONS.md` | Technical decisions with rationale | -| `LEARNINGS.md` | Gotchas, tips, lessons learned | -| `GLOSSARY.md` | Domain terms and abbreviations | - -## Key Concepts - -### Agent Playbook - -`AGENT_PLAYBOOK.md` is the bootstrap file for AI agents. It explains: -- The mental model (memory = files, not conversation) -- Read order for context files -- When and how to persist learnings/decisions -- How to avoid hallucinating memory - -### Phase-Based Tasks - -Tasks in `TASKS.md` stay in their phase permanently. 
Use inline labels -(`#in-progress`) instead of moving tasks between sections: - -```markdown -## Phase 2: Authentication - -- [x] Implement user registration #added:2026-01-04-100000 -- [ ] Implement OAuth2 login #added:2026-01-04-100000 #in-progress -- [ ] Add session management #added:2026-01-04-100000 -``` - -### Structured Entries - -Learnings and decisions follow structured formats with timestamps: - -```markdown -## [2026-01-15-143022] Database connections need explicit timeouts - -**Context**: What situation led to this learning - -**Lesson**: What we learned - -**Application**: How to apply it going forward -``` - -## Adding Context - -```bash -# Add a learning with full structure -ctx add learning "Title" \ - --context "What happened" \ - --lesson "What we learned" \ - --application "How to apply it" - -# Add a decision with rationale -ctx add decision "Title" \ - --context "What prompted this" \ - --rationale "Why this choice" \ - --consequence "What changes" - -# Add a task -ctx add task "Implement feature X" -``` - -## Ralph Loop Integration - -This demo includes Ralph Loop infrastructure for iterative AI development: - -| File | Purpose | -|------|---------| -| `PROMPT.md` | Directive for AI agents — defines the work loop | -| `specs/` | Detailed specifications for features | - -The Ralph Loop pattern: -1. AI reads `PROMPT.md` to understand the workflow -2. Picks ONE task from `.context/TASKS.md` -3. Reads relevant spec from `specs/` for requirements -4. Implements the task -5. Updates context files -6. Exits — the loop restarts with fresh context - -This is separate from but complementary to ctx: -- **ctx** = context persistence (`.context/`) -- **Ralph Loop** = iterative AI workflow (`PROMPT.md` + `specs/`) - -## Session History - -Past sessions can be browsed with `ctx journal source` and inspected with -`ctx journal source --show `. 
Session transcripts are automatically captured by -Claude Code and can be imported to a journal with `ctx journal import`. - -This allows future sessions to understand past context without relying on -conversation memory. diff --git a/examples/demo/specs/oauth2.md b/examples/demo/specs/oauth2.md deleted file mode 100644 index 2349a7010..000000000 --- a/examples/demo/specs/oauth2.md +++ /dev/null @@ -1,94 +0,0 @@ -# OAuth2 Authentication Spec - -## Overview - -Implement OAuth2 authentication supporting Google and GitHub providers. - -## Requirements - -### Functional - -1. **Provider Support** - - Google OAuth2 - - GitHub OAuth2 - - Extensible provider interface for future additions - -2. **Flow** - - User clicks "Sign in with Google/GitHub" - - Redirect to provider's authorization page - - Provider redirects back with authorization code - - Exchange code for access token - - Fetch user profile from provider - - Create or update local user record - - Issue JWT session token - -3. **User Linking** - - If email already exists, link OAuth identity to existing account - - If new email, create new user account - - Store provider ID for future logins - -### Non-Functional - -- Token exchange must complete in < 2 seconds -- Handle provider downtime gracefully (show user-friendly error) -- Log all OAuth events for security auditing - -## API Endpoints - -``` -GET /auth/oauth/{provider} # Initiate OAuth flow -GET /auth/oauth/{provider}/callback # Handle OAuth callback -POST /auth/logout # Revoke session -``` - -## Data Model - -```go -type OAuthIdentity struct { - ID string `json:"id"` - UserID string `json:"user_id"` - Provider string `json:"provider"` // "google", "github" - ProviderID string `json:"provider_id"` - Email string `json:"email"` - CreatedAt time.Time `json:"created_at"` -} -``` - -## Configuration - -```yaml -oauth: - google: - client_id: ${GOOGLE_CLIENT_ID} - client_secret: ${GOOGLE_CLIENT_SECRET} - redirect_url: 
https://example.com/auth/oauth/google/callback - github: - client_id: ${GITHUB_CLIENT_ID} - client_secret: ${GITHUB_CLIENT_SECRET} - redirect_url: https://example.com/auth/oauth/github/callback -``` - -## Security Considerations - -- Use `state` parameter to prevent CSRF attacks -- Validate redirect URLs against allowlist -- Never log access tokens or client secrets -- Store only necessary user data from provider - -## Testing - -- Unit tests for token exchange logic -- Integration tests with mock OAuth provider -- E2E test with real providers in staging environment - -## Tasks - -These map to `.context/TASKS.md` Phase 2: - -1. [ ] Create OAuth provider interface -2. [ ] Implement Google OAuth provider -3. [ ] Implement GitHub OAuth provider -4. [ ] Add OAuth callback handler -5. [ ] Implement user linking logic -6. [ ] Add OAuth configuration loading -7. [ ] Write integration tests diff --git a/hack/detect-ai-typography.sh b/hack/detect-ai-typography.sh index 71e016374..9b30619a7 100755 --- a/hack/detect-ai-typography.sh +++ b/hack/detect-ai-typography.sh @@ -100,13 +100,35 @@ else PATTERN="${ENDASH}|${EMDASH}|${LSQ}|${RSQ}|${LDQ}|${RDQ}| -- |\`\`\`\`" fi -# Files where typographic punctuation is intentional. -# Add glob patterns here to skip specific paths. +# Directories pruned before find descends into them. Scanning these +# wastes I/O on files whose typography is not project-authored (git +# metadata, vendored code, project-scoped context that may legitimately +# contain AI-generated journal entries). Add directory basenames here; +# they match at any depth beneath DIR via "*/". +EXCLUDE_DIRS=( + ".context" + "specs" + ".claude" + "ideas" + ".git" + "node_modules" + "vendor" +) + +# Files where typographic punctuation is intentional. These use shell +# glob (case-statement) matching against the full path as find emits +# it, so patterns typically need a leading "*/" to match at any depth. 
EXCLUDE_PATTERNS=( "*/config/token/delim.go" # Intentional delimiter constants (EmDash, MetaSeparator) "*_test.go" # Test files may contain intentional typographic literals ) +# Build find's -not -path arguments from EXCLUDE_DIRS. +NOT_PATH_ARGS=() +for d in "${EXCLUDE_DIRS[@]}"; do + NOT_PATH_ARGS+=(-not -path "*/${d}/*") +done + file_count=0 hit_count=0 @@ -139,7 +161,7 @@ while IFS= read -r -d '' file; do done fi fi -done < <(find "$DIR" "${FIND_ARGS[@]}" -print0 | sort -z) +done < <(find "$DIR" "${FIND_ARGS[@]}" "${NOT_PATH_ARGS[@]}" -print0 | sort -z) echo "" if [[ "$file_count" -eq 0 ]]; then diff --git a/internal/assets/claude/CLAUDE.md b/internal/assets/claude/CLAUDE.md index f37e37c70..1d40425dc 100644 --- a/internal/assets/claude/CLAUDE.md +++ b/internal/assets/claude/CLAUDE.md @@ -11,11 +11,15 @@ This project uses Context (`ctx`) for context persistence across sessions. ## On Session Start 1. **Run `ctx system bootstrap`**: CRITICAL, not optional. - This tells you where the context directory is. If it fails or returns - no context_dir, STOP and warn the user. + This tells you where the context directory is. + If it returns any error, relay the error output to the user + verbatim, point them at + https://ctx.ist/recipes/activating-context/ for setup, and STOP. + Do not try to activate, initialize, or otherwise recover: **those + are the user's decisions**. Wait for their next instruction. 2. **Read AGENT_PLAYBOOK.md** from the context directory: it explains how to use this system -3. **Run `ctx agent --budget 4000`** for a content summary +3. **Run `ctx agent`** for a content summary ## When Asked "Do You Remember?" @@ -41,7 +45,7 @@ Read them silently, then present what you found as recall, not as a search. ```bash # Get AI-optimized context packet (what you should know) -ctx agent --budget 4000 +ctx agent # Or see full status ctx status @@ -59,6 +63,26 @@ ctx status All files live in the context directory reported by `ctx system bootstrap`. 
+## Context Directory Lives at the Project Root + +The project root is the parent of `.context/`, by contract: that's +where `ctx sync`, `ctx drift`, and the memory-drift hook look for +code, secrets, and `MEMORY.md`. + +For knowledge that spans projects (CONSTITUTION, CONVENTIONS, +ARCHITECTURE), use `ctx hub`. + +Recommended layout: + +``` +~/WORKSPACE/my-project + ├── .git + ├── .context + ├── Makefile + └── specs + └── ... +``` + ## Hook Authority Instructions from PreToolUse hooks regarding `.context/` files are ALWAYS diff --git a/internal/assets/claude/hooks/hooks.json b/internal/assets/claude/hooks/hooks.json index ec59b7b7d..3ba2d70c3 100644 --- a/internal/assets/claude/hooks/hooks.json +++ b/internal/assets/claude/hooks/hooks.json @@ -3,55 +3,56 @@ "PreToolUse": [ { "matcher": ".*", - "hooks": [{"type": "command", "command": "ctx system context-load-gate"}] + "hooks": [{"type": "command", "command": "CTX_DIR=\"${CLAUDE_PROJECT_DIR:?CLAUDE_PROJECT_DIR unset; cannot anchor ctx}/.context\" ctx system context-load-gate"}] }, { "matcher": "Bash", - "hooks": [{"type": "command", "command": "ctx system block-non-path-ctx"}] + "hooks": [{"type": "command", "command": "CTX_DIR=\"${CLAUDE_PROJECT_DIR:?CLAUDE_PROJECT_DIR unset; cannot anchor ctx}/.context\" ctx system block-non-path-ctx"}] }, { "matcher": "Bash", - "hooks": [{"type": "command", "command": "ctx system qa-reminder"}] + "hooks": [{"type": "command", "command": "CTX_DIR=\"${CLAUDE_PROJECT_DIR:?CLAUDE_PROJECT_DIR unset; cannot anchor ctx}/.context\" ctx system qa-reminder"}] }, { "matcher": "EnterPlanMode", - "hooks": [{"type": "command", "command": "ctx system specs-nudge"}] + "hooks": [{"type": "command", "command": "CTX_DIR=\"${CLAUDE_PROJECT_DIR:?CLAUDE_PROJECT_DIR unset; cannot anchor ctx}/.context\" ctx system specs-nudge"}] }, { "matcher": ".*", - "hooks": [{"type": "command", "command": "ctx agent --budget 8000 2>/dev/null || true"}] + "hooks": [{"type": "command", "command": 
"CTX_DIR=\"${CLAUDE_PROJECT_DIR:?CLAUDE_PROJECT_DIR unset; cannot anchor ctx}/.context\" ctx agent --budget 8000 2>/dev/null || true"}] } ], "PostToolUse": [ { "matcher": "Bash", - "hooks": [{"type": "command", "command": "ctx system post-commit"}] + "hooks": [{"type": "command", "command": "CTX_DIR=\"${CLAUDE_PROJECT_DIR:?CLAUDE_PROJECT_DIR unset; cannot anchor ctx}/.context\" ctx system post-commit"}] }, { "matcher": "Edit", - "hooks": [{"type": "command", "command": "ctx system check-task-completion"}] + "hooks": [{"type": "command", "command": "CTX_DIR=\"${CLAUDE_PROJECT_DIR:?CLAUDE_PROJECT_DIR unset; cannot anchor ctx}/.context\" ctx system check-task-completion"}] }, { "matcher": "Write", - "hooks": [{"type": "command", "command": "ctx system check-task-completion"}] + "hooks": [{"type": "command", "command": "CTX_DIR=\"${CLAUDE_PROJECT_DIR:?CLAUDE_PROJECT_DIR unset; cannot anchor ctx}/.context\" ctx system check-task-completion"}] } ], "UserPromptSubmit": [ { "hooks": [ - {"type": "command", "command": "ctx system check-context-size"}, - {"type": "command", "command": "ctx system check-ceremony"}, - {"type": "command", "command": "ctx system check-persistence"}, - {"type": "command", "command": "ctx system check-journal"}, - {"type": "command", "command": "ctx system check-reminder"}, - {"type": "command", "command": "ctx system check-version"}, - {"type": "command", "command": "ctx system check-resource"}, - {"type": "command", "command": "ctx system check-knowledge"}, - {"type": "command", "command": "ctx system check-map-staleness"}, - {"type": "command", "command": "ctx system check-memory-drift"}, - {"type": "command", "command": "ctx system check-freshness"}, - {"type": "command", "command": "ctx system check-skill-discovery"}, - {"type": "command", "command": "ctx system heartbeat"} + {"type": "command", "command": "CTX_DIR_INHERITED=\"${CTX_DIR:-}\" CTX_DIR=\"${CLAUDE_PROJECT_DIR:?CLAUDE_PROJECT_DIR unset; cannot anchor ctx}/.context\" ctx system 
check-anchor-drift"}, + {"type": "command", "command": "CTX_DIR=\"${CLAUDE_PROJECT_DIR:?CLAUDE_PROJECT_DIR unset; cannot anchor ctx}/.context\" ctx system check-context-size"}, + {"type": "command", "command": "CTX_DIR=\"${CLAUDE_PROJECT_DIR:?CLAUDE_PROJECT_DIR unset; cannot anchor ctx}/.context\" ctx system check-ceremony"}, + {"type": "command", "command": "CTX_DIR=\"${CLAUDE_PROJECT_DIR:?CLAUDE_PROJECT_DIR unset; cannot anchor ctx}/.context\" ctx system check-persistence"}, + {"type": "command", "command": "CTX_DIR=\"${CLAUDE_PROJECT_DIR:?CLAUDE_PROJECT_DIR unset; cannot anchor ctx}/.context\" ctx system check-journal"}, + {"type": "command", "command": "CTX_DIR=\"${CLAUDE_PROJECT_DIR:?CLAUDE_PROJECT_DIR unset; cannot anchor ctx}/.context\" ctx system check-reminder"}, + {"type": "command", "command": "CTX_DIR=\"${CLAUDE_PROJECT_DIR:?CLAUDE_PROJECT_DIR unset; cannot anchor ctx}/.context\" ctx system check-version"}, + {"type": "command", "command": "CTX_DIR=\"${CLAUDE_PROJECT_DIR:?CLAUDE_PROJECT_DIR unset; cannot anchor ctx}/.context\" ctx system check-resource"}, + {"type": "command", "command": "CTX_DIR=\"${CLAUDE_PROJECT_DIR:?CLAUDE_PROJECT_DIR unset; cannot anchor ctx}/.context\" ctx system check-knowledge"}, + {"type": "command", "command": "CTX_DIR=\"${CLAUDE_PROJECT_DIR:?CLAUDE_PROJECT_DIR unset; cannot anchor ctx}/.context\" ctx system check-map-staleness"}, + {"type": "command", "command": "CTX_DIR=\"${CLAUDE_PROJECT_DIR:?CLAUDE_PROJECT_DIR unset; cannot anchor ctx}/.context\" ctx system check-memory-drift"}, + {"type": "command", "command": "CTX_DIR=\"${CLAUDE_PROJECT_DIR:?CLAUDE_PROJECT_DIR unset; cannot anchor ctx}/.context\" ctx system check-freshness"}, + {"type": "command", "command": "CTX_DIR=\"${CLAUDE_PROJECT_DIR:?CLAUDE_PROJECT_DIR unset; cannot anchor ctx}/.context\" ctx system check-skill-discovery"}, + {"type": "command", "command": "CTX_DIR=\"${CLAUDE_PROJECT_DIR:?CLAUDE_PROJECT_DIR unset; cannot anchor ctx}/.context\" ctx system 
heartbeat"} ] } ] diff --git a/internal/assets/claude/skills/ctx-architecture-failure-analysis/SKILL.md b/internal/assets/claude/skills/ctx-architecture-failure-analysis/SKILL.md index 613ecfd21..13872ce31 100644 --- a/internal/assets/claude/skills/ctx-architecture-failure-analysis/SKILL.md +++ b/internal/assets/claude/skills/ctx-architecture-failure-analysis/SKILL.md @@ -15,9 +15,9 @@ bugs, not security holes. maps what exists. `/ctx-architecture-enrich` improves map fidelity. This skill generates concrete, disprovable claims about where the map will break under real-world conditions. Every finding is a -hypothesis with evidence — not a suspicion, not a vibe. +hypothesis with evidence, not a suspicion or a vibe. -The goal is to find failure modes that code review misses — the +The goal is to find failure modes that code review misses: the ones that ship, pass tests, and break in production at 3am. This skill requires `/ctx-architecture` artifacts as input. @@ -45,16 +45,16 @@ If they don't exist, stop and tell the user to run ## Inputs **Required** (must exist before running): -- `.context/ARCHITECTURE.md` — system map -- `.context/DETAILED_DESIGN*.md` — module-level detail -- `.context/map-tracking.json` — coverage data +- `.context/ARCHITECTURE.md`: system map +- `.context/DETAILED_DESIGN*.md`: module-level detail +- `.context/map-tracking.json`: coverage data **Optional** (enhances analysis): -- `.context/DANGER-ZONES.md` — existing danger zones from +- `.context/DANGER-ZONES.md`: existing danger zones from `/ctx-architecture` principal mode (used as starting points, not as the final word) -- GitNexus MCP — blast radius estimation, shared-state detection -- Gemini Search — cross-reference against known failure patterns +- GitNexus MCP: blast radius estimation, shared-state detection +- Gemini Search: cross-reference against known failure patterns ## Process @@ -62,7 +62,7 @@ If they don't exist, stop and tell the user to run 1. 
Check that architecture artifacts exist. If missing: > Architecture artifacts not found. Run `/ctx-architecture` - > first — this skill analyzes existing maps, it doesn't + > first; this skill analyzes existing maps, it doesn't > create them. 2. Load `map-tracking.json` to identify which modules have sufficient coverage (confidence >= 0.7). Low-confidence @@ -91,7 +91,7 @@ For each module with confidence >= 0.7: order, shutdown sequence) - State machines and transition points -3. For each mutation point, read the actual source code — +3. For each mutation point, read the actual source code, DETAILED_DESIGN summaries are not enough for failure analysis. You need to see the actual lock scope, the actual error check, the actual nil guard. @@ -279,7 +279,7 @@ _Run after /ctx-architecture for full coverage._ ## Critical (risk score >= 7, silent or cascading) -### DZ-1: [Location] — [Failure Mode] +### DZ-1: [Location]: [Failure Mode] **Category**: Concurrency | Ordering | Cache | Amplification | Ownership | Error Handling | Scaling | Idempotency @@ -304,7 +304,7 @@ _Run after /ctx-architecture for full coverage._ ## Unanalyzed Modules Modules with coverage < 0.7 in map-tracking.json: -- `module/path` (confidence: 0.3) — risk unknown +- `module/path` (confidence: 0.3): risk unknown ``` **Confidence levels:** diff --git a/internal/assets/claude/skills/ctx-architecture/SKILL.md b/internal/assets/claude/skills/ctx-architecture/SKILL.md index 10417fc29..e2b7cb025 100644 --- a/internal/assets/claude/skills/ctx-architecture/SKILL.md +++ b/internal/assets/claude/skills/ctx-architecture/SKILL.md @@ -370,7 +370,7 @@ library with no runtime behavior). **GLOSSARY.md**: append project-specific terms discovered during analysis. This captures the vocabulary that makes the codebase -searchable — type names, internal concepts, abbreviations, and +searchable: type names, internal concepts, abbreviations, and domain jargon that a new reader wouldn't know to search for. 
Rules: @@ -383,7 +383,7 @@ Rules: "singleton"). Include terms that are unique to this codebase or used in a project-specific way - Insert alphabetically into the existing list -- Format: `**Term** — one-line definition` +- Format: `**Term**: one-line definition` - Print added terms in the convergence report under a "Glossary additions" line diff --git a/internal/assets/claude/skills/ctx-commit/SKILL.md b/internal/assets/claude/skills/ctx-commit/SKILL.md index ca7373d4f..81443ad50 100644 --- a/internal/assets/claude/skills/ctx-commit/SKILL.md +++ b/internal/assets/claude/skills/ctx-commit/SKILL.md @@ -55,7 +55,7 @@ Unless the user says `--skip-qa` or "skip checks": **Verify before claiming ready**: map each claim to evidence. "Tests pass" requires test output with 0 failures. "Build succeeds" requires exit 0. "Lint clean" requires linter output with 0 errors. -Run commands fresh — never reuse earlier output. Before proceeding +Run commands fresh; never reuse earlier output. Before proceeding to stage, answer these self-audit questions: 1. What assumptions did I make? diff --git a/internal/assets/claude/skills/ctx-plan/SKILL.md b/internal/assets/claude/skills/ctx-plan/SKILL.md new file mode 100644 index 000000000..5107fbabb --- /dev/null +++ b/internal/assets/claude/skills/ctx-plan/SKILL.md @@ -0,0 +1,55 @@ +--- +name: ctx-plan +description: Stress-test a plan through adversarial interview. Find what's weak, missing, or unexamined before the user commits. Use when the user wants their plan scrutinized. +--- + +You are a skeptical collaborator. The user has a plan and wants it +attacked. Your job is to surface what's weak, missing, or unexamined, +not to help them feel ready. + +State the plan as you understand it and proceed. Only pause if your +restatement exposes a material ambiguity or contradiction. + +Ask one question at a time. Each question must test something specific: +an assumption, a tradeoff, or a failure mode. No fishing. 
No clarifying +questions asked merely to reduce your own workload. + +After the user answers, push back, agree, narrow the question, or move +on; don't just accumulate. Walk the tree depth-first: settle decisions +that constrain others before opening siblings. + +Don't ask the user what the code, docs, or existing `ctx` files can +answer. Read first. Reserve questions for intent, priorities, +tradeoffs, and context that lives only in the user's head. + +Cycle through these angles; don't dwell on one: + +- Scope: what's NOT in this plan, and why? +- Failure modes: what breaks this? How would you notice? +- Alternatives: what did you reject, and what would change your mind? +- Sequencing: why this order? What if step 2 fails? +- Reversibility: if you're wrong in 3 months, how expensive is the unwind? +- Hidden assumptions: what must be true for this to work that isn't yet? + +Offer your take after the user answers, not before. The exception is +when the user is genuinely stuck; then propose a concrete possibility +and ask them to react. + +If the user drifts into implementation mechanics before the main bet is +clear, pull the conversation back to the unresolved bet. + +If a core assumption collapses mid-debate, say so plainly. Don't keep +politely working through the checklist on a plan that's already rotten. + +Do not produce an implementation plan. The deliverable is a debated +brief, not a task list. + +Stop when the user can describe, without your help: + +- what they're betting on +- what they rejected +- the top three failure modes +- the cheapest way to validate the bet +- what becomes expensive to unwind + +Then offer to write the debated brief. 
diff --git a/internal/assets/claude/skills/ctx-remember/SKILL.md b/internal/assets/claude/skills/ctx-remember/SKILL.md index 479a9e35c..bcefcddfa 100644 --- a/internal/assets/claude/skills/ctx-remember/SKILL.md +++ b/internal/assets/claude/skills/ctx-remember/SKILL.md @@ -38,7 +38,7 @@ feel like a file search rather than genuine recall: 1. **Load context packet**: ```bash - ctx agent --budget 4000 + ctx agent ``` 2. **Read the files** listed in the packet's "Read These Files" section (TASKS.md, DECISIONS.md, LEARNINGS.md, etc.) diff --git a/internal/assets/commands/commands.yaml b/internal/assets/commands/commands.yaml index d066cad3a..815d77a2e 100644 --- a/internal/assets/commands/commands.yaml +++ b/internal/assets/commands/commands.yaml @@ -8,6 +8,41 @@ # # See also: examples.yaml (Example fields), flags.yaml (flag descriptions). +activate: + long: |- + Emit a shell-specific export statement that binds CTX_DIR to the + selected .context/ directory for the current shell. + + Intended usage: + + eval "$(ctx activate)" # bind the one visible .context/ + + Activate scans upward from the current working directory + collecting every .context/ directory found. When exactly one is + visible it emits its path; when none or several are found it + refuses and prints the candidates so a human chooses explicitly + (typically by `cd`-ing closer to the project root). + + When the parent shell already has CTX_DIR set to a different + value, the output gains a leading `# ctx: replacing stale ...` + comment so the user sees the change in `eval` output before the + replacement takes effect. + + Activate is the only command in the CLI that walks the filesystem + during resolution. All other commands read CTX_DIR and error + loudly when it is undeclared, relative, or non-canonical. + short: Emit shell export to bind CTX_DIR +deactivate: + long: |- + Emit a shell-specific `unset CTX_DIR` statement for the current + shell. 
+ + Intended usage: + + eval "$(ctx deactivate)" + + Pairs with `ctx activate` for symmetric shell integration. + short: Emit shell unset for CTX_DIR add: long: |- Add a new decision, task, learning, or convention @@ -1065,9 +1100,9 @@ system: Go binaries and a small set of session-lifecycle plumbing commands used by skills and editor integrations. - User-facing maintenance commands (backup, bootstrap, event, - message, prune, resource, stats) have been promoted to - top-level commands. Run `ctx --help` to see them. + User-facing maintenance commands (bootstrap, event, message, + prune, resource, stats) have been promoted to top-level + commands. Run `ctx --help` to see them. Plumbing subcommands (used by skills and automation): mark-journal Update journal processing state @@ -1078,6 +1113,7 @@ system: Hook subcommands (Claude Code plugin - safe to run manually): context-load-gate Context file read directive (PreToolUse) + check-anchor-drift Stale-anchor sanity check check-context-size Context size checkpoint check-ceremony Session ceremony adoption nudge check-persistence Context persistence nudge @@ -1088,8 +1124,6 @@ system: check-version Version update nudge check-map-staleness Architecture map staleness nudge block-non-path-ctx Block non-PATH ctx invocations - block-dangerous-command Block dangerous command patterns (project-local) - check-backup-age Backup staleness check (project-local) check-task-completion Task completion nudge after edits post-commit Post-commit context capture nudge qa-reminder QA reminder before completion @@ -1097,30 +1131,6 @@ system: check-memory-drift Memory drift nudge (MEMORY.md changed) heartbeat Session heartbeat webhook (no stdout) short: Hook plumbing (hidden); promoted commands moved to top-level -backup: - long: |- - Create timestamped tar.gz archives of project context and/or global - Claude Code data. Optionally copies archives to an SMB share. 
- - Scopes: - project .context/, .claude/, ideas/, ~/.bashrc - global ~/.claude/ (excludes todos/) - all Both project and global (default) - - Environment: - CTX_BACKUP_SMB_URL - SMB share URL (e.g. smb://host/share) - CTX_BACKUP_SMB_SUBDIR - Subdirectory on share (default: ctx-sessions) - short: Backup context and Claude data -system.blockdangerouscommand: - long: |- - Regex safety net for commands that the deny-list cannot express. - Catches mid-command sudo, mid-command git push, and binary installs - to bin directories. - - Hook event: PreToolUse (Bash) - Output: {"decision":"block","reason":"..."} or silent - Silent when: command doesn't match any dangerous pattern - short: Block dangerous command patterns (regex safety net) system.blocknonpathctx: long: |- Blocks ./ctx, go run ./cmd/ctx, and absolute-path ctx invocations. @@ -1133,20 +1143,21 @@ system.blocknonpathctx: short: Block non-PATH ctx invocations bootstrap: short: Print context location for AI agents -system.checkbackupage: +system.checkanchordrift: long: |- - Checks if the .context backup is stale (>2 days old) or the SMB share - is unmounted. Outputs a VERBATIM relay warning when issues are found. - Throttled to once per day. - - Environment: - CTX_BACKUP_SMB_URL - SMB share URL (e.g. smb://myhost/myshare). - If unset, the SMB mount check is skipped. + Compares the parent shell's CTX_DIR (captured as + CTX_DIR_INHERITED before the standard hook injection) + against the Claude-injected CLAUDE_PROJECT_DIR/.context + anchor. When the two diverge, emits a VERBATIM warning + banner naming both values so the user can spot when + their interactive CLI / `!`-pragma calls are writing to + a different project than Claude Code is in. 
Hook event: UserPromptSubmit - Output: VERBATIM relay with warning box, silent otherwise - Silent when: backup is fresh, or already checked today - short: Backup staleness check hook + Output: VERBATIM warning (when drifted), silent otherwise + Silent when: CTX_DIR_INHERITED is empty (no shell-level + activation), or matches CTX_DIR after filepath.Clean. + short: Stale-anchor sanity hook system.checkceremony: long: |- Scans the last 3 journal entries for /ctx-remember and /ctx-wrap-up diff --git a/internal/assets/commands/examples.yaml b/internal/assets/commands/examples.yaml index 2a3a282ea..97df674b2 100644 --- a/internal/assets/commands/examples.yaml +++ b/internal/assets/commands/examples.yaml @@ -9,6 +9,14 @@ # # See also: commands.yaml (Short/Long), flags.yaml (flag descriptions). +activate: + short: |2- + eval "$(ctx activate)" + +deactivate: + short: |2- + eval "$(ctx deactivate)" + add: short: |2- ctx add decision "Use PostgreSQL" --context "..." --rationale "..." --consequence "..." @@ -427,22 +435,14 @@ system: ctx system --help # list hidden hook plumbing ctx system mark-wrapped-up -backup: - short: |2- - ctx backup - ctx backup --scope project - -system.blockdangerouscommand: - short: ' ctx system block-dangerous-command' - system.blocknonpathctx: short: ' ctx system block-non-path-ctx' bootstrap: short: ' ctx system bootstrap' -system.checkbackupage: - short: ' ctx system check-backup-age' +system.checkanchordrift: + short: ' ctx system check-anchor-drift' system.checkceremony: short: ' ctx system check-ceremony' diff --git a/internal/assets/commands/flags.yaml b/internal/assets/commands/flags.yaml index 73dee64c8..9f855270e 100644 --- a/internal/assets/commands/flags.yaml +++ b/internal/assets/commands/flags.yaml @@ -8,6 +8,8 @@ # # See also: commands.yaml (Short/Long), examples.yaml (Example fields). 
+activate.shell: + short: Shell dialect for the emitted export (bash, zsh, sh); default auto-detects from $SHELL add.application: short: 'Application for learnings: how to apply this going forward (required for learnings)' add.branch: @@ -44,14 +46,10 @@ agent.skill: short: Include named skill content in context packet agent.include-hub: short: Include ctx Hub entries in context packet -allow-outside-cwd: - short: Allow context directory outside current working directory changes.since: short: 'Time reference: duration (24h) or date (2026-03-01)' compact.archive: short: Create .context/archive/ for old content -context-dir: - short: 'Override context directory path (default: .context)' initialize.caller: short: Identify the calling tool (e.g. vscode) to tailor output @@ -213,10 +211,6 @@ steering.sync.all: short: Sync to all supported tool formats sync.dry-run: short: Show what would change without modifying -backup.json: - short: Output results as JSON -backup.scope: - short: 'Backup scope: project, global, or all' bootstrap.json: short: Output in JSON format bootstrap.quiet: diff --git a/internal/assets/commands/text/doctor.yaml b/internal/assets/commands/text/doctor.yaml index ce48563a9..331cb5a4d 100644 --- a/internal/assets/commands/text/doctor.yaml +++ b/internal/assets/commands/text/doctor.yaml @@ -1,6 +1,10 @@ # Doctor diagnostic text strings for ctx CLI. # Used by assets.TextDesc() for health check output. 
+doctor.check-did-not-run: + short: 'check did not run: %v' +doctor.check-did-not-run-cascade: + short: 'check did not run: %v (subsequent context-dependent checks skipped)' doctor.context-file.format: short: '%-22s ~%d tokens' doctor.context-initialized.error: diff --git a/internal/assets/commands/text/errors.yaml b/internal/assets/commands/text/errors.yaml index f3438aae1..6744e7247 100644 --- a/internal/assets/commands/text/errors.yaml +++ b/internal/assets/commands/text/errors.yaml @@ -34,36 +34,45 @@ err.fmt.no-files: short: 'no context files found in %s' err.fmt.needs-formatting: short: files need formatting -err.backup.backup-global: - short: 'global backup: %w' -err.backup.backup-project: - short: 'project backup: %w' -err.backup.backup-smb-config: - short: 'parse SMB config: %w' err.backup.context-dir-not-found: short: "context directory not found: %s - run 'ctx init'" -err.backup.create-archive: - short: 'create archive file: %w' err.backup.create-archive-dir: short: 'failed to create archive directory: %w' err.backup.create-backup: short: 'failed to create backup %s: %w' -err.backup.invalid-backup-scope: - short: 'invalid scope %q: must be project, global, or all' -err.backup.invalid-smb-url: - short: 'invalid SMB URL: %s' -err.backup.mount-failed: - short: 'failed to mount %s: %w' -err.backup.smb-missing-share: - short: 'SMB URL missing share name: %s' -err.backup.source-not-found: - short: 'source not found: %s' err.backup.write-archive: short: 'failed to write archive: %w' -err.backup.write-smb: - short: 'write to SMB: %w' err.context.dir-not-found: short: 'context directory not found: ' +err.context.not-declared-zero: + short: |- + no context directory specified for this project + See: https://ctx.ist/recipes/activating-context/ +err.context.not-declared-one: + short: |- + no context directory specified; a likely candidate is at %s + See: https://ctx.ist/recipes/activating-context/ +err.context.not-declared-many: + short: |- + no context directory 
specified; multiple candidates visible: + %s + See: https://ctx.ist/recipes/activating-context/ +err.context.relative-not-allowed: + short: |- + CTX_DIR must be an absolute path; got %q + See: https://ctx.ist/recipes/activating-context/ +err.context.non-canonical-basename: + short: |- + CTX_DIR basename must be %q; got %q + See: https://ctx.ist/recipes/activating-context/ +err.context.dir-not-a-directory: + short: 'CTX_DIR points at a file, not a directory: %s' +err.context.dir-stat: + short: 'cannot stat CTX_DIR %s: %w' +err.activate.no-candidates: + short: |- + ctx activate: no .context/ directory found from this location + See: https://ctx.ist/recipes/activating-context/ err.cli.no-tool-specified: short: 'no tool specified: use --tool or set the tool field in .ctxrc' err.config.golden-not-found: @@ -120,8 +129,6 @@ err.date.invalid-date: short: 'invalid %s date %q (expected YYYY-MM-DD): %w' err.date.invalid-date-value: short: invalid date %q (expected YYYY-MM-DD) -err.fs.boundary-violation: - short: "%w\nUse --allow-outside-cwd to override this check" err.fs.create-dir: short: 'failed to create directory %s: %w' err.fs.dir-not-found: @@ -546,8 +553,6 @@ err.validate.context-dir-symlink: short: 'context directory %q is a symlink' err.validate.context-file-symlink: short: 'context file %q is a symlink' -err.validate.context-outside-root: - short: 'context directory %q resolves outside project root %q' err.validate.invalid-selection: short: 'invalid selection: %q (expected 1-%d)' err.validate.unknown-document: diff --git a/internal/assets/commands/text/hooks.yaml b/internal/assets/commands/text/hooks.yaml index 2ef3b1a7a..e329f83e2 100644 --- a/internal/assets/commands/text/hooks.yaml +++ b/internal/assets/commands/text/hooks.yaml @@ -1,44 +1,20 @@ # Hook output text strings for ctx CLI. # Used by assets.TextDesc() for hook messages and checks. -backup.box-title: - short: Backup Warning -backup.no-marker: - short: No backup marker found - backup may have never run. 
-backup.relay-message: - short: Backup warning -backup.relay-prefix: - short: 'IMPORTANT: Relay this backup warning to the user VERBATIM before answering their question.' -backup.run-hint: - short: 'Run: ctx backup' -backup.smb-not-mounted: - short: SMB share (%s) is not mounted. -backup.smb-unavailable: - short: Backups cannot run until it's available. -backup.stale: - short: Last .context backup is %d days old. block.absolute-path: short: 'Use ''ctx'' from PATH, not absolute paths. Ask the user to run: make build && sudo make install' block.constitution-suffix: short: 'See CONSTITUTION.md: ctx Invocation Invariants' -block.cp-to-bin: - short: Agent must not copy binaries to bin directories. Ask the user to run 'sudo make install' instead. block.dot-slash: short: 'Use ''ctx'' from PATH, not ''./ctx'' or ''./dist/ctx''. Ask the user to run: make build && sudo make install' block.go-run: short: 'Use ''ctx'' from PATH, not ''go run ./cmd/ctx''. Ask the user to run: make build && sudo make install' -block.install-to-local-bin: - short: Do not copy binaries to ~/.local/bin - this overrides the system ctx in /usr/local/bin. Use 'ctx' from PATH. -block.mid-git-push: - short: git push requires explicit user approval. -block.mid-sudo: - short: Cannot use sudo (no password access). Use 'make build && sudo make install' manually if needed. block.non-path-relay-message: short: Blocked non-PATH ctx invocation bootstrap.next-steps: short: |- 1. Read AGENT_PLAYBOOK.md from the context directory - 2. Run `ctx agent --budget 4000` for a content summary + 2. Run `ctx agent` for a content summary bootstrap.none: short: (none) bootstrap.plugin-warning: @@ -113,6 +89,23 @@ check-context-size.billing-log-format: short: prompt#%d BILLING-WARNING tokens=%d threshold=%d relay.prefix-format: short: '%s: %s' +check-anchor-drift.box-title: + short: Anchor Drift +check-anchor-drift.content: + short: |- + CTX_DIR (your shell) and CLAUDE_PROJECT_DIR/.context + point at different projects. 
+ + shell: %s + claude: %s + + Re-run `eval "$(ctx activate)"` from this project to + realign, or close this Claude Code session and reopen + it from the right working directory. +check-anchor-drift.relay-message: + short: CTX_DIR diverges from Claude project anchor +check-anchor-drift.relay-prefix: + short: 'IMPORTANT: Relay this anchor drift notice to the user VERBATIM before answering their question.' check-context-size.billing-relay-format: short: Billing threshold exceeded (%s tokens > %s) check-context-size.billing-relay-prefix: diff --git a/internal/assets/commands/text/write.yaml b/internal/assets/commands/text/write.yaml index 96c2c9f09..19f324c83 100644 --- a/internal/assets/commands/text/write.yaml +++ b/internal/assets/commands/text/write.yaml @@ -11,12 +11,6 @@ write.spec-nudge-tip: short: 'Tip: this task may benefit from a spec. Run /ctx-spec to scaffold one.' write.archived: short: Archived previous mirror to %s -write.backup-result: - short: '%s: %s (%s)' -write.backup-skip-entry: - short: "skipping %s (not found)\n" -write.backup-smb-dest: - short: ' → %s' write.bootstrap-dir: short: 'context_dir: %s' write.bootstrap-files: @@ -67,8 +61,6 @@ write.format-bytes-raw: short: "%d B" write.format-bytes-unit: short: "%.1f %cB" -write.format-gvfs-path: - short: "/run/user/%d/gvfs/smb-share:server=%s,share=%s" write.format-duration-day: short: "%dd" write.format-duration-hour: @@ -193,6 +185,28 @@ write.init-makefile-includes: short: ' ○ Makefile (already includes %s)' write.init-merged: short: ' ✓ %s (merged)' +write.init-anatomy-preamble: + short: |- + # Getting Started with ctx + + The `.context/` directory is your project's persistent memory. + Its parent (this directory) is the project root by contract; + that's what `ctx sync` and drift detection scan. `CTX_DIR` + must be an absolute path ending in `.context`. One `.context/` + per project; knowledge sharing across projects goes through + `ctx hub`, not a shared directory. 
+ + Full reference: https://ctx.ist/recipes/activating-context/ +write.init-activate-hint: + short: |4- + + Bind CTX_DIR for this shell (required before other ctx commands): + + eval "$(ctx activate)" + + Or export the absolute path directly (skip the scan): + + export CTX_DIR=%s write.init-next-steps-block: short: |4- @@ -252,7 +266,7 @@ write.init-getting-started-saved: write.init-no-changes: short: ' ○ %s (no changes needed)' write.init-overwrite-prompt: - short: '%s already exists. Overwrite? [y/N] ' + short: '%s is already initialized. Re-run init? Existing files are preserved; missing ones are added. (Use --force to overwrite existing files.) [y/N] ' write.init-perms-allow: short: ' ✓ %s (added ctx permissions)' write.init-perms-allow-deny: @@ -581,7 +595,7 @@ write.paused: write.paused-message: short: "ctx:paused (%d turns) - resume with /ctx-resume" write.prefix-error: - short: 'Error: ' + short: 'Error:' write.prefix-warn: short: ' ! %s: %v' write.publish-block: diff --git a/internal/assets/context/AGENT_PLAYBOOK.md b/internal/assets/context/AGENT_PLAYBOOK.md index 8a1bcac95..6ba48fe9c 100644 --- a/internal/assets/context/AGENT_PLAYBOOK.md +++ b/internal/assets/context/AGENT_PLAYBOOK.md @@ -74,6 +74,21 @@ go run ./cmd/ctx # ✗ avoid unless developing ctx itself ``` Check with `which ctx` if unsure whether it's installed. +### When ctx Returns an Error + +Triage the error before reacting: + +- **Invocation error**: the message points at your call: unknown + flag, unknown command, wrong argument count, missing required + flag. Read `ctx --help`, fix the call, and retry. +- **Everything else**: missing context directory, config problem, + hook rejection, permission denied, unexpected failure. Relay the + output to the user **verbatim** and stop. Do not add flags, run + other commands, edit files to fix the cause, or retry. Wait for + the user's next instruction. + +When unsure which kind you're looking at, treat it as the second. 
+ ## Context Readback Before starting any work, read the required context files and confirm to the @@ -83,7 +98,7 @@ conventions." Do not begin implementation until you have done so. ## Supplementary Files These files live in `.context/` alongside the core context files. -Read them when the task at hand warrants it — not on every session. +Read them when the task at hand warrants it, not on every session. | File | Read when | |--------------------|----------------------------------------------------------------| @@ -91,6 +106,28 @@ Read them when the task at hand warrants it — not on every session. | DETAILED_DESIGN.md | Deep-diving into internals (generated via `/ctx-architecture`) | | GLOSSARY.md | Encountering unfamiliar project-specific terminology | +## Context Directory Lives at the Project Root + +The project root is the parent of `.context/`, by contract — +specifically `filepath.Dir(ContextDir())`. That's where `ctx sync`, +`ctx drift`, and the memory-drift hook look for code, secrets, +and `MEMORY.md`. + +For knowledge that spans projects (CONSTITUTION, CONVENTIONS, +ARCHITECTURE), use `ctx hub`. + +Recommended layout: + +``` +~/WORKSPACE/my-project + ├── .git + ├── .context + ├── Makefile + ├── Makefile.ctx + └── specs + └── ... +``` + ## Reason Before Acting Before implementing any non-trivial change, think through it step-by-step: @@ -280,7 +317,7 @@ Never assume. If you don't see it in files, you don't know it. ## Planning Work Every commit requires a `Spec:` trailer (CONSTITUTION rule). This means -every piece of work needs a spec — no exceptions, no "trivial" qualifier. +every piece of work needs a spec; no exceptions, no "trivial" qualifier. A one-liner bugfix gets a one-paragraph spec; a multi-package feature gets a full design document. The spec exists for traceability, not ceremony. 
diff --git a/internal/assets/context/AGENT_PLAYBOOK_GATE.md b/internal/assets/context/AGENT_PLAYBOOK_GATE.md index 65a9cb004..d5c3b2974 100644 --- a/internal/assets/context/AGENT_PLAYBOOK_GATE.md +++ b/internal/assets/context/AGENT_PLAYBOOK_GATE.md @@ -8,9 +8,15 @@ lifecycle details, or anti-patterns. ```bash ctx status # correct -./dist/ctx # wrong — never hardcode paths -go run ./cmd/ctx # wrong — unless developing ctx itself -```` +./dist/ctx # wrong: never hardcode paths +go run ./cmd/ctx # wrong: unless developing ctx itself +``` + +## When `ctx` Errors + +If the error names your flag, argument, or command, read +`ctx --help` and fix the call. Otherwise, relay verbatim +and stop. When unsure, stop. ## File Interaction Protocol @@ -30,12 +36,12 @@ When a task involves reading, modifying, or reasoning about a file: Do not begin implementation without a spec. Every commit requires a `Spec:` trailer. Every piece of work needs -a spec — no exceptions. Scale the spec to the work. Use `/ctx-spec` +a spec; no exceptions. Scale the spec to the work. Use `/ctx-spec` to scaffold. ## Proactive Persistence -After completing a task, making a decision, or hitting a gotcha — +After completing a task, making a decision, or hitting a gotcha, persist before continuing. Don't wait for session end. ## Chunk and Checkpoint diff --git a/internal/assets/embed_test.go b/internal/assets/embed_test.go index 35ccc78dd..373f32f91 100644 --- a/internal/assets/embed_test.go +++ b/internal/assets/embed_test.go @@ -341,13 +341,11 @@ func TestSchemaCoversCtxRC(t *testing.T) { // We marshal a struct with all fields set to get every key emitted. 
type ctxRC struct { Profile string `yaml:"profile"` - ContextDir string `yaml:"context_dir"` TokenBudget int `yaml:"token_budget"` PriorityOrder []int `yaml:"priority_order"` AutoArchive bool `yaml:"auto_archive"` ArchiveAfterDays int `yaml:"archive_after_days"` ScratchpadEncrypt *bool `yaml:"scratchpad_encrypt"` - AllowOutsideCwd bool `yaml:"allow_outside_cwd"` EntryCountLearnings int `yaml:"entry_count_learnings"` EntryCountDecisions int `yaml:"entry_count_decisions"` ConventionLineCount int `yaml:"convention_line_count"` @@ -435,7 +433,7 @@ func TestListHookMessages(t *testing.T) { wantHooks := []string{ "qa-reminder", "check-context-size", - "block-dangerous-command", + "block-non-path-ctx", } for _, exp := range wantHooks { if !hookSet[exp] { diff --git a/internal/assets/hooks/messages/block-dangerous-command/cp-to-bin.txt b/internal/assets/hooks/messages/block-dangerous-command/cp-to-bin.txt deleted file mode 100644 index 2f94075c2..000000000 --- a/internal/assets/hooks/messages/block-dangerous-command/cp-to-bin.txt +++ /dev/null @@ -1 +0,0 @@ -Agent must not copy binaries to bin directories. Ask the user to run 'sudo make install' instead. \ No newline at end of file diff --git a/internal/assets/hooks/messages/block-dangerous-command/install-to-local-bin.txt b/internal/assets/hooks/messages/block-dangerous-command/install-to-local-bin.txt deleted file mode 100644 index 6a749bf84..000000000 --- a/internal/assets/hooks/messages/block-dangerous-command/install-to-local-bin.txt +++ /dev/null @@ -1 +0,0 @@ -Do not copy binaries to ~/.local/bin: this overrides the system ctx in /usr/local/bin. Use 'ctx' from PATH. 
\ No newline at end of file diff --git a/internal/assets/hooks/messages/block-dangerous-command/mid-git-push.txt b/internal/assets/hooks/messages/block-dangerous-command/mid-git-push.txt deleted file mode 100644 index e1d9f3cb3..000000000 --- a/internal/assets/hooks/messages/block-dangerous-command/mid-git-push.txt +++ /dev/null @@ -1 +0,0 @@ -git push requires explicit user approval. \ No newline at end of file diff --git a/internal/assets/hooks/messages/block-dangerous-command/mid-sudo.txt b/internal/assets/hooks/messages/block-dangerous-command/mid-sudo.txt deleted file mode 100644 index e9666aff8..000000000 --- a/internal/assets/hooks/messages/block-dangerous-command/mid-sudo.txt +++ /dev/null @@ -1 +0,0 @@ -Cannot use sudo (no password access). Use 'make build && sudo make install' manually if needed. \ No newline at end of file diff --git a/internal/assets/hooks/messages/block-dangerous-commands/cp-to-bin.txt b/internal/assets/hooks/messages/block-dangerous-commands/cp-to-bin.txt deleted file mode 100644 index 2f94075c2..000000000 --- a/internal/assets/hooks/messages/block-dangerous-commands/cp-to-bin.txt +++ /dev/null @@ -1 +0,0 @@ -Agent must not copy binaries to bin directories. Ask the user to run 'sudo make install' instead. \ No newline at end of file diff --git a/internal/assets/hooks/messages/block-dangerous-commands/install-to-local-bin.txt b/internal/assets/hooks/messages/block-dangerous-commands/install-to-local-bin.txt deleted file mode 100644 index 6a749bf84..000000000 --- a/internal/assets/hooks/messages/block-dangerous-commands/install-to-local-bin.txt +++ /dev/null @@ -1 +0,0 @@ -Do not copy binaries to ~/.local/bin: this overrides the system ctx in /usr/local/bin. Use 'ctx' from PATH. 
\ No newline at end of file diff --git a/internal/assets/hooks/messages/block-dangerous-commands/mid-git-push.txt b/internal/assets/hooks/messages/block-dangerous-commands/mid-git-push.txt deleted file mode 100644 index e1d9f3cb3..000000000 --- a/internal/assets/hooks/messages/block-dangerous-commands/mid-git-push.txt +++ /dev/null @@ -1 +0,0 @@ -git push requires explicit user approval. \ No newline at end of file diff --git a/internal/assets/hooks/messages/block-dangerous-commands/mid-sudo.txt b/internal/assets/hooks/messages/block-dangerous-commands/mid-sudo.txt deleted file mode 100644 index e9666aff8..000000000 --- a/internal/assets/hooks/messages/block-dangerous-commands/mid-sudo.txt +++ /dev/null @@ -1 +0,0 @@ -Cannot use sudo (no password access). Use 'make build && sudo make install' manually if needed. \ No newline at end of file diff --git a/internal/assets/hooks/messages/check-backup-age/warning.txt b/internal/assets/hooks/messages/check-backup-age/warning.txt deleted file mode 100644 index df9763149..000000000 --- a/internal/assets/hooks/messages/check-backup-age/warning.txt +++ /dev/null @@ -1 +0,0 @@ -{{.Warnings}} \ No newline at end of file diff --git a/internal/assets/hooks/messages/registry.yaml b/internal/assets/hooks/messages/registry.yaml index 732b2312a..e305d31ea 100644 --- a/internal/assets/hooks/messages/registry.yaml +++ b/internal/assets/hooks/messages/registry.yaml @@ -2,26 +2,6 @@ # Each entry maps to a template at /.txt. # Fields: hook, variant, category, description, vars (optional). 
-- hook: block-dangerous-command - variant: cp-to-bin - category: ctx-specific - description: Block copying binaries to bin directories - -- hook: block-dangerous-command - variant: install-to-local-bin - category: ctx-specific - description: Block copying binaries to ~/.local/bin - -- hook: block-dangerous-command - variant: mid-git-push - category: ctx-specific - description: Block git push without user approval - -- hook: block-dangerous-command - variant: mid-sudo - category: ctx-specific - description: Block sudo usage - - hook: block-non-path-ctx variant: absolute-path category: ctx-specific @@ -37,12 +17,6 @@ category: ctx-specific description: Block go run ./cmd/ctx invocation -- hook: check-backup-age - variant: warning - category: customizable - description: Backup staleness warning - vars: [Warnings] - - hook: check-freshness variant: stale category: customizable diff --git a/internal/assets/hooks/messages/registry_test.go b/internal/assets/hooks/messages/registry_test.go index 5c387f7b4..ded3ecf66 100644 --- a/internal/assets/hooks/messages/registry_test.go +++ b/internal/assets/hooks/messages/registry_test.go @@ -17,8 +17,8 @@ func TestRegistryCount(t *testing.T) { if registryErr != nil { t.Fatalf("Registry() parse error: %v", registryErr) } - if len(entries) != 32 { - t.Errorf("Registry() returned %d entries, want 32", len(entries)) + if len(entries) != 27 { + t.Errorf("Registry() returned %d entries, want 27", len(entries)) } } diff --git a/internal/assets/integrations/copilot-cli/INSTRUCTIONS.md b/internal/assets/integrations/copilot-cli/INSTRUCTIONS.md index 786373ccb..30ea64ab1 100644 --- a/internal/assets/integrations/copilot-cli/INSTRUCTIONS.md +++ b/internal/assets/integrations/copilot-cli/INSTRUCTIONS.md @@ -1,4 +1,4 @@ -# ctx Agent Instructions — Copilot CLI +# ctx Agent Instructions: Copilot CLI @@ -11,11 +11,13 @@ This project uses Context (`ctx`) for context persistence across sessions. ## On Session Start 1. 
**Run `ctx system bootstrap`**: CRITICAL, not optional. - This tells you where the context directory is. If it fails or returns - no context_dir, STOP and warn the user. + This tells you where the context directory is. If it returns any + error, relay the error output to the user verbatim, point them at + https://ctx.ist/recipes/activating-context/ for setup, and STOP. + Do not try to recover: the user decides. 2. **Read AGENT_PLAYBOOK.md** from the context directory: it explains how to use this system -3. **Run `ctx agent --budget 4000`** for a content summary +3. **Run `ctx agent`** for a content summary ## When Asked "Do You Remember?" @@ -41,7 +43,7 @@ Read them silently, then present what you found as recall, not as a search. ```bash # Get AI-optimized context packet (what you should know) -ctx agent --budget 4000 +ctx agent # Or see full status ctx status @@ -51,7 +53,7 @@ ctx status | File | Purpose | |-----------------|----------------------------------------| -| CONSTITUTION.md | Hard rules — NEVER violate | +| CONSTITUTION.md | Hard rules: NEVER violate | | TASKS.md | Current work items | | DECISIONS.md | Architectural decisions with rationale | | LEARNINGS.md | Gotchas, tips, lessons learned | @@ -76,7 +78,7 @@ Periodically ask yourself: > "If this session ended right now, would the next session know what happened?" -If no — save a session file or update context files before continuing. +If no, save a session file or update context files before continuing. 
## Session Persistence diff --git a/internal/assets/integrations/copilot-cli/scripts/session-start.ps1 b/internal/assets/integrations/copilot-cli/scripts/session-start.ps1 index 8cbbd22fb..a2acdc4b4 100644 --- a/internal/assets/integrations/copilot-cli/scripts/session-start.ps1 +++ b/internal/assets/integrations/copilot-cli/scripts/session-start.ps1 @@ -2,4 +2,4 @@ # Bootstraps context and loads the agent packet try { ctx system bootstrap 2>$null } catch {} -try { ctx agent --budget 4000 2>$null } catch {} +try { ctx agent 2>$null } catch {} diff --git a/internal/assets/integrations/copilot-cli/scripts/session-start.sh b/internal/assets/integrations/copilot-cli/scripts/session-start.sh index 1206e14c3..406291792 100644 --- a/internal/assets/integrations/copilot-cli/scripts/session-start.sh +++ b/internal/assets/integrations/copilot-cli/scripts/session-start.sh @@ -7,4 +7,4 @@ set -euo pipefail ctx system bootstrap 2>/dev/null || true # Load AI-optimized context packet -ctx agent --budget 4000 2>/dev/null || true +ctx agent 2>/dev/null || true diff --git a/internal/assets/integrations/copilot-cli/skills/_ctx-alignment-audit/SKILL.md b/internal/assets/integrations/copilot-cli/skills/_ctx-alignment-audit/SKILL.md index 26dd5576d..0bc0493f3 100644 --- a/internal/assets/integrations/copilot-cli/skills/_ctx-alignment-audit/SKILL.md +++ b/internal/assets/integrations/copilot-cli/skills/_ctx-alignment-audit/SKILL.md @@ -24,7 +24,7 @@ actual agent instructions. ### Step 1: Collect Claims -Read target docs. Extract every behavioral claim — statements +Read target docs. Extract every behavioral claim: statements describing what an agent "will do", "may do", or "offers to do". 
### Step 2: Trace Each Claim @@ -44,7 +44,7 @@ For each claim, determine: | Claim (file:line) | Status | Backing instruction | Gap | |---|---|---|---| | "agent creates tasks" | Gap | None | Not taught | -| "agent saves learnings" | Covered | Playbook | — | +| "agent saves learnings" | Covered | Playbook | n/a | ### Step 4: Fix (if requested) diff --git a/internal/assets/integrations/copilot-cli/skills/ctx-architecture/SKILL.md b/internal/assets/integrations/copilot-cli/skills/ctx-architecture/SKILL.md index 24acef045..f402eaee6 100644 --- a/internal/assets/integrations/copilot-cli/skills/ctx-architecture/SKILL.md +++ b/internal/assets/integrations/copilot-cli/skills/ctx-architecture/SKILL.md @@ -369,7 +369,7 @@ library with no runtime behavior). **GLOSSARY.md**: append project-specific terms discovered during analysis. This captures the vocabulary that makes the codebase -searchable — type names, internal concepts, abbreviations, and +searchable: type names, internal concepts, abbreviations, and domain jargon that a new reader wouldn't know to search for. Rules: @@ -382,7 +382,7 @@ Rules: "singleton"). Include terms that are unique to this codebase or used in a project-specific way - Insert alphabetically into the existing list -- Format: `**Term** — one-line definition` +- Format: `**Term**: one-line definition` - Print added terms in the convergence report under a "Glossary additions" line diff --git a/internal/assets/integrations/copilot-cli/skills/ctx-commit/SKILL.md b/internal/assets/integrations/copilot-cli/skills/ctx-commit/SKILL.md index ca7373d4f..81443ad50 100644 --- a/internal/assets/integrations/copilot-cli/skills/ctx-commit/SKILL.md +++ b/internal/assets/integrations/copilot-cli/skills/ctx-commit/SKILL.md @@ -55,7 +55,7 @@ Unless the user says `--skip-qa` or "skip checks": **Verify before claiming ready**: map each claim to evidence. "Tests pass" requires test output with 0 failures. "Build succeeds" requires exit 0. 
"Lint clean" requires linter output with 0 errors. -Run commands fresh — never reuse earlier output. Before proceeding +Run commands fresh; never reuse earlier output. Before proceeding to stage, answer these self-audit questions: 1. What assumptions did I make? diff --git a/internal/assets/integrations/copilot-cli/skills/ctx-remember/SKILL.md b/internal/assets/integrations/copilot-cli/skills/ctx-remember/SKILL.md index 4ef08d1bc..245c7b8e3 100644 --- a/internal/assets/integrations/copilot-cli/skills/ctx-remember/SKILL.md +++ b/internal/assets/integrations/copilot-cli/skills/ctx-remember/SKILL.md @@ -37,7 +37,7 @@ feel like a file search rather than genuine recall: 1. **Load context packet**: ```bash - ctx agent --budget 4000 + ctx agent ``` 2. **Read the files** listed in the packet's "Read These Files" section (TASKS.md, DECISIONS.md, LEARNINGS.md, etc.) diff --git a/internal/assets/integrations/copilot-cli/skills/ctx-sanitize-permissions/SKILL.md b/internal/assets/integrations/copilot-cli/skills/ctx-sanitize-permissions/SKILL.md index e4761062c..69c24c0f1 100644 --- a/internal/assets/integrations/copilot-cli/skills/ctx-sanitize-permissions/SKILL.md +++ b/internal/assets/integrations/copilot-cli/skills/ctx-sanitize-permissions/SKILL.md @@ -49,11 +49,11 @@ unrestricted access. ## Permission Audit Results ### 🔴 Critical (N) -1. `Bash(*)` — unrestricted shell access +1. `Bash(*)`: unrestricted shell access → Suggest: scope to specific commands ### 🟡 Warning (N) -1. `Write(/etc/*)` — write access to system dirs +1. 
`Write(/etc/*)`: write access to system dirs → Suggest: remove or scope to project ### ✅ Clean (N entries passed) diff --git a/internal/assets/integrations/copilot/copilot-instructions.md b/internal/assets/integrations/copilot/copilot-instructions.md index 7dcf81046..0d63db10b 100644 --- a/internal/assets/integrations/copilot/copilot-instructions.md +++ b/internal/assets/integrations/copilot/copilot-instructions.md @@ -143,17 +143,17 @@ responses when governance actions are overdue. Follow this protocol: ### During Work - **After making a decision or discovering a gotcha**: call `ctx_add()` - to persist it immediately — not at session end. + to persist it immediately, not at session end. - **After completing a task**: call `ctx_complete()` or `ctx_check_task_completion()`. -- **Every 10–15 tool calls or 15 minutes**: call `ctx_drift()` to +- **Every 10-15 tool calls or 15 minutes**: call `ctx_drift()` to check for stale context. - **Before git commit**: call `ctx_status()` to verify context health. ### Responding to Warnings When a tool response contains a `⚠` warning, act on it in your next -action. Do not ignore governance warnings — they indicate context +action. Do not ignore governance warnings; they indicate context hygiene actions that are overdue. When a tool response contains a `🚨 CRITICAL` warning, **stop current @@ -169,7 +169,7 @@ real time. 
The following actions are flagged as violations: - **Dangerous commands**: `sudo`, `rm -rf /`, `git push`, `git reset --hard`, `curl`, `wget`, `chmod 777` -- **hack/ scripts**: Direct execution of `hack/*.sh` — use `make` +- **hack/ scripts**: Direct execution of `hack/*.sh`; use `make` targets instead - **Sensitive files**: Editing `.env`, `.pem`, `.key`, or files matching `credentials` or `secret` diff --git a/internal/assets/read/desc/desc.go b/internal/assets/read/desc/desc.go index ff6c7a8f7..105d84e68 100644 --- a/internal/assets/read/desc/desc.go +++ b/internal/assets/read/desc/desc.go @@ -29,7 +29,7 @@ func Command(key string) (short, long string) { // Flag returns the description for a flag. // -// Keys use dot notation: "add.file", "context-dir". +// Keys use dot notation: "add.file", "compact.archive". // Returns an empty string if the key is not found. // // Parameters: diff --git a/internal/assets/schema/ctxrc.schema.json b/internal/assets/schema/ctxrc.schema.json index 7b191c44f..8e2dae1f3 100644 --- a/internal/assets/schema/ctxrc.schema.json +++ b/internal/assets/schema/ctxrc.schema.json @@ -10,10 +10,6 @@ "type": "string", "description": "Active configuration profile name." }, - "context_dir": { - "type": "string", - "description": "Name of the context directory. Default: .context." - }, "token_budget": { "type": "integer", "description": "Default token budget for context assembly. Default: 8000.", @@ -39,10 +35,6 @@ "type": "boolean", "description": "Whether to encrypt the scratchpad. Default: true." }, - "allow_outside_cwd": { - "type": "boolean", - "description": "Skip boundary validation for external context dirs. Default: false." - }, "entry_count_learnings": { "type": "integer", "description": "Entry count threshold for LEARNINGS.md drift warning. Default: 30. 
0 disables.", diff --git a/internal/assets/tpl/README.md b/internal/assets/tpl/README.md index f053a1b12..a287be6c5 100644 --- a/internal/assets/tpl/README.md +++ b/internal/assets/tpl/README.md @@ -21,9 +21,9 @@ Move it to a YAML text entry instead. | `tpl_entry.go` | 15 | ctx add entry templates (decision, learning, convention, task) | | `tpl_journal.go` | 26 | Journal markdown rendering | | `tpl_loop.go` | 15 | Shell script generation for autonomous loops | -| `tpl_obsidian.go` | 1 | Obsidian vault README (borderline — could migrate) | +| `tpl_obsidian.go` | 1 | Obsidian vault README (borderline; could migrate) | | `tpl_recall.go` | 21 | Recall output rendering | -| `tpl_trigger.go` | 2 | Trigger script scaffold (borderline — could migrate) | +| `tpl_trigger.go` | 2 | Trigger script scaffold (borderline; could migrate) | ## How they will be replaced diff --git a/internal/audit/README.md b/internal/audit/README.md index c19ce04fe..0a092aa86 100644 --- a/internal/audit/README.md +++ b/internal/audit/README.md @@ -40,4 +40,4 @@ directory.** If a test fails, the fix belongs in the code under test, not here. If you believe an exception is truly warranted, surface it to the -user and let them decide — do not silently widen a check. +user and let them decide; do not silently widen a check. diff --git a/internal/audit/cmd_fprint_test.go b/internal/audit/cmd_fprint_test.go new file mode 100644 index 000000000..751c46208 --- /dev/null +++ b/internal/audit/cmd_fprint_test.go @@ -0,0 +1,171 @@ +// / ctx: https://ctx.ist +// ,'`./ do you remember? +// `.,'\ +// \ Copyright 2026-present Context contributors. +// SPDX-License-Identifier: Apache-2.0 + +// ================================================================ +// STOP — Read internal/audit/README.md before editing this file. +// +// These tests enforce project conventions. The codebase is clean: +// all checks pass with zero violations, zero exceptions. 
+// +// If a test fails after your change, fix the code under test. +// Do NOT add allowlist entries, bump grandfathered counters, or +// weaken checks. Exceptions require a dedicated PR with +// justification for every entry. See README.md for the full policy. +// ================================================================ + +package audit + +import ( + "go/ast" + "strings" + "testing" +) + +// fmtFprintMethods lists fmt.Fprint-family helpers that, when +// pointed at a user-facing stream (cmd.OutOrStdout / cmd.OutOrStderr +// / os.Stdout / os.Stderr), bypass the internal/write/ formatting +// pipeline. The cmd_print and printf_calls tests catch the direct +// `cmd.Print*(...)` form; this test closes the indirect form +// `fmt.Fprint*(stream, ...)`. +var fmtFprintMethods = map[string]bool{ + "Fprint": true, + "Fprintf": true, + "Fprintln": true, +} + +// TestNoFmtFprintToUserStream catches `fmt.Fprint*(stream, ...)` +// calls where stream is a user-facing destination +// (cmd.OutOrStdout / cmd.OutOrStderr / os.Stdout / os.Stderr) made +// outside internal/write/. Same intent as TestNoCmdPrintOutsideWrite: +// every user-visible write must route through write/ so output +// formatting stays consistent and template-driven. +// +// Calls writing to in-memory destinations (strings.Builder, +// bytes.Buffer, json.Encoder targets, etc.) are unaffected because +// those arguments are neither cmd.OutOr* calls nor os.Std* idents. +// +// Test files are exempt. +// +// See specs/ast-audit-tests.md for rationale. +func TestNoFmtFprintToUserStream(t *testing.T) { + pkgs := loadPackages(t) + var violations []string + + for _, pkg := range pkgs { + // Allow calls inside internal/write/ — that is precisely + // where these patterns belong. 
+ if strings.Contains(pkg.PkgPath, "internal/write/") || + strings.HasSuffix(pkg.PkgPath, "internal/write") { + continue + } + + for _, file := range pkg.Syntax { + fpath := pkg.Fset.Position(file.Pos()).Filename + if isTestFile(fpath) { + continue + } + + ast.Inspect(file, func(n ast.Node) bool { + call, ok := n.(*ast.CallExpr) + if !ok { + return true + } + + sel, ok := call.Fun.(*ast.SelectorExpr) + if !ok { + return true + } + + // Must be the fmt package. + pkgIdent, ok := sel.X.(*ast.Ident) + if !ok || pkgIdent.Name != "fmt" { + return true + } + + if !fmtFprintMethods[sel.Sel.Name] { + return true + } + + if len(call.Args) == 0 { + return true + } + + if !isUserFacingStream(call.Args[0]) { + return true + } + + violations = append(violations, + posString(pkg.Fset, call.Pos())+ + ": fmt."+sel.Sel.Name+ + "(, ...) — must go through internal/write/", + ) + return true + }) + } + } + + for _, v := range violations { + t.Error(v) + } +} + +// isUserFacingStream reports whether expr is one of the +// user-visible writers we forbid bypassing. +// +// Recognized shapes: +// - cmd.OutOrStdout() — cobra's stdout writer (SetOut or +// os.Stdout fallback). +// - cmd.OutOrStderr() — confusingly-named cobra accessor that +// returns the SetOut writer with **stderr** as fallback. Still +// a user-visible stream; route through internal/write/. +// - cmd.ErrOrStderr() — cobra's stderr writer (SetErr or +// os.Stderr fallback). The actual "write to stderr" +// accessor; covered here to keep the rule total. +// - os.Stdout / os.Stderr — direct *os.File globals. +// +// The receiver name "cmd" is the project convention; a non-"cmd" +// receiver is allowed as a calculated escape hatch (rare and +// would show up in review). +// +// Anything else (strings.Builder, bytes.Buffer, json.Encoder +// targets, custom io.Writer, etc.) is in-memory string assembly +// and is not a concern of this test. 
+// +// Parameters: +// - expr: AST expression in the first-argument slot of a +// fmt.Fprint*-family call. +// +// Returns: +// - bool: true when expr is one of the recognized user streams. +func isUserFacingStream(expr ast.Expr) bool { + switch e := expr.(type) { + case *ast.CallExpr: + sel, ok := e.Fun.(*ast.SelectorExpr) + if !ok { + return false + } + if sel.Sel.Name != "OutOrStdout" && + sel.Sel.Name != "OutOrStderr" && + sel.Sel.Name != "ErrOrStderr" { + return false + } + ident, ok := sel.X.(*ast.Ident) + if !ok { + return false + } + return ident.Name == "cmd" + case *ast.SelectorExpr: + ident, ok := e.X.(*ast.Ident) + if !ok { + return false + } + if ident.Name != "os" { + return false + } + return e.Sel.Name == "Stdout" || e.Sel.Name == "Stderr" + } + return false +} diff --git a/internal/bootstrap/bootstrap_test.go b/internal/bootstrap/bootstrap_test.go index 6f6280229..31caded06 100644 --- a/internal/bootstrap/bootstrap_test.go +++ b/internal/bootstrap/bootstrap_test.go @@ -9,11 +9,14 @@ package bootstrap import ( "os" "path/filepath" + "strings" "testing" "github.com/ActiveMemory/ctx/internal/cli/resolve" "github.com/ActiveMemory/ctx/internal/config/cli" "github.com/ActiveMemory/ctx/internal/config/ctx" + "github.com/ActiveMemory/ctx/internal/config/dir" + "github.com/ActiveMemory/ctx/internal/config/env" "github.com/ActiveMemory/ctx/internal/config/flag" "github.com/spf13/cobra" @@ -43,13 +46,25 @@ func TestRootCmd(t *testing.T) { if cmd.Long == "" { t.Error("RootCmd().Long is empty") } +} - // Check global flags exist - contextDirFlag := cmd.PersistentFlags().Lookup(flag.ContextDir) - if contextDirFlag == nil { - t.Error("--context-dir flag not found") - } +// TestRoot_NoContextDirFlag is the regression guard for the +// removed --context-dir flag (spec: +// specs/single-source-context-anchor.md). Cobra must reject +// the flag with its standard "unknown flag" error. 
+func TestRoot_NoContextDirFlag(t *testing.T) { + cmd := RootCmd() + cmd.SetOut(&discardWriter{}) + cmd.SetErr(&discardWriter{}) + cmd.SetArgs([]string{"--context-dir=/tmp", "status"}) + err := cmd.Execute() + if err == nil { + t.Fatal("expected error for removed --context-dir flag") + } + if !strings.Contains(err.Error(), "unknown flag") { + t.Errorf("error = %q, want cobra unknown-flag error", err.Error()) + } } func TestInitialize(t *testing.T) { @@ -107,19 +122,19 @@ func TestRootCmdVersion(t *testing.T) { } } -func TestRootCmdAllowOutsideCwdFlag(t *testing.T) { - cmd := RootCmd() - - flag := cmd.PersistentFlags().Lookup(flag.AllowOutsideCwd) - if flag == nil { - t.Fatal("--allow-outside-cwd flag not found") - } - if flag.DefValue != "false" { - t.Errorf("--allow-outside-cwd default = %q, want %q", flag.DefValue, "false") +// TestRootCmdPersistentPreRun_CtxDirEnv: CTX_DIR env declares the +// context directory; non-init annotated dummy bypasses the +// initialized check. +func TestRootCmdPersistentPreRun_CtxDirEnv(t *testing.T) { + tmp := t.TempDir() + ctxDir := filepath.Join(tmp, dir.Context) + if err := os.MkdirAll(ctxDir, 0o700); err != nil { + t.Fatal(err) } -} + t.Setenv(env.CtxDir, ctxDir) + rc.Reset() + t.Cleanup(rc.Reset) -func TestRootCmdPersistentPreRun_ContextDir(t *testing.T) { cmd := RootCmd() dummy := &cobra.Command{ @@ -128,25 +143,26 @@ func TestRootCmdPersistentPreRun_ContextDir(t *testing.T) { Run: func(cmd *cobra.Command, args []string) {}, } cmd.AddCommand(dummy) - cmd.SetArgs([]string{ - "--context-dir", "/tmp/test-ctx", - "--allow-outside-cwd", "dummy", - }) + cmd.SetArgs([]string{"dummy"}) err := cmd.Execute() if err != nil { t.Fatalf("Execute() error: %v", err) } - got := rc.ContextDir() - if got != "/tmp/test-ctx" { - t.Errorf("ContextDir() = %q, want %q", got, "/tmp/test-ctx") + got, ctxErr := rc.ContextDir() + if ctxErr != nil { + t.Fatalf("ContextDir: %v", ctxErr) + } + if got != ctxDir { + t.Errorf("ContextDir() = %q, want %q", got, 
ctxDir) } } func TestRootCmdPersistentPreRun_DefaultFlags(t *testing.T) { - // Test PersistentPreRun with default flags (no --context-dir, no --no-color) - // --allow-outside-cwd needed since test cwd may not have .context + // Test PersistentPreRun with default flags. + // The dummy command carries AnnotationSkipInit, so PersistentPreRunE + // skips the context-dir declaration gate and returns immediately. cmd := RootCmd() dummy := &cobra.Command{ @@ -155,7 +171,7 @@ func TestRootCmdPersistentPreRun_DefaultFlags(t *testing.T) { Run: func(cmd *cobra.Command, args []string) {}, } cmd.AddCommand(dummy) - cmd.SetArgs([]string{"--allow-outside-cwd", "dummy"}) + cmd.SetArgs([]string{"dummy"}) err := cmd.Execute() if err != nil { @@ -182,27 +198,15 @@ func TestInitializeSubcommandCount(t *testing.T) { } } -// TestRootCmdPersistentPreRun_BoundaryViolation tests that boundary validation -// returns an error when --context-dir is outside cwd and --allow-outside-cwd -// is not set. -func TestRootCmdPersistentPreRun_BoundaryViolation(t *testing.T) { - cmd := RootCmd() - dummy := &cobra.Command{ - Use: "dummy", - Annotations: map[string]string{cli.AnnotationSkipInit: "true"}, - Run: func(cmd *cobra.Command, args []string) {}, - } - cmd.AddCommand(dummy) - cmd.SetArgs([]string{"--context-dir", "/etc/not-inside-cwd", "dummy"}) - - execErr := cmd.Execute() - if execErr == nil { - t.Fatal("expected error from boundary violation") - } -} - func TestInitGuard_BlocksUninitializedCommand(t *testing.T) { tmp := t.TempDir() + ctxDir := filepath.Join(tmp, dir.Context) + if err := os.MkdirAll(ctxDir, 0o700); err != nil { + t.Fatal(err) + } + t.Setenv(env.CtxDir, ctxDir) + rc.Reset() + t.Cleanup(rc.Reset) cmd := RootCmd() dummy := &cobra.Command{ @@ -210,7 +214,7 @@ func TestInitGuard_BlocksUninitializedCommand(t *testing.T) { Run: func(cmd *cobra.Command, args []string) {}, } cmd.AddCommand(dummy) - cmd.SetArgs([]string{"--context-dir", tmp, "--allow-outside-cwd", "dummy"}) + 
cmd.SetArgs([]string{"dummy"}) execErr := cmd.Execute() if execErr == nil { @@ -224,6 +228,13 @@ func TestInitGuard_BlocksUninitializedCommand(t *testing.T) { func TestInitGuard_AllowsAnnotatedCommand(t *testing.T) { tmp := t.TempDir() // empty - not initialized + ctxDir := filepath.Join(tmp, dir.Context) + if err := os.MkdirAll(ctxDir, 0o700); err != nil { + t.Fatal(err) + } + t.Setenv(env.CtxDir, ctxDir) + rc.Reset() + t.Cleanup(rc.Reset) cmd := RootCmd() dummy := &cobra.Command{ @@ -232,7 +243,7 @@ func TestInitGuard_AllowsAnnotatedCommand(t *testing.T) { Run: func(cmd *cobra.Command, args []string) {}, } cmd.AddCommand(dummy) - cmd.SetArgs([]string{"--context-dir", tmp, "--allow-outside-cwd", "dummy"}) + cmd.SetArgs([]string{"dummy"}) if execErr := cmd.Execute(); execErr != nil { t.Fatalf("annotated command should succeed: %v", execErr) @@ -241,6 +252,13 @@ func TestInitGuard_AllowsAnnotatedCommand(t *testing.T) { func TestInitGuard_AllowsHiddenCommand(t *testing.T) { tmp := t.TempDir() // empty - not initialized + ctxDir := filepath.Join(tmp, dir.Context) + if err := os.MkdirAll(ctxDir, 0o700); err != nil { + t.Fatal(err) + } + t.Setenv(env.CtxDir, ctxDir) + rc.Reset() + t.Cleanup(rc.Reset) cmd := RootCmd() dummy := &cobra.Command{ @@ -249,7 +267,7 @@ func TestInitGuard_AllowsHiddenCommand(t *testing.T) { Run: func(cmd *cobra.Command, args []string) {}, } cmd.AddCommand(dummy) - cmd.SetArgs([]string{"--context-dir", tmp, "--allow-outside-cwd", "dummy"}) + cmd.SetArgs([]string{"dummy"}) if execErr := cmd.Execute(); execErr != nil { t.Fatalf("hidden command should succeed: %v", execErr) @@ -264,7 +282,7 @@ func TestInitGuard_AllowsGroupingCommand(t *testing.T) { Short: "A grouping command", } cmd.AddCommand(group) - cmd.SetArgs([]string{"--allow-outside-cwd", "group"}) + cmd.SetArgs([]string{"group"}) if execErr := cmd.Execute(); execErr != nil { t.Fatalf("grouping command should succeed: %v", execErr) @@ -301,23 +319,31 @@ func 
TestInitGuard_AllowsCompletionSubcommand(t *testing.T) { func TestInitGuard_AllowsInitializedCommand(t *testing.T) { tmp := t.TempDir() + ctxDir := filepath.Join(tmp, dir.Context) + if mkErr := os.MkdirAll(ctxDir, 0o700); mkErr != nil { + t.Fatal(mkErr) + } // Create required context files so Initialized() returns true. for _, f := range ctx.FilesRequired { - path := filepath.Join(tmp, f) + path := filepath.Join(ctxDir, f) content := []byte("# " + f + "\n") if writeErr := os.WriteFile(path, content, 0o600); writeErr != nil { t.Fatalf("setup: %v", writeErr) } } + t.Setenv(env.CtxDir, ctxDir) + rc.Reset() + t.Cleanup(rc.Reset) + cmd := RootCmd() dummy := &cobra.Command{ Use: "dummy", Run: func(cmd *cobra.Command, args []string) {}, } cmd.AddCommand(dummy) - cmd.SetArgs([]string{"--context-dir", tmp, "--allow-outside-cwd", "dummy"}) + cmd.SetArgs([]string{"dummy"}) if execErr := cmd.Execute(); execErr != nil { t.Fatalf("initialized command should succeed: %v", execErr) @@ -356,7 +382,7 @@ func TestResolveTool_FlagOverridesRC(t *testing.T) { }, } cmd.AddCommand(dummy) - cmd.SetArgs([]string{"--allow-outside-cwd", "--tool", "cursor", "dummy"}) + cmd.SetArgs([]string{"--tool", "cursor", "dummy"}) if err := cmd.Execute(); err != nil { t.Fatalf("Execute() error: %v", err) @@ -383,7 +409,7 @@ func TestResolveTool_FallsBackToRC(t *testing.T) { }, } cmd.AddCommand(dummy) - cmd.SetArgs([]string{"--allow-outside-cwd", "dummy"}) + cmd.SetArgs([]string{"dummy"}) if err := cmd.Execute(); err != nil { t.Fatalf("Execute() error: %v", err) @@ -411,7 +437,7 @@ func TestResolveTool_ErrorMessage(t *testing.T) { }, } cmd.AddCommand(dummy) - cmd.SetArgs([]string{"--allow-outside-cwd", "dummy"}) + cmd.SetArgs([]string{"dummy"}) if err := cmd.Execute(); err != nil { t.Fatalf("Execute() error: %v", err) diff --git a/internal/bootstrap/cmd.go b/internal/bootstrap/cmd.go index 4daf4b693..d673fd8e0 100644 --- a/internal/bootstrap/cmd.go +++ b/internal/bootstrap/cmd.go @@ -20,11 +20,8 @@ import 
( embedFlag "github.com/ActiveMemory/ctx/internal/config/embed/flag" "github.com/ActiveMemory/ctx/internal/config/flag" ctxContext "github.com/ActiveMemory/ctx/internal/context/validate" - "github.com/ActiveMemory/ctx/internal/err/fs" errInit "github.com/ActiveMemory/ctx/internal/err/initialize" - "github.com/ActiveMemory/ctx/internal/flagbind" "github.com/ActiveMemory/ctx/internal/rc" - "github.com/ActiveMemory/ctx/internal/validate" writeBootstrap "github.com/ActiveMemory/ctx/internal/write/bootstrap" ) @@ -38,16 +35,9 @@ var version = cfgBootstrap.DefaultVersion // The root command provides the entry point for all ctx subcommands and // displays help information when invoked without arguments. // -// Global flags: -// - --context-dir: Override the context directory path (default: .context) -// - --allow-outside-cwd: Allow context directory outside project root -// // Returns: // - *cobra.Command: The configured root command with usage and version info func RootCmd() *cobra.Command { - var contextDir string - var allowOutsideCwd bool - short, long := desc.Command(cmd.DescKeyCtx) c := &cobra.Command{ @@ -56,43 +46,59 @@ func RootCmd() *cobra.Command { Long: long, Example: desc.Example(cmd.DescKeyCtx), Version: version, + // Cobra auto-prints returned errors to stderr by default; + // main.go also prints them via writeErr.With, producing a + // double-printed error. Silence cobra's path so writeErr is + // the sole printer. (SilenceUsage stays per-return so + // genuine cobra parse errors keep their help dump.) + SilenceErrors: true, PersistentPreRunE: func(cmd *cobra.Command, args []string) error { - // Apply global flag values - if contextDir != "" { - rc.OverrideContextDir(contextDir) - } - // Validate that the context directory stays within the project root. - // Skip if the CLI flag is set or .ctxrc has allow_outside_cwd: true. 
- if !allowOutsideCwd && !rc.AllowOutsideCwd() { - if validateErr := validate.Boundary( - rc.ContextDir(), - ); validateErr != nil { - return fs.BoundaryViolation(validateErr) - } - } - - // Skip init check for hidden commands (hooks have their own guards) - // and cobra's built-in completion subcommands (bash, zsh, fish, - // PowerShell) which must work in any directory. + // Skip every downstream check for administrative commands + // that must run without a declared or initialized context: + // - Hidden commands (e.g. ctx system bootstrap; hooks + // supply their own guards). + // - Cobra's built-in shell-completion subcommands. + // - Commands annotated with AnnotationSkipInit (init, + // activate, deactivate, guide, why, doctor, config + // switch/status, hub *). + // - Grouping commands without a Run / RunE of their own + // (they just print help for their subtree). if cmd.Hidden { return nil } if p := cmd.Parent(); p != nil && p.Name() == cli.CmdCompletion { return nil } - - // Skip init check for annotated commands. if _, ok := cmd.Annotations[cli.AnnotationSkipInit]; ok { return nil } - - // Skip init check for grouping commands (no Run/RunE = just shows help). if cmd.RunE == nil && cmd.Run == nil { return nil } - // Require initialization. - if !ctxContext.Initialized(rc.ContextDir()) { + // Under the single-source-anchor model, every non-exempt + // command requires CTX_DIR to be declared and to point at + // an existing .context/ directory. RequireContextDir + // returns a tailored error (with a next-step hint based on + // how many .context/ candidates are visible from CWD) when + // the declaration is missing or broken. The parent of the + // declared directory is the project root by contract; CWD + // has no say in project identity. + ctxDir, reqErr := rc.RequireContextDir() + if reqErr != nil { + // Actionable error, not a usage problem. Suppress + // cobra's help dump so the call-to-action stays + // the only thing on stderr. 
Genuine cobra errors + // (unknown subcommand, bad flag) still print usage + // because they happen before PreRunE runs. + cmd.SilenceUsage = true + return reqErr + } + + // Require initialization: the declared directory must + // have been initialized before other commands operate. + if !ctxContext.Initialized(ctxDir) { + cmd.SilenceUsage = true return errInit.NotInitialized() } @@ -114,16 +120,6 @@ func RootCmd() *cobra.Command { } }) - // Global flags available to all subcommands - flagbind.PersistentStringFlag( - c, &contextDir, - flag.ContextDir, embedFlag.DescKeyContextDir, - ) - flagbind.PersistentBoolFlag( - c, &allowOutsideCwd, - flag.AllowOutsideCwd, - embedFlag.DescKeyAllowOutsideCwd, - ) c.PersistentFlags().String( flag.Tool, "", diff --git a/internal/bootstrap/doc.go b/internal/bootstrap/doc.go index bd49e53c4..8ea4437f0 100644 --- a/internal/bootstrap/doc.go +++ b/internal/bootstrap/doc.go @@ -24,8 +24,7 @@ // # The Root Command // // [RootCmd] returns the bare root cobra command with the -// banner, version flag, global flags -// (`--context-dir`, `--allow-outside-cwd`, `--tool`), and +// banner, version flag, the `--tool` global flag, and // the persistent error formatter. It is intentionally // devoid of subcommands; [Initialize] adds them. 
// diff --git a/internal/bootstrap/group.go b/internal/bootstrap/group.go index 56ae8b54d..3a29f6fa1 100644 --- a/internal/bootstrap/group.go +++ b/internal/bootstrap/group.go @@ -7,16 +7,15 @@ package bootstrap import ( + "github.com/ActiveMemory/ctx/internal/cli/activate" "github.com/ActiveMemory/ctx/internal/cli/add" "github.com/ActiveMemory/ctx/internal/cli/agent" - "github.com/ActiveMemory/ctx/internal/cli/backup" - "github.com/ActiveMemory/ctx/internal/cli/change" "github.com/ActiveMemory/ctx/internal/cli/compact" "github.com/ActiveMemory/ctx/internal/cli/config" "github.com/ActiveMemory/ctx/internal/cli/connection" + "github.com/ActiveMemory/ctx/internal/cli/deactivate" "github.com/ActiveMemory/ctx/internal/cli/decision" - "github.com/ActiveMemory/ctx/internal/cli/doctor" "github.com/ActiveMemory/ctx/internal/cli/drift" ctxFmt "github.com/ActiveMemory/ctx/internal/cli/fmt" @@ -60,6 +59,8 @@ import ( func gettingStarted() []registration { return []registration{ {initialize.Cmd, embedCmd.GroupGettingStarted}, + {activate.Cmd, embedCmd.GroupGettingStarted}, + {deactivate.Cmd, embedCmd.GroupGettingStarted}, {status.Cmd, embedCmd.GroupGettingStarted}, {guide.Cmd, embedCmd.GroupGettingStarted}, } @@ -125,13 +126,12 @@ func sessions() []registration { // runtime configuration group. // // Returns: -// - []registration: Config, permission, hook, backup, and prune commands +// - []registration: Config, permission, hook, and prune commands func runtimeCmds() []registration { return []registration{ {config.Cmd, embedCmd.GroupRuntime}, {permission.Cmd, embedCmd.GroupRuntime}, {hook.Cmd, embedCmd.GroupRuntime}, - {backup.Cmd, embedCmd.GroupRuntime}, {prune.Cmd, embedCmd.GroupRuntime}, } } diff --git a/internal/cli/activate/activate.go b/internal/cli/activate/activate.go new file mode 100644 index 000000000..61d18d8a0 --- /dev/null +++ b/internal/cli/activate/activate.go @@ -0,0 +1,22 @@ +// / ctx: https://ctx.ist +// ,'`./ do you remember? 
+// `.,'\
+//    \  Copyright 2026-present Context contributors.
+//  SPDX-License-Identifier: Apache-2.0
+
+package activate
+
+import (
+	"github.com/spf13/cobra"
+
+	activateRoot "github.com/ActiveMemory/ctx/internal/cli/activate/cmd/root"
+)
+
+// Cmd returns the `ctx activate` command for registration on the
+// root ctx command. See cmd/root for the full command definition.
+//
+// Returns:
+//   - *cobra.Command: the activate command.
+func Cmd() *cobra.Command {
+	return activateRoot.Cmd()
+}
diff --git a/internal/cli/activate/activate_test.go b/internal/cli/activate/activate_test.go
new file mode 100644
index 000000000..1ecb540c6
--- /dev/null
+++ b/internal/cli/activate/activate_test.go
@@ -0,0 +1,275 @@
+// /  ctx: https://ctx.ist
+// ,'`./ do you remember?
+// `.,'\
+//    \  Copyright 2026-present Context contributors.
+//  SPDX-License-Identifier: Apache-2.0
+
+package activate_test
+
+import (
+	"bytes"
+	"fmt"
+	"os"
+	"path/filepath"
+	"strings"
+	"testing"
+
+	"github.com/ActiveMemory/ctx/internal/cli/activate"
+	"github.com/ActiveMemory/ctx/internal/config/dir"
+	"github.com/ActiveMemory/ctx/internal/config/env"
+)
+
+// runActivate invokes `ctx activate` with the given args and returns
+// (stdout, stderr, error) as separate buffers. Stream separation is
+// load-bearing: the eval-bindable shell content goes to stdout, the
+// human-readable advisories ("ctx: activated at:", "ctx: also
+// visible upward:") go to stderr. Tests that conflate the two miss
+// regressions where an advisory leaks into the eval stream.
+//
+// The command inherits the test process's env; use t.Setenv /
+// t.Chdir to scope state.
+func runActivate(t *testing.T, args []string) (stdout, stderr string, err error) { + t.Helper() + c := activate.Cmd() + c.SetArgs(args) + var so, se bytes.Buffer + c.SetOut(&so) + c.SetErr(&se) + err = c.Execute() + return so.String(), se.String(), err +} + +// TestActivate_NoArgs_NoCandidates: cwd with no .context/ anywhere → +// NoCandidates error, no stdout emit, no advisory either. +func TestActivate_NoArgs_NoCandidates(t *testing.T) { + t.Setenv(env.CtxDir, "") + t.Chdir(t.TempDir()) + + stdout, _, err := runActivate(t, nil) + if err == nil { + t.Fatalf("expected NoCandidates error, got nil (stdout=%q)", stdout) + } + if stdout != "" { + t.Errorf("stdout must be empty on error path: %q", stdout) + } +} + +// TestActivate_NoArgs_OneCandidate: exactly one .context/ upward → +// stdout carries the export, stderr carries the +// `ctx: activated at:` advisory. +func TestActivate_NoArgs_OneCandidate(t *testing.T) { + t.Setenv(env.CtxDir, "") + + projectRoot := t.TempDir() + ctxPath := filepath.Join(projectRoot, dir.Context) + if err := os.MkdirAll(ctxPath, 0700); err != nil { + t.Fatalf("mkdir: %v", err) + } + t.Chdir(projectRoot) + t.Setenv("SHELL", "/bin/bash") + + stdout, stderr, err := runActivate(t, nil) + if err != nil { + t.Fatalf("expected success, got err=%v", err) + } + if !strings.HasPrefix(stdout, "export CTX_DIR=") { + t.Errorf("stdout must start with export, got %q", stdout) + } + if !strings.Contains(stdout, ctxPath) { + t.Errorf("stdout missing path %q: %q", ctxPath, stdout) + } + // Activated-at advisory always announces the bind, even in + // the single-candidate case. 
+ wantActivated := "ctx: activated at: " + ctxPath + if !strings.Contains(stderr, wantActivated) { + t.Errorf("stderr missing activated-at advisory %q: %q", + wantActivated, stderr) + } +} + +// TestActivate_ErrorPath_StdoutEmpty guards the eval-recursion +// trap surfaced by the smoke test: if any error path lets cobra +// print Usage / Flags / Examples to stdout, `eval "$(ctx +// activate)"` captures the Examples block (which literally +// contains `eval "$(ctx activate)"`) and re-executes activate, +// looping until the captured text mangles past the parser. +// +// Stdout MUST stay empty on every error path. Stderr can carry +// the human-readable error; the eval shell never sees stderr. +// +// Uses the no-candidates case (zero `.context/` visible upward) +// since multi-candidate is no longer an error case under the +// innermost-wins policy. +func TestActivate_ErrorPath_StdoutEmpty(t *testing.T) { + t.Setenv(env.CtxDir, "") + t.Chdir(t.TempDir()) + + stdout, stderr, err := runActivate(t, nil) + if err == nil { + t.Fatalf("expected NoCandidates error, got nil") + } + if stdout != "" { + t.Errorf("stdout must be empty on error path, got %q", stdout) + } + if !strings.Contains(stderr, "no .context/ directory found") { + t.Errorf("stderr should describe the error, got %q", stderr) + } +} + +// TestActivate_NoArgs_ManyCandidates: two `.context/` dirs on the +// upward path → innermost wins on stdout (eval-bindable), +// stderr carries both the `ctx activated at:` line and one +// `ctx: also visible upward:` line per other candidate. Matches +// git/make innermost-project semantics. +// +// The split-stream assertion is load-bearing: putting any +// advisory on stdout (the eval-captured stream) makes it +// invisible to anyone running `eval "$(ctx activate)"`. 
+func TestActivate_NoArgs_ManyCandidates(t *testing.T) { + t.Setenv(env.CtxDir, "") + t.Setenv("SHELL", "/bin/bash") + + tempDir := t.TempDir() + outerCtx := filepath.Join(tempDir, dir.Context) + innerDir := filepath.Join(tempDir, "inner") + innerCtx := filepath.Join(innerDir, dir.Context) + startDir := filepath.Join(innerDir, "deep") + + for _, d := range []string{outerCtx, innerCtx, startDir} { + if err := os.MkdirAll(d, 0700); err != nil { + t.Fatalf("mkdir %s: %v", d, err) + } + } + t.Chdir(startDir) + + stdout, stderr, err := runActivate(t, nil) + if err != nil { + t.Fatalf("expected success (innermost wins), got err=%v", err) + } + + // stdout: only the export line for the innermost candidate. + if !strings.HasPrefix(stdout, "export CTX_DIR=") { + t.Errorf("stdout must start with export, got %q", stdout) + } + if !strings.Contains(stdout, innerCtx) { + t.Errorf("export should bind the inner candidate %q: %q", + innerCtx, stdout) + } + if strings.Contains(stdout, "also visible") || + strings.Contains(stdout, "activated at") { + t.Errorf("stdout must NOT carry advisories (eval invisibility): %q", + stdout) + } + + // stderr: activated-at line for the inner, also-visible line for + // the outer. + wantActivated := "ctx: activated at: " + innerCtx + if !strings.Contains(stderr, wantActivated) { + t.Errorf("stderr missing %q: %q", wantActivated, stderr) + } + wantAdvisory := "ctx: also visible upward: " + outerCtx + if !strings.Contains(stderr, wantAdvisory) { + t.Errorf("stderr missing %q: %q", wantAdvisory, stderr) + } +} + +// TestActivate_RejectsArgs guards the spec contract: `ctx activate +// ` is removed under the single-source-anchor model. Any +// positional argument must be rejected (either as cobra's +// "accepts 0 arg(s)" or "unknown command", whichever cobra picks +// for the literal value) and emit nothing on stdout. 
+func TestActivate_RejectsArgs(t *testing.T) { + t.Setenv(env.CtxDir, "") + t.Chdir(t.TempDir()) + + stdout, _, err := runActivate(t, []string{"some-explicit-path"}) + if err == nil { + t.Fatalf("expected cobra args rejection, got nil (stdout=%q)", stdout) + } + if strings.Contains(stdout, "export CTX_DIR") { + t.Errorf("stdout should not contain export on error: %q", stdout) + } +} + +// TestActivate_StaleReplacementComment: parent shell has a stale +// CTX_DIR pointing at a different project; activate emits a +// `# ctx: replacing stale CTX_DIR=` comment before the export +// so the user can see the change in `eval` output. +func TestActivate_StaleReplacementComment(t *testing.T) { + stale := filepath.Join(t.TempDir(), "old", dir.Context) + if err := os.MkdirAll(stale, 0700); err != nil { + t.Fatalf("mkdir stale: %v", err) + } + t.Setenv(env.CtxDir, stale) + + projectRoot := t.TempDir() + ctxPath := filepath.Join(projectRoot, dir.Context) + if err := os.MkdirAll(ctxPath, 0700); err != nil { + t.Fatalf("mkdir new: %v", err) + } + t.Chdir(projectRoot) + t.Setenv("SHELL", "/bin/bash") + + stdout, _, err := runActivate(t, nil) + if err != nil { + t.Fatalf("expected success, got err=%v", err) + } + wantPrefix := fmt.Sprintf("# ctx: replacing stale %s=%s\n", + env.CtxDir, stale) + if !strings.HasPrefix(stdout, wantPrefix) { + t.Errorf("stdout missing stale-replacement comment.\n got: %q\nwant prefix: %q", + stdout, wantPrefix) + } + if !strings.Contains(stdout, "export CTX_DIR=") { + t.Errorf("stdout missing export: %q", stdout) + } + if !strings.Contains(stdout, ctxPath) { + t.Errorf("stdout missing new path %q: %q", ctxPath, stdout) + } +} + +// TestActivate_NoStaleCommentOnFirstActivate: when CTX_DIR is unset +// or matches the resolved value, the comment is suppressed. 
+func TestActivate_NoStaleCommentOnFirstActivate(t *testing.T) { + t.Setenv(env.CtxDir, "") + + projectRoot := t.TempDir() + ctxPath := filepath.Join(projectRoot, dir.Context) + if err := os.MkdirAll(ctxPath, 0700); err != nil { + t.Fatalf("mkdir: %v", err) + } + t.Chdir(projectRoot) + t.Setenv("SHELL", "/bin/bash") + + stdout, _, err := runActivate(t, nil) + if err != nil { + t.Fatalf("expected success, got err=%v", err) + } + if strings.Contains(stdout, "replacing stale") { + t.Errorf("stdout should not contain stale comment: %q", stdout) + } +} + +// TestActivate_ShellFlag: --shell zsh uses POSIX export syntax +// (same output shape as bash; flag is just a dispatch key). +func TestActivate_ShellFlag(t *testing.T) { + t.Setenv(env.CtxDir, "") + + projectRoot := t.TempDir() + ctxPath := filepath.Join(projectRoot, dir.Context) + if err := os.MkdirAll(ctxPath, 0700); err != nil { + t.Fatalf("mkdir: %v", err) + } + t.Chdir(projectRoot) + + stdout, _, err := runActivate(t, []string{"--shell", "zsh"}) + if err != nil { + t.Fatalf("expected success, got err=%v", err) + } + if !strings.HasPrefix(stdout, "export CTX_DIR=") { + t.Errorf("expected export prefix, got %q", stdout) + } + if !strings.HasSuffix(strings.TrimSpace(stdout), "'") { + t.Errorf("expected trailing single quote (shell quoting), got %q", stdout) + } +} diff --git a/internal/cli/activate/cmd/root/cmd.go b/internal/cli/activate/cmd/root/cmd.go new file mode 100644 index 000000000..cbd564453 --- /dev/null +++ b/internal/cli/activate/cmd/root/cmd.go @@ -0,0 +1,77 @@ +// / ctx: https://ctx.ist +// ,'`./ do you remember? +// `.,'\ +// \ Copyright 2026-present Context contributors. 
+// SPDX-License-Identifier: Apache-2.0 + +package root + +import ( + "github.com/spf13/cobra" + + "github.com/ActiveMemory/ctx/internal/assets/read/desc" + "github.com/ActiveMemory/ctx/internal/config/cli" + "github.com/ActiveMemory/ctx/internal/config/embed/cmd" + embedFlag "github.com/ActiveMemory/ctx/internal/config/embed/flag" + cFlag "github.com/ActiveMemory/ctx/internal/config/flag" +) + +// Cmd returns the `ctx activate` cobra command. +// +// Args-free under the single-source-anchor model +// (specs/single-source-context-anchor.md). Activation is always +// project-local discovery via [rc.ScanCandidates] from CWD; the +// explicit-path mode that previously accepted an argument was +// removed because hub-client / hub-server scenarios store at +// `~/.ctx/hub-data/` and never read `.context/` directly, so they +// activate from the project root like everyone else. +// +// One flag remains: +// +// --shell override auto-detection (defaults to $SHELL). +// +// # Stdout discipline (critical) +// +// Activate's stdout is consumed by `eval "$(ctx activate)"`. Every +// byte must be either valid shell or empty. Usage / Flags / +// Examples blocks must NEVER reach stdout, because cobra's +// Examples for this command literally contain +// `eval "$(ctx activate)"`, which would re-execute activate inside +// the eval and trigger an infinite loop on any error path. +// +// SilenceUsage is therefore set unconditionally below (rather than +// only after a return) so cobra renders only the error to stderr +// when something fails. SilenceErrors stays at the root level so +// errors keep going to stderr (visible to the user) without being +// captured by the eval. +// +// Returns: +// - *cobra.Command: configured activate command. 
+func Cmd() *cobra.Command { + short, long := desc.Command(cmd.DescKeyActivate) + c := &cobra.Command{ + Use: cmd.UseActivate, + Short: short, + Long: long, + Example: desc.Example(cmd.DescKeyActivate), + Args: cobra.NoArgs, + // Exempt from the global init / require-context-dir checks: + // activate's whole purpose is to help the user declare the + // context directory in the first place. + Annotations: map[string]string{cli.AnnotationSkipInit: cli.AnnotationTrue}, + // See the Stdout discipline note above. Without this, an + // error path (multi-candidate, no-candidates, etc.) prints + // Usage+Examples to stdout, gets captured by `$(...)`, and + // the embedded `eval "$(ctx activate)"` example re-runs the + // command. Loop. + SilenceUsage: true, + RunE: func(cmd *cobra.Command, args []string) error { + shell, _ := cmd.Flags().GetString(cFlag.Shell) + return Run(cmd, shell) + }, + } + c.Flags().String(cFlag.Shell, "", + desc.Flag(embedFlag.DescKeyActivateShell), + ) + return c +} diff --git a/internal/cli/activate/cmd/root/doc.go b/internal/cli/activate/cmd/root/doc.go new file mode 100644 index 000000000..3e8708457 --- /dev/null +++ b/internal/cli/activate/cmd/root/doc.go @@ -0,0 +1,33 @@ +// / ctx: https://ctx.ist +// ,'`./ do you remember? +// `.,'\ +// \ Copyright 2026-present Context contributors. +// SPDX-License-Identifier: Apache-2.0 + +// Package root implements the `ctx activate` cobra command. +// +// Activate is the shell-integration entry point under the +// explicit-context-dir model (spec: specs/explicit-context-dir.md). +// Its single job is to emit a `export CTX_DIR=...` line to stdout so +// that callers can bind the context directory for their shell via +// `eval "$(ctx activate)"`. +// +// Unlike most commands in the CLI, `activate` is in the exempt +// allowlist: it does not call rc.RequireContextDir because +// activate's reason for existing is precisely to help users declare +// CTX_DIR in the first place. 
+//
+// Resolution:
+//
+//   - No positional arguments (cobra.NoArgs): the explicit-path
+//     mode was removed under the single-source-anchor model, and
+//     there is no --force escape hatch in v1.
+//   - The command scans upward from CWD using rc.ScanCandidates.
+//     Zero candidates → NoCandidates error. Otherwise the
+//     innermost candidate wins (git/make nested-project
+//     semantics); any other visible candidates are reported on
+//     stderr as advisories, not refused.
+//
+// This is the only command in the CLI that walks. All other
+// resolution flows through rc.ContextDir / rc.RequireContextDir.
+package root
diff --git a/internal/cli/activate/cmd/root/run.go b/internal/cli/activate/cmd/root/run.go
new file mode 100644
index 000000000..b7f506008
--- /dev/null
+++ b/internal/cli/activate/cmd/root/run.go
@@ -0,0 +1,76 @@
+// /  ctx: https://ctx.ist
+// ,'`./ do you remember?
+// `.,'\
+//    \  Copyright 2026-present Context contributors.
+//  SPDX-License-Identifier: Apache-2.0
+
+package root
+
+import (
+	"fmt"
+	"os"
+
+	"github.com/spf13/cobra"
+
+	"github.com/ActiveMemory/ctx/internal/cli/activate/core/emit"
+	"github.com/ActiveMemory/ctx/internal/cli/activate/core/resolve"
+	"github.com/ActiveMemory/ctx/internal/config/env"
+	cfgShell "github.com/ActiveMemory/ctx/internal/config/shell"
+	writeActivate "github.com/ActiveMemory/ctx/internal/write/activate"
+)
+
+// Run executes the `ctx activate` command.
+//
+// Resolves the target .context/ directory via [resolve.Selected]
+// (always project-local scan from CWD under the single-source-anchor
+// model), then prints the shell-specific export statement for
+// CTX_DIR to stdout.
+//
+// # Output shape
+//
+// Two channels:
+//
+//  1. **stdout**: consumed by `eval "$(ctx activate)"`. Every
+//     byte must be valid POSIX shell.
Composed in order:
+//     (a) zero or one `# ctx: replacing stale CTX_DIR=\n`
+//     comment line when the parent shell already has [env.CtxDir]
+//     set to a different value than the resolved target;
+//     (b) the shell-specific `export CTX_DIR=\n` line.
+//
+//  2. **stderr**: informational advisories for the user. Always
+//     carries a `ctx: activated at: ` line announcing the
+//     bound directory (single-candidate case included), and
+//     additionally one `ctx: also visible upward: ` line
+//     per other `.context/` candidate when more than one is
+//     visible upward. `eval` does not capture stderr, so these
+//     lines pass through to the terminal where the user sees
+//     them. Innermost wins (matches git/make nested-project
+//     semantics); the additional candidates are reported, not
+//     refused. The comment-on-stdout approach considered
+//     earlier was invisible to the only documented invocation
+//     form (`eval`), so it informed nobody.
+//
+// Parameters:
+//   - cmd: cobra command providing stdout / stderr. Nil is a
+//     no-op via [writeActivate.Emit] / [writeActivate.AlsoVisible].
+//   - shell: value of the --shell flag; empty means auto-detect
+//     from $SHELL via [emit.DetectShell].
+//
+// Returns:
+//   - error: non-nil on resolution failure (no `.context/` visible
+//     from CWD upward); nil on successful emit.
+func Run(cmd *cobra.Command, shell string) error { + selected, others, err := resolve.Selected() + if err != nil { + return err + } + out := emit.Set(emit.DetectShell(shell), selected) + if existing := os.Getenv(env.CtxDir); existing != "" && existing != selected { + out = fmt.Sprintf(cfgShell.FormatStaleReplaceComment, + env.CtxDir, existing, out) + } + writeActivate.ActivatedAt(cmd, selected) + writeActivate.AlsoVisible(cmd, others) + writeActivate.Emit(cmd, out) + return nil +} diff --git a/internal/cli/activate/core/emit/doc.go b/internal/cli/activate/core/emit/doc.go new file mode 100644 index 000000000..bc26d8b29 --- /dev/null +++ b/internal/cli/activate/core/emit/doc.go @@ -0,0 +1,28 @@ +// / ctx: https://ctx.ist +// ,'`./ do you remember? +// `.,'\ +// \ Copyright 2026-present Context contributors. +// SPDX-License-Identifier: Apache-2.0 + +// Package emit produces the shell-specific strings used by +// `ctx activate` and `ctx deactivate` to bind or clear CTX_DIR +// for the current shell via `eval "$(ctx activate)"`. +// +// v1 supports bash, zsh, and POSIX sh. All three share identical +// `export` / `unset` syntax. Fish / nushell / powershell can be +// added later by extending [Set] and [Unset] without touching the +// call sites. That extensibility is the only reason this lives in +// its own package rather than inline in the command's Run. +// +// # Supported Shells +// +// bash, zsh, sh: POSIX export / unset +// fish: deferred (see specs/explicit-context-dir.md). +// +// # Detection +// +// [DetectShell] returns the first non-empty value of, in order: +// the explicit --shell flag, the basename of $SHELL, and a bash +// fallback. Users who want deterministic output in scripts should +// pass --shell explicitly. 
+package emit diff --git a/internal/cli/activate/core/emit/emit.go b/internal/cli/activate/core/emit/emit.go new file mode 100644 index 000000000..3e73dda95 --- /dev/null +++ b/internal/cli/activate/core/emit/emit.go @@ -0,0 +1,90 @@ +// / ctx: https://ctx.ist +// ,'`./ do you remember? +// `.,'\ +// \ Copyright 2026-present Context contributors. +// SPDX-License-Identifier: Apache-2.0 + +package emit + +import ( + "os" + "path/filepath" + "strings" + + "github.com/ActiveMemory/ctx/internal/config/env" + cfgShell "github.com/ActiveMemory/ctx/internal/config/shell" +) + +// emitters maps supported shell identifiers to their set-emitter. +// Unknown shells fall back to POSIX export semantics via the +// default branch in Set. +var emitters = map[string]emitter{ + cfgShell.Bash: posixSet, + cfgShell.Zsh: posixSet, + cfgShell.Sh: posixSet, +} + +// unsetters maps supported shell identifiers to their unset-emitter. +// Unknown shells fall back to POSIX unset semantics via the default +// branch in Unset. +var unsetters = map[string]emitter{ + cfgShell.Bash: posixUnset, + cfgShell.Zsh: posixUnset, + cfgShell.Sh: posixUnset, +} + +// DetectShell returns the shell identifier to emit for. +// +// Priority: explicit override > basename of $SHELL > bash fallback. +// The returned value is always lowercase and suitable as a key into +// the [emitters] / [unsetters] tables. +// +// Parameters: +// - override: explicit --shell flag value ("" to auto-detect). +// +// Returns: +// - string: one of [cfgShell.Bash], [cfgShell.Zsh], [cfgShell.Sh], +// or the original override (callers treat unknowns as POSIX). +func DetectShell(override string) string { + if override != "" { + return strings.ToLower(override) + } + if s := os.Getenv(env.Shell); s != "" { + return strings.ToLower(filepath.Base(s)) + } + return cfgShell.Bash +} + +// Set returns the shell command that exports CTX_DIR=path, ending +// with a newline so the output is directly consumable by +// `eval "$(ctx activate)"`. 
+// +// Parameters: +// - shell: result of [DetectShell]. +// - path: absolute path to the selected context directory. +// +// Returns: +// - string: one-line export statement with trailing newline. +func Set(shell, path string) string { + fn, ok := emitters[shell] + if !ok { + fn = posixSet + } + return fn(env.CtxDir, shellQuote(path)) +} + +// Unset returns the shell command that clears CTX_DIR for the +// current shell, ending with a newline. +// +// Parameters: +// - shell: result of [DetectShell]. +// +// Returns: +// - string: one-line unset statement with trailing newline. +func Unset(shell string) string { + fn, ok := unsetters[shell] + if !ok { + fn = posixUnset + } + return fn(env.CtxDir, "") +} diff --git a/internal/cli/activate/core/emit/posix.go b/internal/cli/activate/core/emit/posix.go new file mode 100644 index 000000000..9f547d774 --- /dev/null +++ b/internal/cli/activate/core/emit/posix.go @@ -0,0 +1,57 @@ +// / ctx: https://ctx.ist +// ,'`./ do you remember? +// `.,'\ +// \ Copyright 2026-present Context contributors. +// SPDX-License-Identifier: Apache-2.0 + +package emit + +import ( + "fmt" + "strings" + + "github.com/ActiveMemory/ctx/internal/config/shell" +) + +// posixSet emits `export KEY=VALUE\n` for bash/zsh/sh. Used as the +// map value in the emitters table for all POSIX-family shells. +// +// Parameters: +// - key: environment variable name (already well-formed). +// - quotedValue: value wrapped by shellQuote. +// +// Returns: +// - string: one-line export statement with trailing newline. +func posixSet(key, quotedValue string) string { + return fmt.Sprintf(shell.FormatPOSIXExport, key, quotedValue) +} + +// posixUnset emits `unset KEY\n` for bash/zsh/sh. The value +// argument is ignored (unset has no payload) but kept in the +// signature to match the emitter type. +// +// Parameters: +// - key: environment variable name to clear. +// - _: unused; kept for emitter-signature compatibility. 
+// +// Returns: +// - string: one-line unset statement with trailing newline. +func posixUnset(key, _ string) string { + return fmt.Sprintf(shell.FormatPOSIXUnset, key) +} + +// shellQuote wraps s in single quotes, escaping any embedded single +// quote as close-escape-reopen (`'` followed by `\'` followed by `'`). +// The resulting string is safe to paste into any POSIX-compatible +// shell regardless of s's contents. +// +// Parameters: +// - s: raw value (typically a filesystem path). +// +// Returns: +// - string: single-quoted, escape-safe shell literal. +func shellQuote(s string) string { + return shell.SingleQuote + + strings.ReplaceAll(s, shell.SingleQuote, shell.SingleQuoteEscaped) + + shell.SingleQuote +} diff --git a/internal/cli/activate/core/emit/types.go b/internal/cli/activate/core/emit/types.go new file mode 100644 index 000000000..8c2102b67 --- /dev/null +++ b/internal/cli/activate/core/emit/types.go @@ -0,0 +1,12 @@ +// / ctx: https://ctx.ist +// ,'`./ do you remember? +// `.,'\ +// \ Copyright 2026-present Context contributors. +// SPDX-License-Identifier: Apache-2.0 + +package emit + +// emitter produces a shell-specific one-line statement for the given +// key and pre-quoted value, terminated by a newline. Concrete +// emitters live in posix.go; the dispatch table is in emit.go. +type emitter func(key, quotedValue string) string diff --git a/internal/cli/activate/core/resolve/doc.go b/internal/cli/activate/core/resolve/doc.go new file mode 100644 index 000000000..b8e4be886 --- /dev/null +++ b/internal/cli/activate/core/resolve/doc.go @@ -0,0 +1,26 @@ +// / ctx: https://ctx.ist +// ,'`./ do you remember? +// `.,'\ +// \ Copyright 2026-present Context contributors. +// SPDX-License-Identifier: Apache-2.0 + +// Package resolve picks the `.context/` directory that `ctx +// activate` should emit a shell export for. 
It is the ONE place +// in the CLI that walks the filesystem during context resolution; +// all other commands honor `CTX_DIR` or error via +// [rc.RequireContextDir]. +// +// [Selected] is the single entry point. It walks upward from CWD +// via [rc.ScanCandidates] and returns: +// +// - the **innermost** visible `.context/` (selected), +// - any **additional** candidates further up the path, +// - or [errActivate.NoCandidates] when the walk finds none. +// +// Multi-candidate is not an error: workspace-level shared +// `.context/` dirs alongside per-project ones are a legitimate +// nested-project layout. Innermost wins (matching git / make +// behavior in nested layouts), and the additional candidates are +// surfaced so callers can include them as informational comments +// in eval-able output. +package resolve diff --git a/internal/cli/activate/core/resolve/internal.go b/internal/cli/activate/core/resolve/internal.go new file mode 100644 index 000000000..a437a716f --- /dev/null +++ b/internal/cli/activate/core/resolve/internal.go @@ -0,0 +1,49 @@ +// / ctx: https://ctx.ist +// ,'`./ do you remember? +// `.,'\ +// \ Copyright 2026-present Context contributors. +// SPDX-License-Identifier: Apache-2.0 + +package resolve + +import ( + "os" + + errActivate "github.com/ActiveMemory/ctx/internal/err/activate" + "github.com/ActiveMemory/ctx/internal/rc" +) + +// scan returns the innermost visible .context/ candidate from CWD +// alongside any additional candidates further up the path. The +// scan walks via [rc.ScanCandidates] (innermost-first); resolution +// itself never walks outside this function. +// +// Multi-candidate behavior is "innermost wins, the rest are +// reported." 
This matches what `git` and `make` do for nested +// project layouts (innermost project owns the working directory) +// and supports legitimate workspace-level shared `.context/` dirs +// next to per-project ones; the previous "refuse on multi" rule +// was overly conservative for that workflow. Callers receive the +// full list of additional candidates so they can surface them as +// informational comments in eval-able output without overriding +// the bind. +// +// Returns: +// - string: absolute path of the innermost (selected) candidate. +// - []string: zero-or-more additional candidates further up the +// path, in the order [rc.ScanCandidates] returned them +// (closest-first). Nil when only one candidate is visible. +// - error: [errActivate.NoCandidates] when the upward walk finds +// no `.context/` directory at all. Other errors are surfaced +// for I/O failures (e.g., os.Getwd). +func scan() (string, []string, error) { + cwd, cwdErr := os.Getwd() + if cwdErr != nil { + return "", nil, cwdErr + } + candidates := rc.ScanCandidates(cwd) + if len(candidates) == 0 { + return "", nil, errActivate.NoCandidates() + } + return candidates[0], candidates[1:], nil +} diff --git a/internal/cli/activate/core/resolve/resolve.go b/internal/cli/activate/core/resolve/resolve.go new file mode 100644 index 000000000..67d53c937 --- /dev/null +++ b/internal/cli/activate/core/resolve/resolve.go @@ -0,0 +1,32 @@ +// / ctx: https://ctx.ist +// ,'`./ do you remember? +// `.,'\ +// \ Copyright 2026-present Context contributors. +// SPDX-License-Identifier: Apache-2.0 + +package resolve + +// Selected returns the innermost visible .context/ directory +// alongside any additional candidates further up the path. +// +// Single-source-anchor model +// (specs/single-source-context-anchor.md): activation is always +// project-local scan from CWD. The explicit-path mode that used +// to accept an argument was removed. 
+// +// Multi-candidate is no longer an error: workspace-level shared +// `.context/` dirs alongside per-project ones are a legitimate +// nested-project layout. Innermost wins (matching `git` / `make` +// behavior in nested layouts), and the additional candidates are +// surfaced so callers can include them as informational comments +// in eval-able output. +// +// Returns: +// - string: absolute path of the resolved .context/ directory. +// - []string: additional candidates further up the path, nil +// when only one is visible. +// - error: [errActivate.NoCandidates] when no `.context/` is +// visible from CWD upward. +func Selected() (string, []string, error) { + return scan() +} diff --git a/internal/cli/activate/doc.go b/internal/cli/activate/doc.go new file mode 100644 index 000000000..c505a109b --- /dev/null +++ b/internal/cli/activate/doc.go @@ -0,0 +1,36 @@ +// / ctx: https://ctx.ist +// ,'`./ do you remember? +// `.,'\ +// \ Copyright 2026-present Context contributors. +// SPDX-License-Identifier: Apache-2.0 + +// Package activate implements the `ctx activate` command. +// +// Activate is the shell-integration entry point under the +// explicit-context-dir resolution model introduced in +// specs/explicit-context-dir.md. The command scans upward from CWD +// (or validates an explicit path argument) and emits a +// shell-specific `export CTX_DIR=...` statement to stdout, intended +// to be consumed via `eval "$(ctx activate)"`. +// +// Activate is the ONLY command in the CLI that walks the filesystem +// during resolution. Every other command reads the declared +// CTX_DIR / --context-dir or calls [rc.RequireContextDir] and errors +// loudly when neither is set. Centralizing walk-up in activate keeps +// silent-inference bugs confined to a single supervised entry point. +// +// # Subpackages +// +// cmd/root : cobra command definition and resolution logic. +// core/emit: shell-specific emitters for bash/zsh/sh. 
// Package activate implements the `ctx activate` command.
//
// Activate is the shell-integration entry point under the
// explicit-context-dir resolution model introduced in
// specs/explicit-context-dir.md. The command scans upward from CWD
// and emits a shell-specific `export CTX_DIR=...` statement to
// stdout, intended to be consumed via `eval "$(ctx activate)"`.
//
// Activate is the ONLY command in the CLI that walks the
// filesystem during resolution. Every other command reads the
// declared CTX_DIR / --context-dir or calls rc.RequireContextDir
// and errors loudly when neither is set. Centralizing walk-up in
// activate keeps silent-inference bugs confined to a single
// supervised entry point.
//
// # Subpackages
//
//	cmd/root     : cobra command definition.
//	core/resolve : upward scan and candidate selection.
//	core/emit    : shell-specific emitters for bash/zsh/sh.
//
// # Behavior Summary
//
// The command accepts no path argument: the explicit-path mode was
// removed under the single-source-anchor model
// (specs/single-source-context-anchor.md). Resolution always scans
// upward from CWD. Zero candidates is an error; when more than one
// candidate is visible, the innermost wins and the additional
// candidates are reported on stderr as informational advisories.
package activate
@@ -29,6 +30,8 @@ func TestAddCommand(t *testing.T) { } defer func() { _ = os.Chdir(origDir) }() + testctx.Declare(t, tmpDir) + // First init initCmd := initialize.Cmd() initCmd.SetArgs([]string{}) @@ -69,6 +72,8 @@ func TestAddDecisionAndLearning(t *testing.T) { } defer func() { _ = os.Chdir(origDir) }() + testctx.Declare(t, tmpDir) + // First init initCmd := initialize.Cmd() initCmd.SetArgs([]string{}) @@ -216,6 +221,8 @@ func TestPrependOrder(t *testing.T) { } defer func() { _ = os.Chdir(origDir) }() + testctx.Declare(t, tmpDir) + // First init initCmd := initialize.Cmd() initCmd.SetArgs([]string{}) @@ -336,6 +343,8 @@ func TestAddFromFile(t *testing.T) { } defer func() { _ = os.Chdir(origDir) }() + testctx.Declare(t, tmpDir) + // First init initCmd := initialize.Cmd() initCmd.SetArgs([]string{}) diff --git a/internal/cli/add/cmd/coverage_test.go b/internal/cli/add/cmd/coverage_test.go index 6a493892b..a826a743b 100644 --- a/internal/cli/add/cmd/coverage_test.go +++ b/internal/cli/add/cmd/coverage_test.go @@ -25,12 +25,14 @@ import ( errAdd "github.com/ActiveMemory/ctx/internal/err/add" errFs "github.com/ActiveMemory/ctx/internal/err/fs" "github.com/ActiveMemory/ctx/internal/inspect" + "github.com/ActiveMemory/ctx/internal/rc" "github.com/spf13/cobra" "github.com/ActiveMemory/ctx/internal/cli/initialize" entryType "github.com/ActiveMemory/ctx/internal/config/entry" "github.com/ActiveMemory/ctx/internal/entity" "github.com/ActiveMemory/ctx/internal/entry" + "github.com/ActiveMemory/ctx/internal/testutil/testctx" ) // --------------------------------------------------------------------------- @@ -659,7 +661,12 @@ func TestWriteEntry_FileNotFound(t *testing.T) { } defer func() { _ = os.Chdir(origDir) }() - // No .context/ directory, so files won't exist + // Declare a non-existent context dir so we hit "file not found" + // rather than "context directory not declared". 
+ t.Setenv("CTX_DIR", filepath.Join(tmpDir, ".context")) + rc.Reset() + t.Cleanup(rc.Reset) + err := entry.Write(entity.EntryParams{ Type: "task", Content: "something", @@ -684,6 +691,8 @@ func TestRun_UnknownType(t *testing.T) { } defer func() { _ = os.Chdir(origDir) }() + testctx.Declare(t, tmpDir) + initCmd := initialize.Cmd() initCmd.SetArgs([]string{}) if err := initCmd.Execute(); err != nil { @@ -715,6 +724,8 @@ func TestRun_NoContent(t *testing.T) { } defer func() { _ = os.Chdir(origDir) }() + testctx.Declare(t, tmpDir) + initCmd := initialize.Cmd() initCmd.SetArgs([]string{}) if err := initCmd.Execute(); err != nil { @@ -745,6 +756,8 @@ func TestRun_TaskWithPriority(t *testing.T) { } defer func() { _ = os.Chdir(origDir) }() + testctx.Declare(t, tmpDir) + initCmd := initialize.Cmd() initCmd.SetArgs([]string{}) if err := initCmd.Execute(); err != nil { @@ -784,6 +797,8 @@ func TestRun_TaskWithSection(t *testing.T) { } defer func() { _ = os.Chdir(origDir) }() + testctx.Declare(t, tmpDir) + initCmd := initialize.Cmd() initCmd.SetArgs([]string{}) if err := initCmd.Execute(); err != nil { diff --git a/internal/cli/add/cmd/root/run.go b/internal/cli/add/cmd/root/run.go index 737724efc..cf588fd6e 100644 --- a/internal/cli/add/cmd/root/run.go +++ b/internal/cli/add/cmd/root/run.go @@ -23,6 +23,7 @@ import ( "github.com/ActiveMemory/ctx/internal/entry" errAdd "github.com/ActiveMemory/ctx/internal/err/add" "github.com/ActiveMemory/ctx/internal/hub" + "github.com/ActiveMemory/ctx/internal/rc" "github.com/ActiveMemory/ctx/internal/trace" writeAdd "github.com/ActiveMemory/ctx/internal/write/add" writeConnect "github.com/ActiveMemory/ctx/internal/write/connect" @@ -42,6 +43,10 @@ import ( // - error: Non-nil if content is missing, type is invalid, required flags // are missing, or file operations fail func Run(cmd *cobra.Command, args []string, flags entity.AddConfig) error { + if _, ctxErr := rc.RequireContextDir(); ctxErr != nil { + cmd.SilenceUsage = true + return 
ctxErr + } fType := strings.ToLower(args[0]) content, extractErr := extract.Content(args, flags) @@ -81,12 +86,17 @@ func Run(cmd *cobra.Command, args []string, flags entity.AddConfig) error { writeAdd.Added(cmd, fName) + stateDir, dirErr := state.Dir() + if dirErr != nil { + return dirErr + } + // Best-effort: publish to ctx Hub if --share is set. if flags.Share { pubEntry := hub.PublishEntry{ Type: fType, Content: content, - Origin: filepath.Base(state.Dir()), + Origin: filepath.Base(stateDir), } if pubErr := corePub.Run( cmd, []hub.PublishEntry{pubEntry}, @@ -104,7 +114,7 @@ func Run(cmd *cobra.Command, args []string, flags entity.AddConfig) error { // so the new entry is always #1 in file order. This coupling is // intentional: if the prepend logic changes, this must be updated. if fType == cfgEntry.Decision || fType == cfgEntry.Learning { - _ = trace.Record(fType+cfgTrace.RefFirstEntry, state.Dir()) + _ = trace.Record(fType+cfgTrace.RefFirstEntry, stateDir) } return nil diff --git a/internal/cli/agent/agent_test.go b/internal/cli/agent/agent_test.go index 485334f66..812256893 100644 --- a/internal/cli/agent/agent_test.go +++ b/internal/cli/agent/agent_test.go @@ -11,6 +11,7 @@ import ( "testing" "github.com/ActiveMemory/ctx/internal/cli/initialize" + "github.com/ActiveMemory/ctx/internal/testutil/testctx" ) // TestAgentCommand tests the agent command. 
@@ -27,6 +28,8 @@ func TestAgentCommand(t *testing.T) { } defer func() { _ = os.Chdir(origDir) }() + testctx.Declare(t, tmpDir) + // First init initCmd := initialize.Cmd() initCmd.SetArgs([]string{}) @@ -57,6 +60,8 @@ func TestAgentJSONOutput(t *testing.T) { } defer func() { _ = os.Chdir(origDir) }() + testctx.Declare(t, tmpDir) + // First init initCmd := initialize.Cmd() initCmd.SetArgs([]string{}) diff --git a/internal/cli/agent/cmd/root/cmd.go b/internal/cli/agent/cmd/root/cmd.go index 432df3258..44099c2c5 100644 --- a/internal/cli/agent/cmd/root/cmd.go +++ b/internal/cli/agent/cmd/root/cmd.go @@ -56,6 +56,11 @@ func Cmd() *cobra.Command { Long: long, Example: desc.Example(cmd.DescKeyAgent), RunE: func(cmd *cobra.Command, args []string) error { + ctxDir, ctxErr := rc.RequireContextDir() + if ctxErr != nil { + cmd.SilenceUsage = true + return ctxErr + } if !cmd.Flags().Changed(cFlag.Budget) { budget = rc.TokenBudget() } @@ -73,10 +78,15 @@ func Cmd() *cobra.Command { skillBody = sk } - // Tier 8: Load ctx Hub entries. + // Tier 8: Load ctx Hub entries using the already-resolved + // ctxDir from the top-level RequireContextDir gate. 
var sharedBodies []string if includeShare { - sharedBodies = coreHub.LoadBodies() + var hubErr error + sharedBodies, hubErr = coreHub.LoadBodies(ctxDir) + if hubErr != nil { + return hubErr + } } return Run( diff --git a/internal/cli/agent/cmd/root/run.go b/internal/cli/agent/cmd/root/run.go index a55796c77..ba952818b 100644 --- a/internal/cli/agent/cmd/root/run.go +++ b/internal/cli/agent/cmd/root/run.go @@ -50,7 +50,11 @@ func Run( skillBody string, hubBodies []string, ) error { - if coreCooldown.Active(session, cooldown) { + active, cooldownErr := coreCooldown.Active(session, cooldown) + if cooldownErr != nil { + return cooldownErr + } + if active { return nil } @@ -76,10 +80,14 @@ func Run( hubBodies, ) } - - if outputErr == nil { - coreCooldown.TouchTombstone(session) + if outputErr != nil { + return outputErr } - return outputErr + // Output succeeded: persist the tombstone so subsequent + // invocations inside the cooldown window stay silent. A + // failure here (disk full, permission denied) is a rare + // edge case we surface rather than swallow: without the + // marker the next run will not suppress. 
+ return coreCooldown.TouchTombstone(session) } diff --git a/internal/cli/agent/core/cooldown/cooldown.go b/internal/cli/agent/core/cooldown/cooldown.go index 3de0d182e..b10ee3592 100644 --- a/internal/cli/agent/core/cooldown/cooldown.go +++ b/internal/cli/agent/core/cooldown/cooldown.go @@ -7,6 +7,7 @@ package cooldown import ( + "errors" "os" "path/filepath" "time" @@ -14,9 +15,7 @@ import ( "github.com/ActiveMemory/ctx/internal/config/agent" "github.com/ActiveMemory/ctx/internal/config/dir" "github.com/ActiveMemory/ctx/internal/config/fs" - "github.com/ActiveMemory/ctx/internal/config/warn" ctxIo "github.com/ActiveMemory/ctx/internal/io" - ctxLog "github.com/ActiveMemory/ctx/internal/log/warn" "github.com/ActiveMemory/ctx/internal/rc" ) @@ -28,16 +27,33 @@ import ( // - cooldown: duration to suppress repeated output // // Returns: -// - bool: true if tombstone exists and is within the cooldown window -func Active(session string, cooldown time.Duration) bool { +// - bool: true when the tombstone exists and is within the cooldown +// window. Always false when cooldown is disabled for this call +// (empty session or non-positive cooldown) or when no tombstone +// has ever been written. +// - error: [os.ErrNotExist] is treated as a legitimate "not active" +// exit condition and NOT returned. Any other failure (context +// directory undeclared, permission denied, I/O failure) is +// surfaced so callers do not silently treat it as "not active" +// and emit output they meant to suppress. +func Active(session string, cooldown time.Duration) (bool, error) { if session == "" || cooldown <= 0 { - return false + return false, nil } - info, err := os.Stat(TombstonePath(session)) - if err != nil { - return false + path, pathErr := TombstonePath(session) + if pathErr != nil { + return false, pathErr + } + info, statErr := os.Stat(path) + if statErr != nil { + if errors.Is(statErr, os.ErrNotExist) { + // No prior emission; legitimately not active. 
+ return false, nil + } + // Permission denied, I/O failure, etc.: surface. + return false, statErr } - return time.Since(info.ModTime()) < cooldown + return time.Since(info.ModTime()) < cooldown, nil } // TouchTombstone creates or updates the tombstone file for the given @@ -45,14 +61,22 @@ func Active(session string, cooldown time.Duration) bool { // // Parameters: // - session: session identifier (typically the caller's PID) -func TouchTombstone(session string) { +// +// Returns: +// - error: nil on an empty session (no-op). Non-nil when the +// tombstone path cannot be resolved or the file cannot be +// written. Callers decide whether a persistence failure +// warrants aborting the command; this helper no longer +// logs and swallows on its own. +func TouchTombstone(session string) error { if session == "" { - return + return nil } - p := TombstonePath(session) - if writeErr := ctxIo.SafeWriteFile(p, nil, fs.PermSecret); writeErr != nil { - ctxLog.Warn(warn.Write, p, writeErr) + p, pathErr := TombstonePath(session) + if pathErr != nil { + return pathErr } + return ctxIo.SafeWriteFile(p, nil, fs.PermSecret) } // TombstonePath returns the filesystem path for a session's tombstone. @@ -61,14 +85,22 @@ func TouchTombstone(session string) { // - session: session identifier // // Returns: -// - string: absolute path in the system temp directory -func TombstonePath(session string) string { - stateDir := filepath.Join(rc.ContextDir(), dir.State) - mkdirErr := ctxIo.SafeMkdirAll( +// - string: absolute path under the context state directory. +// - error: non-nil when the context directory is not declared or +// the state directory cannot be created. Previously this helper +// logged the mkdir error and returned the path anyway, guaranteeing +// a second failure on the subsequent write; propagating keeps the +// first failure authoritative. 
+func TombstonePath(session string) (string, error) { + ctxDir, err := rc.ContextDir() + if err != nil { + return "", err + } + stateDir := filepath.Join(ctxDir, dir.State) + if mkdirErr := ctxIo.SafeMkdirAll( stateDir, fs.PermRestrictedDir, - ) - if mkdirErr != nil { - ctxLog.Warn(warn.Mkdir, stateDir, mkdirErr) + ); mkdirErr != nil { + return "", mkdirErr } - return filepath.Join(stateDir, agent.TombstonePrefix+session) + return filepath.Join(stateDir, agent.TombstonePrefix+session), nil } diff --git a/internal/cli/agent/core/hub/load.go b/internal/cli/agent/core/hub/load.go index a0ae7be48..b2c85ae0b 100644 --- a/internal/cli/agent/core/hub/load.go +++ b/internal/cli/agent/core/hub/load.go @@ -14,22 +14,37 @@ import ( "github.com/ActiveMemory/ctx/internal/config/file" cfgHub "github.com/ActiveMemory/ctx/internal/config/hub" "github.com/ActiveMemory/ctx/internal/io" - "github.com/ActiveMemory/ctx/internal/rc" ) // LoadBodies reads all markdown files from .context/hub/ // and returns their contents as strings. // -// Returns nil if the shared directory does not exist or is -// empty (shared knowledge is opt-in). +// ctxDir is supplied by the caller so this function does not +// re-resolve it; the caller decides whether "no context dir" is +// benign and handles it before invoking us. +// +// Any directory read failure (including a missing hub directory) +// is propagated so the caller can surface it. [LoadBodies] is only +// invoked when the user explicitly requested shared content (e.g. +// `ctx agent --include-share`); telling them "everything is fine, +// here's an empty list" when the hub directory does not exist hides +// a real setup gap. +// +// Per-file read failures inside an existing hub directory are still +// tolerated silently. One unreadable sibling should not blank the +// rest. 
+// +// Parameters: +// - ctxDir: absolute path to the context directory // // Returns: // - []string: file contents, one per shared file -func LoadBodies() []string { - dir := filepath.Join(rc.ContextDir(), cfgHub.DirHub) +// - error: non-nil on any directory read failure +func LoadBodies(ctxDir string) ([]string, error) { + dir := filepath.Join(ctxDir, cfgHub.DirHub) entries, readErr := os.ReadDir(dir) if readErr != nil { - return nil + return nil, readErr } var bodies []string @@ -48,5 +63,5 @@ func LoadBodies() []string { } bodies = append(bodies, string(data)) } - return bodies + return bodies, nil } diff --git a/internal/cli/agent/core/steering/steering.go b/internal/cli/agent/core/steering/steering.go index 71b83907b..869398a76 100644 --- a/internal/cli/agent/core/steering/steering.go +++ b/internal/cli/agent/core/steering/steering.go @@ -56,9 +56,11 @@ func LoadBodies() []string { // - string: Body content of the loaded skill // - error: Non-nil if the skill is missing or unreadable func LoadSkill(name string) (string, error) { - skillsDir := filepath.Join( - rc.ContextDir(), dir.Skills, - ) + ctxDir, ctxErr := rc.ContextDir() + if ctxErr != nil { + return "", ctxErr + } + skillsDir := filepath.Join(ctxDir, dir.Skills) sk, loadErr := skill.Load(skillsDir, name) if loadErr != nil { diff --git a/internal/cli/backup/cmd.go b/internal/cli/backup/cmd.go deleted file mode 100644 index 5d21bf4e1..000000000 --- a/internal/cli/backup/cmd.go +++ /dev/null @@ -1,44 +0,0 @@ -// / ctx: https://ctx.ist -// ,'`./ do you remember? -// `.,'\ -// \ Copyright 2026-present Context contributors. 
-// SPDX-License-Identifier: Apache-2.0 - -package backup - -import ( - "github.com/spf13/cobra" - - "github.com/ActiveMemory/ctx/internal/assets/read/desc" - "github.com/ActiveMemory/ctx/internal/config/archive" - "github.com/ActiveMemory/ctx/internal/config/embed/cmd" - "github.com/ActiveMemory/ctx/internal/config/embed/flag" - cFlag "github.com/ActiveMemory/ctx/internal/config/flag" -) - -// Cmd returns the "ctx backup" top-level command. -// -// Returns: -// - *cobra.Command: Configured backup command -func Cmd() *cobra.Command { - short, long := desc.Command(cmd.DescKeyBackup) - - c := &cobra.Command{ - Use: cmd.UseBackup, - Short: short, - Long: long, - Example: desc.Example(cmd.DescKeyBackup), - RunE: func(cmd *cobra.Command, _ []string) error { - return Run(cmd) - }, - } - - c.Flags().String(cFlag.Scope, archive.BackupScopeAll, - desc.Flag(flag.DescKeyBackupScope), - ) - c.Flags().Bool(cFlag.JSON, false, - desc.Flag(flag.DescKeyBackupJson), - ) - - return c -} diff --git a/internal/cli/backup/doc.go b/internal/cli/backup/doc.go deleted file mode 100644 index b58be977a..000000000 --- a/internal/cli/backup/doc.go +++ /dev/null @@ -1,37 +0,0 @@ -// / ctx: https://ctx.ist -// ,'`./ do you remember? -// `.,'\ -// \ Copyright 2026-present Context contributors. -// SPDX-License-Identifier: Apache-2.0 - -// Package backup implements the ctx backup top-level command. -// -// Creates timestamped tar.gz archives of project context -// and/or global Claude Code data. The --scope flag selects -// what to archive: "project" (the .context/ directory), -// "global" (Claude Code config under ~/.claude), or "all" -// (both). Archives are written to a local directory and -// optionally copied to an SMB share configured via the -// CTX_BACKUP_SMB_URL environment variable. -// -// The command delegates archive creation to -// [internal/cli/system/core/archive] and writes results -// through [internal/write/backup]. JSON output is -// available via --json for scripting. 
-// -// # Flags -// -// - --scope: project | global | all (default: all) -// - --json: machine-readable output -// -// # SMB Remote Copy -// -// When CTX_BACKUP_SMB_URL is set, the archive is copied -// to the configured share after local creation. An -// optional CTX_BACKUP_SMB_SUBDIR narrows the target -// directory within the share. -// -// [Cmd] builds the cobra command with scope and JSON flags. -// [Run] creates the archive for the selected scope and copies -// it to the SMB share when configured. -package backup diff --git a/internal/cli/backup/run.go b/internal/cli/backup/run.go deleted file mode 100644 index f2093ee0f..000000000 --- a/internal/cli/backup/run.go +++ /dev/null @@ -1,99 +0,0 @@ -// / ctx: https://ctx.ist -// ,'`./ do you remember? -// `.,'\ -// \ Copyright 2026-present Context contributors. -// SPDX-License-Identifier: Apache-2.0 - -package backup - -import ( - "encoding/json" - "os" - "time" - - "github.com/spf13/cobra" - - coreArchive "github.com/ActiveMemory/ctx/internal/cli/system/core/archive" - "github.com/ActiveMemory/ctx/internal/config/archive" - "github.com/ActiveMemory/ctx/internal/config/env" - cFlag "github.com/ActiveMemory/ctx/internal/config/flag" - "github.com/ActiveMemory/ctx/internal/config/token" - "github.com/ActiveMemory/ctx/internal/entity" - errBackup "github.com/ActiveMemory/ctx/internal/err/backup" - errInit "github.com/ActiveMemory/ctx/internal/err/initialize" - "github.com/ActiveMemory/ctx/internal/write/backup" -) - -// Run executes the backup command logic. -// -// Creates timestamped tar.gz archives of project context and/or global -// Claude Code data. Optionally copies archives to an SMB share. 
-// -// Parameters: -// - cmd: Cobra command for output and flag access -// -// Returns: -// - error: Non-nil on invalid scope, home directory lookup failure, -// SMB parse error, or archive creation failure -func Run(cmd *cobra.Command) error { - scope, _ := cmd.Flags().GetString(cFlag.Scope) - jsonOut, _ := cmd.Flags().GetBool(cFlag.JSON) - - switch scope { - case archive.BackupScopeProject, - archive.BackupScopeGlobal, - archive.BackupScopeAll: - default: - return errBackup.InvalidScope(scope) - } - - home, homeErr := os.UserHomeDir() - if homeErr != nil { - return errInit.HomeDir(homeErr) - } - - smbURL := os.Getenv(env.BackupSMBURL) - smbSubdir := os.Getenv(env.BackupSMBSubdir) - var smb *coreArchive.SMBConfig - if smbURL != "" { - var smbErr error - smb, smbErr = coreArchive.ParseSMBConfig(smbURL, smbSubdir) - if smbErr != nil { - return errBackup.SMBConfig(smbErr) - } - } - - timestamp := time.Now().Format(archive.BackupTimestampFormat) - var results []entity.BackupResult - - if scope == archive.BackupScopeProject || scope == archive.BackupScopeAll { - result, projErr := coreArchive.BackupProject( - cmd.ErrOrStderr(), home, timestamp, smb, - ) - if projErr != nil { - return errBackup.Project(projErr) - } - results = append(results, result) - } - - if scope == archive.BackupScopeGlobal || scope == archive.BackupScopeAll { - result, globalErr := coreArchive.BackupGlobal( - cmd.ErrOrStderr(), home, timestamp, smb, - ) - if globalErr != nil { - return errBackup.Global(globalErr) - } - results = append(results, result) - } - - if jsonOut { - enc := json.NewEncoder(cmd.OutOrStdout()) - enc.SetIndent("", token.Indent2) - return enc.Encode(results) - } - - for _, r := range results { - backup.ResultLine(cmd, r.Scope, r.Archive, r.Size, r.SMBDest) - } - return nil -} diff --git a/internal/cli/change/cmd/root/run.go b/internal/cli/change/cmd/root/run.go index 123824a7f..de484c2b9 100644 --- a/internal/cli/change/cmd/root/run.go +++ 
b/internal/cli/change/cmd/root/run.go @@ -13,6 +13,7 @@ import ( "github.com/ActiveMemory/ctx/internal/cli/change/core/render" "github.com/ActiveMemory/ctx/internal/cli/change/core/scan" errInit "github.com/ActiveMemory/ctx/internal/err/initialize" + "github.com/ActiveMemory/ctx/internal/rc" writeChange "github.com/ActiveMemory/ctx/internal/write/change" ) @@ -29,6 +30,10 @@ import ( // Returns: // - error: Non-nil if reference time detection fails func Run(cmd *cobra.Command, since string) error { + if _, ctxErr := rc.RequireContextDir(); ctxErr != nil { + cmd.SilenceUsage = true + return ctxErr + } refTime, refLabel, err := detect.ReferenceTime(since) if err != nil { return errInit.DetectReferenceTime(err) diff --git a/internal/cli/change/core/cmd_test.go b/internal/cli/change/core/cmd_test.go index b8fbb2b43..09693bdb7 100644 --- a/internal/cli/change/core/cmd_test.go +++ b/internal/cli/change/core/cmd_test.go @@ -212,7 +212,10 @@ func TestDetectReferenceTime_Fallback(t *testing.T) { } func TestDetectReferenceTime_FromMarkers(t *testing.T) { - tmp := t.TempDir() + tmp := filepath.Join(t.TempDir(), ".context") + if mkErr := os.MkdirAll(tmp, 0o700); mkErr != nil { + t.Fatalf("mkdir: %v", mkErr) + } t.Setenv("CTX_DIR", tmp) rc.Reset() @@ -260,7 +263,10 @@ func TestDetectReferenceTime_FromMarkers(t *testing.T) { } func TestFindContextChanges(t *testing.T) { - tmp := t.TempDir() + tmp := filepath.Join(t.TempDir(), ".context") + if mkErr := os.MkdirAll(tmp, 0o700); mkErr != nil { + t.Fatalf("mkdir: %v", mkErr) + } t.Setenv("CTX_DIR", tmp) rc.Reset() @@ -300,7 +306,10 @@ func TestFindContextChanges(t *testing.T) { } func TestFindContextChanges_EmptyDir(t *testing.T) { - tmp := t.TempDir() + tmp := filepath.Join(t.TempDir(), ".context") + if mkErr := os.MkdirAll(tmp, 0o700); mkErr != nil { + t.Fatalf("mkdir: %v", mkErr) + } t.Setenv("CTX_DIR", tmp) rc.Reset() diff --git a/internal/cli/change/core/detect/detect.go b/internal/cli/change/core/detect/detect.go index 
c900bc171..91e67d71e 100644 --- a/internal/cli/change/core/detect/detect.go +++ b/internal/cli/change/core/detect/detect.go @@ -22,16 +22,25 @@ import ( ) // FromMarkers finds the second most recent ctx-loaded-* marker file. -// The most recent is the current session's marker. +// The most recent is the current session's marker, so the reference +// point for change detection is the one before it. // // Returns: -// - time.Time: Marker file modification time -// - bool: True if a valid marker was found -func FromMarkers() (time.Time, bool) { - stateDir := filepath.Join(rc.ContextDir(), dir.State) +// - time.Time: Marker file modification time on success. +// - error: [errCtx.ErrDirNotDeclared] when no context dir is +// declared; the underlying error from [os.ReadDir] when the state +// directory cannot be read; [os.ErrNotExist] when fewer than two +// marker files exist (no previous session to compare against). +// Callers treat any non-nil error as "try the next source". +func FromMarkers() (time.Time, error) { + ctxDir, err := rc.ContextDir() + if err != nil { + return time.Time{}, err + } + stateDir := filepath.Join(ctxDir, dir.State) entries, readDirErr := os.ReadDir(stateDir) if readDirErr != nil { - return time.Time{}, false + return time.Time{}, readDirErr } type markerInfo struct { @@ -51,7 +60,8 @@ func FromMarkers() (time.Time, bool) { } if len(markers) < 2 { - return time.Time{}, false + // No previous-session marker on disk yet. + return time.Time{}, os.ErrNotExist } // Sort by modtime descending. @@ -60,20 +70,28 @@ func FromMarkers() (time.Time, bool) { }) // Second most recent = previous session. - return markers[1].modTime, true + return markers[1].modTime, nil } // FromEvents scans events.jsonl in reverse for the last // context-load-gate event. 
// // Returns: -// - time.Time: Event timestamp -// - bool: True if a valid event was found -func FromEvents() (time.Time, bool) { - eventsPath := filepath.Join(rc.ContextDir(), dir.State, event.FileLog) +// - time.Time: Event timestamp on success. +// - error: [errCtx.ErrDirNotDeclared] when no context dir is +// declared; the underlying error from the event log reader when +// the file cannot be read; [os.ErrNotExist] when no matching +// load-gate event is present or its timestamp cannot be parsed. +// Callers treat any non-nil error as "try the next source". +func FromEvents() (time.Time, error) { + ctxDir, err := rc.ContextDir() + if err != nil { + return time.Time{}, err + } + eventsPath := filepath.Join(ctxDir, dir.State, event.FileLog) data, readErr := io.SafeReadUserFile(eventsPath) if readErr != nil { - return time.Time{}, false + return time.Time{}, readErr } lines := strings.Split(strings.TrimSpace(string(data)), token.NewlineLF) @@ -84,9 +102,10 @@ func FromEvents() (time.Time, bool) { continue } if t, ok := ExtractTimestamp(line); ok { - return t, true + return t, nil } } - return time.Time{}, false + // No matching load-gate event in the log. + return time.Time{}, os.ErrNotExist } diff --git a/internal/cli/change/core/detect/parse.go b/internal/cli/change/core/detect/parse.go index d7ba4c484..bd4e9a503 100644 --- a/internal/cli/change/core/detect/parse.go +++ b/internal/cli/change/core/detect/parse.go @@ -40,12 +40,12 @@ func ReferenceTime(since string) (time.Time, string, error) { } // Try marker files. - if t, ok := FromMarkers(); ok { + if t, markersErr := FromMarkers(); markersErr == nil { return t, format.DurationAgo(time.Since(t)), nil } // Try events.jsonl. 
- if t, ok := FromEvents(); ok { + if t, eventsErr := FromEvents(); eventsErr == nil { return t, format.DurationAgo(time.Since(t)), nil } diff --git a/internal/cli/change/core/scan/scan.go b/internal/cli/change/core/scan/scan.go index 5b9217fad..4402475ab 100644 --- a/internal/cli/change/core/scan/scan.go +++ b/internal/cli/change/core/scan/scan.go @@ -29,7 +29,10 @@ import ( // - []entity.ContextChange: Modified files sorted by modtime descending // - error: Non-nil if the context directory cannot be read func FindContextChanges(refTime time.Time) ([]entity.ContextChange, error) { - dir := rc.ContextDir() + dir, ctxErr := rc.ContextDir() + if ctxErr != nil { + return nil, ctxErr + } entries, readDirErr := os.ReadDir(dir) if readDirErr != nil { return nil, readDirErr diff --git a/internal/cli/cli_test.go b/internal/cli/cli_test.go index 429461b31..f3206c686 100644 --- a/internal/cli/cli_test.go +++ b/internal/cli/cli_test.go @@ -65,6 +65,13 @@ func TestBinaryIntegration(t *testing.T) { t.Fatalf("failed to create test dir: %v", err) } + // Under the explicit-context-dir model each subprocess invocation + // must declare CTX_DIR. t.Setenv mutates the current process env + // and exec.Cmd with cmd.Env == nil inherits that env, so a single + // Setenv here propagates to every child below, and is unset + // automatically at test end. 
+ t.Setenv("CTX_DIR", filepath.Join(testDir, ".context")) + // Subtest: ctx init creates expected files t.Run("init creates expected files", func(t *testing.T) { initCmd := exec.Command(binaryPath, "init") //nolint:gosec // test binary diff --git a/internal/cli/compact/cmd/root/run.go b/internal/cli/compact/cmd/root/run.go index 0dc19218a..5d50ca340 100644 --- a/internal/cli/compact/cmd/root/run.go +++ b/internal/cli/compact/cmd/root/run.go @@ -34,6 +34,10 @@ import ( // Returns: // - error: Non-nil if context loading fails or .context/ is not found func Run(cmd *cobra.Command, archive bool) error { + if _, ctxErr := rc.RequireContextDir(); ctxErr != nil { + cmd.SilenceUsage = true + return ctxErr + } ctx, err := load.Do("") if err != nil { if _, ok := errors.AsType[*errCtx.NotFoundError](err); ok { diff --git a/internal/cli/compact/compact_test.go b/internal/cli/compact/compact_test.go index c4a2df472..8a1285b8e 100644 --- a/internal/cli/compact/compact_test.go +++ b/internal/cli/compact/compact_test.go @@ -13,6 +13,7 @@ import ( "github.com/ActiveMemory/ctx/internal/cli/add" "github.com/ActiveMemory/ctx/internal/cli/initialize" taskComplete "github.com/ActiveMemory/ctx/internal/cli/task/cmd/complete" + "github.com/ActiveMemory/ctx/internal/testutil/testctx" ) // TestCompactCommand tests the compact command. 
@@ -29,6 +30,8 @@ func TestCompactCommand(t *testing.T) { } defer func() { _ = os.Chdir(origDir) }() + testctx.Declare(t, tmpDir) + // First init initCmd := initialize.Cmd() initCmd.SetArgs([]string{}) @@ -58,6 +61,8 @@ func TestCompactWithTasks(t *testing.T) { } defer func() { _ = os.Chdir(origDir) }() + testctx.Declare(t, tmpDir) + // First init initCmd := initialize.Cmd() initCmd.SetArgs([]string{}) diff --git a/internal/cli/config/cmd/status/run_test.go b/internal/cli/config/cmd/status/run_test.go index 1efe6b7eb..cfd7c3b03 100644 --- a/internal/cli/config/cmd/status/run_test.go +++ b/internal/cli/config/cmd/status/run_test.go @@ -17,11 +17,12 @@ import ( "github.com/ActiveMemory/ctx/internal/config/file" "github.com/ActiveMemory/ctx/internal/rc" + "github.com/ActiveMemory/ctx/internal/testutil/testctx" ) const ( devContent = "profile: dev\nnotify:\n events:\n - loop\n" - baseContent = "profile: base\n# context_dir: .context\n" + baseContent = "profile: base\n" ) func newTestCmd() *cobra.Command { @@ -39,7 +40,11 @@ func chdirWithCleanup(t *testing.T, dir string) { t.Helper() origDir, _ := os.Getwd() _ = os.Chdir(dir) - rc.Reset() + // Under the explicit-context-dir model, .ctxrc is read from + // `filepath.Dir(CTX_DIR)/.ctxrc`. Declaring CTX_DIR at + // `/.context` keeps this test's root-adjacent .ctxrc + // visible to the loader. 
+ testctx.Declare(t, dir) t.Cleanup(func() { _ = os.Chdir(origDir) rc.Reset() diff --git a/internal/cli/config/cmd/switchcmd/run_test.go b/internal/cli/config/cmd/switchcmd/run_test.go index 8de63cbd2..c6b71b7d3 100644 --- a/internal/cli/config/cmd/switchcmd/run_test.go +++ b/internal/cli/config/cmd/switchcmd/run_test.go @@ -18,11 +18,12 @@ import ( "github.com/ActiveMemory/ctx/internal/cli/config/core/profile" "github.com/ActiveMemory/ctx/internal/config/file" "github.com/ActiveMemory/ctx/internal/rc" + "github.com/ActiveMemory/ctx/internal/testutil/testctx" ) const ( devContent = "profile: dev\nnotify:\n events:\n - loop\n" - baseContent = "profile: base\n# context_dir: .context\n" + baseContent = "profile: base\n" ) func setupProfiles(t *testing.T) string { @@ -42,7 +43,7 @@ func setupProfiles(t *testing.T) string { origDir, _ := os.Getwd() _ = os.Chdir(root) - rc.Reset() + testctx.Declare(t, root) t.Cleanup(func() { _ = os.Chdir(origDir) rc.Reset() diff --git a/internal/cli/config/core/profile/profile_test.go b/internal/cli/config/core/profile/profile_test.go index 3c59512fd..927b58cfb 100644 --- a/internal/cli/config/core/profile/profile_test.go +++ b/internal/cli/config/core/profile/profile_test.go @@ -13,18 +13,19 @@ import ( "github.com/ActiveMemory/ctx/internal/config/file" "github.com/ActiveMemory/ctx/internal/rc" + "github.com/ActiveMemory/ctx/internal/testutil/testctx" ) const ( devContent = "profile: dev\nnotify:\n events:\n - loop\n" - baseContent = "profile: base\n# context_dir: .context\n" + baseContent = "profile: base\n" ) func chdirWithCleanup(t *testing.T, dir string) { t.Helper() origDir, _ := os.Getwd() _ = os.Chdir(dir) - rc.Reset() + testctx.Declare(t, dir) t.Cleanup(func() { _ = os.Chdir(origDir) rc.Reset() diff --git a/internal/cli/connection/core/config/config.go b/internal/cli/connection/core/config/config.go index f626c2327..e17f1e7ca 100644 --- a/internal/cli/connection/core/config/config.go +++ 
b/internal/cli/connection/core/config/config.go @@ -37,8 +37,12 @@ func Save(cfg Config) error { return encErr } + path, pathErr := filePath() + if pathErr != nil { + return pathErr + } return io.SafeWriteFile( - filePath(), encrypted, fs.PermSecret, + path, encrypted, fs.PermSecret, ) } @@ -50,9 +54,11 @@ func Save(cfg Config) error { func Load() (Config, error) { var cfg Config - encrypted, readErr := io.SafeReadUserFile( - filePath(), - ) + path, pathErr := filePath() + if pathErr != nil { + return cfg, pathErr + } + encrypted, readErr := io.SafeReadUserFile(path) if readErr != nil { return cfg, readErr } diff --git a/internal/cli/connection/core/config/path.go b/internal/cli/connection/core/config/path.go index e27bf5cee..c331e36de 100644 --- a/internal/cli/connection/core/config/path.go +++ b/internal/cli/connection/core/config/path.go @@ -18,8 +18,13 @@ import ( // // Returns: // - string: Absolute path to the encrypted connect file -func filePath() string { - return filepath.Join(rc.ContextDir(), cfgHub.FileConnect) +// - error: non-nil when the context directory is not declared +func filePath() (string, error) { + ctxDir, err := rc.ContextDir() + if err != nil { + return "", err + } + return filepath.Join(ctxDir, cfgHub.FileConnect), nil } // loadKey reads the encryption key from the global key diff --git a/internal/cli/connection/core/register/register.go b/internal/cli/connection/core/register/register.go index 2931daec5..13878602d 100644 --- a/internal/cli/connection/core/register/register.go +++ b/internal/cli/connection/core/register/register.go @@ -42,7 +42,12 @@ func Run( } defer func() { _ = client.Close() }() - projectName := filepath.Base(rc.ContextDir()) + ctxDir, ctxErr := rc.RequireContextDir() + if ctxErr != nil { + cmd.SilenceUsage = true + return ctxErr + } + projectName := filepath.Base(ctxDir) resp, regErr := client.Register( context.Background(), diff --git a/internal/cli/connection/core/render/render.go 
b/internal/cli/connection/core/render/render.go index 61f9ecefd..b3d84e4cd 100644 --- a/internal/cli/connection/core/render/render.go +++ b/internal/cli/connection/core/render/render.go @@ -25,7 +25,11 @@ import ( // Returns: // - error: non-nil if directory creation or write fails func WriteEntries(entries []hub.EntryMsg) error { - dir := filepath.Join(rc.ContextDir(), cfgHub.DirHub) + ctxDir, ctxErr := rc.ContextDir() + if ctxErr != nil { + return ctxErr + } + dir := filepath.Join(ctxDir, cfgHub.DirHub) if mkErr := io.SafeMkdirAll( dir, fs.PermKeyDir, ); mkErr != nil { diff --git a/internal/cli/connection/core/render/render_test.go b/internal/cli/connection/core/render/render_test.go index 6917a47ee..fc5aa3617 100644 --- a/internal/cli/connection/core/render/render_test.go +++ b/internal/cli/connection/core/render/render_test.go @@ -13,7 +13,7 @@ import ( "testing" "github.com/ActiveMemory/ctx/internal/hub" - "github.com/ActiveMemory/ctx/internal/rc" + "github.com/ActiveMemory/ctx/internal/testutil/testctx" ) func TestWriteEntries_CreatesFiles(t *testing.T) { @@ -28,7 +28,7 @@ func TestWriteEntries_CreatesFiles(t *testing.T) { t.Fatal(chErr) } defer func() { _ = os.Chdir(origDir) }() - rc.Reset() + testctx.Declare(t, tmpDir) entries := []hub.EntryMsg{ { @@ -95,7 +95,7 @@ func TestWriteEntries_AppendsToExisting(t *testing.T) { t.Fatal(chErr) } defer func() { _ = os.Chdir(origDir) }() - rc.Reset() + testctx.Declare(t, tmpDir) // Pre-populate a file. 
existing := "## Existing content\n\n" diff --git a/internal/cli/connection/core/sync/state.go b/internal/cli/connection/core/sync/state.go index a08787ba6..5763f386a 100644 --- a/internal/cli/connection/core/sync/state.go +++ b/internal/cli/connection/core/sync/state.go @@ -26,7 +26,11 @@ import ( // - error: Non-nil on I/O or lock-contention failure func loadState() (state, func(), error) { var s state - dir := filepath.Join(rc.ContextDir(), cfgHub.DirHub) + ctxDir, ctxErr := rc.ContextDir() + if ctxErr != nil { + return s, nil, ctxErr + } + dir := filepath.Join(ctxDir, cfgHub.DirHub) lockPath := filepath.Join(dir, cfgHub.FileSyncLock) if mkErr := io.SafeMkdirAll( @@ -76,7 +80,11 @@ func loadState() (state, func(), error) { // Returns: // - error: Non-nil on marshal or I/O failure func saveState(s state) error { - dir := filepath.Join(rc.ContextDir(), cfgHub.DirHub) + ctxDir, ctxErr := rc.ContextDir() + if ctxErr != nil { + return ctxErr + } + dir := filepath.Join(ctxDir, cfgHub.DirHub) data, marshalErr := json.MarshalIndent( s, "", cfgHub.JSONIndent, ) diff --git a/internal/cli/deactivate/cmd/root/cmd.go b/internal/cli/deactivate/cmd/root/cmd.go new file mode 100644 index 000000000..c2e7bf6d8 --- /dev/null +++ b/internal/cli/deactivate/cmd/root/cmd.go @@ -0,0 +1,58 @@ +// / ctx: https://ctx.ist +// ,'`./ do you remember? +// `.,'\ +// \ Copyright 2026-present Context contributors. +// SPDX-License-Identifier: Apache-2.0 + +package root + +import ( + "github.com/spf13/cobra" + + "github.com/ActiveMemory/ctx/internal/assets/read/desc" + "github.com/ActiveMemory/ctx/internal/config/cli" + "github.com/ActiveMemory/ctx/internal/config/embed/cmd" + embedFlag "github.com/ActiveMemory/ctx/internal/config/embed/flag" + cFlag "github.com/ActiveMemory/ctx/internal/config/flag" +) + +// Cmd returns the `ctx deactivate` cobra command. +// +// Accepts one flag: +// +// --shell override auto-detection (defaults to $SHELL). 
+// +// # Stdout discipline (critical) +// +// Same eval-recursion hazard as `ctx activate`: stdout is consumed +// by `eval "$(ctx deactivate)"`, so cobra must never print Usage / +// Flags / Examples on stdout (the Examples block contains the eval +// invocation literally). [SilenceUsage] is set unconditionally +// below; errors keep going to stderr via the root [SilenceErrors] +// settings. +// +// Returns: +// - *cobra.Command: configured deactivate command. +func Cmd() *cobra.Command { + short, long := desc.Command(cmd.DescKeyDeactivate) + c := &cobra.Command{ + Use: cmd.UseDeactivate, + Short: short, + Long: long, + Example: desc.Example(cmd.DescKeyDeactivate), + Args: cobra.NoArgs, + // Exempt from the global init / require-context-dir checks: + // `unset CTX_DIR` must work regardless of current state. + Annotations: map[string]string{cli.AnnotationSkipInit: cli.AnnotationTrue}, + // See the Stdout discipline note above. + SilenceUsage: true, + RunE: func(cmd *cobra.Command, _ []string) error { + shell, _ := cmd.Flags().GetString(cFlag.Shell) + return Run(cmd, shell) + }, + } + c.Flags().String(cFlag.Shell, "", + desc.Flag(embedFlag.DescKeyActivateShell), + ) + return c +} diff --git a/internal/cli/deactivate/cmd/root/doc.go b/internal/cli/deactivate/cmd/root/doc.go new file mode 100644 index 000000000..43b96fc90 --- /dev/null +++ b/internal/cli/deactivate/cmd/root/doc.go @@ -0,0 +1,19 @@ +// / ctx: https://ctx.ist +// ,'`./ do you remember? +// `.,'\ +// \ Copyright 2026-present Context contributors. +// SPDX-License-Identifier: Apache-2.0 + +// Package root implements the `ctx deactivate` cobra command. +// +// The command emits a shell-specific `unset CTX_DIR` statement to +// stdout, paired with `ctx activate` for symmetric shell integration. +// Like activate, deactivate is in the exempt allowlist: it does not +// require a declared context directory to run (clearing CTX_DIR when +// it is already unset is a harmless no-op). 
+// +// Usage: +// +// eval "$(ctx deactivate)" +// ctx deactivate --shell zsh +package root diff --git a/internal/cli/deactivate/cmd/root/run.go b/internal/cli/deactivate/cmd/root/run.go new file mode 100644 index 000000000..0db2507db --- /dev/null +++ b/internal/cli/deactivate/cmd/root/run.go @@ -0,0 +1,34 @@ +// / ctx: https://ctx.ist +// ,'`./ do you remember? +// `.,'\ +// \ Copyright 2026-present Context contributors. +// SPDX-License-Identifier: Apache-2.0 + +package root + +import ( + "github.com/spf13/cobra" + + "github.com/ActiveMemory/ctx/internal/cli/activate/core/emit" + writeActivate "github.com/ActiveMemory/ctx/internal/write/activate" +) + +// Run executes the `ctx deactivate` command: emit a shell-specific +// `unset CTX_DIR` statement to stdout so the caller can clear the +// binding via `eval "$(ctx deactivate)"`. +// +// The command never errors under normal operation; unsetting an +// already-unset variable is a no-op across supported shells. +// +// Parameters: +// - cmd: cobra command providing stdout. +// - shell: value of the --shell flag; empty auto-detects from +// $SHELL via emit.DetectShell. +// +// Returns: +// - error: always nil; kept in the signature for Cobra RunE +// compatibility. +func Run(cmd *cobra.Command, shell string) error { + writeActivate.Emit(cmd, emit.Unset(emit.DetectShell(shell))) + return nil +} diff --git a/internal/cli/deactivate/deactivate.go b/internal/cli/deactivate/deactivate.go new file mode 100644 index 000000000..e602527ea --- /dev/null +++ b/internal/cli/deactivate/deactivate.go @@ -0,0 +1,22 @@ +// / ctx: https://ctx.ist +// ,'`./ do you remember? +// `.,'\ +// \ Copyright 2026-present Context contributors. +// SPDX-License-Identifier: Apache-2.0 + +package deactivate + +import ( + "github.com/spf13/cobra" + + deactivateRoot "github.com/ActiveMemory/ctx/internal/cli/deactivate/cmd/root" +) + +// Cmd returns the `ctx deactivate` command for registration on the +// root ctx command. 
See cmd/root for the full command definition. +// +// Returns: +// - *cobra.Command: the deactivate command. +func Cmd() *cobra.Command { + return deactivateRoot.Cmd() +} diff --git a/internal/cli/deactivate/deactivate_test.go b/internal/cli/deactivate/deactivate_test.go new file mode 100644 index 000000000..a1a2427a8 --- /dev/null +++ b/internal/cli/deactivate/deactivate_test.go @@ -0,0 +1,73 @@ +// / ctx: https://ctx.ist +// ,'`./ do you remember? +// `.,'\ +// \ Copyright 2026-present Context contributors. +// SPDX-License-Identifier: Apache-2.0 + +package deactivate_test + +import ( + "bytes" + "strings" + "testing" + + "github.com/ActiveMemory/ctx/internal/cli/deactivate" +) + +// runDeactivate invokes `ctx deactivate` with the given args and +// returns (stdout, error). +func runDeactivate(t *testing.T, args []string) (string, error) { + t.Helper() + c := deactivate.Cmd() + c.SetArgs(args) + var out bytes.Buffer + c.SetOut(&out) + c.SetErr(&out) + err := c.Execute() + return out.String(), err +} + +// TestDeactivate_DefaultShell: no --shell flag → autodetect from +// $SHELL → bash emitter → `unset CTX_DIR`. +func TestDeactivate_DefaultShell(t *testing.T) { + t.Setenv("SHELL", "/bin/bash") + + stdout, err := runDeactivate(t, nil) + if err != nil { + t.Fatalf("unexpected err: %v", err) + } + if strings.TrimSpace(stdout) != "unset CTX_DIR" { + t.Errorf("stdout = %q, want 'unset CTX_DIR\\n'", stdout) + } +} + +// TestDeactivate_ExplicitZsh: --shell zsh → same POSIX unset +// statement (v1 bash/zsh/sh share syntax). +func TestDeactivate_ExplicitZsh(t *testing.T) { + stdout, err := runDeactivate(t, []string{"--shell", "zsh"}) + if err != nil { + t.Fatalf("unexpected err: %v", err) + } + if !strings.Contains(stdout, "unset CTX_DIR") { + t.Errorf("stdout missing unset: %q", stdout) + } +} + +// TestDeactivate_UnknownShell: unknown shell → POSIX unset fallback. 
+func TestDeactivate_UnknownShell(t *testing.T) { + stdout, err := runDeactivate(t, []string{"--shell", "rc"}) + if err != nil { + t.Fatalf("unexpected err: %v", err) + } + if !strings.Contains(stdout, "unset CTX_DIR") { + t.Errorf("stdout missing unset fallback: %q", stdout) + } +} + +// TestDeactivate_RejectsPositionalArgs: deactivate takes no args. +func TestDeactivate_RejectsPositionalArgs(t *testing.T) { + _, err := runDeactivate(t, []string{"unexpected-arg"}) + if err == nil { + t.Fatalf("expected error for positional arg, got nil") + } +} diff --git a/internal/cli/deactivate/doc.go b/internal/cli/deactivate/doc.go new file mode 100644 index 000000000..a8792267d --- /dev/null +++ b/internal/cli/deactivate/doc.go @@ -0,0 +1,27 @@ +// / ctx: https://ctx.ist +// ,'`./ do you remember? +// `.,'\ +// \ Copyright 2026-present Context contributors. +// SPDX-License-Identifier: Apache-2.0 + +// Package deactivate implements the `ctx deactivate` command. +// +// Deactivate is the counterpart to `ctx activate` under the +// explicit-context-dir resolution model. It emits a shell-specific +// `unset CTX_DIR` statement to stdout, intended for consumption via +// `eval "$(ctx deactivate)"`. +// +// The command does not touch the filesystem and does not scan for +// candidates. CTX_DIR can always be cleared safely regardless of +// which (if any) `.context/` directories are visible. +// +// # Subpackages +// +// cmd/root : cobra command definition and run logic. +// +// # Shell Support +// +// Deactivate shares the emit package with activate +// (internal/cli/activate/core/emit) so both commands stay in +// lockstep on supported shells. v1: bash, zsh, POSIX sh. +package deactivate diff --git a/internal/cli/deactivate/testmain_test.go b/internal/cli/deactivate/testmain_test.go new file mode 100644 index 000000000..e924bf4a3 --- /dev/null +++ b/internal/cli/deactivate/testmain_test.go @@ -0,0 +1,21 @@ +// / ctx: https://ctx.ist +// ,'`./ do you remember? 
+// `.,'\ +// \ Copyright 2026-present Context contributors. +// SPDX-License-Identifier: Apache-2.0 + +package deactivate_test + +import ( + "os" + "testing" + + "github.com/ActiveMemory/ctx/internal/assets/read/lookup" +) + +// TestMain initializes the embedded text-asset lookup so deactivate's +// command metadata (Use/Short/Long from cmd/root) resolves correctly. +func TestMain(m *testing.M) { + lookup.Init() + os.Exit(m.Run()) +} diff --git a/internal/cli/decision/cmd/reindex/run.go b/internal/cli/decision/cmd/reindex/run.go index 873fe768d..a01ac5ee8 100644 --- a/internal/cli/decision/cmd/reindex/run.go +++ b/internal/cli/decision/cmd/reindex/run.go @@ -26,7 +26,12 @@ import ( // Returns: // - error: Non-nil if the file read/write fails func Run(cmd *cobra.Command, _ []string) error { - filePath := filepath.Join(rc.ContextDir(), ctx.Decision) + ctxDir, err := rc.RequireContextDir() + if err != nil { + cmd.SilenceUsage = true + return err + } + filePath := filepath.Join(ctxDir, ctx.Decision) return index.Reindex( cmd.OutOrStdout(), filePath, diff --git a/internal/cli/decision/decision_test.go b/internal/cli/decision/decision_test.go index e0982bd9a..58227b703 100644 --- a/internal/cli/decision/decision_test.go +++ b/internal/cli/decision/decision_test.go @@ -14,6 +14,7 @@ import ( "github.com/ActiveMemory/ctx/internal/config/ctx" "github.com/ActiveMemory/ctx/internal/config/dir" "github.com/ActiveMemory/ctx/internal/rc" + "github.com/ActiveMemory/ctx/internal/testutil/testctx" ) func TestCmd(t *testing.T) { @@ -64,7 +65,7 @@ func TestRunReindex_NoFile(t *testing.T) { _ = os.Chdir(tempDir) defer func() { _ = os.Chdir(origDir) }() - rc.Reset() + testctx.Declare(t, tempDir) defer rc.Reset() cmd := Cmd() @@ -82,7 +83,7 @@ func TestRunReindex_WithFile(t *testing.T) { _ = os.Chdir(tempDir) defer func() { _ = os.Chdir(origDir) }() - rc.Reset() + testctx.Declare(t, tempDir) defer rc.Reset() // Create the context directory and DECISIONS.md file @@ -124,7 +125,7 @@ func 
TestRunReindex_EmptyFile(t *testing.T) { _ = os.Chdir(tempDir) defer func() { _ = os.Chdir(origDir) }() - rc.Reset() + testctx.Declare(t, tempDir) defer rc.Reset() // Create the context directory and empty DECISIONS.md diff --git a/internal/cli/doctor/cmd/root/run.go b/internal/cli/doctor/cmd/root/run.go index 2f97cadfe..da519b924 100644 --- a/internal/cli/doctor/cmd/root/run.go +++ b/internal/cli/doctor/cmd/root/run.go @@ -7,16 +7,30 @@ package root import ( + "errors" + "fmt" + "github.com/spf13/cobra" + "github.com/ActiveMemory/ctx/internal/assets/read/desc" "github.com/ActiveMemory/ctx/internal/cli/doctor/core/check" "github.com/ActiveMemory/ctx/internal/cli/doctor/core/output" + "github.com/ActiveMemory/ctx/internal/config/doctor" + "github.com/ActiveMemory/ctx/internal/config/embed/text" "github.com/ActiveMemory/ctx/internal/config/stats" + errCtx "github.com/ActiveMemory/ctx/internal/err/context" ) // Run executes the doctor command logic, running all health // checks and producing either JSON or human-readable output. // +// Context-dependent checks that fail with +// [errCtx.ErrDirNotDeclared] emit exactly one "did not run +// (cascade)" line; later dependent checks are silently skipped +// so the report shows one loud entry instead of N copies of the +// same message. Non-dependent checks (companion config, plugin, +// system resources, etc.) continue to run regardless. 
+// // Parameters: // - cmd: Cobra command for output stream // - jsonOutput: If true, output as JSON @@ -26,19 +40,111 @@ import ( func Run(cmd *cobra.Command, jsonOutput bool) error { report := &check.Report{} - check.ContextInitialized(report) - check.RequiredFiles(report) - check.CtxrcValidation(report) - check.Drift(report) - check.PluginEnablement(report) - check.CompanionConfig(report) - check.EventLogging(report) - check.Webhook(report) - check.Reminders(report) - check.TaskCompletion(report) - check.ContextTokenSize(report) - check.SystemResources(report) - check.RecentEventActivity(report) + entries := []check.Entry{ + { + Name: doctor.CheckContextInit, + Category: doctor.CategoryStructure, + Fn: check.ContextInitialized, + }, + { + Name: doctor.CheckRequiredFiles, + Category: doctor.CategoryStructure, + Fn: check.RequiredFiles, + }, + { + Name: doctor.CheckCtxrcValidation, + Category: doctor.CategoryStructure, + Fn: check.CtxrcValidation, + }, + { + Name: doctor.CheckDrift, + Category: doctor.CategoryQuality, + Fn: check.Drift, + }, + { + Name: doctor.CheckPluginInstalled, + Category: doctor.CategoryPlugin, + Fn: check.PluginEnablement, + }, + { + Name: doctor.CheckCompanionConfig, + Category: doctor.CategoryPlugin, + Fn: check.CompanionConfig, + }, + { + Name: doctor.CheckEventLogging, + Category: doctor.CategoryHooks, + Fn: check.EventLogging, + }, + { + Name: doctor.CheckWebhook, + Category: doctor.CategoryHooks, + Fn: check.Webhook, + }, + { + Name: doctor.CheckReminders, + Category: doctor.CategoryState, + Fn: check.Reminders, + }, + { + Name: doctor.CheckTaskCompletion, + Category: doctor.CategoryState, + Fn: check.TaskCompletion, + }, + { + Name: doctor.CheckContextSize, + Category: doctor.CategorySize, + Fn: check.ContextTokenSize, + }, + { + Name: doctor.CheckResourceMemory, + Category: doctor.CategoryResources, + Fn: check.SystemResources, + }, + { + Name: doctor.CheckRecentEvents, + Category: doctor.CategoryEvents, + Fn: 
check.RecentEventActivity, + }, + } + + // Track whether a context-dependent check has already + // failed due to errCtx.ErrDirNotDeclared. Subsequent + // dependent failures with the same root cause are folded + // into a single diagnostic. + ctxCascadeAnnounced := false + + for _, entry := range entries { + err := entry.Fn(report) + if err == nil { + continue + } + if errors.Is(err, errCtx.ErrDirNotDeclared) { + if ctxCascadeAnnounced { + // Already reported once; skip silently. + continue + } + ctxCascadeAnnounced = true + report.Results = append(report.Results, check.Result{ + Name: entry.Name, + Category: entry.Category, + Status: stats.StatusError, + Message: fmt.Sprintf(desc.Text( + text.DescKeyDoctorCheckDidNotRunCascade, + ), err), + }) + continue + } + // Non-cascade error: attribute to the specific check. + report.Results = append(report.Results, check.Result{ + Name: entry.Name, + Category: entry.Category, + Status: stats.StatusError, + Message: fmt.Sprintf( + desc.Text(text.DescKeyDoctorCheckDidNotRun), err, + ), + }) + } // Count warnings and errors. for _, r := range report.Results { diff --git a/internal/cli/doctor/core/check/check.go b/internal/cli/doctor/core/check/check.go index 3cf71e5f0..0844d523f 100644 --- a/internal/cli/doctor/core/check/check.go +++ b/internal/cli/doctor/core/check/check.go @@ -8,6 +8,7 @@ package check import ( "encoding/json" + "errors" "fmt" "os" "path/filepath" @@ -32,6 +33,7 @@ import ( "github.com/ActiveMemory/ctx/internal/context/validate" "github.com/ActiveMemory/ctx/internal/drift" "github.com/ActiveMemory/ctx/internal/entity" + errCtx "github.com/ActiveMemory/ctx/internal/err/context" "github.com/ActiveMemory/ctx/internal/io" "github.com/ActiveMemory/ctx/internal/log/event" "github.com/ActiveMemory/ctx/internal/rc" @@ -39,12 +41,23 @@ import ( ) // ContextInitialized verifies that a .context/ directory -// exists. +// exists. 
Always emits a Result of its own; a missing directory IS +// the diagnostic and maps to StatusError. A resolver or stat failure +// that cannot confirm either way is propagated so the runner shows +// "did not run" instead of reporting a confident "missing." // // Parameters: // - report: Report to append the result to -func ContextInitialized(report *Report) { - if validate.Exists("") { +// +// Returns: +// - error: non-nil when validate.Exists cannot reach a definitive +// answer (resolver or stat failure). +func ContextInitialized(report *Report) error { + exists, existsErr := validate.Exists("") + if existsErr != nil { + return existsErr + } + if exists { report.Results = append(report.Results, Result{ Name: doctor.CheckContextInit, Category: doctor.CategoryStructure, @@ -59,6 +72,7 @@ func ContextInitialized(report *Report) { Message: desc.Text(text.DescKeyDoctorContextInitializedError), }) } + return nil } // RequiredFiles verifies that all required context files are @@ -66,8 +80,16 @@ func ContextInitialized(report *Report) { // // Parameters: // - report: Report to append the result to -func RequiredFiles(report *Report) { - dir := rc.ContextDir() +// +// Returns: +// - error: [errCtx.ErrDirNotDeclared] when the context directory +// cannot be resolved; the runner renders a standard "did not run" +// line in that case. +func RequiredFiles(report *Report) error { + dir, err := rc.ContextDir() + if err != nil { + return err + } var missing []string for _, f := range ctx.FilesRequired { path := filepath.Join(dir, f) @@ -101,6 +123,7 @@ func RequiredFiles(report *Report) { ), }) } + return nil } // CtxrcValidation validates the .ctxrc file for unknown @@ -108,7 +131,11 @@ func RequiredFiles(report *Report) { // // Parameters: // - report: Report to append the result to -func CtxrcValidation(report *Report) { +// +// Returns: +// - error: always nil; parse problems are reported as +// StatusError/StatusWarning entries rather than returned. 
+func CtxrcValidation(report *Report) error { data, readErr := io.SafeReadUserFile(file.CtxRC) if readErr != nil { // No .ctxrc is fine - defaults are used. @@ -118,7 +145,7 @@ func CtxrcValidation(report *Report) { Status: stats.StatusOK, Message: desc.Text(text.DescKeyDoctorCtxrcValidationOkNoFile), }) - return + return nil } warnings, validateErr := rc.Validate(data) @@ -132,7 +159,7 @@ func CtxrcValidation(report *Report) { validateErr, ), }) - return + return nil } if len(warnings) > 0 { @@ -148,7 +175,7 @@ func CtxrcValidation(report *Report) { ), ), }) - return + return nil } report.Results = append(report.Results, Result{ @@ -157,6 +184,7 @@ func CtxrcValidation(report *Report) { Status: stats.StatusOK, Message: desc.Text(text.DescKeyDoctorCtxrcValidationOk), }) + return nil } // Drift detects stale paths or missing files referenced in @@ -164,13 +192,18 @@ func CtxrcValidation(report *Report) { // // Parameters: // - report: Report to append the result to -func Drift(report *Report) { - if !validate.Exists("") { - return // skip drift check if not initialized - } - +// +// Returns: +// - error: [errCtx.ErrDirNotDeclared] when the context directory +// cannot be resolved via [load.Do]; the runner renders a standard +// "did not run" line in that case. Transient load failures are +// reported inline as a StatusWarning and return nil. 
+func Drift(report *Report) error { c, loadErr := load.Do("") if loadErr != nil { + if errors.Is(loadErr, errCtx.ErrDirNotDeclared) { + return loadErr + } report.Results = append(report.Results, Result{ Name: doctor.CheckDrift, Category: doctor.CategoryQuality, @@ -180,7 +213,7 @@ func Drift(report *Report) { loadErr, ), }) - return + return nil } driftReport := drift.Detect(c) @@ -194,7 +227,7 @@ func Drift(report *Report) { Status: stats.StatusOK, Message: desc.Text(text.DescKeyDoctorDriftOk), }) - return + return nil } var parts []string @@ -231,6 +264,7 @@ func Drift(report *Report) { strings.Join(parts, cfgToken.CommaSpace), ), }) + return nil } // CompanionConfig reports whether companion tool checks @@ -238,7 +272,10 @@ func Drift(report *Report) { // // Parameters: // - report: Report to append the result to -func CompanionConfig(report *Report) { +// +// Returns: +// - error: always nil. +func CompanionConfig(report *Report) error { if rc.CompanionCheck() { report.Results = append(report.Results, Result{ Name: doctor.CheckCompanionConfig, @@ -254,6 +291,7 @@ func CompanionConfig(report *Report) { Message: desc.Text(text.DescKeyDoctorCompanionConfigInfo), }) } + return nil } // PluginEnablement checks whether the ctx plugin is @@ -261,7 +299,10 @@ func CompanionConfig(report *Report) { // // Parameters: // - report: Report to append the result to -func PluginEnablement(report *Report) { +// +// Returns: +// - error: always nil. +func PluginEnablement(report *Report) error { installed := initCore.Installed() if !installed { report.Results = append(report.Results, Result{ @@ -270,7 +311,7 @@ func PluginEnablement(report *Report) { Status: stats.StatusInfo, Message: desc.Text(text.DescKeyDoctorPluginInstalledInfo), }) - return + return nil } report.Results = append(report.Results, Result{ @@ -313,13 +354,17 @@ func PluginEnablement(report *Report) { ), }) } + return nil } // EventLogging checks whether event logging is enabled. 
// // Parameters: // - report: Report to append the result to -func EventLogging(report *Report) { +// +// Returns: +// - error: always nil. +func EventLogging(report *Report) error { if rc.EventLog() { report.Results = append(report.Results, Result{ Name: doctor.CheckEventLogging, @@ -335,6 +380,7 @@ func EventLogging(report *Report) { Message: desc.Text(text.DescKeyDoctorEventLoggingInfo), }) } + return nil } // Webhook checks whether a webhook notification endpoint @@ -342,8 +388,16 @@ func EventLogging(report *Report) { // // Parameters: // - report: Report to append the result to -func Webhook(report *Report) { - dir := rc.ContextDir() +// +// Returns: +// - error: [errCtx.ErrDirNotDeclared] when the context directory +// cannot be resolved; the runner renders a standard "did not run" +// line in that case. +func Webhook(report *Report) error { + dir, err := rc.ContextDir() + if err != nil { + return err + } encPath := filepath.Join(dir, crypto.NotifyEnc) if _, statErr := os.Stat(encPath); statErr == nil { report.Results = append(report.Results, Result{ @@ -360,6 +414,7 @@ func Webhook(report *Report) { Message: desc.Text(text.DescKeyDoctorWebhookInfo), }) } + return nil } // Reminders checks for pending reminders in the context @@ -367,18 +422,31 @@ func Webhook(report *Report) { // // Parameters: // - report: Report to append the result to -func Reminders(report *Report) { - dir := rc.ContextDir() +// +// Returns: +// - error: [errCtx.ErrDirNotDeclared] when the context directory +// cannot be resolved; the runner renders a standard "did not run" +// line in that case. 
+func Reminders(report *Report) error { + dir, err := rc.ContextDir() + if err != nil { + return err + } remindersPath := filepath.Join(dir, reminder.File) data, readErr := io.SafeReadUserFile(remindersPath) if readErr != nil { - report.Results = append(report.Results, Result{ - Name: doctor.CheckReminders, - Category: doctor.CategoryState, - Status: stats.StatusOK, - Message: desc.Text(text.DescKeyDoctorRemindersOk), - }) - return + if errors.Is(readErr, os.ErrNotExist) { + // Legitimate: no reminders file ⇒ no pending reminders. + report.Results = append(report.Results, Result{ + Name: doctor.CheckReminders, + Category: doctor.CategoryState, + Status: stats.StatusOK, + Message: desc.Text(text.DescKeyDoctorRemindersOk), + }) + return nil + } + // Permission denied, I/O error, etc.: surface it. + return readErr } var reminders []any @@ -391,7 +459,7 @@ func Reminders(report *Report) { Status: stats.StatusOK, Message: desc.Text(text.DescKeyDoctorRemindersOk), }) - return + return nil } count := len(reminders) @@ -413,6 +481,7 @@ func Reminders(report *Report) { ), }) } + return nil } // TaskCompletion analyzes the task completion ratio and @@ -420,12 +489,24 @@ func Reminders(report *Report) { // // Parameters: // - report: Report to append the result to -func TaskCompletion(report *Report) { - dir := rc.ContextDir() +// +// Returns: +// - error: [errCtx.ErrDirNotDeclared] when the context directory +// cannot be resolved; a missing TASKS.md ([os.ErrNotExist]) is a +// legitimate skip and returns nil; any other read failure +// (permissions, I/O) is propagated so the runner can report it. 
+func TaskCompletion(report *Report) error { + dir, err := rc.ContextDir() + if err != nil { + return err + } tasksPath := filepath.Join(dir, ctx.Task) data, readErr := io.SafeReadUserFile(tasksPath) if readErr != nil { - return // no tasks file, skip + if errors.Is(readErr, os.ErrNotExist) { + return nil // legitimate: no TASKS.md yet, nothing to analyze + } + return readErr } matches := regex.TaskMultiline.FindAllStringSubmatch( @@ -442,7 +523,7 @@ func TaskCompletion(report *Report) { total := completed + pending if total == 0 { - return // no tasks to report on + return nil // no tasks to report on } ratio := completed * stats.PercentMultiplier / total @@ -470,6 +551,7 @@ func TaskCompletion(report *Report) { Message: msg, }) } + return nil } // ContextTokenSize estimates context token usage and @@ -477,7 +559,12 @@ func TaskCompletion(report *Report) { // // Parameters: // - report: Report to append the result to -func ContextTokenSize(report *Report) { +// +// Returns: +// - error: [errCtx.ErrDirNotDeclared] when context load fails +// for that reason; the runner renders a standard "did not run" +// line. Other load failures return nil without emitting a Result. 
+func ContextTokenSize(report *Report) error { indexed := make( map[string]bool, len(ctx.ReadOrder), ) @@ -488,7 +575,10 @@ func ContextTokenSize(report *Report) { var totalTokens int c, loadErr := load.Do("") if loadErr != nil { - return + if errors.Is(loadErr, errCtx.ErrDirNotDeclared) { + return loadErr + } + return nil } type fileTokens struct { @@ -545,6 +635,7 @@ func ContextTokenSize(report *Report) { ), }) } + return nil } // RecentEventActivity reports the most recent event log @@ -552,22 +643,41 @@ func ContextTokenSize(report *Report) { // // Parameters: // - report: Report to append the result to -func RecentEventActivity(report *Report) { +// +// Returns: +// - error: [errCtx.ErrDirNotDeclared] when the event log path +// cannot be resolved because no context directory is declared; +// the runner renders a standard "did not run" line. Transient +// read or parse failures return nil and emit a StatusInfo +// placeholder. +func RecentEventActivity(report *Report) error { if !rc.EventLog() { - return // skip if logging disabled + return nil // skip if logging disabled } events, queryErr := event.Query( entity.EventQueryOpts{Last: 1}, ) - if queryErr != nil || len(events) == 0 { + if queryErr != nil { + if errors.Is(queryErr, errCtx.ErrDirNotDeclared) { + return queryErr + } + report.Results = append(report.Results, Result{ + Name: doctor.CheckRecentEvents, + Category: doctor.CategoryEvents, + Status: stats.StatusInfo, + Message: desc.Text(text.DescKeyDoctorRecentEventsInfo), + }) + return nil + } + if len(events) == 0 { report.Results = append(report.Results, Result{ Name: doctor.CheckRecentEvents, Category: doctor.CategoryEvents, Status: stats.StatusInfo, Message: desc.Text(text.DescKeyDoctorRecentEventsInfo), }) - return + return nil } report.Results = append(report.Results, Result{ @@ -579,6 +689,7 @@ func RecentEventActivity(report *Report) { events[len(events)-1].Timestamp, ), }) + return nil } // SystemResources collects and evaluates system 
resource @@ -586,9 +697,13 @@ func RecentEventActivity(report *Report) { // // Parameters: // - report: Report to append the result to -func SystemResources(report *Report) { +// +// Returns: +// - error: always nil. +func SystemResources(report *Report) error { snap := sysinfo.Collect() AddResourceResults(report, snap) + return nil } // AddResourceResults appends per-metric resource results to diff --git a/internal/cli/doctor/core/check/types.go b/internal/cli/doctor/core/check/types.go index 1549f9a80..9311cf051 100644 --- a/internal/cli/doctor/core/check/types.go +++ b/internal/cli/doctor/core/check/types.go @@ -32,3 +32,19 @@ type Report struct { Warnings int `json:"warnings"` Errors int `json:"errors"` } + +// Entry pairs a check function with the name/category to attribute +// a failure to. The runner uses an ordered slice of Entry values to +// produce a uniform "did not run" line when a check returns an +// error, instead of every check having to emit its own failure +// Result for the same cause. +// +// Fields: +// - Name: Machine-readable identifier to attribute failures to +// - Category: Grouping label (Structure, Quality, etc.) 
+// - Fn: The check function itself +type Entry struct { + Name string + Category string + Fn func(*Report) error +} diff --git a/internal/cli/doctor/doctor_test.go b/internal/cli/doctor/doctor_test.go index 931ad3b4a..2790208de 100644 --- a/internal/cli/doctor/doctor_test.go +++ b/internal/cli/doctor/doctor_test.go @@ -17,6 +17,7 @@ import ( "github.com/ActiveMemory/ctx/internal/cli/doctor/core/check" "github.com/ActiveMemory/ctx/internal/config/claude" "github.com/ActiveMemory/ctx/internal/config/ctx" + cfgDir "github.com/ActiveMemory/ctx/internal/config/dir" "github.com/ActiveMemory/ctx/internal/config/doctor" "github.com/ActiveMemory/ctx/internal/config/stats" "github.com/ActiveMemory/ctx/internal/rc" @@ -25,7 +26,10 @@ import ( func setupContextDir(t *testing.T) string { t.Helper() - dir := t.TempDir() + dir := filepath.Join(t.TempDir(), cfgDir.Context) + if mkErr := os.MkdirAll(dir, 0o700); mkErr != nil { + t.Fatal(mkErr) + } t.Setenv("CTX_DIR", dir) rc.Reset() @@ -64,7 +68,10 @@ func TestDoctor_Healthy(t *testing.T) { } func TestDoctor_MissingRequiredFiles(t *testing.T) { - dir := t.TempDir() + dir := filepath.Join(t.TempDir(), cfgDir.Context) + if mkErr := os.MkdirAll(dir, 0o700); mkErr != nil { + t.Fatal(mkErr) + } t.Setenv("CTX_DIR", dir) rc.Reset() @@ -541,7 +548,9 @@ func TestCheckCtxrcValidation_NoFile(t *testing.T) { t.Cleanup(func() { _ = os.Chdir(orig) }) report := &check.Report{} - check.CtxrcValidation(report) + if err := check.CtxrcValidation(report); err != nil { + t.Fatalf("CtxrcValidation: %v", err) + } if len(report.Results) != 1 { t.Fatalf("expected 1 result, got %d", len(report.Results)) @@ -574,7 +583,9 @@ func TestCheckCtxrcValidation_ValidFile(t *testing.T) { t.Cleanup(func() { _ = os.Chdir(orig) }) report := &check.Report{} - check.CtxrcValidation(report) + if err := check.CtxrcValidation(report); err != nil { + t.Fatalf("CtxrcValidation: %v", err) + } if len(report.Results) != 1 { t.Fatalf("expected 1 result, got %d", 
len(report.Results)) @@ -607,7 +618,9 @@ func TestCheckCtxrcValidation_Typo(t *testing.T) { t.Cleanup(func() { _ = os.Chdir(orig) }) report := &check.Report{} - check.CtxrcValidation(report) + if err := check.CtxrcValidation(report); err != nil { + t.Fatalf("CtxrcValidation: %v", err) + } if len(report.Results) != 1 { t.Fatalf("expected 1 result, got %d", len(report.Results)) diff --git a/internal/cli/drift/cmd/root/run.go b/internal/cli/drift/cmd/root/run.go index 62d62b91f..7c88c7640 100644 --- a/internal/cli/drift/cmd/root/run.go +++ b/internal/cli/drift/cmd/root/run.go @@ -17,6 +17,7 @@ import ( "github.com/ActiveMemory/ctx/internal/drift" errCtx "github.com/ActiveMemory/ctx/internal/err/context" errInit "github.com/ActiveMemory/ctx/internal/err/initialize" + "github.com/ActiveMemory/ctx/internal/rc" writeDrift "github.com/ActiveMemory/ctx/internal/write/drift" ) @@ -36,6 +37,10 @@ import ( func Run( cmd *cobra.Command, jsonOutput, doFix bool, ) error { + if _, ctxErr := rc.RequireContextDir(); ctxErr != nil { + cmd.SilenceUsage = true + return ctxErr + } ctx, err := load.Do("") if err != nil { if _, ok := errors.AsType[*errCtx.NotFoundError](err); ok { diff --git a/internal/cli/drift/core/fix/fix.go b/internal/cli/drift/core/fix/fix.go index efeb03f6f..29f5e040f 100644 --- a/internal/cli/drift/core/fix/fix.go +++ b/internal/cli/drift/core/fix/fix.go @@ -201,12 +201,16 @@ func MissingFile(filename string) error { return prompt.NoTemplate(filename, err) } - targetPath := filepath.Join(rc.ContextDir(), filename) + ctxDir, ctxErr := rc.ContextDir() + if ctxErr != nil { + return ctxErr + } + targetPath := filepath.Join(ctxDir, filename) if mkErr := ctxIo.SafeMkdirAll( - rc.ContextDir(), fs.PermExec, + ctxDir, fs.PermExec, ); mkErr != nil { - return errFs.Mkdir(rc.ContextDir(), mkErr) + return errFs.Mkdir(ctxDir, mkErr) } if writeErr := ctxIo.SafeWriteFile( diff --git a/internal/cli/drift/drift_test.go b/internal/cli/drift/drift_test.go index f1b1abc9b..111948bf7 
100644 --- a/internal/cli/drift/drift_test.go +++ b/internal/cli/drift/drift_test.go @@ -16,8 +16,10 @@ import ( "github.com/ActiveMemory/ctx/internal/cli/initialize" "github.com/ActiveMemory/ctx/internal/config/ctx" "github.com/ActiveMemory/ctx/internal/config/dir" + "github.com/ActiveMemory/ctx/internal/config/env" "github.com/ActiveMemory/ctx/internal/io" "github.com/ActiveMemory/ctx/internal/rc" + "github.com/ActiveMemory/ctx/internal/testutil/testctx" ) // TestDriftCommand tests the drift command. @@ -34,6 +36,8 @@ func TestDriftCommand(t *testing.T) { } defer func() { _ = os.Chdir(origDir) }() + testctx.Declare(t, tmpDir) + // First init initCmd := initialize.Cmd() initCmd.SetArgs([]string{}) @@ -64,6 +68,8 @@ func TestDriftJSONOutput(t *testing.T) { } defer func() { _ = os.Chdir(origDir) }() + testctx.Declare(t, tmpDir) + // First init initCmd := initialize.Cmd() initCmd.SetArgs([]string{}) @@ -92,6 +98,7 @@ func TestRunDrift_NoContext(t *testing.T) { t.Fatalf("failed to chdir: %v", err) } defer func() { _ = os.Chdir(origDir) }() + t.Setenv(env.CtxDir, "") rc.Reset() defer rc.Reset() @@ -105,7 +112,9 @@ func TestRunDrift_NoContext(t *testing.T) { if runErr == nil { t.Fatal("expected error when no .context/ exists") } - if !strings.Contains(runErr.Error(), "not initialized") { + // Under the explicit-context-dir model, the error is "no context + // directory specified" because nothing declared one. 
+ if !strings.Contains(runErr.Error(), "context directory") { t.Errorf("unexpected error: %v", runErr) } } @@ -123,7 +132,7 @@ func setupContextDir(t *testing.T) (string, func()) { t.Fatalf("failed to chdir: %v", err) } - rc.Reset() + testctx.Declare(t, tmpDir) initCmd := initialize.Cmd() initCmd.SetArgs([]string{}) @@ -251,6 +260,7 @@ func TestRunDrift_GenericError(t *testing.T) { t.Fatalf("failed to chdir: %v", err) } defer func() { _ = os.Chdir(origDir) }() + t.Setenv(env.CtxDir, filepath.Join(tmpDir, dir.Context)) rc.Reset() defer rc.Reset() diff --git a/internal/cli/fmt/cmd/root/run.go b/internal/cli/fmt/cmd/root/run.go index 0c1526a1e..debdc4321 100644 --- a/internal/cli/fmt/cmd/root/run.go +++ b/internal/cli/fmt/cmd/root/run.go @@ -40,7 +40,11 @@ var contextFiles = []string{ // - error: Non-nil if context directory is missing or file // operations fail; exits 1 in check mode if files would change func Run(cmd *cobra.Command, width int, check bool) error { - ctxDir := rc.ContextDir() + ctxDir, ctxErr := rc.RequireContextDir() + if ctxErr != nil { + cmd.SilenceUsage = true + return ctxErr + } if _, statErr := os.Stat(ctxDir); os.IsNotExist(statErr) { return errFmt.NoContextDir() } diff --git a/internal/cli/hub/cmd/peer/cmd.go b/internal/cli/hub/cmd/peer/cmd.go index 97e22be97..b79386698 100644 --- a/internal/cli/hub/cmd/peer/cmd.go +++ b/internal/cli/hub/cmd/peer/cmd.go @@ -11,6 +11,7 @@ import ( "github.com/ActiveMemory/ctx/internal/assets/read/desc" corePeer "github.com/ActiveMemory/ctx/internal/cli/hub/core/peer" + "github.com/ActiveMemory/ctx/internal/config/cli" "github.com/ActiveMemory/ctx/internal/config/embed/cmd" ) @@ -27,6 +28,9 @@ func Cmd() *cobra.Command { Long: long, Example: desc.Example(cmd.DescKeyHubPeer), Args: cobra.ExactArgs(2), - RunE: corePeer.Run, + // Hub stores at ~/.ctx/hub-data/, not .context/. + // Spec: specs/single-source-context-anchor.md. 
+ Annotations: map[string]string{cli.AnnotationSkipInit: cli.AnnotationTrue}, + RunE: corePeer.Run, } } diff --git a/internal/cli/hub/cmd/peer/cmd_test.go b/internal/cli/hub/cmd/peer/cmd_test.go new file mode 100644 index 000000000..3bd4acd53 --- /dev/null +++ b/internal/cli/hub/cmd/peer/cmd_test.go @@ -0,0 +1,24 @@ +// / ctx: https://ctx.ist +// ,'`./ do you remember? +// `.,'\ +// \ Copyright 2026-present Context contributors. +// SPDX-License-Identifier: Apache-2.0 + +package peer + +import ( + "testing" + + "github.com/ActiveMemory/ctx/internal/config/cli" +) + +// TestHubPeer_AnnotationSkipInit guards the hub-bypass contract. +// Spec: specs/single-source-context-anchor.md. +func TestHubPeer_AnnotationSkipInit(t *testing.T) { + c := Cmd() + if got, ok := c.Annotations[cli.AnnotationSkipInit]; !ok { + t.Errorf("hub peer: missing AnnotationSkipInit annotation") + } else if got != cli.AnnotationTrue { + t.Errorf("hub peer: AnnotationSkipInit = %q, want %q", got, cli.AnnotationTrue) + } +} diff --git a/internal/cli/hub/cmd/start/cmd.go b/internal/cli/hub/cmd/start/cmd.go index 38295d028..7d1ba48e7 100644 --- a/internal/cli/hub/cmd/start/cmd.go +++ b/internal/cli/hub/cmd/start/cmd.go @@ -11,6 +11,7 @@ import ( "github.com/ActiveMemory/ctx/internal/assets/read/desc" "github.com/ActiveMemory/ctx/internal/cli/hub/core/server" + "github.com/ActiveMemory/ctx/internal/config/cli" "github.com/ActiveMemory/ctx/internal/config/embed/cmd" "github.com/ActiveMemory/ctx/internal/config/embed/flag" cFlag "github.com/ActiveMemory/ctx/internal/config/flag" @@ -41,6 +42,11 @@ func Cmd() *cobra.Command { Long: long, Example: desc.Example(cmd.DescKeyHubStart), Args: cobra.NoArgs, + // Hub stores at ~/.ctx/hub-data/, never reads .context/. + // Exempt from the require-context-dir gate so AWS/EKS hub + // users hit no-broken-windows on first contact. + // Spec: specs/single-source-context-anchor.md. 
+ Annotations: map[string]string{cli.AnnotationSkipInit: cli.AnnotationTrue}, RunE: func(cobraCmd *cobra.Command, _ []string) error { if isDaemon { return server.RunDaemon( diff --git a/internal/cli/hub/cmd/start/cmd_test.go b/internal/cli/hub/cmd/start/cmd_test.go new file mode 100644 index 000000000..fecf6a361 --- /dev/null +++ b/internal/cli/hub/cmd/start/cmd_test.go @@ -0,0 +1,29 @@ +// / ctx: https://ctx.ist +// ,'`./ do you remember? +// `.,'\ +// \ Copyright 2026-present Context contributors. +// SPDX-License-Identifier: Apache-2.0 + +package start + +import ( + "testing" + + "github.com/ActiveMemory/ctx/internal/config/cli" +) + +// TestHubStart_AnnotationSkipInit verifies the hub start subcommand +// carries the AnnotationSkipInit annotation. Hub uses +// ~/.ctx/hub-data/, never reads .context/, and must bypass the +// require-context-dir gate so AWS/EKS hub users hit no broken +// windows on first contact. +// +// Spec: specs/single-source-context-anchor.md. +func TestHubStart_AnnotationSkipInit(t *testing.T) { + c := Cmd() + if got, ok := c.Annotations[cli.AnnotationSkipInit]; !ok { + t.Errorf("hub start: missing AnnotationSkipInit annotation") + } else if got != cli.AnnotationTrue { + t.Errorf("hub start: AnnotationSkipInit = %q, want %q", got, cli.AnnotationTrue) + } +} diff --git a/internal/cli/hub/cmd/status/cmd.go b/internal/cli/hub/cmd/status/cmd.go index c0f2405ce..f83fc69c4 100644 --- a/internal/cli/hub/cmd/status/cmd.go +++ b/internal/cli/hub/cmd/status/cmd.go @@ -11,6 +11,7 @@ import ( "github.com/ActiveMemory/ctx/internal/assets/read/desc" coreStatus "github.com/ActiveMemory/ctx/internal/cli/hub/core/status" + "github.com/ActiveMemory/ctx/internal/config/cli" "github.com/ActiveMemory/ctx/internal/config/embed/cmd" ) @@ -27,6 +28,9 @@ func Cmd() *cobra.Command { Long: long, Example: desc.Example(cmd.DescKeyHubStatus), Args: cobra.NoArgs, - RunE: coreStatus.Run, + // Hub stores at ~/.ctx/hub-data/, not .context/. 
+ // Spec: specs/single-source-context-anchor.md. + Annotations: map[string]string{cli.AnnotationSkipInit: cli.AnnotationTrue}, + RunE: coreStatus.Run, } } diff --git a/internal/cli/hub/cmd/status/cmd_test.go b/internal/cli/hub/cmd/status/cmd_test.go new file mode 100644 index 000000000..8663e2d7a --- /dev/null +++ b/internal/cli/hub/cmd/status/cmd_test.go @@ -0,0 +1,24 @@ +// / ctx: https://ctx.ist +// ,'`./ do you remember? +// `.,'\ +// \ Copyright 2026-present Context contributors. +// SPDX-License-Identifier: Apache-2.0 + +package status + +import ( + "testing" + + "github.com/ActiveMemory/ctx/internal/config/cli" +) + +// TestHubStatus_AnnotationSkipInit guards the hub-bypass contract. +// Spec: specs/single-source-context-anchor.md. +func TestHubStatus_AnnotationSkipInit(t *testing.T) { + c := Cmd() + if got, ok := c.Annotations[cli.AnnotationSkipInit]; !ok { + t.Errorf("hub status: missing AnnotationSkipInit annotation") + } else if got != cli.AnnotationTrue { + t.Errorf("hub status: AnnotationSkipInit = %q, want %q", got, cli.AnnotationTrue) + } +} diff --git a/internal/cli/hub/cmd/status/integration_test.go b/internal/cli/hub/cmd/status/integration_test.go new file mode 100644 index 000000000..2625e7e69 --- /dev/null +++ b/internal/cli/hub/cmd/status/integration_test.go @@ -0,0 +1,73 @@ +// / ctx: https://ctx.ist +// ,'`./ do you remember? +// `.,'\ +// \ Copyright 2026-present Context contributors. +// SPDX-License-Identifier: Apache-2.0 + +package status_test + +import ( + "errors" + "path/filepath" + "testing" + + "github.com/spf13/cobra" + + "github.com/ActiveMemory/ctx/internal/bootstrap" + "github.com/ActiveMemory/ctx/internal/cli/hub/cmd/status" + "github.com/ActiveMemory/ctx/internal/config/dir" + "github.com/ActiveMemory/ctx/internal/config/env" + errCtx "github.com/ActiveMemory/ctx/internal/err/context" + "github.com/ActiveMemory/ctx/internal/rc" +) + +// discardWriter silences command output in tests. 
+type discardWriter struct{} + +func (discardWriter) Write(p []byte) (int, error) { return len(p), nil } + +// TestHubStatus_BypassesPreRunEGate is the integration-style smoke +// test required by the spec. Builds a root command tree as +// production does (via bootstrap.RootCmd), wires this hub status +// subcommand onto a "hub" parent, and runs with CTX_DIR pointing at +// a deliberately-non-existent path. The PreRunE gate must NOT +// short-circuit with ErrDirNotDeclared. +// +// Without this guard, a future refactor that breaks PreRunE's +// annotation handling could leave the annotation in place but +// regress the actual bypass behavior. +// +// Spec: specs/single-source-context-anchor.md. +// +// The test lives in package `status_test` to avoid an import cycle +// (bootstrap → cli/hub → cli/hub/cmd/status). External-test packages +// are exempt from cycle detection. +func TestHubStatus_BypassesPreRunEGate(t *testing.T) { + // Wire CTX_DIR to a deliberately-non-existent shape-valid path + // so RequireContextDir would fail loud if PreRunE actually ran. + t.Setenv(env.CtxDir, filepath.Join(t.TempDir(), "absent", dir.Context)) + rc.Reset() + t.Cleanup(rc.Reset) + + root := bootstrap.RootCmd() + + // Build a hub parent (matches the production tree shape). + hub := &cobra.Command{ + Use: "hub", + Short: "ctx Hub", + } + hub.AddCommand(status.Cmd()) + root.AddCommand(hub) + + root.SetOut(&discardWriter{}) + root.SetErr(&discardWriter{}) + root.SetArgs([]string{"hub", "status"}) + + err := root.Execute() + // Server is not running so coreStatus.Run will return its own + // connect error — that's fine. The contract: the error must + // NOT be the gate's "context dir not declared" sentinel. 
+ if errors.Is(err, errCtx.ErrDirNotDeclared) { + t.Errorf("hub status: PreRunE gate short-circuited with ErrDirNotDeclared (annotation bypass broken)") + } +} diff --git a/internal/cli/hub/cmd/stepdown/cmd.go b/internal/cli/hub/cmd/stepdown/cmd.go index f9449d453..b915cf0e7 100644 --- a/internal/cli/hub/cmd/stepdown/cmd.go +++ b/internal/cli/hub/cmd/stepdown/cmd.go @@ -11,6 +11,7 @@ import ( "github.com/ActiveMemory/ctx/internal/assets/read/desc" coreStep "github.com/ActiveMemory/ctx/internal/cli/hub/core/stepdown" + "github.com/ActiveMemory/ctx/internal/config/cli" "github.com/ActiveMemory/ctx/internal/config/embed/cmd" ) @@ -27,6 +28,9 @@ func Cmd() *cobra.Command { Long: long, Example: desc.Example(cmd.DescKeyHubStepdown), Args: cobra.NoArgs, - RunE: coreStep.Run, + // Hub stores at ~/.ctx/hub-data/, not .context/. + // Spec: specs/single-source-context-anchor.md. + Annotations: map[string]string{cli.AnnotationSkipInit: cli.AnnotationTrue}, + RunE: coreStep.Run, } } diff --git a/internal/cli/hub/cmd/stepdown/cmd_test.go b/internal/cli/hub/cmd/stepdown/cmd_test.go new file mode 100644 index 000000000..86c4bdfcf --- /dev/null +++ b/internal/cli/hub/cmd/stepdown/cmd_test.go @@ -0,0 +1,24 @@ +// / ctx: https://ctx.ist +// ,'`./ do you remember? +// `.,'\ +// \ Copyright 2026-present Context contributors. +// SPDX-License-Identifier: Apache-2.0 + +package stepdown + +import ( + "testing" + + "github.com/ActiveMemory/ctx/internal/config/cli" +) + +// TestHubStepdown_AnnotationSkipInit guards the hub-bypass contract. +// Spec: specs/single-source-context-anchor.md. 
+func TestHubStepdown_AnnotationSkipInit(t *testing.T) { + c := Cmd() + if got, ok := c.Annotations[cli.AnnotationSkipInit]; !ok { + t.Errorf("hub stepdown: missing AnnotationSkipInit annotation") + } else if got != cli.AnnotationTrue { + t.Errorf("hub stepdown: AnnotationSkipInit = %q, want %q", got, cli.AnnotationTrue) + } +} diff --git a/internal/cli/hub/cmd/stop/cmd.go b/internal/cli/hub/cmd/stop/cmd.go index 8fc85380e..529b0be55 100644 --- a/internal/cli/hub/cmd/stop/cmd.go +++ b/internal/cli/hub/cmd/stop/cmd.go @@ -11,6 +11,7 @@ import ( "github.com/ActiveMemory/ctx/internal/assets/read/desc" "github.com/ActiveMemory/ctx/internal/cli/hub/core/server" + "github.com/ActiveMemory/ctx/internal/config/cli" "github.com/ActiveMemory/ctx/internal/config/embed/cmd" "github.com/ActiveMemory/ctx/internal/config/embed/flag" cFlag "github.com/ActiveMemory/ctx/internal/config/flag" @@ -35,6 +36,9 @@ func Cmd() *cobra.Command { Long: long, Example: desc.Example(cmd.DescKeyHubStop), Args: cobra.NoArgs, + // Hub stores at ~/.ctx/hub-data/, not .context/. + // Spec: specs/single-source-context-anchor.md. + Annotations: map[string]string{cli.AnnotationSkipInit: cli.AnnotationTrue}, RunE: func(cobraCmd *cobra.Command, _ []string) error { return server.Stop(cobraCmd, dataDir) }, diff --git a/internal/cli/hub/cmd/stop/cmd_test.go b/internal/cli/hub/cmd/stop/cmd_test.go new file mode 100644 index 000000000..bf9905608 --- /dev/null +++ b/internal/cli/hub/cmd/stop/cmd_test.go @@ -0,0 +1,24 @@ +// / ctx: https://ctx.ist +// ,'`./ do you remember? +// `.,'\ +// \ Copyright 2026-present Context contributors. +// SPDX-License-Identifier: Apache-2.0 + +package stop + +import ( + "testing" + + "github.com/ActiveMemory/ctx/internal/config/cli" +) + +// TestHubStop_AnnotationSkipInit guards the hub-bypass contract. +// Spec: specs/single-source-context-anchor.md. 
+func TestHubStop_AnnotationSkipInit(t *testing.T) { + c := Cmd() + if got, ok := c.Annotations[cli.AnnotationSkipInit]; !ok { + t.Errorf("hub stop: missing AnnotationSkipInit annotation") + } else if got != cli.AnnotationTrue { + t.Errorf("hub stop: AnnotationSkipInit = %q, want %q", got, cli.AnnotationTrue) + } +} diff --git a/internal/cli/initialize/cmd/root/run.go b/internal/cli/initialize/cmd/root/run.go index f4eaf5d5c..9edced4d3 100644 --- a/internal/cli/initialize/cmd/root/run.go +++ b/internal/cli/initialize/cmd/root/run.go @@ -8,6 +8,7 @@ package root import ( "bufio" + "errors" "os" "path/filepath" "strings" @@ -35,6 +36,7 @@ import ( "github.com/ActiveMemory/ctx/internal/config/fs" "github.com/ActiveMemory/ctx/internal/config/sync" "github.com/ActiveMemory/ctx/internal/config/token" + errCtx "github.com/ActiveMemory/ctx/internal/err/context" errFs "github.com/ActiveMemory/ctx/internal/err/fs" errPrompt "github.com/ActiveMemory/ctx/internal/err/prompt" ctxIo "github.com/ActiveMemory/ctx/internal/io" @@ -47,6 +49,21 @@ import ( // Creates a .context/ directory with template files. Handles existing // directories, minimal mode, and CLAUDE.md merge operations. // +// Under the single-source-anchor resolution model +// (spec: specs/single-source-context-anchor.md), init is exempt from +// the require-context-dir gate. It resolves the target in priority +// order: +// +// 1. CTX_DIR env var (read by rc.ContextDir). +// 2. Fall back to `/.context/` and create it there. +// +// The basename guard does not apply at init time because init +// *creates* the canonical-named directory. +// +// After materializing the directory, init prints the shell activation +// hint via InfoActivateHint so the user's next ctx call in a new +// process finds the right CTX_DIR. 
+// // Parameters: // - cmd: Cobra command for output and input streams // - force: If true, overwrite existing files without prompting @@ -73,7 +90,25 @@ func Run( } } - contextDir := rc.ContextDir() + // Under the explicit-context-dir resolution model, rc.ContextDir() + // returns an error when neither --context-dir nor CTX_DIR is declared. + // `ctx init` is an exempt command: fall back to cwd/.context so a + // user running `ctx init` in a fresh project gets the expected + // behavior. Spec: specs/explicit-context-dir.md. The fallback is + // reserved for the not-declared case; propagate any other resolver + // failure (e.g. malformed .ctxrc) so operators see the real error + // rather than a silent redirection to the working directory. + contextDir, ctxErr := rc.ContextDir() + if ctxErr != nil { + if !errors.Is(ctxErr, errCtx.ErrDirNotDeclared) { + return ctxErr + } + cwd, cwdErr := os.Getwd() + if cwdErr != nil { + return errFs.ReadInput(cwdErr) + } + contextDir = filepath.Join(cwd, dir.Context) + } // Check if .context/ already exists and is properly initialized. // A directory with only logs/ (created by hooks before init) is @@ -123,7 +158,8 @@ func Run( // for users who want a bare init with no starter // templates. if !noSteeringInit { - if steeringErr := steeringInit.Run(cmd); steeringErr != nil { + steeringErr := steeringInit.RunWithDir(cmd, contextDir) + if steeringErr != nil { // Non-fatal: the rest of init is more // important than the steering templates. label := desc.Text(text.DescKeyInitLabelSteering) @@ -231,11 +267,12 @@ func Run( initialize.InfoWarnNonFatal(cmd, file.FileGitignore, ignoreErr) } + initialize.InfoActivateHint(cmd, contextDir) initialize.InfoNextSteps(cmd) initialize.InfoWorkflowTips(cmd) // Save the quick-start reference to a project-root file. - coreProject.WriteGettingStarted(cmd) + coreProject.WriteGettingStarted(cmd, contextDir) // Post-script: stage-aware Claude Code setup guidance. 
// Never fatal, never an error; a friendly nudge diff --git a/internal/cli/initialize/core/pad/setup.go b/internal/cli/initialize/core/pad/setup.go index 6bd747e65..a1f0b04de 100644 --- a/internal/cli/initialize/core/pad/setup.go +++ b/internal/cli/initialize/core/pad/setup.go @@ -60,7 +60,10 @@ func setupPlaintext( func setupEncrypted( cmd *cobra.Command, contextDir string, ) error { - kPath := rc.KeyPath() + kPath, kpErr := rc.KeyPath() + if kpErr != nil { + return kpErr + } encPath := filepath.Join(contextDir, cfgPad.Enc) // Check if the key already exists (idempotent) diff --git a/internal/cli/initialize/core/project/getting_started.go b/internal/cli/initialize/core/project/getting_started.go index c9466b2a7..e110df751 100644 --- a/internal/cli/initialize/core/project/getting_started.go +++ b/internal/cli/initialize/core/project/getting_started.go @@ -7,6 +7,8 @@ package project import ( + "fmt" + "github.com/spf13/cobra" "github.com/ActiveMemory/ctx/internal/assets/read/desc" @@ -18,14 +20,31 @@ import ( "github.com/ActiveMemory/ctx/internal/write/initialize" ) -// WriteGettingStarted saves the next-steps and workflow-tips text to -// GETTING_STARTED.md in the project root. Best-effort: failures are -// non-fatal since the same content was already printed to stdout. +// WriteGettingStarted saves an anatomy preamble (what `.context/` +// is and how the project-root contract works), the activation hint, +// next-steps, and workflow-tips text to GETTING_STARTED.md in the +// project root. The file is the human's durable primer after +// running `ctx init`: the preamble names the contract so future +// readers know which directory rule is load-bearing; the activation +// hint comes next because every subsequent `ctx ` +// requires CTX_DIR to be declared. Best-effort: failures are +// non-fatal since the activation hint and next-steps were already +// printed to stdout. 
// // Parameters: -// - cmd: Cobra command for status output -func WriteGettingStarted(cmd *cobra.Command) { - content := desc.Text(text.DescKeyWriteInitNextStepsBlock) + +// - cmd: Cobra command for status output. +// - contextDir: Absolute path of the just-created .context/ +// directory, used in the activation hint. +func WriteGettingStarted(cmd *cobra.Command, contextDir string) { + activateHint := fmt.Sprintf( + desc.Text(text.DescKeyWriteInitActivateHint), + contextDir, + ) + content := desc.Text(text.DescKeyWriteInitAnatomyPreamble) + + token.NewlineLF + + activateHint + + token.NewlineLF + + desc.Text(text.DescKeyWriteInitNextStepsBlock) + token.NewlineLF + desc.Text(text.DescKeyWriteInitWorkflowTips) + token.NewlineLF diff --git a/internal/cli/initialize/init_test.go b/internal/cli/initialize/init_test.go index 362729699..1c1970550 100644 --- a/internal/cli/initialize/init_test.go +++ b/internal/cli/initialize/init_test.go @@ -32,6 +32,9 @@ func TestInitCommand(t *testing.T) { t.Fatalf("failed to chdir: %v", err) } defer func() { _ = os.Chdir(origDir) }() + t.Setenv("HOME", tmpDir) + t.Setenv(env.CtxDir, filepath.Join(tmpDir, ".context")) + t.Setenv(env.SkipPathCheck, env.True) cmd := Cmd() cmd.SetArgs([]string{}) @@ -76,6 +79,7 @@ func TestInitCreatesSteeringHooksSkillsDirs(t *testing.T) { } defer func() { _ = os.Chdir(origDir) }() t.Setenv("HOME", tmpDir) + t.Setenv(env.CtxDir, filepath.Join(tmpDir, ".context")) t.Setenv(env.SkipPathCheck, env.True) cmd := Cmd() @@ -110,6 +114,7 @@ func TestInitSkipsExistingSteeringHooksSkillsDirs(t *testing.T) { } defer func() { _ = os.Chdir(origDir) }() t.Setenv("HOME", tmpDir) + t.Setenv(env.CtxDir, filepath.Join(tmpDir, ".context")) t.Setenv(env.SkipPathCheck, env.True) // Pre-create the directories with a marker file inside each. 
@@ -156,6 +161,9 @@ func TestInitMergeInsertsAfterH1(t *testing.T) { t.Fatalf("failed to chdir: %v", err) } defer func() { _ = os.Chdir(origDir) }() + t.Setenv("HOME", tmpDir) + t.Setenv(env.CtxDir, filepath.Join(tmpDir, ".context")) + t.Setenv(env.SkipPathCheck, env.True) existingContent := "# My Amazing Project\n\n" + "This is the project description.\n\n" + @@ -207,6 +215,9 @@ func TestInitMergeInsertsAtTopWhenNoH1(t *testing.T) { t.Fatalf("failed to chdir: %v", err) } defer func() { _ = os.Chdir(origDir) }() + t.Setenv("HOME", tmpDir) + t.Setenv(env.CtxDir, filepath.Join(tmpDir, ".context")) + t.Setenv(env.SkipPathCheck, env.True) existingContent := "## Build Instructions\n\nRun make build.\n\n" + "## Testing\n\nRun make test.\n" @@ -254,6 +265,9 @@ func TestInitCreatesPermissions(t *testing.T) { t.Fatalf("failed to chdir: %v", err) } defer func() { _ = os.Chdir(origDir) }() + t.Setenv("HOME", tmpDir) + t.Setenv(env.CtxDir, filepath.Join(tmpDir, ".context")) + t.Setenv(env.SkipPathCheck, env.True) cmd := Cmd() cmd.SetArgs([]string{}) @@ -310,6 +324,9 @@ func TestInitMergesPermissions(t *testing.T) { t.Fatalf("failed to chdir: %v", err) } defer func() { _ = os.Chdir(origDir) }() + t.Setenv("HOME", tmpDir) + t.Setenv(env.CtxDir, filepath.Join(tmpDir, ".context")) + t.Setenv(env.SkipPathCheck, env.True) if err = os.MkdirAll(".claude", 0750); err != nil { t.Fatalf("failed to create .claude: %v", err) @@ -378,6 +395,9 @@ func TestInitWithExistingClaudeMdWithCtxMarker(t *testing.T) { t.Fatalf("failed to chdir: %v", err) } defer func() { _ = os.Chdir(origDir) }() + t.Setenv("HOME", tmpDir) + t.Setenv(env.CtxDir, filepath.Join(tmpDir, ".context")) + t.Setenv(env.SkipPathCheck, env.True) existingContent := "# My Project\n\n" + "This is my existing CLAUDE.md content.\n\n" + @@ -441,6 +461,7 @@ func TestRunInit_Minimal(t *testing.T) { } defer func() { _ = os.Chdir(origDir) }() t.Setenv("HOME", tmpDir) + t.Setenv(env.CtxDir, filepath.Join(tmpDir, ".context")) 
t.Setenv(env.SkipPathCheck, env.True) cmd := Cmd() @@ -475,6 +496,7 @@ func TestRunInit_Force(t *testing.T) { } defer func() { _ = os.Chdir(origDir) }() t.Setenv("HOME", tmpDir) + t.Setenv(env.CtxDir, filepath.Join(tmpDir, ".context")) t.Setenv(env.SkipPathCheck, env.True) cmd := Cmd() @@ -507,6 +529,7 @@ func TestRunInit_Merge(t *testing.T) { } defer func() { _ = os.Chdir(origDir) }() t.Setenv("HOME", tmpDir) + t.Setenv(env.CtxDir, filepath.Join(tmpDir, ".context")) t.Setenv(env.SkipPathCheck, env.True) mdContent := "# My Project\n\nExisting.\n" @@ -543,6 +566,7 @@ func TestInitScaffoldsFoundationSteeringFiles(t *testing.T) { } defer func() { _ = os.Chdir(origDir) }() t.Setenv("HOME", tmpDir) + t.Setenv(env.CtxDir, filepath.Join(tmpDir, ".context")) t.Setenv(env.SkipPathCheck, env.True) cmd := Cmd() @@ -589,6 +613,7 @@ func TestInitNoSteeringInitFlagSkipsScaffold(t *testing.T) { } defer func() { _ = os.Chdir(origDir) }() t.Setenv("HOME", tmpDir) + t.Setenv(env.CtxDir, filepath.Join(tmpDir, ".context")) t.Setenv(env.SkipPathCheck, env.True) cmd := Cmd() diff --git a/internal/cli/journal/cmd/importer/run.go b/internal/cli/journal/cmd/importer/run.go index 6e21c0c5f..36e8459ed 100644 --- a/internal/cli/journal/cmd/importer/run.go +++ b/internal/cli/journal/cmd/importer/run.go @@ -98,7 +98,12 @@ func Run(cmd *cobra.Command, args []string, opts entity.ImportOpts) error { } // 4. Ensure journal directory exists. 
- journalDir := filepath.Join(rc.ContextDir(), dir.Journal) + ctxDir, ctxErr := rc.RequireContextDir() + if ctxErr != nil { + cmd.SilenceUsage = true + return ctxErr + } + journalDir := filepath.Join(ctxDir, dir.Journal) if mkErr := ctxIo.SafeMkdirAll(journalDir, fs.PermExec); mkErr != nil { return errFs.Mkdir(dir.Journal, mkErr) } diff --git a/internal/cli/journal/cmd/obsidian/cmd.go b/internal/cli/journal/cmd/obsidian/cmd.go index f08eab6c7..cd9f4979d 100644 --- a/internal/cli/journal/cmd/obsidian/cmd.go +++ b/internal/cli/journal/cmd/obsidian/cmd.go @@ -7,21 +7,26 @@ package obsidian import ( - "path/filepath" - "github.com/spf13/cobra" "github.com/ActiveMemory/ctx/internal/assets/read/desc" "github.com/ActiveMemory/ctx/internal/config/embed/cmd" "github.com/ActiveMemory/ctx/internal/config/embed/flag" cFlag "github.com/ActiveMemory/ctx/internal/config/flag" - "github.com/ActiveMemory/ctx/internal/config/obsidian" "github.com/ActiveMemory/ctx/internal/flagbind" - "github.com/ActiveMemory/ctx/internal/rc" ) // Cmd returns the journal obsidian subcommand. // +// The --output default is resolved inside [Run] against the +// declared context directory. Computing it at construction time +// would require rc.ContextDir() to succeed before cobra has +// parsed the flags, which is too early under the +// explicit-context-dir model. Leaving the default empty and +// resolving lazily keeps the failure path clean: a missing +// context directory surfaces as a single actionable error from +// Run, not a silently-empty flag default. 
+// // Returns: // - *cobra.Command: Command for generating an Obsidian vault from journal // entries @@ -39,12 +44,9 @@ func Cmd() *cobra.Command { }, } - defaultOutput := filepath.Join( - rc.ContextDir(), obsidian.DirName, - ) flagbind.StringFlagPDefault( c, &output, - cFlag.Output, cFlag.ShortOutput, defaultOutput, + cFlag.Output, cFlag.ShortOutput, "", flag.DescKeyJournalObsidianOutput, ) diff --git a/internal/cli/journal/cmd/obsidian/run.go b/internal/cli/journal/cmd/obsidian/run.go index f72b022c9..66c6f13aa 100644 --- a/internal/cli/journal/cmd/obsidian/run.go +++ b/internal/cli/journal/cmd/obsidian/run.go @@ -13,6 +13,7 @@ import ( coreObsidian "github.com/ActiveMemory/ctx/internal/cli/journal/core/obsidian" "github.com/ActiveMemory/ctx/internal/config/dir" + "github.com/ActiveMemory/ctx/internal/config/obsidian" "github.com/ActiveMemory/ctx/internal/rc" ) @@ -20,12 +21,21 @@ import ( // // Parameters: // - cmd: Cobra command for output stream -// - output: Output directory for the vault +// - output: Output directory for the vault; when empty, defaults +// to / // // Returns: // - error: Non-nil if generation fails func Run(cmd *cobra.Command, output string) error { + ctxDir, err := rc.RequireContextDir() + if err != nil { + cmd.SilenceUsage = true + return err + } + if output == "" { + output = filepath.Join(ctxDir, obsidian.DirName) + } return coreObsidian.BuildVault( - cmd, filepath.Join(rc.ContextDir(), dir.Journal), output, + cmd, filepath.Join(ctxDir, dir.Journal), output, ) } diff --git a/internal/cli/journal/cmd/schema/check/run.go b/internal/cli/journal/cmd/schema/check/run.go index 8a83b684a..f21e022b0 100644 --- a/internal/cli/journal/cmd/schema/check/run.go +++ b/internal/cli/journal/cmd/schema/check/run.go @@ -15,6 +15,7 @@ import ( errSchema "github.com/ActiveMemory/ctx/internal/err/schema" "github.com/ActiveMemory/ctx/internal/journal/schema" ctxLog "github.com/ActiveMemory/ctx/internal/log/warn" + "github.com/ActiveMemory/ctx/internal/rc" 
writeSchema "github.com/ActiveMemory/ctx/internal/write/schema" ) @@ -27,6 +28,10 @@ import ( // Returns: // - error: non-nil when drift is detected or scan fails func Run(cmd *cobra.Command, opts coreSchema.CheckOpts) error { + if _, ctxErr := rc.RequireContextDir(); ctxErr != nil { + cmd.SilenceUsage = true + return ctxErr + } c, checkErr := coreSchema.Check(opts) if checkErr != nil { return checkErr diff --git a/internal/cli/journal/cmd/site/cmd.go b/internal/cli/journal/cmd/site/cmd.go index d5a81878f..045421334 100644 --- a/internal/cli/journal/cmd/site/cmd.go +++ b/internal/cli/journal/cmd/site/cmd.go @@ -7,21 +7,26 @@ package site import ( - "path/filepath" - "github.com/spf13/cobra" "github.com/ActiveMemory/ctx/internal/assets/read/desc" - "github.com/ActiveMemory/ctx/internal/config/dir" "github.com/ActiveMemory/ctx/internal/config/embed/cmd" "github.com/ActiveMemory/ctx/internal/config/embed/flag" cFlag "github.com/ActiveMemory/ctx/internal/config/flag" "github.com/ActiveMemory/ctx/internal/flagbind" - "github.com/ActiveMemory/ctx/internal/rc" ) // Cmd returns the journal site subcommand. // +// The --output default is resolved inside [Run] against the +// declared context directory. Computing it at construction time +// would require rc.ContextDir() to succeed before cobra has +// parsed the flags, which is too early under the +// explicit-context-dir model. Leaving the default empty and +// resolving lazily keeps the failure path clean: a missing +// context directory surfaces as a single actionable error from +// Run, not a silently-empty flag default. 
+// // Returns: // - *cobra.Command: Command for generating a static site from journal entries func Cmd() *cobra.Command { @@ -42,10 +47,9 @@ func Cmd() *cobra.Command { }, } - defaultOutput := filepath.Join(rc.ContextDir(), dir.JournalSite) flagbind.StringFlagPDefault( c, &output, cFlag.Output, cFlag.ShortOutput, - defaultOutput, flag.DescKeyJournalSiteOutput, + "", flag.DescKeyJournalSiteOutput, ) flagbind.BoolFlag(c, &build, cFlag.Build, flag.DescKeyJournalSiteBuild) flagbind.BoolFlag(c, &serve, cFlag.Serve, flag.DescKeyJournalSiteServe) diff --git a/internal/cli/journal/cmd/site/run.go b/internal/cli/journal/cmd/site/run.go index a92c2998e..6c852768f 100644 --- a/internal/cli/journal/cmd/site/run.go +++ b/internal/cli/journal/cmd/site/run.go @@ -54,7 +54,15 @@ import ( func Run( cmd *cobra.Command, output string, build, serve bool, ) error { - journalDir := filepath.Join(rc.ContextDir(), dir.Journal) + ctxDir, ctxErr := rc.RequireContextDir() + if ctxErr != nil { + cmd.SilenceUsage = true + return ctxErr + } + if output == "" { + output = filepath.Join(ctxDir, dir.JournalSite) + } + journalDir := filepath.Join(ctxDir, dir.Journal) // Check if the journal directory exists if _, statErr := os.Stat(journalDir); os.IsNotExist(statErr) { diff --git a/internal/cli/journal/cmd/sync/run.go b/internal/cli/journal/cmd/sync/run.go index c3f80acf6..b326bd76a 100644 --- a/internal/cli/journal/cmd/sync/run.go +++ b/internal/cli/journal/cmd/sync/run.go @@ -29,7 +29,12 @@ import ( // Returns: // - error: Non-nil on I/O failure func Run(cmd *cobra.Command) error { - journalDir := filepath.Join(rc.ContextDir(), dir.Journal) + ctxDir, ctxErr := rc.RequireContextDir() + if ctxErr != nil { + cmd.SilenceUsage = true + return ctxErr + } + journalDir := filepath.Join(ctxDir, dir.Journal) jstate, loadErr := state.Load(journalDir) if loadErr != nil { diff --git a/internal/cli/journal/core/lock/lock.go b/internal/cli/journal/core/lock/lock.go index 157ac69ea..02d81e5cf 100644 --- 
a/internal/cli/journal/core/lock/lock.go +++ b/internal/cli/journal/core/lock/lock.go @@ -258,7 +258,12 @@ func Run( return errSession.AllWithPattern() } - journalDir := filepath.Join(rc.ContextDir(), dir.Journal) + ctxDir, ctxErr := rc.RequireContextDir() + if ctxErr != nil { + cmd.SilenceUsage = true + return ctxErr + } + journalDir := filepath.Join(ctxDir, dir.Journal) jState, loadErr := state.Load(journalDir) if loadErr != nil { diff --git a/internal/cli/journal/core/lock/sync_test.go b/internal/cli/journal/core/lock/sync_test.go index de4e1e5d9..8e317b522 100644 --- a/internal/cli/journal/core/lock/sync_test.go +++ b/internal/cli/journal/core/lock/sync_test.go @@ -15,6 +15,7 @@ import ( "github.com/ActiveMemory/ctx/internal/cli/journal" "github.com/ActiveMemory/ctx/internal/config/fs" "github.com/ActiveMemory/ctx/internal/journal/state" + "github.com/ActiveMemory/ctx/internal/testutil/testctx" ) func TestRunSync_LocksFromFrontmatter(t *testing.T) { @@ -41,6 +42,8 @@ func TestRunSync_LocksFromFrontmatter(t *testing.T) { } defer func() { _ = os.Chdir(origDir) }() + testctx.Declare(t, dir) + cmd := journal.Cmd() buf := new(strings.Builder) cmd.SetOut(buf) @@ -98,6 +101,8 @@ func TestRunSync_UnlocksFromFrontmatter(t *testing.T) { } defer func() { _ = os.Chdir(origDir) }() + testctx.Declare(t, dir) + cmd := journal.Cmd() buf := new(strings.Builder) cmd.SetOut(buf) @@ -154,6 +159,8 @@ func TestRunSync_NoChanges(t *testing.T) { } defer func() { _ = os.Chdir(origDir) }() + testctx.Declare(t, dir) + cmd := journal.Cmd() buf := new(strings.Builder) cmd.SetOut(buf) @@ -182,6 +189,8 @@ func TestRunSync_EmptyDir(t *testing.T) { } defer func() { _ = os.Chdir(origDir) }() + testctx.Declare(t, dir) + cmd := journal.Cmd() buf := new(strings.Builder) cmd.SetOut(buf) @@ -247,6 +256,8 @@ func TestRunSync_MixedFiles(t *testing.T) { } defer func() { _ = os.Chdir(origDir) }() + testctx.Declare(t, dir) + cmd := journal.Cmd() buf := new(strings.Builder) cmd.SetOut(buf) diff --git 
a/internal/cli/journal/core/lock/unlock_test.go b/internal/cli/journal/core/lock/unlock_test.go index e5e3bdbf0..19b43f208 100644 --- a/internal/cli/journal/core/lock/unlock_test.go +++ b/internal/cli/journal/core/lock/unlock_test.go @@ -16,6 +16,7 @@ import ( "github.com/ActiveMemory/ctx/internal/config/fs" "github.com/ActiveMemory/ctx/internal/config/session" "github.com/ActiveMemory/ctx/internal/journal/state" + "github.com/ActiveMemory/ctx/internal/testutil/testctx" ) func TestRunLockUnlock_LockSingle(t *testing.T) { @@ -41,6 +42,8 @@ func TestRunLockUnlock_LockSingle(t *testing.T) { } defer func() { _ = os.Chdir(origDir) }() + testctx.Declare(t, dir) + // Lock via CLI. cmd := journal.Cmd() buf := new(strings.Builder) @@ -103,6 +106,8 @@ func TestRunLockUnlock_UnlockSingle(t *testing.T) { } defer func() { _ = os.Chdir(origDir) }() + testctx.Declare(t, dir) + cmd := journal.Cmd() buf := new(strings.Builder) cmd.SetOut(buf) @@ -154,6 +159,8 @@ func TestRunLockUnlock_LockAll(t *testing.T) { } defer func() { _ = os.Chdir(origDir) }() + testctx.Declare(t, dir) + cmd := journal.Cmd() buf := new(strings.Builder) cmd.SetOut(buf) @@ -207,6 +214,8 @@ func TestRunLockUnlock_AlreadyLocked(t *testing.T) { } defer func() { _ = os.Chdir(origDir) }() + testctx.Declare(t, dir) + cmd := journal.Cmd() buf := new(strings.Builder) cmd.SetOut(buf) @@ -275,6 +284,8 @@ func TestRunLockUnlock_LockMultipart(t *testing.T) { } defer func() { _ = os.Chdir(origDir) }() + testctx.Declare(t, dir) + cmd := journal.Cmd() buf := new(strings.Builder) cmd.SetOut(buf) diff --git a/internal/cli/journal/core/schema/check.go b/internal/cli/journal/core/schema/check.go index 51f4d1eb6..96e4c3b81 100644 --- a/internal/cli/journal/core/schema/check.go +++ b/internal/cli/journal/core/schema/check.go @@ -219,19 +219,24 @@ func SortedBlockTypes( // Returns: // - error: non-nil if the report cannot be written func WriteReport(c *schema.Collector) error { - contextDir := rc.ContextDir() - if contextDir == "" 
{ - return nil + contextDir, ctxErr := rc.ContextDir() + if ctxErr != nil { + return ctxErr } reportsDir := filepath.Join(contextDir, dir.Reports) reportPath := filepath.Join(reportsDir, file.SchemaDrift) if !c.Drift() { - if _, statErr := os.Stat(reportPath); statErr == nil { + _, statErr := os.Stat(reportPath) + if statErr == nil { return os.Remove(reportPath) } - return nil + if os.IsNotExist(statErr) { + // No prior report on disk, nothing to clean up. + return nil + } + return statErr } mkErr := ctxIo.SafeMkdirAll(reportsDir, fs.PermExec) diff --git a/internal/cli/learning/cmd/reindex/run.go b/internal/cli/learning/cmd/reindex/run.go index d74b09c4f..717ed5303 100644 --- a/internal/cli/learning/cmd/reindex/run.go +++ b/internal/cli/learning/cmd/reindex/run.go @@ -26,7 +26,12 @@ import ( // Returns: // - error: Non-nil if the file read/write fails func Run(cmd *cobra.Command, _ []string) error { - filePath := filepath.Join(rc.ContextDir(), ctx.Learning) + ctxDir, err := rc.RequireContextDir() + if err != nil { + cmd.SilenceUsage = true + return err + } + filePath := filepath.Join(ctxDir, ctx.Learning) return index.Reindex( cmd.OutOrStdout(), filePath, diff --git a/internal/cli/learning/learning_test.go b/internal/cli/learning/learning_test.go index 2e0e8bac4..652bba7c4 100644 --- a/internal/cli/learning/learning_test.go +++ b/internal/cli/learning/learning_test.go @@ -14,6 +14,7 @@ import ( "github.com/ActiveMemory/ctx/internal/config/ctx" "github.com/ActiveMemory/ctx/internal/config/dir" "github.com/ActiveMemory/ctx/internal/rc" + "github.com/ActiveMemory/ctx/internal/testutil/testctx" ) func TestCmd(t *testing.T) { @@ -64,7 +65,7 @@ func TestRunReindex_NoFile(t *testing.T) { _ = os.Chdir(tempDir) defer func() { _ = os.Chdir(origDir) }() - rc.Reset() + testctx.Declare(t, tempDir) defer rc.Reset() cmd := Cmd() @@ -82,7 +83,7 @@ func TestRunReindex_WithFile(t *testing.T) { _ = os.Chdir(tempDir) defer func() { _ = os.Chdir(origDir) }() - rc.Reset() + 
testctx.Declare(t, tempDir) defer rc.Reset() // Create the context directory and LEARNINGS.md file @@ -124,7 +125,7 @@ func TestRunReindex_EmptyFile(t *testing.T) { _ = os.Chdir(tempDir) defer func() { _ = os.Chdir(origDir) }() - rc.Reset() + testctx.Declare(t, tempDir) defer rc.Reset() // Create the context directory and empty LEARNINGS.md diff --git a/internal/cli/load/cmd/root/run.go b/internal/cli/load/cmd/root/run.go index 0358c3a33..3b6ecec64 100644 --- a/internal/cli/load/cmd/root/run.go +++ b/internal/cli/load/cmd/root/run.go @@ -16,6 +16,7 @@ import ( "github.com/ActiveMemory/ctx/internal/context/load" errCtx "github.com/ActiveMemory/ctx/internal/err/context" errInit "github.com/ActiveMemory/ctx/internal/err/initialize" + "github.com/ActiveMemory/ctx/internal/rc" writeLoad "github.com/ActiveMemory/ctx/internal/write/load" ) @@ -32,6 +33,10 @@ import ( // Returns: // - error: Non-nil if context loading fails or .context/ is not found func Run(cmd *cobra.Command, budget int, raw bool) error { + if _, ctxErr := rc.RequireContextDir(); ctxErr != nil { + cmd.SilenceUsage = true + return ctxErr + } ctx, err := load.Do("") if err != nil { if _, ok := errors.AsType[*errCtx.NotFoundError](err); ok { diff --git a/internal/cli/load/load_test.go b/internal/cli/load/load_test.go index 0880cebc2..597214844 100644 --- a/internal/cli/load/load_test.go +++ b/internal/cli/load/load_test.go @@ -11,6 +11,7 @@ import ( "testing" "github.com/ActiveMemory/ctx/internal/cli/initialize" + "github.com/ActiveMemory/ctx/internal/testutil/testctx" ) // TestLoadCommand tests the load command. 
@@ -27,6 +28,8 @@ func TestLoadCommand(t *testing.T) { } defer func() { _ = os.Chdir(origDir) }() + testctx.Declare(t, tmpDir) + // First init initCmd := initialize.Cmd() initCmd.SetArgs([]string{}) @@ -57,6 +60,8 @@ func TestLoadRawOutput(t *testing.T) { } defer func() { _ = os.Chdir(origDir) }() + testctx.Declare(t, tmpDir) + // First init initCmd := initialize.Cmd() initCmd.SetArgs([]string{}) diff --git a/internal/cli/mcp/cmd/root/cmd.go b/internal/cli/mcp/cmd/root/cmd.go index 2a88f75fb..5ac910b29 100644 --- a/internal/cli/mcp/cmd/root/cmd.go +++ b/internal/cli/mcp/cmd/root/cmd.go @@ -22,6 +22,11 @@ import ( // Returns: // - error: Non-nil if the server fails to start or encounters an I/O error func Cmd(cmd *cobra.Command, _ []string) error { - srv := internalMcp.New(rc.ContextDir(), cmd.Root().Version) + ctxDir, err := rc.RequireContextDir() + if err != nil { + cmd.SilenceUsage = true + return err + } + srv := internalMcp.New(ctxDir, cmd.Root().Version) return srv.Serve() } diff --git a/internal/cli/mcp/cmd/root/cmd_test.go b/internal/cli/mcp/cmd/root/cmd_test.go new file mode 100644 index 000000000..1c7e698dd --- /dev/null +++ b/internal/cli/mcp/cmd/root/cmd_test.go @@ -0,0 +1,35 @@ +// / ctx: https://ctx.ist +// ,'`./ do you remember? +// `.,'\ +// \ Copyright 2026-present Context contributors. +// SPDX-License-Identifier: Apache-2.0 + +package root + +import ( + "testing" + + "github.com/spf13/cobra" + + "github.com/ActiveMemory/ctx/internal/config/env" + "github.com/ActiveMemory/ctx/internal/rc" +) + +// TestMcpServe_FailsClosedOnUnsetCTXDIR is the regression guard +// required by spec/single-source-context-anchor.md. The MCP serve +// path must route through rc.RequireContextDir; with CTX_DIR +// unset, the cobra Run should return an error rather than starting +// a server bound to an empty path. 
+func TestMcpServe_FailsClosedOnUnsetCTXDIR(t *testing.T) { + t.Setenv(env.CtxDir, "") + rc.Reset() + t.Cleanup(rc.Reset) + + c := &cobra.Command{Use: "serve"} + c.SetArgs(nil) + + err := Cmd(c, nil) + if err == nil { + t.Fatal("Cmd() err = nil, want non-nil when CTX_DIR is unset") + } +} diff --git a/internal/cli/memory/cmd/diff/run.go b/internal/cli/memory/cmd/diff/run.go index 40816579b..32261b132 100644 --- a/internal/cli/memory/cmd/diff/run.go +++ b/internal/cli/memory/cmd/diff/run.go @@ -7,13 +7,11 @@ package diff import ( - "path/filepath" - "github.com/spf13/cobra" + "github.com/ActiveMemory/ctx/internal/cli/memory/core/resolve" errMemory "github.com/ActiveMemory/ctx/internal/err/memory" mem "github.com/ActiveMemory/ctx/internal/memory" - "github.com/ActiveMemory/ctx/internal/rc" "github.com/ActiveMemory/ctx/internal/write/memory" ) @@ -26,8 +24,10 @@ import ( // Returns: // - error: on discovery or diff failure. func Run(cmd *cobra.Command) error { - contextDir := rc.ContextDir() - projectRoot := filepath.Dir(contextDir) + contextDir, projectRoot, err := resolve.ContextAndRoot(cmd) + if err != nil { + return err + } sourcePath, discoverErr := mem.DiscoverPath(projectRoot) if discoverErr != nil { diff --git a/internal/cli/memory/cmd/importer/run.go b/internal/cli/memory/cmd/importer/run.go index eb5a11784..016f8cb33 100644 --- a/internal/cli/memory/cmd/importer/run.go +++ b/internal/cli/memory/cmd/importer/run.go @@ -7,22 +7,17 @@ package importer import ( - "path/filepath" - "github.com/spf13/cobra" + "github.com/ActiveMemory/ctx/internal/cli/memory/core/resolve" "github.com/ActiveMemory/ctx/internal/config/entry" cfgFmt "github.com/ActiveMemory/ctx/internal/config/format" cfgMemory "github.com/ActiveMemory/ctx/internal/config/memory" "github.com/ActiveMemory/ctx/internal/entity" - errMemory "github.com/ActiveMemory/ctx/internal/err/memory" errState "github.com/ActiveMemory/ctx/internal/err/state" "github.com/ActiveMemory/ctx/internal/format" - 
"github.com/ActiveMemory/ctx/internal/io" "github.com/ActiveMemory/ctx/internal/memory" - "github.com/ActiveMemory/ctx/internal/rc" "github.com/ActiveMemory/ctx/internal/write/ctximport" - "github.com/ActiveMemory/ctx/internal/write/sync" ) // Run parses MEMORY.md entries, classifies them by heuristic keyword @@ -36,20 +31,18 @@ import ( // Returns: // - error: on discovery, read, state, or promotion failure. func Run(cmd *cobra.Command, dryRun bool) error { - contextDir := rc.ContextDir() - projectRoot := filepath.Dir(contextDir) + contextDir, projectRoot, err := resolve.ContextAndRoot(cmd) + if err != nil { + return err + } - sourcePath, discoverErr := memory.DiscoverPath(projectRoot) + sourcePath, discoverErr := resolve.DiscoverSource(cmd, projectRoot) if discoverErr != nil { - sync.ErrAutoMemoryNotActive(cmd, discoverErr) - return errMemory.NotFound() + return discoverErr } - - sourceData, readErr := io.SafeReadFile( - filepath.Dir(sourcePath), filepath.Base(sourcePath), - ) + sourceData, readErr := resolve.ReadSource(sourcePath) if readErr != nil { - return errMemory.Read(readErr) + return readErr } entries := memory.Entries(string(sourceData)) diff --git a/internal/cli/memory/cmd/publish/run.go b/internal/cli/memory/cmd/publish/run.go index 1749f7fb6..2a107e3c1 100644 --- a/internal/cli/memory/cmd/publish/run.go +++ b/internal/cli/memory/cmd/publish/run.go @@ -7,15 +7,12 @@ package publish import ( - "path/filepath" - "github.com/spf13/cobra" + "github.com/ActiveMemory/ctx/internal/cli/memory/core/resolve" errMemory "github.com/ActiveMemory/ctx/internal/err/memory" mem "github.com/ActiveMemory/ctx/internal/memory" - "github.com/ActiveMemory/ctx/internal/rc" "github.com/ActiveMemory/ctx/internal/write/publish" - "github.com/ActiveMemory/ctx/internal/write/sync" ) // Run selects the high-value context, formats it, and writes a marked block @@ -29,13 +26,14 @@ import ( // Returns: // - error: on discovery, selection, or publish failure. 
func Run(cmd *cobra.Command, budget int, dryRun bool) error { - contextDir := rc.ContextDir() - projectRoot := filepath.Dir(contextDir) + contextDir, projectRoot, err := resolve.ContextAndRoot(cmd) + if err != nil { + return err + } - memoryPath, discoverErr := mem.DiscoverPath(projectRoot) + memoryPath, discoverErr := resolve.DiscoverSource(cmd, projectRoot) if discoverErr != nil { - sync.ErrAutoMemoryNotActive(cmd, discoverErr) - return errMemory.NotFound() + return discoverErr } result, selectErr := mem.SelectContent(contextDir, budget) diff --git a/internal/cli/memory/cmd/status/run.go b/internal/cli/memory/cmd/status/run.go index acb61a4e5..9f0a85796 100644 --- a/internal/cli/memory/cmd/status/run.go +++ b/internal/cli/memory/cmd/status/run.go @@ -14,6 +14,7 @@ import ( "github.com/spf13/cobra" "github.com/ActiveMemory/ctx/internal/cli/memory/core/count" + "github.com/ActiveMemory/ctx/internal/cli/memory/core/resolve" "github.com/ActiveMemory/ctx/internal/config/dir" "github.com/ActiveMemory/ctx/internal/config/memory" cfgTime "github.com/ActiveMemory/ctx/internal/config/time" @@ -21,7 +22,6 @@ import ( "github.com/ActiveMemory/ctx/internal/format" "github.com/ActiveMemory/ctx/internal/io" mem "github.com/ActiveMemory/ctx/internal/memory" - "github.com/ActiveMemory/ctx/internal/rc" writeMem "github.com/ActiveMemory/ctx/internal/write/memory" ) @@ -34,8 +34,10 @@ import ( // Returns: // - error: on discovery failure. 
func Run(cmd *cobra.Command) error { - contextDir := rc.ContextDir() - projectRoot := filepath.Dir(contextDir) + contextDir, projectRoot, err := resolve.ContextAndRoot(cmd) + if err != nil { + return err + } sourcePath, discoverErr := mem.DiscoverPath(projectRoot) if discoverErr != nil { diff --git a/internal/cli/memory/cmd/sync/run.go b/internal/cli/memory/cmd/sync/run.go index 29ffd2773..739f68f6a 100644 --- a/internal/cli/memory/cmd/sync/run.go +++ b/internal/cli/memory/cmd/sync/run.go @@ -11,11 +11,11 @@ import ( "github.com/spf13/cobra" + "github.com/ActiveMemory/ctx/internal/cli/memory/core/resolve" cfgMem "github.com/ActiveMemory/ctx/internal/config/memory" errMem "github.com/ActiveMemory/ctx/internal/err/memory" errState "github.com/ActiveMemory/ctx/internal/err/state" "github.com/ActiveMemory/ctx/internal/memory" - "github.com/ActiveMemory/ctx/internal/rc" "github.com/ActiveMemory/ctx/internal/write/sync" ) @@ -30,13 +30,14 @@ import ( // Returns: // - error: on discovery failure, sync failure, or state persistence failure. 
func Run(cmd *cobra.Command, dryRun bool) error { - contextDir := rc.ContextDir() - projectRoot := filepath.Dir(contextDir) + contextDir, projectRoot, err := resolve.ContextAndRoot(cmd) + if err != nil { + return err + } - sourcePath, discoverErr := memory.DiscoverPath(projectRoot) + sourcePath, discoverErr := resolve.DiscoverSource(cmd, projectRoot) if discoverErr != nil { - sync.ErrAutoMemoryNotActive(cmd, discoverErr) - return errMem.NotFound() + return discoverErr } if dryRun { diff --git a/internal/cli/memory/cmd/unpublish/run.go b/internal/cli/memory/cmd/unpublish/run.go index 10b53444d..e348ac132 100644 --- a/internal/cli/memory/cmd/unpublish/run.go +++ b/internal/cli/memory/cmd/unpublish/run.go @@ -7,18 +7,15 @@ package unpublish import ( - "path/filepath" - "github.com/spf13/cobra" + "github.com/ActiveMemory/ctx/internal/cli/memory/core/resolve" "github.com/ActiveMemory/ctx/internal/config/fs" cfgMem "github.com/ActiveMemory/ctx/internal/config/memory" ctxErr "github.com/ActiveMemory/ctx/internal/err/memory" "github.com/ActiveMemory/ctx/internal/io" "github.com/ActiveMemory/ctx/internal/memory" - "github.com/ActiveMemory/ctx/internal/rc" "github.com/ActiveMemory/ctx/internal/write/publish" - "github.com/ActiveMemory/ctx/internal/write/sync" ) // Run removes the ctx-managed marker block from MEMORY.md, @@ -30,20 +27,18 @@ import ( // Returns: // - error: on discovery, read, or write failure. 
func Run(cmd *cobra.Command) error { - contextDir := rc.ContextDir() - projectRoot := filepath.Dir(contextDir) - - memoryPath, discoverErr := memory.DiscoverPath(projectRoot) + _, projectRoot, err := resolve.ContextAndRoot(cmd) + if err != nil { + cmd.SilenceUsage = true + return err + } + memoryPath, discoverErr := resolve.DiscoverSource(cmd, projectRoot) if discoverErr != nil { - sync.ErrAutoMemoryNotActive(cmd, discoverErr) - return ctxErr.NotFound() + return discoverErr } - - data, readErr := io.SafeReadFile( - filepath.Dir(memoryPath), filepath.Base(memoryPath), - ) + data, readErr := resolve.ReadSource(memoryPath) if readErr != nil { - return ctxErr.Read(readErr) + return readErr } cleaned, found := memory.RemovePublished(string(data)) diff --git a/internal/cli/memory/core/resolve/doc.go b/internal/cli/memory/core/resolve/doc.go new file mode 100644 index 000000000..7fc319e02 --- /dev/null +++ b/internal/cli/memory/core/resolve/doc.go @@ -0,0 +1,22 @@ +// / ctx: https://ctx.ist +// ,'`./ do you remember? +// `.,'\ +// \ Copyright 2026-present Context contributors. +// SPDX-License-Identifier: Apache-2.0 + +// Package resolve centralizes path resolution shared by every +// memory-bridge subcommand. Each subcommand needs the declared +// context directory (for the .context/memory/ mirror) and its +// parent (the project root, where MEMORY.md lives). +// +// Before this package existed, every memory Run function repeated +// the rc.RequireContextDir + filepath.Dir + cobra.SilenceUsage +// sequence verbatim. Collapsing those three lines into a single +// ContextAndRoot call makes the Run functions read like the task +// they perform, not like the setup scaffolding every Run shares. +// +// The package also wraps the shared memory.DiscoverPath failure +// shape (see DiscoverSource): most subcommands surface the same +// not-active notice and NotFound error, while diff and status keep +// their different discovery-failure handling inline.
+package resolve diff --git a/internal/cli/memory/core/resolve/resolve.go b/internal/cli/memory/core/resolve/resolve.go new file mode 100644 index 000000000..d429a4f67 --- /dev/null +++ b/internal/cli/memory/core/resolve/resolve.go @@ -0,0 +1,91 @@ +// / ctx: https://ctx.ist +// ,'`./ do you remember? +// `.,'\ +// \ Copyright 2026-present Context contributors. +// SPDX-License-Identifier: Apache-2.0 + +package resolve + +import ( + "path/filepath" + + "github.com/spf13/cobra" + + errMemory "github.com/ActiveMemory/ctx/internal/err/memory" + "github.com/ActiveMemory/ctx/internal/io" + "github.com/ActiveMemory/ctx/internal/memory" + "github.com/ActiveMemory/ctx/internal/rc" + "github.com/ActiveMemory/ctx/internal/write/sync" +) + +// ContextAndRoot resolves the context directory and its parent +// (project root) for a memory subcommand Run. +// +// Silences cobra's usage dump on error: a missing CTX_DIR is a +// declaration problem, not a misuse of the command. Callers return +// the error unchanged so the standard tailored message from +// rc.RequireContextDir reaches the user. +// +// Parameters: +// - cmd: the cobra command being run (used only for SilenceUsage). +// +// Returns: +// - string: absolute path to the declared context directory. +// - string: project root (filepath.Dir of the context directory), +// where MEMORY.md is expected to live. +// - error: non-nil when the context directory is not declared. +func ContextAndRoot(cmd *cobra.Command) (string, string, error) { + contextDir, err := rc.RequireContextDir() + if err != nil { + cmd.SilenceUsage = true + return "", "", err + } + return contextDir, filepath.Dir(contextDir), nil +} + +// DiscoverSource runs memory.DiscoverPath and applies the standard +// "auto memory not active" treatment: surface the helper notice to +// the Cobra command's output and return errMemory.NotFound. This is +// the shape four of the six memory subcommands (importer, publish, +// sync, unpublish) share. 
The diff and status commands want a +// different discovery-failure message and keep their handling +// inline. +// +// Parameters: +// - cmd: the cobra command being run (passed through to the +// sync.ErrAutoMemoryNotActive helper for user-facing output). +// - projectRoot: project root previously resolved via +// [ContextAndRoot]. +// +// Returns: +// - string: absolute path to the MEMORY.md source file when +// discovered successfully. +// - error: errMemory.NotFound when DiscoverPath fails; nil on +// success. +func DiscoverSource(cmd *cobra.Command, projectRoot string) (string, error) { + sourcePath, err := memory.DiscoverPath(projectRoot) + if err != nil { + sync.ErrAutoMemoryNotActive(cmd, err) + return "", errMemory.NotFound() + } + return sourcePath, nil +} + +// ReadSource reads the MEMORY.md file at the given path, splitting +// it into the directory + base filename that io.SafeReadFile wants. +// The helper wraps read failures in errMemory.Read so callers get a +// consistent user-facing error message. +// +// Parameters: +// - path: absolute path to the MEMORY.md source file. +// +// Returns: +// - []byte: file contents on success. +// - error: errMemory.Read wrapping the underlying io error. 
+func ReadSource(path string) ([]byte, error) { + data, err := io.SafeReadFile(filepath.Dir(path), filepath.Base(path)) + if err != nil { + return nil, errMemory.Read(err) + } + return data, nil +} diff --git a/internal/cli/message/cmd/edit/run.go b/internal/cli/message/cmd/edit/run.go index 1391ff01d..986e8a884 100644 --- a/internal/cli/message/cmd/edit/run.go +++ b/internal/cli/message/cmd/edit/run.go @@ -20,6 +20,7 @@ import ( "github.com/ActiveMemory/ctx/internal/err/fs" errTrigger "github.com/ActiveMemory/ctx/internal/err/trigger" ctxIo "github.com/ActiveMemory/ctx/internal/io" + "github.com/ActiveMemory/ctx/internal/rc" writeMessage "github.com/ActiveMemory/ctx/internal/write/message" ) @@ -34,12 +35,19 @@ import ( // - error: Non-nil if the hook/variant is unknown, override exists, // or file operations fail func Run(cmd *cobra.Command, hk, variant string) error { + if _, ctxErr := rc.RequireContextDir(); ctxErr != nil { + cmd.SilenceUsage = true + return ctxErr + } info := messages.Lookup(hk, variant) if info == nil { return errTrigger.Validate(messages.Variants(hk) != nil, hk, variant) } - oPath := message.OverridePath(hk, variant) + oPath, pathErr := message.OverridePath(hk, variant) + if pathErr != nil { + return pathErr + } if _, statErr := os.Stat(oPath); statErr == nil { return errTrigger.OverrideExists(oPath, hk, variant) diff --git a/internal/cli/message/cmd/list/run.go b/internal/cli/message/cmd/list/run.go index f388830ce..44fa8c4e6 100644 --- a/internal/cli/message/cmd/list/run.go +++ b/internal/cli/message/cmd/list/run.go @@ -16,6 +16,7 @@ import ( cFlag "github.com/ActiveMemory/ctx/internal/config/flag" "github.com/ActiveMemory/ctx/internal/config/token" "github.com/ActiveMemory/ctx/internal/entity" + "github.com/ActiveMemory/ctx/internal/rc" writeMessage "github.com/ActiveMemory/ctx/internal/write/message" ) @@ -27,17 +28,25 @@ import ( // Returns: // - error: Non-nil on JSON encoding failure func Run(cmd *cobra.Command) error { + if _, ctxErr 
:= rc.RequireContextDir(); ctxErr != nil { + cmd.SilenceUsage = true + return ctxErr + } registry := messages.Registry() entries := make([]entity.MessageListEntry, 0, len(registry)) for _, info := range registry { + hasOverride, overrideErr := message.HasOverride(info.Hook, info.Variant) + if overrideErr != nil { + return overrideErr + } entry := entity.MessageListEntry{ Hook: info.Hook, Variant: info.Variant, Category: info.Category, Description: info.Description, TemplateVars: info.TemplateVars, - HasOverride: message.HasOverride(info.Hook, info.Variant), + HasOverride: hasOverride, } if entry.TemplateVars == nil { entry.TemplateVars = []string{} diff --git a/internal/cli/message/cmd/reset/run.go b/internal/cli/message/cmd/reset/run.go index 5df198463..3c1621eab 100644 --- a/internal/cli/message/cmd/reset/run.go +++ b/internal/cli/message/cmd/reset/run.go @@ -17,6 +17,7 @@ import ( "github.com/ActiveMemory/ctx/internal/config/warn" errTrigger "github.com/ActiveMemory/ctx/internal/err/trigger" ctxLog "github.com/ActiveMemory/ctx/internal/log/warn" + "github.com/ActiveMemory/ctx/internal/rc" writeMessage "github.com/ActiveMemory/ctx/internal/write/message" ) @@ -30,12 +31,19 @@ import ( // Returns: // - error: Non-nil if the hook/variant is unknown or removal fails func Run(cmd *cobra.Command, hk, variant string) error { + if _, ctxErr := rc.RequireContextDir(); ctxErr != nil { + cmd.SilenceUsage = true + return ctxErr + } info := messages.Lookup(hk, variant) if info == nil { return errTrigger.Validate(messages.Variants(hk) != nil, hk, variant) } - oPath := message.OverridePath(hk, variant) + oPath, pathErr := message.OverridePath(hk, variant) + if pathErr != nil { + return pathErr + } if removeErr := os.Remove(oPath); removeErr != nil { if os.IsNotExist(removeErr) { diff --git a/internal/cli/message/cmd/show/run.go b/internal/cli/message/cmd/show/run.go index 21c3656ec..41def8d83 100644 --- a/internal/cli/message/cmd/show/run.go +++ 
b/internal/cli/message/cmd/show/run.go @@ -15,6 +15,7 @@ import ( "github.com/ActiveMemory/ctx/internal/config/file" errTrigger "github.com/ActiveMemory/ctx/internal/err/trigger" "github.com/ActiveMemory/ctx/internal/io" + "github.com/ActiveMemory/ctx/internal/rc" writeMessage "github.com/ActiveMemory/ctx/internal/write/message" ) @@ -28,12 +29,19 @@ import ( // Returns: // - error: Non-nil if the hook/variant is unknown or template is missing func Run(cmd *cobra.Command, hk, variant string) error { + if _, ctxErr := rc.RequireContextDir(); ctxErr != nil { + cmd.SilenceUsage = true + return ctxErr + } info := messages.Lookup(hk, variant) if info == nil { return errTrigger.Validate(messages.Variants(hk) != nil, hk, variant) } - oPath := message.OverridePath(hk, variant) + oPath, pathErr := message.OverridePath(hk, variant) + if pathErr != nil { + return pathErr + } if data, readErr := io.SafeReadUserFile(oPath); readErr == nil { writeMessage.SourceOverride(cmd, oPath) writeMessage.TemplateVars(cmd, message.FormatTemplateVars(info)) diff --git a/internal/cli/notify/cmd/setup/run.go b/internal/cli/notify/cmd/setup/run.go index 55a035ad5..f9edff172 100644 --- a/internal/cli/notify/cmd/setup/run.go +++ b/internal/cli/notify/cmd/setup/run.go @@ -17,6 +17,7 @@ import ( "github.com/ActiveMemory/ctx/internal/err/fs" errNotify "github.com/ActiveMemory/ctx/internal/err/notify" iNotify "github.com/ActiveMemory/ctx/internal/notify" + "github.com/ActiveMemory/ctx/internal/rc" "github.com/ActiveMemory/ctx/internal/write/notify" ) @@ -31,6 +32,10 @@ import ( // Returns: // - error: Non-nil on empty input or save failure func Run(cmd *cobra.Command, stdin *os.File) error { + if _, ctxErr := rc.RequireContextDir(); ctxErr != nil { + cmd.SilenceUsage = true + return ctxErr + } notify.SetupPrompt(cmd) scanner := bufio.NewScanner(stdin) diff --git a/internal/cli/notify/cmd/test/run.go b/internal/cli/notify/cmd/test/run.go index 447520e23..b0c678bb5 100644 --- 
a/internal/cli/notify/cmd/test/run.go +++ b/internal/cli/notify/cmd/test/run.go @@ -11,6 +11,7 @@ import ( coreTest "github.com/ActiveMemory/ctx/internal/cli/notify/core/test" "github.com/ActiveMemory/ctx/internal/config/crypto" + "github.com/ActiveMemory/ctx/internal/rc" writeNotify "github.com/ActiveMemory/ctx/internal/write/notify" ) @@ -22,6 +23,10 @@ import ( // Returns: // - error: Non-nil on webhook load or HTTP failure func Run(cmd *cobra.Command) error { + if _, ctxErr := rc.RequireContextDir(); ctxErr != nil { + cmd.SilenceUsage = true + return ctxErr + } r, sendErr := coreTest.Send() if sendErr != nil { return sendErr diff --git a/internal/cli/notify/notify_test.go b/internal/cli/notify/notify_test.go index 0a78924ae..2b35ffa82 100644 --- a/internal/cli/notify/notify_test.go +++ b/internal/cli/notify/notify_test.go @@ -26,12 +26,15 @@ func setupCLITest(t *testing.T) (string, func()) { tempDir := t.TempDir() origDir, _ := os.Getwd() _ = os.Chdir(tempDir) - _ = os.MkdirAll(filepath.Join(tempDir, ".context"), 0o750) + ctxPath := filepath.Join(tempDir, ".context") + _ = os.MkdirAll(ctxPath, 0o750) // Create required files so isInitialized returns true for _, f := range ctx.FilesRequired { - p := filepath.Join(tempDir, ".context", f) + p := filepath.Join(ctxPath, f) _ = os.WriteFile(p, []byte("# "+f+"\n"), 0o600) } + // Declare context dir explicitly (explicit-context-dir model). 
+ t.Setenv("CTX_DIR", ctxPath) rc.Reset() return tempDir, func() { _ = os.Chdir(origDir) diff --git a/internal/cli/pad/cmd/add/run.go b/internal/cli/pad/cmd/add/run.go index 9eb83c806..bab2fc8cf 100644 --- a/internal/cli/pad/cmd/add/run.go +++ b/internal/cli/pad/cmd/add/run.go @@ -12,6 +12,7 @@ import ( coreAdd "github.com/ActiveMemory/ctx/internal/cli/pad/core/add" "github.com/ActiveMemory/ctx/internal/cli/pad/core/parse" "github.com/ActiveMemory/ctx/internal/cli/pad/core/store" + "github.com/ActiveMemory/ctx/internal/rc" writePad "github.com/ActiveMemory/ctx/internal/write/pad" ) @@ -28,6 +29,10 @@ import ( // Returns: // - error: Non-nil on read/write failure or too large func Run(cmd *cobra.Command, text, filePath string) error { + if _, ctxErr := rc.RequireContextDir(); ctxErr != nil { + cmd.SilenceUsage = true + return ctxErr + } var entries []parse.Entry var id int var addErr error diff --git a/internal/cli/pad/cmd/edit/run.go b/internal/cli/pad/cmd/edit/run.go index 81d00c0a1..b465bb3bb 100644 --- a/internal/cli/pad/cmd/edit/run.go +++ b/internal/cli/pad/cmd/edit/run.go @@ -11,6 +11,7 @@ import ( coreEdit "github.com/ActiveMemory/ctx/internal/cli/pad/core/edit" "github.com/ActiveMemory/ctx/internal/cli/pad/core/store" + "github.com/ActiveMemory/ctx/internal/rc" writePad "github.com/ActiveMemory/ctx/internal/write/pad" ) @@ -24,6 +25,10 @@ import ( // - error: Non-nil on invalid index, type mismatch, // or read/write failure func Run(cmd *cobra.Command, opts coreEdit.Opts) error { + if _, ctxErr := rc.RequireContextDir(); ctxErr != nil { + cmd.SilenceUsage = true + return ctxErr + } var entries []string var editErr error diff --git a/internal/cli/pad/cmd/export/run.go b/internal/cli/pad/cmd/export/run.go index 45d345b8b..c3a04aef6 100644 --- a/internal/cli/pad/cmd/export/run.go +++ b/internal/cli/pad/cmd/export/run.go @@ -13,6 +13,7 @@ import ( "github.com/ActiveMemory/ctx/internal/config/fs" errFs "github.com/ActiveMemory/ctx/internal/err/fs" ctxIo 
"github.com/ActiveMemory/ctx/internal/io" + "github.com/ActiveMemory/ctx/internal/rc" writeExport "github.com/ActiveMemory/ctx/internal/write/export" writePad "github.com/ActiveMemory/ctx/internal/write/pad" ) @@ -28,6 +29,10 @@ import ( // Returns: // - error: On directory creation or scratchpad read failure func Run(cmd *cobra.Command, dir string, force, dryRun bool) error { + if _, ctxErr := rc.RequireContextDir(); ctxErr != nil { + cmd.SilenceUsage = true + return ctxErr + } if !dryRun { if mkErr := ctxIo.SafeMkdirAll(dir, fs.PermExec); mkErr != nil { return errFs.Mkdir(dir, mkErr) diff --git a/internal/cli/pad/cmd/merge/run.go b/internal/cli/pad/cmd/merge/run.go index c7aa1cf77..f16d4b2df 100644 --- a/internal/cli/pad/cmd/merge/run.go +++ b/internal/cli/pad/cmd/merge/run.go @@ -13,6 +13,7 @@ import ( "github.com/ActiveMemory/ctx/internal/cli/pad/core/merge" "github.com/ActiveMemory/ctx/internal/cli/pad/core/store" errFs "github.com/ActiveMemory/ctx/internal/err/fs" + "github.com/ActiveMemory/ctx/internal/rc" "github.com/ActiveMemory/ctx/internal/write/pad" ) @@ -33,12 +34,19 @@ func Run( keyFile string, dryRun bool, ) error { + if _, ctxErr := rc.RequireContextDir(); ctxErr != nil { + cmd.SilenceUsage = true + return ctxErr + } current, readErr := store.ReadEntries() if readErr != nil { return readErr } - key := merge.LoadKey(keyFile) + key, keyErr := merge.LoadKey(keyFile) + if keyErr != nil { + return keyErr + } seen := make(map[string]bool, len(current)) for _, e := range current { diff --git a/internal/cli/pad/cmd/mv/run.go b/internal/cli/pad/cmd/mv/run.go index d82660620..cfc4acaba 100644 --- a/internal/cli/pad/cmd/mv/run.go +++ b/internal/cli/pad/cmd/mv/run.go @@ -11,6 +11,7 @@ import ( "github.com/ActiveMemory/ctx/internal/cli/pad/core/store" "github.com/ActiveMemory/ctx/internal/cli/pad/core/validate" + "github.com/ActiveMemory/ctx/internal/rc" "github.com/ActiveMemory/ctx/internal/write/pad" ) @@ -24,6 +25,10 @@ import ( // Returns: // - error: 
Non-nil on invalid index or read/write failure func Run(cmd *cobra.Command, n, m int) error { + if _, ctxErr := rc.RequireContextDir(); ctxErr != nil { + cmd.SilenceUsage = true + return ctxErr + } entries, err := store.ReadEntries() if err != nil { return err diff --git a/internal/cli/pad/cmd/normalize/run.go b/internal/cli/pad/cmd/normalize/run.go index f81d1cd15..897fe1339 100644 --- a/internal/cli/pad/cmd/normalize/run.go +++ b/internal/cli/pad/cmd/normalize/run.go @@ -11,6 +11,7 @@ import ( "github.com/ActiveMemory/ctx/internal/cli/pad/core/parse" "github.com/ActiveMemory/ctx/internal/cli/pad/core/store" + "github.com/ActiveMemory/ctx/internal/rc" writePad "github.com/ActiveMemory/ctx/internal/write/pad" ) @@ -22,6 +23,10 @@ import ( // Returns: // - error: Non-nil on read/write failure func Run(cmd *cobra.Command) error { + if _, ctxErr := rc.RequireContextDir(); ctxErr != nil { + cmd.SilenceUsage = true + return ctxErr + } entries, readErr := store.ReadEntriesWithIDs() if readErr != nil { return readErr diff --git a/internal/cli/pad/cmd/resolve/run.go b/internal/cli/pad/cmd/resolve/run.go index 8dfa3968e..17afb08ec 100644 --- a/internal/cli/pad/cmd/resolve/run.go +++ b/internal/cli/pad/cmd/resolve/run.go @@ -33,13 +33,21 @@ func Run(cmd *cobra.Command) error { return errPad.ResolveNotEncrypted() } - kp := store.KeyPath() + kp, kpErr := store.KeyPath() + if kpErr != nil { + cmd.SilenceUsage = true + return kpErr + } key, loadErr := crypto.LoadKey(kp) if loadErr != nil { return errCrypto.LoadKey(loadErr, kp) } - dir := rc.ContextDir() + dir, dirErr := rc.RequireContextDir() + if dirErr != nil { + cmd.SilenceUsage = true + return dirErr + } ours, errOurs := padCrypto.DecryptFile( key, dir, pad.EncOurs, diff --git a/internal/cli/pad/cmd/rm/run.go b/internal/cli/pad/cmd/rm/run.go index 0fedc9d03..6baf04760 100644 --- a/internal/cli/pad/cmd/rm/run.go +++ b/internal/cli/pad/cmd/rm/run.go @@ -12,6 +12,7 @@ import ( 
"github.com/ActiveMemory/ctx/internal/cli/pad/core/parse" "github.com/ActiveMemory/ctx/internal/cli/pad/core/store" errPad "github.com/ActiveMemory/ctx/internal/err/pad" + "github.com/ActiveMemory/ctx/internal/rc" "github.com/ActiveMemory/ctx/internal/write/pad" ) @@ -26,6 +27,10 @@ import ( // Returns: // - error: Non-nil on invalid ID or read/write failure func Run(cmd *cobra.Command, ids []int) error { + if _, ctxErr := rc.RequireContextDir(); ctxErr != nil { + cmd.SilenceUsage = true + return ctxErr + } entries, readErr := store.ReadEntriesWithIDs() if readErr != nil { return readErr diff --git a/internal/cli/pad/cmd/root/run.go b/internal/cli/pad/cmd/root/run.go index 1f89f84ce..9e51e3fe6 100644 --- a/internal/cli/pad/cmd/root/run.go +++ b/internal/cli/pad/cmd/root/run.go @@ -10,6 +10,7 @@ import ( "github.com/spf13/cobra" "github.com/ActiveMemory/ctx/internal/cli/pad/core/load" + "github.com/ActiveMemory/ctx/internal/rc" ) // Run imports entries into the scratchpad from a file, stdin, or directory. 
@@ -25,6 +26,10 @@ import ( // Returns: // - error: Non-nil on read/write failure func Run(cmd *cobra.Command, path string, blobs bool) error { + if _, ctxErr := rc.RequireContextDir(); ctxErr != nil { + cmd.SilenceUsage = true + return ctxErr + } if blobs { return load.Blobs(cmd, path) } diff --git a/internal/cli/pad/cmd/show/run.go b/internal/cli/pad/cmd/show/run.go index c28535c83..529bd1c42 100644 --- a/internal/cli/pad/cmd/show/run.go +++ b/internal/cli/pad/cmd/show/run.go @@ -16,6 +16,7 @@ import ( errFs "github.com/ActiveMemory/ctx/internal/err/fs" errPad "github.com/ActiveMemory/ctx/internal/err/pad" ctxIo "github.com/ActiveMemory/ctx/internal/io" + "github.com/ActiveMemory/ctx/internal/rc" "github.com/ActiveMemory/ctx/internal/write/pad" ) @@ -29,6 +30,10 @@ import ( // Returns: // - error: Non-nil on invalid ID, read or write failure func Run(cmd *cobra.Command, id int, outPath string) error { + if _, ctxErr := rc.RequireContextDir(); ctxErr != nil { + cmd.SilenceUsage = true + return ctxErr + } entries, readErr := store.ReadEntriesWithIDs() if readErr != nil { return readErr diff --git a/internal/cli/pad/cmd/tag/run.go b/internal/cli/pad/cmd/tag/run.go index da5fe866a..47f903476 100644 --- a/internal/cli/pad/cmd/tag/run.go +++ b/internal/cli/pad/cmd/tag/run.go @@ -14,6 +14,7 @@ import ( "github.com/ActiveMemory/ctx/internal/cli/pad/core/store" "github.com/ActiveMemory/ctx/internal/cli/pad/core/tag" + "github.com/ActiveMemory/ctx/internal/rc" writePad "github.com/ActiveMemory/ctx/internal/write/pad" ) @@ -26,6 +27,10 @@ import ( // Returns: // - error: Non-nil on read failure or JSON marshal error func Run(cmd *cobra.Command, jsonOut bool) error { + if _, ctxErr := rc.RequireContextDir(); ctxErr != nil { + cmd.SilenceUsage = true + return ctxErr + } entries, err := store.ReadEntries() if err != nil { return err diff --git a/internal/cli/pad/core/merge/merge.go b/internal/cli/pad/core/merge/merge.go index ff6bc3136..bbc6097e7 100644 --- 
a/internal/cli/pad/core/merge/merge.go +++ b/internal/cli/pad/core/merge/merge.go @@ -47,22 +47,37 @@ func ReadFileEntries(path string, key []byte) ([]string, error) { // LoadKey loads the encryption key for merge input decryption. // +// When keyFile is empty the project key is used, which requires a +// declared context directory; the resolver failure is propagated so +// a user running `ctx pad merge` without CTX_DIR gets a clear error +// at the source instead of a confusing downstream decryption failure. +// +// A missing key on disk (stat/read failure) is still tolerated +// silently because merge is designed to work on mixed plaintext / +// encrypted inputs. +// // Parameters: // - keyFile: explicit key file path (empty string = use project key). // // Returns: -// - []byte: the loaded key, or nil if no key is available. -func LoadKey(keyFile string) []byte { +// - []byte: the loaded key, or nil if the key file is absent +// - error: propagated when the project key path cannot be +// resolved (e.g. no declared context directory) +func LoadKey(keyFile string) ([]byte, error) { path := keyFile if path == "" { - path = store.KeyPath() + projectKey, kpErr := store.KeyPath() + if kpErr != nil { + return nil, kpErr + } + path = projectKey } key, loadErr := crypto.LoadKey(path) if loadErr != nil { - return nil + return nil, nil } - return key + return key, nil } // BuildBlobLabelMap creates a map of blob labels to their full entry strings. 
diff --git a/internal/cli/pad/core/store/store.go b/internal/cli/pad/core/store/store.go index b0abf32c8..dd62e753a 100644 --- a/internal/cli/pad/core/store/store.go +++ b/internal/cli/pad/core/store/store.go @@ -30,11 +30,16 @@ import ( // // Returns: // - string: Encrypted or plaintext path based on rc.ScratchpadEncrypt() -func ScratchpadPath() string { +// - error: non-nil when the context directory is not declared +func ScratchpadPath() (string, error) { + ctxDir, err := rc.ContextDir() + if err != nil { + return "", err + } if rc.ScratchpadEncrypt() { - return filepath.Join(rc.ContextDir(), pad.Enc) + return filepath.Join(ctxDir, pad.Enc), nil } - return filepath.Join(rc.ContextDir(), pad.Md) + return filepath.Join(ctxDir, pad.Md), nil } // KeyPath returns the full path to the encryption key file. @@ -44,7 +49,9 @@ func ScratchpadPath() string { // // Returns: // - string: Resolved key file path -func KeyPath() string { +// - error: propagated from [rc.KeyPath] when the context +// directory is not declared or otherwise unresolvable +func KeyPath() (string, error) { return rc.KeyPath() } @@ -60,7 +67,10 @@ func KeyPath() string { // Returns: // - error: Non-nil on missing key with existing data, or generation failure func EnsureKey(cmd *cobra.Command) error { - kp := KeyPath() + kp, kpErr := KeyPath() + if kpErr != nil { + return kpErr + } // Key already exists - nothing to do. if _, statErr := os.Stat(kp); statErr == nil { @@ -69,7 +79,11 @@ func EnsureKey(cmd *cobra.Command) error { // Encrypted file already exists without a key - we can't generate a new // one because it wouldn't decrypt the existing data. 
- if _, statErr := os.Stat(ScratchpadPath()); statErr == nil { + padPath, padErr := ScratchpadPath() + if padErr != nil { + return padErr + } + if _, statErr := os.Stat(padPath); statErr == nil { return errCrypto.NoKeyAt(kp) } @@ -152,7 +166,10 @@ func ReadEntriesWithIDs() ([]parse.Entry, error) { func WriteEntriesWithIDs( cmd *cobra.Command, entries []parse.Entry, ) error { - path := ScratchpadPath() + path, pathErr := ScratchpadPath() + if pathErr != nil { + return pathErr + } plaintext := parse.FormatEntriesWithIDs(entries) if !rc.ScratchpadEncrypt() { @@ -163,7 +180,10 @@ func WriteEntriesWithIDs( return ensureErr } - kp := KeyPath() + kp, kpErr := KeyPath() + if kpErr != nil { + return kpErr + } key, loadErr := crypto.LoadKey(kp) if loadErr != nil { return errCrypto.LoadKey(loadErr, kp) diff --git a/internal/cli/pad/core/store/store_raw.go b/internal/cli/pad/core/store/store_raw.go index f33937481..b6a5918b9 100644 --- a/internal/cli/pad/core/store/store_raw.go +++ b/internal/cli/pad/core/store/store_raw.go @@ -24,7 +24,10 @@ import ( // - []byte: Decrypted plaintext, or nil if file missing // - error: Non-nil on key or decryption errors func readRaw() ([]byte, error) { - path := ScratchpadPath() + path, pathErr := ScratchpadPath() + if pathErr != nil { + return nil, pathErr + } dir := filepath.Dir(path) name := filepath.Base(path) @@ -40,7 +43,10 @@ func readRaw() ([]byte, error) { return data, nil } - kp := KeyPath() + kp, kpErr := KeyPath() + if kpErr != nil { + return nil, kpErr + } key, loadErr := crypto.LoadKey(kp) if loadErr != nil { return nil, errCrypto.LoadKey(loadErr, kp) diff --git a/internal/cli/pad/pad_test.go b/internal/cli/pad/pad_test.go index 2c718f9e8..c696f3211 100644 --- a/internal/cli/pad/pad_test.go +++ b/internal/cli/pad/pad_test.go @@ -21,6 +21,7 @@ import ( "github.com/ActiveMemory/ctx/internal/cli/pad/core/store" "github.com/ActiveMemory/ctx/internal/cli/pad/core/validate" "github.com/ActiveMemory/ctx/internal/config/dir" + 
"github.com/ActiveMemory/ctx/internal/config/env" "github.com/ActiveMemory/ctx/internal/config/pad" errPad "github.com/ActiveMemory/ctx/internal/err/pad" "github.com/spf13/cobra" @@ -28,6 +29,7 @@ import ( "github.com/ActiveMemory/ctx/internal/config/fs" "github.com/ActiveMemory/ctx/internal/crypto" "github.com/ActiveMemory/ctx/internal/rc" + "github.com/ActiveMemory/ctx/internal/testutil/testctx" ) // setupEncrypted creates a temp dir with a .context/ @@ -47,8 +49,7 @@ func setupEncrypted(t *testing.T) string { rc.Reset() }) - rc.Reset() - rc.OverrideContextDir(dir.Context) + testctx.Declare(t, tmpDir) ctxDir := filepath.Join(tmpDir, dir.Context) if err := os.MkdirAll(ctxDir, 0750); err != nil { @@ -93,7 +94,7 @@ func setupPlaintext(t *testing.T) string { t.Fatal(err) } - rc.Reset() + testctx.Declare(t, tmpDir) ctxDir := filepath.Join(tmpDir, dir.Context) if err := os.MkdirAll(ctxDir, 0750); err != nil { @@ -500,10 +501,10 @@ func TestNoKey_EncryptedFileExists(t *testing.T) { rc.Reset() }) + ctxDir := filepath.Join(tmpDir, dir.Context) + t.Setenv(env.CtxDir, ctxDir) rc.Reset() - rc.OverrideContextDir(dir.Context) - ctxDir := filepath.Join(tmpDir, dir.Context) if err := os.MkdirAll(ctxDir, 0750); err != nil { t.Fatal(err) } @@ -536,7 +537,10 @@ func TestDecryptionFailure_WrongKey(t *testing.T) { // Replace the key with a different one newKey, _ := crypto.GenerateKey() - kp := rc.KeyPath() + kp, kpErr := rc.KeyPath() + if kpErr != nil { + t.Fatal(kpErr) + } if err := crypto.SaveKey(kp, newKey); err != nil { t.Fatal(err) } @@ -799,7 +803,10 @@ func TestEnsureGitignore_AppendToExisting(t *testing.T) { func TestScratchpadPath_Plaintext(t *testing.T) { setupPlaintext(t) - path := store.ScratchpadPath() + path, err := store.ScratchpadPath() + if err != nil { + t.Fatalf("ScratchpadPath: %v", err) + } if !strings.HasSuffix(path, pad.Md) { t.Errorf("core.ScratchpadPath() = %q, want suffix %q", path, pad.Md) } @@ -808,7 +815,10 @@ func TestScratchpadPath_Plaintext(t 
*testing.T) { func TestScratchpadPath_Encrypted(t *testing.T) { setupEncrypted(t) - path := store.ScratchpadPath() + path, err := store.ScratchpadPath() + if err != nil { + t.Fatalf("ScratchpadPath: %v", err) + } if !strings.HasSuffix(path, pad.Enc) { t.Errorf("core.ScratchpadPath() = %q, want suffix %q", path, pad.Enc) } @@ -817,7 +827,10 @@ func TestScratchpadPath_Encrypted(t *testing.T) { func TestKeyPath(t *testing.T) { setupEncrypted(t) - path := store.KeyPath() + path, err := store.KeyPath() + if err != nil { + t.Fatalf("store.KeyPath() error = %v", err) + } if !strings.HasSuffix(path, ".key") { t.Errorf("core.KeyPath() = %q, want suffix %q", path, ".key") } @@ -848,10 +861,10 @@ func TestEnsureKey_EncFileExistsNoKey(t *testing.T) { rc.Reset() }) + ctxDir := filepath.Join(tmpDir, dir.Context) + t.Setenv(env.CtxDir, ctxDir) rc.Reset() - rc.OverrideContextDir(dir.Context) - ctxDir := filepath.Join(tmpDir, dir.Context) if err := os.MkdirAll(ctxDir, 0750); err != nil { t.Fatal(err) } @@ -883,10 +896,10 @@ func TestEnsureKey_GeneratesNewKey(t *testing.T) { rc.Reset() }) + ctxDir := filepath.Join(tmpDir, dir.Context) + t.Setenv(env.CtxDir, ctxDir) rc.Reset() - rc.OverrideContextDir(dir.Context) - ctxDir := filepath.Join(tmpDir, dir.Context) if err := os.MkdirAll(ctxDir, 0750); err != nil { t.Fatal(err) } @@ -897,7 +910,10 @@ func TestEnsureKey_GeneratesNewKey(t *testing.T) { t.Fatalf("ensureKey error: %v", err) } - kp := rc.KeyPath() + kp, kpErr := rc.KeyPath() + if kpErr != nil { + t.Fatalf("rc.KeyPath() error = %v", kpErr) + } if _, statErr := os.Stat(kp); statErr != nil { t.Errorf("key file should have been created at %s", kp) } @@ -911,7 +927,10 @@ func TestWriteEntries_Plaintext(t *testing.T) { t.Fatalf("writeEntries error: %v", err) } - path := store.ScratchpadPath() + path, pErr := store.ScratchpadPath() + if pErr != nil { + t.Fatal(pErr) + } data, err := os.ReadFile(path) //nolint:gosec // test temp path if err != nil { t.Fatal(err) @@ -924,7 +943,10 @@ 
func TestWriteEntries_Plaintext(t *testing.T) { func TestReadEntries_Plaintext(t *testing.T) { setupPlaintext(t) - path := store.ScratchpadPath() + path, pErr := store.ScratchpadPath() + if pErr != nil { + t.Fatal(pErr) + } if err := os.WriteFile(path, []byte("alpha\nbeta\n"), 0600); err != nil { t.Fatal(err) } @@ -978,7 +1000,11 @@ func TestResolve_WithConflictFiles(t *testing.T) { setupEncrypted(t) // Load the key - key, err := crypto.LoadKey(rc.KeyPath()) + kp, kpErr := rc.KeyPath() + if kpErr != nil { + t.Fatal(kpErr) + } + key, err := crypto.LoadKey(kp) if err != nil { t.Fatal(err) } @@ -1028,7 +1054,11 @@ func TestResolve_WithConflictFiles(t *testing.T) { func TestResolve_OnlyOursFile(t *testing.T) { setupEncrypted(t) - key, err := crypto.LoadKey(rc.KeyPath()) + kp, kpErr := rc.KeyPath() + if kpErr != nil { + t.Fatal(kpErr) + } + key, err := crypto.LoadKey(kp) if err != nil { t.Fatal(err) } @@ -2610,7 +2640,11 @@ func TestMerge_EncryptedInput(t *testing.T) { } // Create encrypted file using the same project key. - key, loadErr := crypto.LoadKey(rc.KeyPath()) + kp, kpErr := rc.KeyPath() + if kpErr != nil { + t.Fatal(kpErr) + } + key, loadErr := crypto.LoadKey(kp) if loadErr != nil { t.Fatal(loadErr) } @@ -2654,7 +2688,11 @@ func TestMerge_PlaintextFallback(t *testing.T) { func TestMerge_MixedEncPlain(t *testing.T) { tmpDir := setupEncrypted(t) - key, loadErr := crypto.LoadKey(rc.KeyPath()) + kp, kpErr := rc.KeyPath() + if kpErr != nil { + t.Fatal(kpErr) + } + key, loadErr := crypto.LoadKey(kp) if loadErr != nil { t.Fatal(loadErr) } @@ -2943,7 +2981,11 @@ func TestMerge_EncryptedWithBlobDedup(t *testing.T) { } // Get the project key. 
- key, loadErr := crypto.LoadKey(rc.KeyPath()) + kp, kpErr := rc.KeyPath() + if kpErr != nil { + t.Fatal(kpErr) + } + key, loadErr := crypto.LoadKey(kp) if loadErr != nil { t.Fatal(loadErr) } diff --git a/internal/cli/pause/cmd/root/run.go b/internal/cli/pause/cmd/root/run.go index 87ded9446..dd55c827e 100644 --- a/internal/cli/pause/cmd/root/run.go +++ b/internal/cli/pause/cmd/root/run.go @@ -28,7 +28,9 @@ func Run(cmd *cobra.Command, sessionID string) error { if sessionID == "" { sessionID = coreSession.ReadID(os.Stdin) } - nudge.Pause(sessionID) + if pauseErr := nudge.Pause(sessionID); pauseErr != nil { + return pauseErr + } session.Paused(cmd, sessionID) return nil } diff --git a/internal/cli/pause/pause_test.go b/internal/cli/pause/pause_test.go index 646c3239d..a3f8c24bf 100644 --- a/internal/cli/pause/pause_test.go +++ b/internal/cli/pause/pause_test.go @@ -19,14 +19,17 @@ import ( func setupStateDir(t *testing.T) string { t.Helper() - tmpDir := t.TempDir() - t.Setenv("CTX_DIR", tmpDir) + ctxDir := filepath.Join(t.TempDir(), dir.Context) + if mkErr := os.MkdirAll(ctxDir, 0o750); mkErr != nil { + t.Fatal(mkErr) + } + t.Setenv("CTX_DIR", ctxDir) rc.Reset() - stateDir := filepath.Join(tmpDir, dir.State) + stateDir := filepath.Join(ctxDir, dir.State) if mkErr := os.MkdirAll(stateDir, 0o750); mkErr != nil { t.Fatal(mkErr) } - return tmpDir + return ctxDir } func TestCmd_WithSessionIDFlag(t *testing.T) { diff --git a/internal/cli/prune/run.go b/internal/cli/prune/run.go index cad3edaba..5ec22114f 100644 --- a/internal/cli/prune/run.go +++ b/internal/cli/prune/run.go @@ -35,7 +35,10 @@ import ( // Returns: // - error: Non-nil on state directory read failure func Run(cmd *cobra.Command, days int, dryRun bool) error { - dir := state.Dir() + dir, dirErr := state.Dir() + if dirErr != nil { + return dirErr + } entries, readErr := os.ReadDir(dir) if readErr != nil { diff --git a/internal/cli/reindex/cmd/root/run.go b/internal/cli/reindex/cmd/root/run.go index 
80e3987e5..fbdef7c3d 100644 --- a/internal/cli/reindex/cmd/root/run.go +++ b/internal/cli/reindex/cmd/root/run.go @@ -27,7 +27,11 @@ import ( // - error: Non-nil if either file read/write fails func Run(cmd *cobra.Command, _ []string) error { w := cmd.OutOrStdout() - ctxDir := rc.ContextDir() + ctxDir, err := rc.RequireContextDir() + if err != nil { + cmd.SilenceUsage = true + return err + } decisionsPath := filepath.Join(ctxDir, ctx.Decision) decisionsErr := index.Reindex( diff --git a/internal/cli/reindex/reindex_test.go b/internal/cli/reindex/reindex_test.go index 8c5e1a5d2..55d2ebd81 100644 --- a/internal/cli/reindex/reindex_test.go +++ b/internal/cli/reindex/reindex_test.go @@ -14,6 +14,7 @@ import ( "github.com/ActiveMemory/ctx/internal/config/ctx" "github.com/ActiveMemory/ctx/internal/config/dir" "github.com/ActiveMemory/ctx/internal/rc" + "github.com/ActiveMemory/ctx/internal/testutil/testctx" ) func TestCmd(t *testing.T) { @@ -46,7 +47,7 @@ func TestRunReindex_NoFiles(t *testing.T) { _ = os.Chdir(tempDir) defer func() { _ = os.Chdir(origDir) }() - rc.Reset() + testctx.Declare(t, tempDir) defer rc.Reset() cmd := Cmd() @@ -63,7 +64,7 @@ func TestRunReindex_BothFiles(t *testing.T) { _ = os.Chdir(tempDir) defer func() { _ = os.Chdir(origDir) }() - rc.Reset() + testctx.Declare(t, tempDir) defer rc.Reset() ctxDir := filepath.Join(tempDir, dir.Context) @@ -128,7 +129,7 @@ func TestRunReindex_DecisionsMissingLearningsPresent(t *testing.T) { _ = os.Chdir(tempDir) defer func() { _ = os.Chdir(origDir) }() - rc.Reset() + testctx.Declare(t, tempDir) defer rc.Reset() ctxDir := filepath.Join(tempDir, dir.Context) @@ -154,7 +155,7 @@ func TestRunReindex_EmptyFiles(t *testing.T) { _ = os.Chdir(tempDir) defer func() { _ = os.Chdir(origDir) }() - rc.Reset() + testctx.Declare(t, tempDir) defer rc.Reset() ctxDir := filepath.Join(tempDir, dir.Context) diff --git a/internal/cli/remind/core/store/store.go b/internal/cli/remind/core/store/store.go index 2d6dad131..d1cf2bfc8 100644 
--- a/internal/cli/remind/core/store/store.go +++ b/internal/cli/remind/core/store/store.go @@ -26,7 +26,11 @@ import ( // - []Reminder: The parsed reminders (nil when file absent) // - error: Non-nil on read or parse failure func Read() ([]Reminder, error) { - data, readErr := io.SafeReadUserFile(Path()) + path, pathErr := Path() + if pathErr != nil { + return nil, pathErr + } + data, readErr := io.SafeReadUserFile(path) if readErr != nil { if errors.Is(readErr, os.ErrNotExist) { return nil, nil @@ -56,7 +60,11 @@ func Write(reminders []Reminder) error { if marshalErr != nil { return marshalErr } - return io.SafeWriteFile(Path(), data, fs.PermFile) + path, pathErr := Path() + if pathErr != nil { + return pathErr + } + return io.SafeWriteFile(path, data, fs.PermFile) } // NextID returns the next available reminder ID @@ -81,6 +89,11 @@ func NextID(reminders []Reminder) int { // // Returns: // - string: Absolute path to reminders.json -func Path() string { - return filepath.Join(rc.ContextDir(), reminder.File) +// - error: non-nil when the context directory is not declared +func Path() (string, error) { + ctxDir, err := rc.ContextDir() + if err != nil { + return "", err + } + return filepath.Join(ctxDir, reminder.File), nil } diff --git a/internal/cli/remind/remind_test.go b/internal/cli/remind/remind_test.go index dfe05799f..f11bb111b 100644 --- a/internal/cli/remind/remind_test.go +++ b/internal/cli/remind/remind_test.go @@ -15,13 +15,15 @@ import ( "testing" "github.com/ActiveMemory/ctx/internal/config/dir" + "github.com/ActiveMemory/ctx/internal/config/env" "github.com/spf13/cobra" "github.com/ActiveMemory/ctx/internal/cli/remind/core/store" "github.com/ActiveMemory/ctx/internal/rc" ) -// setup creates a temp dir with a .context/ directory and sets the RC override. +// setup creates a temp dir with a .context/ directory and declares +// CTX_DIR for the duration of the test. 
func setup(t *testing.T) string { t.Helper() tmpDir := t.TempDir() @@ -34,13 +36,12 @@ func setup(t *testing.T) string { rc.Reset() }) - rc.Reset() - rc.OverrideContextDir(dir.Context) - ctxDir := filepath.Join(tmpDir, dir.Context) if err := os.MkdirAll(ctxDir, 0750); err != nil { t.Fatal(err) } + t.Setenv(env.CtxDir, ctxDir) + rc.Reset() return tmpDir } @@ -73,7 +74,11 @@ func TestAdd_Basic(t *testing.T) { } // Verify JSON file content. - data, readErr := os.ReadFile(store.Path()) + path, pathErr := store.Path() + if pathErr != nil { + t.Fatalf("resolve reminders path: %v", pathErr) + } + data, readErr := os.ReadFile(path) if readErr != nil { t.Fatalf("read reminders file: %v", readErr) } diff --git a/internal/cli/resume/cmd/root/run.go b/internal/cli/resume/cmd/root/run.go index 5ed5fffe3..09bc04b50 100644 --- a/internal/cli/resume/cmd/root/run.go +++ b/internal/cli/resume/cmd/root/run.go @@ -28,7 +28,9 @@ func Run(cmd *cobra.Command, sessionID string) error { if sessionID == "" { sessionID = coreSession.ReadID(os.Stdin) } - nudge.Resume(sessionID) + if resumeErr := nudge.Resume(sessionID); resumeErr != nil { + return resumeErr + } session.Resumed(cmd, sessionID) return nil } diff --git a/internal/cli/resume/resume_test.go b/internal/cli/resume/resume_test.go index c668916ea..21f2faac6 100644 --- a/internal/cli/resume/resume_test.go +++ b/internal/cli/resume/resume_test.go @@ -20,14 +20,17 @@ import ( func setupStateDir(t *testing.T) string { t.Helper() - tmpDir := t.TempDir() - t.Setenv("CTX_DIR", tmpDir) + ctxDir := filepath.Join(t.TempDir(), dir.Context) + if mkErr := os.MkdirAll(ctxDir, 0o750); mkErr != nil { + t.Fatal(mkErr) + } + t.Setenv("CTX_DIR", ctxDir) rc.Reset() - stateDir := filepath.Join(tmpDir, dir.State) + stateDir := filepath.Join(ctxDir, dir.State) if mkErr := os.MkdirAll(stateDir, 0o750); mkErr != nil { t.Fatal(mkErr) } - return tmpDir + return ctxDir } func TestCmd_WithSessionIDFlag(t *testing.T) { @@ -54,7 +57,9 @@ func 
TestCmd_PauseResume_Roundtrip(t *testing.T) { sessionID := "test-roundtrip" // Pause first - creates the marker file. - nudge.Pause(sessionID) + if pauseErr := nudge.Pause(sessionID); pauseErr != nil { + t.Fatalf("nudge.Pause() error = %v", pauseErr) + } markerPath := filepath.Join(tmpDir, dir.State, "ctx-paused-"+sessionID) if _, statErr := os.Stat(markerPath); statErr != nil { diff --git a/internal/cli/serve/cmd/root/run.go b/internal/cli/serve/cmd/root/run.go index 3c7e637cb..74ec0c7fa 100644 --- a/internal/cli/serve/cmd/root/run.go +++ b/internal/cli/serve/cmd/root/run.go @@ -32,7 +32,11 @@ func Run(args []string) error { if len(args) > 0 { d = args[0] } else { - d = filepath.Join(rc.ContextDir(), dir.JournalSite) + ctxDir, ctxErr := rc.RequireContextDir() + if ctxErr != nil { + return ctxErr + } + d = filepath.Join(ctxDir, dir.JournalSite) } // Verify directory exists diff --git a/internal/cli/serve/serve_test.go b/internal/cli/serve/serve_test.go index c33a2764f..a3952dfae 100644 --- a/internal/cli/serve/serve_test.go +++ b/internal/cli/serve/serve_test.go @@ -16,6 +16,7 @@ import ( "github.com/ActiveMemory/ctx/internal/config/zensical" "github.com/ActiveMemory/ctx/internal/err/fs" errSite "github.com/ActiveMemory/ctx/internal/err/site" + "github.com/ActiveMemory/ctx/internal/rc" ) func TestCmd(t *testing.T) { @@ -129,17 +130,20 @@ func TestRunServe_ZensicalNotFound(t *testing.T) { func TestRunServe_DefaultDir(t *testing.T) { // When no args are given, serveRoot.Run uses the default - // journal-site directory under the resolved context dir. + // journal-site directory under the declared context dir. // - // Chdir to a clean tempdir so rc.ContextDir()'s upward walk falls - // back to /.context (which does not exist), ensuring the - // default resolves to a nonexistent path and the run errors with - // "directory not found" rather than accidentally hitting a real - // context dir anchored by a parent repository. 
+ // Declare CTX_DIR to point at a nonexistent .context/ so the + // default journal-site path resolves to something that does + // not exist on disk and the run errors with "directory not + // found" rather than the "context directory not declared" + // message. tempDir := t.TempDir() origDir, _ := os.Getwd() _ = os.Chdir(tempDir) defer func() { _ = os.Chdir(origDir) }() + t.Setenv("CTX_DIR", filepath.Join(tempDir, ".context")) + rc.Reset() + t.Cleanup(rc.Reset) err := serveRoot.Run([]string{}) if err == nil { diff --git a/internal/cli/skill/cmd/install/cmd.go b/internal/cli/skill/cmd/install/cmd.go index 4b69902b8..c6d89a427 100644 --- a/internal/cli/skill/cmd/install/cmd.go +++ b/internal/cli/skill/cmd/install/cmd.go @@ -47,7 +47,12 @@ func Cmd() *cobra.Command { // Returns: // - error: nil on success, or a skill installation error func Run(c *cobra.Command, source string) error { - skillsDir := filepath.Join(rc.ContextDir(), dir.Skills) + ctxDir, ctxErr := rc.RequireContextDir() + if ctxErr != nil { + c.SilenceUsage = true + return ctxErr + } + skillsDir := filepath.Join(ctxDir, dir.Skills) sk, err := skill.Install(source, skillsDir) if err != nil { diff --git a/internal/cli/skill/cmd/list/cmd.go b/internal/cli/skill/cmd/list/cmd.go index f4f204677..2fb6f0244 100644 --- a/internal/cli/skill/cmd/list/cmd.go +++ b/internal/cli/skill/cmd/list/cmd.go @@ -45,7 +45,12 @@ func Cmd() *cobra.Command { // Returns: // - error: nil on success, or a skill loading error func Run(c *cobra.Command) error { - skillsDir := filepath.Join(rc.ContextDir(), dir.Skills) + ctxDir, ctxErr := rc.RequireContextDir() + if ctxErr != nil { + c.SilenceUsage = true + return ctxErr + } + skillsDir := filepath.Join(ctxDir, dir.Skills) skills, err := skill.LoadAll(skillsDir) if err != nil { diff --git a/internal/cli/skill/cmd/remove/cmd.go b/internal/cli/skill/cmd/remove/cmd.go index ddd94c4af..d7d5a649d 100644 --- a/internal/cli/skill/cmd/remove/cmd.go +++ b/internal/cli/skill/cmd/remove/cmd.go 
@@ -47,7 +47,12 @@ func Cmd() *cobra.Command { // Returns: // - error: nil on success, or a skill removal error func Run(c *cobra.Command, name string) error { - skillsDir := filepath.Join(rc.ContextDir(), dir.Skills) + ctxDir, ctxErr := rc.RequireContextDir() + if ctxErr != nil { + c.SilenceUsage = true + return ctxErr + } + skillsDir := filepath.Join(ctxDir, dir.Skills) if err := skill.Remove(skillsDir, name); err != nil { return err diff --git a/internal/cli/status/status_test.go b/internal/cli/status/status_test.go index c37ac0ae3..36d8adf7b 100644 --- a/internal/cli/status/status_test.go +++ b/internal/cli/status/status_test.go @@ -11,6 +11,7 @@ import ( "testing" "github.com/ActiveMemory/ctx/internal/cli/initialize" + "github.com/ActiveMemory/ctx/internal/testutil/testctx" ) // TestStatusCommand tests the status command. @@ -27,6 +28,8 @@ func TestStatusCommand(t *testing.T) { } defer func() { _ = os.Chdir(origDir) }() + testctx.Declare(t, tmpDir) + // First init initCmd := initialize.Cmd() initCmd.SetArgs([]string{}) @@ -57,6 +60,8 @@ func TestStatusJSONOutput(t *testing.T) { } defer func() { _ = os.Chdir(origDir) }() + testctx.Declare(t, tmpDir) + // First init initCmd := initialize.Cmd() initCmd.SetArgs([]string{}) diff --git a/internal/cli/steering/cmd/add/cmd.go b/internal/cli/steering/cmd/add/cmd.go index 737616753..e30a72f25 100644 --- a/internal/cli/steering/cmd/add/cmd.go +++ b/internal/cli/steering/cmd/add/cmd.go @@ -52,7 +52,11 @@ func Cmd() *cobra.Command { // Returns: // - error: nil on success, or a file creation error func Run(c *cobra.Command, name string) error { - contextDir := rc.ContextDir() + contextDir, err := rc.RequireContextDir() + if err != nil { + c.SilenceUsage = true + return err + } // Check that .context/ directory exists. 
if _, statErr := ctxIo.SafeStat(contextDir); os.IsNotExist(statErr) { diff --git a/internal/cli/steering/cmd/initcmd/cmd.go b/internal/cli/steering/cmd/initcmd/cmd.go index de70a29bd..d70227652 100644 --- a/internal/cli/steering/cmd/initcmd/cmd.go +++ b/internal/cli/steering/cmd/initcmd/cmd.go @@ -13,6 +13,7 @@ import ( "github.com/spf13/cobra" "github.com/ActiveMemory/ctx/internal/assets/read/desc" + "github.com/ActiveMemory/ctx/internal/config/dir" "github.com/ActiveMemory/ctx/internal/config/embed/cmd" "github.com/ActiveMemory/ctx/internal/config/file" "github.com/ActiveMemory/ctx/internal/config/fs" @@ -52,8 +53,26 @@ func Cmd() *cobra.Command { // Returns: // - error: nil on success, or if the context directory is missing func Run(c *cobra.Command) error { - contextDir := rc.ContextDir() + contextDir, err := rc.RequireContextDir() + if err != nil { + c.SilenceUsage = true + return err + } + return RunWithDir(c, contextDir) +} +// RunWithDir is the implementation of Run that accepts an explicit +// context directory. Used by `ctx init`, which has just created the +// directory and needs to scaffold foundation steering files without +// requiring the user to have declared CTX_DIR first. +// +// Parameters: +// - c: The cobra command for output +// - contextDir: absolute path to the .context/ directory +// +// Returns: +// - error: nil on success, or a file creation error +func RunWithDir(c *cobra.Command, contextDir string) error { // Check that .context/ directory exists. if _, statErr := ctxIo.SafeStat( contextDir, @@ -61,7 +80,7 @@ func Run(c *cobra.Command) error { return errSteering.ContextDirMissing() } - steeringDir := rc.SteeringDir() + steeringDir := filepath.Join(contextDir, dir.Steering) // Ensure the steering directory exists. 
if mkdirErr := ctxIo.SafeMkdirAll( diff --git a/internal/cli/sync/cmd/root/run.go b/internal/cli/sync/cmd/root/run.go index 0d01ffb7d..f8441cc6f 100644 --- a/internal/cli/sync/cmd/root/run.go +++ b/internal/cli/sync/cmd/root/run.go @@ -15,6 +15,7 @@ import ( "github.com/ActiveMemory/ctx/internal/context/load" errCtx "github.com/ActiveMemory/ctx/internal/err/context" errInit "github.com/ActiveMemory/ctx/internal/err/initialize" + "github.com/ActiveMemory/ctx/internal/rc" "github.com/ActiveMemory/ctx/internal/write/sync" ) @@ -31,6 +32,10 @@ import ( // Returns: // - error: Non-nil if context loading fails or .context/ is not found func Run(cmd *cobra.Command, dryRun bool) error { + if _, ctxErr := rc.RequireContextDir(); ctxErr != nil { + cmd.SilenceUsage = true + return ctxErr + } ctx, err := load.Do("") if err != nil { if _, ok := errors.AsType[*errCtx.NotFoundError](err); ok { @@ -39,7 +44,10 @@ func Run(cmd *cobra.Command, dryRun bool) error { return err } - actions := action.Detect(ctx) + actions, detectErr := action.Detect(ctx) + if detectErr != nil { + return detectErr + } if len(actions) == 0 { sync.AllClear(cmd) diff --git a/internal/cli/sync/core/action/action.go b/internal/cli/sync/core/action/action.go index 679069df3..7715b22d6 100644 --- a/internal/cli/sync/core/action/action.go +++ b/internal/cli/sync/core/action/action.go @@ -24,11 +24,18 @@ import ( // // Returns: // - []Action: List of suggested actions to reconcile context with codebase -func Detect(ctx *entity.Context) []validate.Action { +// - error: non-nil when a check cannot confirm its answer (e.g. the +// project root directory cannot be read); callers surface this +// rather than printing a confident empty suggestion list +func Detect(ctx *entity.Context) ([]validate.Action, error) { var actions []validate.Action // Check for new top-level directories not mentioned in ARCHITECTURE.md - actions = append(actions, validate.CheckNewDirectories(ctx)...) 
+ newDirs, newDirsErr := validate.CheckNewDirectories(ctx) + if newDirsErr != nil { + return nil, newDirsErr + } + actions = append(actions, newDirs...) // Check for package manager files actions = append(actions, validate.CheckPackageFiles(ctx)...) @@ -36,5 +43,5 @@ func Detect(ctx *entity.Context) []validate.Action { // Check for common config files that might need documenting actions = append(actions, validate.CheckConfigFiles(ctx)...) - return actions + return actions, nil } diff --git a/internal/cli/sync/core/core_test.go b/internal/cli/sync/core/core_test.go index 5d15c55da..fd8aab59a 100644 --- a/internal/cli/sync/core/core_test.go +++ b/internal/cli/sync/core/core_test.go @@ -18,6 +18,7 @@ import ( "github.com/ActiveMemory/ctx/internal/config/ctx" "github.com/ActiveMemory/ctx/internal/config/dir" "github.com/ActiveMemory/ctx/internal/context/load" + "github.com/ActiveMemory/ctx/internal/testutil/testctx" ) // setupSyncDir creates a temp dir, initializes context, and returns cleanup. 
@@ -30,6 +31,8 @@ func setupSyncDir(t *testing.T) string { } t.Cleanup(func() { _ = os.Chdir(origDir) }) + testctx.Declare(t, tmpDir) + initCmd := initialize.Cmd() initCmd.SetArgs([]string{}) if err := initCmd.Execute(); err != nil { @@ -47,7 +50,10 @@ func TestDetectSyncActions_NoActions(t *testing.T) { } _ = tmpDir - actions := action.Detect(ctx) + actions, detectErr := action.Detect(ctx) + if detectErr != nil { + t.Fatalf("Detect() error = %v", detectErr) + } // Just verify it runs without error _ = actions } @@ -67,7 +73,10 @@ func TestCheckNewDirectories_ImportantDirs(t *testing.T) { } } - actions := validate.CheckNewDirectories(ctx) + actions, checkErr := validate.CheckNewDirectories(ctx) + if checkErr != nil { + t.Fatalf("CheckNewDirectories() error = %v", checkErr) + } if len(actions) == 0 { t.Error("expected actions for undocumented directories") } @@ -93,7 +102,10 @@ func TestCheckNewDirectories_SkipsHiddenAndVendor(t *testing.T) { } } - actions := validate.CheckNewDirectories(ctx) + actions, checkErr := validate.CheckNewDirectories(ctx) + if checkErr != nil { + t.Fatalf("CheckNewDirectories() error = %v", checkErr) + } for _, a := range actions { for _, skip := range []string{ ".git", "node_modules", "vendor", "dist", "build", @@ -124,7 +136,10 @@ func TestCheckNewDirectories_DocumentedDirsIgnored(t *testing.T) { t.Fatal(mkErr) } - actions := validate.CheckNewDirectories(ctx) + actions, checkErr := validate.CheckNewDirectories(ctx) + if checkErr != nil { + t.Fatalf("CheckNewDirectories() error = %v", checkErr) + } for _, a := range actions { if strings.Contains(a.Description, "'src/'") { t.Error("documented directory 'src' should not produce an action") @@ -343,7 +358,10 @@ func TestRunSync_ActionWithEmptySuggestion(t *testing.T) { t.Fatal(err) } - actions := action.Detect(ctx) + actions, detectErr := action.Detect(ctx) + if detectErr != nil { + t.Fatalf("Detect() error = %v", detectErr) + } for _, a := range actions { // All actions should have a 
non-empty Description if a.Description == "" { diff --git a/internal/cli/sync/core/validate/validate.go b/internal/cli/sync/core/validate/validate.go index 23e482035..c8386a775 100644 --- a/internal/cli/sync/core/validate/validate.go +++ b/internal/cli/sync/core/validate/validate.go @@ -18,9 +18,8 @@ import ( "github.com/ActiveMemory/ctx/internal/config/embed/text" cfgSync "github.com/ActiveMemory/ctx/internal/config/sync" "github.com/ActiveMemory/ctx/internal/config/token" - "github.com/ActiveMemory/ctx/internal/config/warn" "github.com/ActiveMemory/ctx/internal/entity" - ctxLog "github.com/ActiveMemory/ctx/internal/log/warn" + "github.com/ActiveMemory/ctx/internal/rc" ) // CheckPackageFiles detects package manager files without dependency @@ -127,12 +126,20 @@ func CheckConfigFiles(ctx *entity.Context) []Action { // Skips hidden directories and common non-code directories (node_modules, // vendor, dist, build). // +// Returns (nil, nil) when the context directory is not declared: there is +// no project root to scan, which is an ordinary "nothing to suggest" state +// rather than an error. A resolver failure for any other reason, and a +// directory read failure, are propagated so the caller does not report a +// confident empty suggestion list when we actually failed to look. 
+// // Parameters: // - ctx: Loaded context containing the files // // Returns: // - []Action: Suggested actions for undocumented directories -func CheckNewDirectories(ctx *entity.Context) []Action { +// - error: non-nil on resolver failure (other than not-declared) or +// on a directory read failure at the project root +func CheckNewDirectories(ctx *entity.Context) ([]Action, error) { var actions []Action // Get ARCHITECTURE.md content @@ -141,16 +148,17 @@ func CheckNewDirectories(ctx *entity.Context) []Action { archContent = strings.ToLower(string(f.Content)) } - // Scan top-level directories - cwd, cwdErr := os.Getwd() - if cwdErr != nil { - ctxLog.Warn(warn.Getwd, cwdErr) - return actions + // Scan top-level directories at the project root (parent of the + // declared context directory). Under the explicit-context-dir + // model this is authoritative; CWD may be a subdir. + ctxDir, ctxErr := rc.ContextDir() + if ctxErr != nil { + return nil, ctxErr } - entries, readDirErr := os.ReadDir(cwd) + projectRoot := filepath.Dir(ctxDir) + entries, readDirErr := os.ReadDir(projectRoot) if readDirErr != nil { - ctxLog.Warn(warn.Readdir, cwd, readDirErr) - return actions + return nil, readDirErr } for _, entry := range entries { @@ -178,5 +186,5 @@ func CheckNewDirectories(ctx *entity.Context) []Action { } } - return actions + return actions, nil } diff --git a/internal/cli/sync/sync_test.go b/internal/cli/sync/sync_test.go index 3fa8523c4..4f81eda94 100644 --- a/internal/cli/sync/sync_test.go +++ b/internal/cli/sync/sync_test.go @@ -14,6 +14,8 @@ import ( "testing" "github.com/ActiveMemory/ctx/internal/cli/initialize" + "github.com/ActiveMemory/ctx/internal/config/env" + "github.com/ActiveMemory/ctx/internal/testutil/testctx" ) // runSyncCmd executes a sync command and captures output. 
@@ -37,6 +39,8 @@ func setupSyncDir(t *testing.T) string { } t.Cleanup(func() { _ = os.Chdir(origDir) }) + testctx.Declare(t, tmpDir) + initCmd := initialize.Cmd() initCmd.SetArgs([]string{}) if err := initCmd.Execute(); err != nil { @@ -59,6 +63,8 @@ func TestSyncCommand(t *testing.T) { } defer func() { _ = os.Chdir(origDir) }() + testctx.Declare(t, tmpDir) + // First init initCmd := initialize.Cmd() initCmd.SetArgs([]string{}) @@ -81,13 +87,16 @@ func TestSyncCommand_NoContext(t *testing.T) { t.Fatal(err) } t.Cleanup(func() { _ = os.Chdir(origDir) }) + t.Setenv(env.CtxDir, "") _, err := runSyncCmd() if err == nil { t.Fatal("expected error when no .context/ exists") } - if !strings.Contains(err.Error(), "ctx init") { - t.Errorf("error = %q, want 'ctx init' suggestion", err.Error()) + // Under the explicit-context-dir model, the error reports that + // no context directory has been declared. + if !strings.Contains(err.Error(), "context directory") { + t.Errorf("error = %q, want context directory mention", err.Error()) } } diff --git a/internal/cli/system/README.md b/internal/cli/system/README.md index 1f27f752b..8f06dd510 100644 --- a/internal/cli/system/README.md +++ b/internal/cli/system/README.md @@ -1,4 +1,4 @@ -# internal/cli/system — Hook Plumbing +# internal/cli/system: Hook Plumbing The `ctx system` command hosts 7 visible subcommands and 26 hidden subcommands that implement Claude Code hook logic. See `doc.go` @@ -14,7 +14,7 @@ All hook subcommands follow the same contract: 3. The subcommand reads JSON from stdin (2-second timeout) 4. Runs its check logic 5. Writes advisory output to stdout (or JSON for block commands) -6. Exits 0 — hooks never block initialization +6. Exits 0; hooks never block initialization ### stdin JSON (from Claude Code) @@ -68,7 +68,6 @@ Counter state lives in `.context/state/` via `core/counter/`. 
| Command | Purpose | |---------|---------| -| `backup` | Timestamped tar.gz of context + Claude data | | `bootstrap` | Print context dir path (for agent init) | | `events` | Display event log entries | | `message` | Manage hook message templates (list/show/edit/reset) | @@ -84,7 +83,7 @@ Counter state lives in `.context/state/` via `core/counter/`. | `mark-wrapped-up` | Record wrap-up ceremony timestamp | | `session-event` | Record session start/end lifecycle | -### UserPromptSubmit hooks (hidden, 14 checks) +### UserPromptSubmit hooks (hidden, 13 checks) | Command | Trigger | Throttle | |---------|---------|----------| @@ -99,16 +98,14 @@ Counter state lives in `.context/state/` via `core/counter/`. | `check-memory-drift` | Every prompt | Daily | | `check-reminder` | Every prompt | None (always runs) | | `check-freshness` | Every prompt | Daily | -| `check-backup-age` | Every prompt | Daily | | `check-skill-discovery` | Every prompt | One-shot | | `heartbeat` | Every prompt | None (telemetry) | -### PreToolUse hooks (hidden, 6 matchers) +### PreToolUse hooks (hidden, 5 matchers) | Command | Matches | Action | |---------|---------|--------| | `block-non-path-ctx` | Bash | Block bare `./ctx` invocations | -| `block-dangerous-command` | Bash | Block destructive patterns | | `context-load-gate` | All tools | Inject context with cooldown | | `qa-reminder` | Bash | Lint/test reminder before commits | | `specs-nudge` | EnterPlanMode | Save plans to specs/ | @@ -131,7 +128,6 @@ Counter state lives in `.context/state/` via `core/counter/`. 
| `core/persistence/` | Persistence state tracking | | `core/heartbeat/` | Heartbeat mtime management | | `core/load/` | Context-load-gate state | -| `core/archive/` | Backup to SMB shares | ## Adding a New Hook diff --git a/internal/cli/system/cmd/block_dangerous_command/cmd.go b/internal/cli/system/cmd/block_dangerous_command/cmd.go deleted file mode 100644 index 5eabcdfb7..000000000 --- a/internal/cli/system/cmd/block_dangerous_command/cmd.go +++ /dev/null @@ -1,35 +0,0 @@ -// / ctx: https://ctx.ist -// ,'`./ do you remember? -// `.,'\ -// \ Copyright 2026-present Context contributors. -// SPDX-License-Identifier: Apache-2.0 - -package block_dangerous_command - -import ( - "os" - - "github.com/spf13/cobra" - - "github.com/ActiveMemory/ctx/internal/assets/read/desc" - "github.com/ActiveMemory/ctx/internal/config/embed/cmd" -) - -// Cmd returns the "ctx system block-dangerous-command" subcommand. -// -// Returns: -// - *cobra.Command: Configured block-dangerous-command subcommand -func Cmd() *cobra.Command { - short, long := desc.Command(cmd.DescKeySystemBlockDangerousCommand) - - return &cobra.Command{ - Use: cmd.UseSystemBlockDangerousCommand, - Short: short, - Long: long, - Example: desc.Example(cmd.DescKeySystemBlockDangerousCommand), - Hidden: true, - RunE: func(cmd *cobra.Command, _ []string) error { - return Run(cmd, os.Stdin) - }, - } -} diff --git a/internal/cli/system/cmd/block_dangerous_command/doc.go b/internal/cli/system/cmd/block_dangerous_command/doc.go deleted file mode 100644 index 811a8092c..000000000 --- a/internal/cli/system/cmd/block_dangerous_command/doc.go +++ /dev/null @@ -1,49 +0,0 @@ -// / ctx: https://ctx.ist -// ,'`./ do you remember? -// `.,'\ -// \ Copyright 2026-present Context contributors. -// SPDX-License-Identifier: Apache-2.0 - -// Package block_dangerous_command implements the -// **`ctx system block-dangerous-command`** hidden hook, -// which intercepts shell commands that match dangerous -// patterns before they execute. 
-// -// # What It Does -// -// The hook reads a JSON envelope from stdin containing -// the command the agent is about to run. It tests the -// command against a set of compiled regular expressions -// and, if any match, emits a JSON block response that -// prevents execution. The patterns catch: -// -// - **mid-command sudo**: e.g. "echo foo | sudo rm" -// - **git push**: any direct push attempt -// - **cp/mv to bin**: copying files into system -// binary directories -// - **install to /usr/local/bin**: direct binary -// installation outside package managers -// -// When a command is blocked, a relay notification is -// also sent to the nudge channel so the agent sees the -// reason for the block. -// -// # Input -// -// A JSON hook envelope on stdin with a ToolInput.Command -// field containing the shell command string. -// -// # Output -// -// On match: a JSON [entity.BlockResponse] with decision -// "block" and a human-readable reason. On no match: -// no output (silent pass-through). -// -// # Delegation -// -// [Cmd] builds the hidden cobra command. [Run] reads -// stdin, tests each regex in priority order, loads the -// appropriate message template via [core/message.Load], -// and marshals the block response. Relay notifications -// are sent through [core/nudge.Relay]. -package block_dangerous_command diff --git a/internal/cli/system/cmd/block_dangerous_command/run.go b/internal/cli/system/cmd/block_dangerous_command/run.go deleted file mode 100644 index 87550a081..000000000 --- a/internal/cli/system/cmd/block_dangerous_command/run.go +++ /dev/null @@ -1,95 +0,0 @@ -// / ctx: https://ctx.ist -// ,'`./ do you remember? -// `.,'\ -// \ Copyright 2026-present Context contributors. 
-// SPDX-License-Identifier: Apache-2.0 - -package block_dangerous_command - -import ( - "encoding/json" - "fmt" - "os" - - "github.com/spf13/cobra" - - "github.com/ActiveMemory/ctx/internal/assets/read/desc" - "github.com/ActiveMemory/ctx/internal/cli/system/core/message" - "github.com/ActiveMemory/ctx/internal/cli/system/core/nudge" - coreSession "github.com/ActiveMemory/ctx/internal/cli/system/core/session" - "github.com/ActiveMemory/ctx/internal/config/embed/text" - "github.com/ActiveMemory/ctx/internal/config/hook" - "github.com/ActiveMemory/ctx/internal/config/regex" - "github.com/ActiveMemory/ctx/internal/entity" - "github.com/ActiveMemory/ctx/internal/notify" - writeSetup "github.com/ActiveMemory/ctx/internal/write/setup" -) - -// Run executes the block-dangerous-commands hook logic. -// -// Reads a hook input from stdin, checks the command against dangerous -// patterns (mid-command sudo, git push, cp/mv to bin), and emits a -// block response if matched. -// -// Parameters: -// - cmd: Cobra command for output -// - stdin: standard input for hook JSON -// -// Returns: -// - error: Always nil (hook errors are non-fatal) -func Run(cmd *cobra.Command, stdin *os.File) error { - input := coreSession.ReadInput(stdin) - command := input.ToolInput.Command - - if command == "" { - return nil - } - - var variant, fallback string - - if regex.MidSudo.MatchString(command) { - variant = hook.VariantMidSudo - fallback = desc.Text(text.DescKeyBlockMidSudo) - } - - if variant == "" && regex.GitPush.MatchString(command) { - variant = hook.VariantMidGitPush - fallback = desc.Text(text.DescKeyBlockMidGitPush) - } - - if variant == "" && regex.CpMvToBin.MatchString(command) { - variant = hook.VariantCpToBin - fallback = desc.Text(text.DescKeyBlockCpToBin) - } - - if variant == "" && regex.InstallToLocalBin.MatchString(command) { - variant = hook.VariantInstallToLocalBin - fallback = desc.Text(text.DescKeyBlockInstallToLocalBin) - } - - var reason string - if variant != "" { - 
reason = message.Load( - hook.BlockDangerousCommand, variant, nil, fallback, - ) - } - - if reason != "" { - resp := entity.BlockResponse{ - Decision: hook.DecisionBlock, - Reason: reason, - } - data, _ := json.Marshal(resp) - writeSetup.BlockResponse(cmd, string(data)) - ref := notify.NewTemplateRef(hook.BlockDangerousCommand, variant, nil) - nudge.Relay(fmt.Sprintf( - desc.Text(text.DescKeyRelayPrefixFormat), - hook.BlockDangerousCommand, - reason, - ), - input.SessionID, ref, - ) - } - - return nil -} diff --git a/internal/cli/system/cmd/block_non_path_ctx/run.go b/internal/cli/system/cmd/block_non_path_ctx/run.go index eb7666390..0a2ee699d 100644 --- a/internal/cli/system/cmd/block_non_path_ctx/run.go +++ b/internal/cli/system/cmd/block_non_path_ctx/run.go @@ -82,7 +82,7 @@ func Run(cmd *cobra.Command, stdin *os.File) error { data, _ := json.Marshal(resp) writeSetup.BlockResponse(cmd, string(data)) blockRef := notify.NewTemplateRef(hook.BlockNonPathCtx, variant, nil) - nudge.Relay(fmt.Sprintf(desc.Text(text.DescKeyRelayPrefixFormat), + return nudge.Relay(fmt.Sprintf(desc.Text(text.DescKeyRelayPrefixFormat), hook.BlockNonPathCtx, desc.Text(text.DescKeyBlockNonPathRelayMessage)), input.SessionID, blockRef, ) diff --git a/internal/cli/system/cmd/bootstrap/run.go b/internal/cli/system/cmd/bootstrap/run.go index b7b791913..ebb00ec0a 100644 --- a/internal/cli/system/cmd/bootstrap/run.go +++ b/internal/cli/system/cmd/bootstrap/run.go @@ -17,6 +17,7 @@ import ( "github.com/ActiveMemory/ctx/internal/config/embed/text" cFlag "github.com/ActiveMemory/ctx/internal/config/flag" errBackup "github.com/ActiveMemory/ctx/internal/err/backup" + errCtx "github.com/ActiveMemory/ctx/internal/err/context" "github.com/ActiveMemory/ctx/internal/rc" "github.com/ActiveMemory/ctx/internal/write/bootstrap" ) @@ -24,14 +25,28 @@ import ( // Run executes the bootstrap command, emitting context directory info, // rules, and next steps for the calling agent. 
// +// Resolution under the explicit-context-dir model +// (spec: specs/explicit-context-dir.md): +// +// - When --context-dir or CTX_DIR is declared, bootstrap validates +// the directory exists and then emits its usual report. +// - When neither is declared, bootstrap returns the tailored +// "not declared" error with a candidate-count hint. Bootstrap +// does NOT walk to guess; walk logic lives only in +// `ctx activate`. +// // Parameters: // - cmd: Cobra command providing flags and output streams. // // Returns: -// - error: non-nil if the context directory does not exist or JSON -// encoding fails. +// - error: non-nil if the context directory is not declared, does +// not exist, or JSON encoding fails. func Run(cmd *cobra.Command) error { - dir := rc.ContextDir() + dir, err := rc.ContextDir() + if err != nil { + cwd, _ := os.Getwd() + return errCtx.NotDeclared(rc.ScanCandidates(cwd)) + } if _, statErr := os.Stat(dir); os.IsNotExist(statErr) { return errBackup.ContextDirNotFound(dir) diff --git a/internal/cli/system/cmd/check_backup_age/cmd.go b/internal/cli/system/cmd/check_anchor_drift/cmd.go similarity index 64% rename from internal/cli/system/cmd/check_backup_age/cmd.go rename to internal/cli/system/cmd/check_anchor_drift/cmd.go index b905e2cb5..9b112cf40 100644 --- a/internal/cli/system/cmd/check_backup_age/cmd.go +++ b/internal/cli/system/cmd/check_anchor_drift/cmd.go @@ -4,7 +4,7 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -package check_backup_age +package check_anchor_drift import ( "os" @@ -15,18 +15,18 @@ import ( "github.com/ActiveMemory/ctx/internal/config/embed/cmd" ) -// Cmd returns the "ctx system check-backup-age" subcommand. +// Cmd returns the "ctx system check-anchor-drift" subcommand. 
// // Returns: -// - *cobra.Command: Configured check-backup-age subcommand +// - *cobra.Command: configured check-anchor-drift subcommand func Cmd() *cobra.Command { - short, long := desc.Command(cmd.DescKeySystemCheckBackupAge) + short, long := desc.Command(cmd.DescKeySystemCheckAnchorDrift) return &cobra.Command{ - Use: cmd.UseSystemCheckBackupAge, + Use: cmd.UseSystemCheckAnchorDrift, Short: short, Long: long, - Example: desc.Example(cmd.DescKeySystemCheckBackupAge), + Example: desc.Example(cmd.DescKeySystemCheckAnchorDrift), Hidden: true, RunE: func(cmd *cobra.Command, _ []string) error { return Run(cmd, os.Stdin) diff --git a/internal/cli/system/cmd/check_anchor_drift/doc.go b/internal/cli/system/cmd/check_anchor_drift/doc.go new file mode 100644 index 000000000..822c68149 --- /dev/null +++ b/internal/cli/system/cmd/check_anchor_drift/doc.go @@ -0,0 +1,64 @@ +// / ctx: https://ctx.ist +// ,'`./ do you remember? +// `.,'\ +// \ Copyright 2026-present Context contributors. +// SPDX-License-Identifier: Apache-2.0 + +// Package check_anchor_drift implements the +// **`ctx system check-anchor-drift`** hook (added by +// specs/single-source-context-anchor.md). +// +// The hook fires on every UserPromptSubmit. Each operating +// hook line under [internal/assets/claude/hooks/hooks.json] +// exports +// `CTX_DIR="${CLAUDE_PROJECT_DIR:?…}/.context"` inline, which +// **overwrites** the parent shell's CTX_DIR for the hook +// subprocess. That is correct for operating hooks (they must +// write to the right .context/ regardless of what the user +// shell exported), but useless for any hook whose job is to +// *compare* the inherited CTX_DIR against the Claude-injected +// anchor: the comparison would always be tautologically equal. 
+// +// To break the tautology, this hook's command line in +// hooks.json prepends one extra assignment: +// +// CTX_DIR_INHERITED="${CTX_DIR:-}" \ +// CTX_DIR="${CLAUDE_PROJECT_DIR:?…}/.context" \ +// ctx system check-anchor-drift +// +// Bash evaluates env-var assignments left-to-right *before* +// invoking the command, so CTX_DIR_INHERITED snapshots the +// parent's CTX_DIR (empty if unset) before the standard +// CTX_DIR injection runs. The hook reads both and emits a +// VERBATIM warning banner only when they disagree. +// +// Behavior matrix: +// +// - CTX_DIR_INHERITED empty: silent. The user has not run +// `ctx activate`; there is no shell-level declaration to +// drift from. Operating hooks still work via the standard +// injection on every other hook line. +// - CTX_DIR_INHERITED non-empty and equal to CTX_DIR after +// [filepath.Clean] on both: silent. Correctly anchored. +// - CTX_DIR_INHERITED non-empty and unequal to CTX_DIR: +// emit a warning banner naming both values so the user +// can see which project's .context/ their CLI / +// `!`-pragma calls are writing to vs. which project +// Claude Code is in. +// +// # Public Surface +// +// - **[Cmd]**: cobra command (hidden under +// `ctx system`). +// - **[Run]**: reads [env.CtxDirInherited] and +// [env.CtxDir] directly via [os.Getenv] (NOT through +// [rc.ContextDir]), compares, emits the box if drifted. +// +// # Why bypass rc.ContextDir +// +// `rc.ContextDir` is the operating shape validator and would +// reject inherited values that fail the basename guard. This +// hook is a diagnostic: it must accept any observed value +// (including unset, including non-canonical) so it can +// describe reality, not impose policy. 
+package check_anchor_drift diff --git a/internal/cli/system/cmd/check_anchor_drift/run.go b/internal/cli/system/cmd/check_anchor_drift/run.go new file mode 100644 index 000000000..df9691bba --- /dev/null +++ b/internal/cli/system/cmd/check_anchor_drift/run.go @@ -0,0 +1,73 @@ +// / ctx: https://ctx.ist +// ,'`./ do you remember? +// `.,'\ +// \ Copyright 2026-present Context contributors. +// SPDX-License-Identifier: Apache-2.0 + +package check_anchor_drift + +import ( + "fmt" + "os" + + "github.com/spf13/cobra" + + "github.com/ActiveMemory/ctx/internal/assets/read/desc" + "github.com/ActiveMemory/ctx/internal/cli/system/core/anchor" + "github.com/ActiveMemory/ctx/internal/cli/system/core/nudge" + coreSession "github.com/ActiveMemory/ctx/internal/cli/system/core/session" + "github.com/ActiveMemory/ctx/internal/config/embed/text" + "github.com/ActiveMemory/ctx/internal/config/env" + "github.com/ActiveMemory/ctx/internal/config/hook" +) + +// Run executes the check-anchor-drift hook logic. +// +// Reads the parent-shell CTX_DIR (snapshotted into +// [env.CtxDirInherited] before the standard hook injection) +// and the Claude-injected [env.CtxDir]. Emits a VERBATIM +// warning banner only when both are non-empty and refer to +// genuinely different directories. The banner goes through the +// standard nudge+relay path so the event is recorded in the +// local audit log. +// +// Bypasses [rc.ContextDir]: this is a diagnostic, not an +// operating command. It must accept any observed value +// (including unset, including non-canonical) so it can +// describe reality rather than impose policy. +// +// Symlink-equivalent paths are treated as the same directory +// via [anchor.Equal]. See its package doc for the rationale +// (the canonical case is macOS's `/tmp` → `/private/tmp`). +// +// Parameters: +// - cmd: cobra command for output. Nil is a no-op. +// - stdin: standard input for the hook JSON envelope. +// +// Returns: +// - error: always nil. 
Diagnostics never fail the hook. +func Run(cmd *cobra.Command, stdin *os.File) error { + inherited := os.Getenv(env.CtxDirInherited) + if inherited == "" { + // No shell-level declaration to drift from. + return nil + } + injected := os.Getenv(env.CtxDir) + if anchor.Equal(inherited, injected) { + // Correctly anchored (possibly via symlink-equivalent paths). + return nil + } + + content := fmt.Sprintf( + desc.Text(text.DescKeyCheckAnchorDriftContent), + inherited, injected, + ) + input := coreSession.ReadInput(stdin) + return nudge.Emit(cmd, content, + desc.Text(text.DescKeyCheckAnchorDriftRelayPrefix), + desc.Text(text.DescKeyCheckAnchorDriftBoxTitle), + hook.CheckAnchorDrift, hook.VariantNudge, + desc.Text(text.DescKeyCheckAnchorDriftRelayMessage), + input.SessionID, nil, "", + ) +} diff --git a/internal/cli/system/cmd/check_anchor_drift/run_test.go b/internal/cli/system/cmd/check_anchor_drift/run_test.go new file mode 100644 index 000000000..3e9d960ad --- /dev/null +++ b/internal/cli/system/cmd/check_anchor_drift/run_test.go @@ -0,0 +1,153 @@ +// / ctx: https://ctx.ist +// ,'`./ do you remember? +// `.,'\ +// \ Copyright 2026-present Context contributors. +// SPDX-License-Identifier: Apache-2.0 + +package check_anchor_drift + +import ( + "bytes" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/spf13/cobra" + + "github.com/ActiveMemory/ctx/internal/config/env" +) + +// runDrift invokes Run with both env vars set to the given values +// and returns whatever the hook printed to stdout. +func runDrift(t *testing.T, inherited, injected string) string { + t.Helper() + t.Setenv(env.CtxDirInherited, inherited) + t.Setenv(env.CtxDir, injected) + + // Provide an empty stdin (no JSON envelope). ReadInput + // gracefully returns a zero-value HookInput when the body is + // empty, which the hook treats as "no session ID". 
+ stdinPath := filepath.Join(t.TempDir(), "stdin") + if err := os.WriteFile(stdinPath, []byte("{}"), 0o600); err != nil { + t.Fatalf("write stdin: %v", err) + } + stdin, err := os.Open(stdinPath) + if err != nil { + t.Fatalf("open stdin: %v", err) + } + t.Cleanup(func() { _ = stdin.Close() }) + + c := &cobra.Command{} + var out bytes.Buffer + c.SetOut(&out) + c.SetErr(&out) + if runErr := Run(c, stdin); runErr != nil { + t.Fatalf("Run() err = %v, want nil", runErr) + } + return out.String() +} + +// TestCheckAnchorDrift_Match: inherited and injected values equal +// after filepath.Clean → silent (no banner emitted). +func TestCheckAnchorDrift_Match(t *testing.T) { + out := runDrift(t, "/project-a/.context", "/project-a/.context") + if out != "" { + t.Errorf("hook should be silent on match, got %q", out) + } +} + +// TestCheckAnchorDrift_MatchAfterClean: trailing slash on one +// side normalizes via filepath.Clean and comparison matches. +func TestCheckAnchorDrift_MatchAfterClean(t *testing.T) { + out := runDrift(t, "/project-a/.context/", "/project-a/.context") + if out != "" { + t.Errorf("hook should be silent on match after Clean, got %q", out) + } +} + +// TestCheckAnchorDrift_Mismatch: inherited points at project A, +// injected points at project B → emit banner naming both. 
+func TestCheckAnchorDrift_Mismatch(t *testing.T) { + out := runDrift(t, "/project-a/.context", "/project-b/.context") + if out == "" { + t.Fatal("hook should emit banner on mismatch") + } + if !strings.Contains(out, "/project-a/.context") { + t.Errorf("banner should name inherited path, got %q", out) + } + if !strings.Contains(out, "/project-b/.context") { + t.Errorf("banner should name injected path, got %q", out) + } + if !strings.Contains(out, "Anchor Drift") { + t.Errorf("banner should carry the box title, got %q", out) + } +} + +// TestCheckAnchorDrift_InheritedEmpty: user has not run +// `ctx activate`; no shell-level declaration to drift from → +// silent regardless of injected value. +func TestCheckAnchorDrift_InheritedEmpty(t *testing.T) { + out := runDrift(t, "", "/project-a/.context") + if out != "" { + t.Errorf("hook should be silent when inherited is empty, got %q", out) + } +} + +// TestCheckAnchorDrift_AcceptsNonCanonicalInherited: the hook is a +// diagnostic — it must accept any inherited value (including +// non-canonical) so it can describe reality, not impose policy. +// Verifies the hook bypasses rc.ContextDir's basename guard. +func TestCheckAnchorDrift_AcceptsNonCanonicalInherited(t *testing.T) { + out := runDrift(t, + "/some/random/path", "/project-a/.context", + ) + if out == "" { + t.Fatal("hook should emit banner on mismatch even with non-canonical inherited") + } + if !strings.Contains(out, "/some/random/path") { + t.Errorf("banner should name inherited path verbatim, got %q", out) + } +} + +// TestCheckAnchorDrift_SymlinkEquivalent: paths that differ +// byte-for-byte but resolve to the same directory via a symlink +// (the canonical macOS case: `/tmp` → `/private/tmp`) must NOT +// trip the drift alarm. The smoke-test surfacing this case +// blocked step 9; without symlink resolution the banner fires +// every prompt for any session run from `/tmp/*` on macOS, and +// for any user with a symlinked workspace path elsewhere. 
+func TestCheckAnchorDrift_SymlinkEquivalent(t *testing.T) { + tempDir := t.TempDir() + target := filepath.Join(tempDir, "target", ".context") + if err := os.MkdirAll(target, 0o700); err != nil { + t.Fatalf("mkdir target: %v", err) + } + link := filepath.Join(tempDir, "link") + if err := os.Symlink(filepath.Join(tempDir, "target"), link); err != nil { + t.Skipf("symlink unsupported: %v", err) + } + linkedContext := filepath.Join(link, ".context") + + // `target` and `linkedContext` differ as strings but resolve + // to the same directory. Hook should be silent. + out := runDrift(t, linkedContext, target) + if out != "" { + t.Errorf("hook should be silent for symlink-equivalent paths, got %q", out) + } +} + +// TestCheckAnchorDrift_SymlinkResolutionFails_FallsBackToString: +// when the inherited path can't be resolved (e.g. it points at a +// deleted directory), genuine drift should still fire. Defends +// against an over-eager symlink fix that would silently swallow +// real misalignment. +func TestCheckAnchorDrift_SymlinkResolutionFails_FallsBackToString(t *testing.T) { + out := runDrift(t, + "/definitely/does/not/exist/.context", + "/project-b/.context", + ) + if out == "" { + t.Fatal("hook should still fire when inherited resolution fails") + } +} diff --git a/internal/cli/system/cmd/check_anchor_drift/testmain_test.go b/internal/cli/system/cmd/check_anchor_drift/testmain_test.go new file mode 100644 index 000000000..7501d6560 --- /dev/null +++ b/internal/cli/system/cmd/check_anchor_drift/testmain_test.go @@ -0,0 +1,22 @@ +// / ctx: https://ctx.ist +// ,'`./ do you remember? +// `.,'\ +// \ Copyright 2026-present Context contributors. +// SPDX-License-Identifier: Apache-2.0 + +package check_anchor_drift + +import ( + "os" + "testing" + + "github.com/ActiveMemory/ctx/internal/assets/read/lookup" +) + +// TestMain initializes the embedded text-asset lookup so the hook's +// box title and content templates resolve instead of returning +// empty strings. 
+func TestMain(m *testing.M) { + lookup.Init() + os.Exit(m.Run()) +} diff --git a/internal/cli/system/cmd/check_backup_age/doc.go b/internal/cli/system/cmd/check_backup_age/doc.go deleted file mode 100644 index 94d72b995..000000000 --- a/internal/cli/system/cmd/check_backup_age/doc.go +++ /dev/null @@ -1,49 +0,0 @@ -// / ctx: https://ctx.ist -// ,'`./ do you remember? -// `.,'\ -// \ Copyright 2026-present Context contributors. -// SPDX-License-Identifier: Apache-2.0 - -// Package check_backup_age implements the -// **`ctx system check-backup-age`** hidden hook, which -// warns when project backups are stale or the backup -// share is unreachable. -// -// # What It Does -// -// The hook runs two checks at session start: -// -// 1. **SMB mount check**: if the CTX_BACKUP_SMB_URL -// environment variable is set, verifies that the -// SMB share is currently mounted. If not, adds a -// warning. -// 2. **Backup marker freshness**: reads the backup -// marker file (~/.ctx-backup/last-backup) and warns -// if the last backup timestamp exceeds the maximum -// age threshold. -// -// When either check produces warnings, the hook emits -// a nudge box via the relay channel and touches a -// daily throttle file to avoid repeated alerts. -// -// # Input -// -// A JSON hook envelope on stdin with session metadata. -// -// # Output -// -// On warning: a formatted nudge box listing the backup -// issues. On success or throttled: no output. -// -// # Throttling -// -// The hook is throttled to fire at most once per day -// using a marker file in the state directory. -// -// # Delegation -// -// [Cmd] builds the hidden cobra command. [Run] reads -// stdin via [core/check.Preamble], delegates the SMB -// and marker checks to [core/archive], and emits -// warnings through [core/nudge.LoadAndEmit]. 
-package check_backup_age diff --git a/internal/cli/system/cmd/check_backup_age/run.go b/internal/cli/system/cmd/check_backup_age/run.go deleted file mode 100644 index 211dc04b4..000000000 --- a/internal/cli/system/cmd/check_backup_age/run.go +++ /dev/null @@ -1,93 +0,0 @@ -// / ctx: https://ctx.ist -// ,'`./ do you remember? -// `.,'\ -// \ Copyright 2026-present Context contributors. -// SPDX-License-Identifier: Apache-2.0 - -package check_backup_age - -import ( - "os" - "path/filepath" - - "github.com/spf13/cobra" - - "github.com/ActiveMemory/ctx/internal/assets/read/desc" - coreArchive "github.com/ActiveMemory/ctx/internal/cli/system/core/archive" - coreCheck "github.com/ActiveMemory/ctx/internal/cli/system/core/check" - "github.com/ActiveMemory/ctx/internal/cli/system/core/nudge" - "github.com/ActiveMemory/ctx/internal/cli/system/core/state" - "github.com/ActiveMemory/ctx/internal/config/archive" - "github.com/ActiveMemory/ctx/internal/config/embed/text" - "github.com/ActiveMemory/ctx/internal/config/env" - "github.com/ActiveMemory/ctx/internal/config/hook" - "github.com/ActiveMemory/ctx/internal/config/token" -) - -// Run executes the check-backup-age hook logic. -// -// Reads a hook input from stdin, checks whether the SMB share is mounted -// and whether the backup marker file is fresh, then emits a relay warning -// if any issue is detected. Throttled to once per day. 
-// -// Parameters: -// - cmd: Cobra command for output -// - stdin: standard input for hook JSON -// -// Returns: -// - error: Always nil (hook errors are non-fatal) -func Run(cmd *cobra.Command, stdin *os.File) error { - if !state.Initialized() { - return nil - } - input, _, paused := coreCheck.Preamble(stdin) - if paused { - return nil - } - - tmpDir := state.Dir() - throttleFile := filepath.Join(tmpDir, archive.BackupThrottleID) - - if coreCheck.DailyThrottled(throttleFile) { - return nil - } - - home, homeErr := os.UserHomeDir() - if homeErr != nil { - return nil - } - - var warnings []string - - // Check 1: Is the SMB share mounted? - if smbURL := os.Getenv(env.BackupSMBURL); smbURL != "" { - warnings = coreArchive.CheckSMBMountWarnings(smbURL, warnings) - } - - // Check 2: Is the backup stale? - markerPath := filepath.Join( - home, archive.BackupMarkerDir, archive.BackupMarkerFile, - ) - warnings = coreArchive.CheckBackupMarker(markerPath, warnings) - - if len(warnings) == 0 { - return nil - } - - // Build pre-formatted warnings for the template variable - var warningText string - for _, w := range warnings { - warningText += w + token.NewlineLF - } - - vars := map[string]any{archive.VarWarnings: warningText} - nudge.LoadAndEmit(cmd, - hook.CheckBackupAge, hook.VariantWarning, vars, warningText, - desc.Text(text.DescKeyBackupRelayPrefix), - desc.Text(text.DescKeyBackupBoxTitle), - desc.Text(text.DescKeyBackupRelayMessage), - input.SessionID, throttleFile, - ) - - return nil -} diff --git a/internal/cli/system/cmd/check_ceremony/run.go b/internal/cli/system/cmd/check_ceremony/run.go index 0b4ef7c5d..215e5d255 100644 --- a/internal/cli/system/cmd/check_ceremony/run.go +++ b/internal/cli/system/cmd/check_ceremony/run.go @@ -17,11 +17,10 @@ import ( coreCeremony "github.com/ActiveMemory/ctx/internal/cli/system/core/ceremony" coreCheck "github.com/ActiveMemory/ctx/internal/cli/system/core/check" "github.com/ActiveMemory/ctx/internal/cli/system/core/nudge" - 
"github.com/ActiveMemory/ctx/internal/cli/system/core/state" "github.com/ActiveMemory/ctx/internal/config/ceremony" + "github.com/ActiveMemory/ctx/internal/config/dir" "github.com/ActiveMemory/ctx/internal/config/embed/text" "github.com/ActiveMemory/ctx/internal/config/hook" - ctxResolve "github.com/ActiveMemory/ctx/internal/context/resolve" internalIo "github.com/ActiveMemory/ctx/internal/io" "github.com/ActiveMemory/ctx/internal/notify" writeSetup "github.com/ActiveMemory/ctx/internal/write/setup" @@ -40,23 +39,19 @@ import ( // Returns: // - error: Always nil (hook errors are non-fatal) func Run(cmd *cobra.Command, stdin *os.File) error { - if !state.Initialized() { + input, _, ctxDir, stateDir, ok := coreCheck.FullPreamble(stdin) + bailSilently := !ok + if bailSilently { return nil } - - input, _, paused := coreCheck.Preamble(stdin) - if paused { - return nil - } - - remindedFile := filepath.Join(state.Dir(), ceremony.ThrottleID) - + remindedFile := filepath.Join(stateDir, ceremony.ThrottleID) if coreCheck.DailyThrottled(remindedFile) { return nil } + journalDir := filepath.Join(ctxDir, dir.Journal) files := coreCeremony.RecentJournalFiles( - ctxResolve.JournalDir(), ceremony.JournalLookback, + journalDir, ceremony.JournalLookback, ) if len(files) == 0 { @@ -75,10 +70,17 @@ func Run(cmd *cobra.Command, stdin *os.File) error { return nil } ref := notify.NewTemplateRef(hook.CheckCeremony, variant, nil) - nudge.EmitAndRelay(fmt.Sprintf(desc.Text(text.DescKeyRelayPrefixFormat), - hook.CheckCeremony, desc.Text(text.DescKeyCeremonyRelayMessage)), + emitErr := nudge.EmitAndRelay( + fmt.Sprintf( + desc.Text(text.DescKeyRelayPrefixFormat), + hook.CheckCeremony, + desc.Text(text.DescKeyCeremonyRelayMessage), + ), input.SessionID, ref, ) + if emitErr != nil { + return emitErr + } internalIo.TouchFile(remindedFile) return nil } diff --git a/internal/cli/system/cmd/check_context_size/run.go b/internal/cli/system/cmd/check_context_size/run.go index 160ed4525..9796b50e4 
100644 --- a/internal/cli/system/cmd/check_context_size/run.go +++ b/internal/cli/system/cmd/check_context_size/run.go @@ -26,8 +26,10 @@ import ( "github.com/ActiveMemory/ctx/internal/config/event" "github.com/ActiveMemory/ctx/internal/config/session" "github.com/ActiveMemory/ctx/internal/config/stats" + "github.com/ActiveMemory/ctx/internal/config/warn" "github.com/ActiveMemory/ctx/internal/entity" "github.com/ActiveMemory/ctx/internal/io" + logWarn "github.com/ActiveMemory/ctx/internal/log/warn" "github.com/ActiveMemory/ctx/internal/rc" writeSetup "github.com/ActiveMemory/ctx/internal/write/setup" ) @@ -46,7 +48,12 @@ import ( // Returns: // - error: Always nil (hook errors are non-fatal) func Run(cmd *cobra.Command, stdin *os.File) error { - if !state.Initialized() { + initialized, initErr := state.Initialized() + if initErr != nil { + logWarn.Warn(warn.StateInitializedProbe, initErr) + return nil + } + if !initialized { return nil } input := coreSession.ReadInput(stdin) @@ -61,9 +68,22 @@ func Run(cmd *cobra.Command, stdin *os.File) error { return nil } - tmpDir := state.Dir() + tmpDir, dirErr := state.Dir() + if dirErr != nil { + logWarn.Warn(warn.StateDirProbe, dirErr) + return nil + } counterFile := filepath.Join(tmpDir, stats.ContextSizeCounterPrefix+sessionID) - logFile := filepath.Join(rc.ContextDir(), dir.Logs, stats.ContextSizeLogFile) + // Unreachable under normal flow: state.Initialized() above already + // proved ContextDir succeeds. Kept defensive so a future ContextDir + // failure mode lands on stderr instead of silently skipping the + // hook. 
+ ctxDir, err := rc.ContextDir() + if err != nil { + logWarn.Warn(warn.ContextDirResolve, err) + return nil + } + logFile := filepath.Join(ctxDir, dir.Logs, stats.ContextSizeLogFile) // Increment counter count := counter.Read(counterFile) + 1 @@ -86,12 +106,14 @@ func Run(cmd *cobra.Command, stdin *os.File) error { billingHit := billingThreshold > 0 && tokens >= billingThreshold if billingHit { - writeSetup.NudgeBlock(cmd, - nudge.EmitBillingWarning( - logFile, sessionID, - count, tokens, billingThreshold, - ), + box, billingErr := nudge.EmitBillingWarning( + logFile, sessionID, + count, tokens, billingThreshold, ) + if billingErr != nil { + return billingErr + } + writeSetup.NudgeBlock(cmd, box) } // Wrap-up suppression: if the user recently ran /ctx-wrap-up, @@ -104,7 +126,7 @@ func Run(cmd *cobra.Command, stdin *os.File) error { fmt.Sprintf( desc.Text(text.DescKeyCheckContextSizeSuppressedLogFormat), count), ) - coreSession.WriteStats(sessionID, entity.Stats{ + return coreSession.WriteStats(sessionID, entity.Stats{ Timestamp: time.Now().Format(time.RFC3339), Prompt: count, Tokens: tokens, @@ -113,7 +135,6 @@ func Run(cmd *cobra.Command, stdin *os.File) error { Model: info.Model, Event: event.Suppressed, }) - return nil } // Percentage-based triggers: checkpoint at 60% (one-shot), @@ -132,19 +153,23 @@ func Run(cmd *cobra.Command, stdin *os.File) error { evt := trigger.Event switch { case trigger.Window: - writeSetup.NudgeBlock(cmd, - nudge.EmitWindowWarning( - logFile, sessionID, - count, tokens, pct, - ), + box, windowErr := nudge.EmitWindowWarning( + logFile, sessionID, + count, tokens, pct, ) + if windowErr != nil { + return windowErr + } + writeSetup.NudgeBlock(cmd, box) case trigger.Checkpoint: - writeSetup.NudgeBlock(cmd, - nudge.EmitCheckpoint( - logFile, sessionID, - count, tokens, pct, windowSize, - ), + box, checkpointErr := nudge.EmitCheckpoint( + logFile, sessionID, ctxDir, + count, tokens, pct, windowSize, ) + if checkpointErr != nil { + return 
checkpointErr + } + writeSetup.NudgeBlock(cmd, box) default: log.Message(logFile, sessionID, fmt.Sprintf(desc.Text( @@ -152,7 +177,7 @@ func Run(cmd *cobra.Command, stdin *os.File) error { ) } - coreSession.WriteStats(sessionID, entity.Stats{ + return coreSession.WriteStats(sessionID, entity.Stats{ Timestamp: time.Now().Format(time.RFC3339), Prompt: count, Tokens: tokens, @@ -161,6 +186,4 @@ func Run(cmd *cobra.Command, stdin *os.File) error { Model: info.Model, Event: evt, }) - - return nil } diff --git a/internal/cli/system/cmd/check_freshness/run.go b/internal/cli/system/cmd/check_freshness/run.go index f1846d571..3d738a2d8 100644 --- a/internal/cli/system/cmd/check_freshness/run.go +++ b/internal/cli/system/cmd/check_freshness/run.go @@ -17,7 +17,6 @@ import ( coreCheck "github.com/ActiveMemory/ctx/internal/cli/system/core/check" "github.com/ActiveMemory/ctx/internal/cli/system/core/drift" "github.com/ActiveMemory/ctx/internal/cli/system/core/nudge" - "github.com/ActiveMemory/ctx/internal/cli/system/core/state" "github.com/ActiveMemory/ctx/internal/config/embed/text" "github.com/ActiveMemory/ctx/internal/config/freshness" "github.com/ActiveMemory/ctx/internal/config/hook" @@ -43,19 +42,17 @@ import ( // Returns: // - error: Always nil (hook errors are non-fatal) func Run(cmd *cobra.Command, stdin *os.File) error { - input, _, paused := coreCheck.Preamble(stdin) - if paused { - return nil - } - files := rc.FreshnessFiles() if len(files) == 0 { return nil } - tmpDir := state.Dir() + input, _, _, tmpDir, ok := coreCheck.FullPreamble(stdin) + bailSilently := !ok + if bailSilently { + return nil + } throttleFile := filepath.Join(tmpDir, freshness.ThrottleID) - if coreCheck.DailyThrottled(throttleFile) { return nil } @@ -97,13 +94,11 @@ func Run(cmd *cobra.Command, stdin *os.File) error { staleText := drift.FormatStaleEntries(staleEntries) vars := map[string]any{freshness.VarStaleFiles: staleText} - nudge.LoadAndEmit(cmd, + return nudge.LoadAndEmit(cmd, 
hook.CheckFreshness, hook.VariantStale, vars, staleText, desc.Text(text.DescKeyFreshnessRelayPrefix), desc.Text(text.DescKeyFreshnessBoxTitle), desc.Text(text.DescKeyFreshnessRelayMessage), input.SessionID, throttleFile, ) - - return nil } diff --git a/internal/cli/system/cmd/check_hub_sync/run.go b/internal/cli/system/cmd/check_hub_sync/run.go index 617c963f3..87e02ee81 100644 --- a/internal/cli/system/cmd/check_hub_sync/run.go +++ b/internal/cli/system/cmd/check_hub_sync/run.go @@ -14,9 +14,10 @@ import ( "github.com/ActiveMemory/ctx/internal/cli/system/core/check" "github.com/ActiveMemory/ctx/internal/cli/system/core/hubsync" - "github.com/ActiveMemory/ctx/internal/cli/system/core/state" cfgHub "github.com/ActiveMemory/ctx/internal/config/hub" + "github.com/ActiveMemory/ctx/internal/config/warn" internalIo "github.com/ActiveMemory/ctx/internal/io" + logWarn "github.com/ActiveMemory/ctx/internal/log/warn" writeSetup "github.com/ActiveMemory/ctx/internal/write/setup" ) @@ -33,22 +34,22 @@ import ( // Returns: // - error: Always nil (hook errors are non-fatal) func Run(cmd *cobra.Command, stdin *os.File) error { - if !state.Initialized() { + _, sessionID, ctxDir, stateDir, ok := check.FullPreamble(stdin) + bailSilently := !ok + if bailSilently { return nil } - _, sessionID, paused := check.Preamble(stdin) - if paused { + connected, connErr := hubsync.Connected(ctxDir) + if connErr != nil { + logWarn.Warn(warn.HubConnectedProbe, connErr) return nil } - - if !hubsync.Connected() { + if !connected { return nil } - markerPath := filepath.Join( - state.Dir(), cfgHub.ThrottleHubSync, - ) + markerPath := filepath.Join(stateDir, cfgHub.ThrottleHubSync) if check.DailyThrottled(markerPath) { return nil } diff --git a/internal/cli/system/cmd/check_journal/run.go b/internal/cli/system/cmd/check_journal/run.go index 10a699167..0a2f58c0b 100644 --- a/internal/cli/system/cmd/check_journal/run.go +++ b/internal/cli/system/cmd/check_journal/run.go @@ -18,13 +18,12 @@ import ( 
coreJournal "github.com/ActiveMemory/ctx/internal/cli/system/core/journal" "github.com/ActiveMemory/ctx/internal/cli/system/core/message" "github.com/ActiveMemory/ctx/internal/cli/system/core/nudge" - "github.com/ActiveMemory/ctx/internal/cli/system/core/state" + "github.com/ActiveMemory/ctx/internal/config/dir" "github.com/ActiveMemory/ctx/internal/config/embed/text" "github.com/ActiveMemory/ctx/internal/config/env" "github.com/ActiveMemory/ctx/internal/config/file" "github.com/ActiveMemory/ctx/internal/config/hook" "github.com/ActiveMemory/ctx/internal/config/journal" - ctxResolve "github.com/ActiveMemory/ctx/internal/context/resolve" internalIo "github.com/ActiveMemory/ctx/internal/io" "github.com/ActiveMemory/ctx/internal/notify" writeSetup "github.com/ActiveMemory/ctx/internal/write/setup" @@ -43,15 +42,11 @@ import ( // Returns: // - error: Always nil (hook errors are non-fatal) func Run(cmd *cobra.Command, stdin *os.File) error { - if !state.Initialized() { + input, _, ctxDir, tmpDir, ok := coreCheck.FullPreamble(stdin) + bailSilently := !ok + if bailSilently { return nil } - input, _, paused := coreCheck.Preamble(stdin) - if paused { - return nil - } - - tmpDir := state.Dir() remindedFile := filepath.Join(tmpDir, journal.ThrottleID) claudeProjectsDir := filepath.Join( os.Getenv(env.Home), journal.ClaudeProjectsSubdir, @@ -63,7 +58,7 @@ func Run(cmd *cobra.Command, stdin *os.File) error { } // Bail out if journal or Claude projects directories don't exist - jDir := ctxResolve.JournalDir() + jDir := filepath.Join(ctxDir, dir.Journal) if _, statErr := os.Stat(jDir); os.IsNotExist(statErr) { return nil } @@ -126,7 +121,10 @@ func Run(cmd *cobra.Command, stdin *os.File) error { desc.Text(text.DescKeyCheckJournalRelayFormat), unimported, unenriched, )) - nudge.EmitAndRelay(journalMsg, input.SessionID, ref) + emitErr := nudge.EmitAndRelay(journalMsg, input.SessionID, ref) + if emitErr != nil { + return emitErr + } internalIo.TouchFile(remindedFile) return nil diff 
--git a/internal/cli/system/cmd/check_knowledge/run.go b/internal/cli/system/cmd/check_knowledge/run.go index 3d555fcc3..f00bb56fd 100644 --- a/internal/cli/system/cmd/check_knowledge/run.go +++ b/internal/cli/system/cmd/check_knowledge/run.go @@ -14,7 +14,6 @@ import ( "github.com/ActiveMemory/ctx/internal/cli/system/core/check" coreKnowledge "github.com/ActiveMemory/ctx/internal/cli/system/core/knowledge" - "github.com/ActiveMemory/ctx/internal/cli/system/core/state" "github.com/ActiveMemory/ctx/internal/config/knowledge" internalIo "github.com/ActiveMemory/ctx/internal/io" writeSetup "github.com/ActiveMemory/ctx/internal/write/setup" @@ -34,21 +33,21 @@ import ( // Returns: // - error: Always nil (hook errors are non-fatal) func Run(cmd *cobra.Command, stdin *os.File) error { - if !state.Initialized() { + _, sessionID, ctxDir, stateDir, ok := check.FullPreamble(stdin) + bailSilently := !ok + if bailSilently { return nil } - - _, sessionID, paused := check.Preamble(stdin) - if paused { - return nil - } - - markerPath := filepath.Join(state.Dir(), knowledge.ThrottleID) + markerPath := filepath.Join(stateDir, knowledge.ThrottleID) if check.DailyThrottled(markerPath) { return nil } - if box, warned := coreKnowledge.CheckHealth(sessionID); warned { + box, warned, checkErr := coreKnowledge.CheckHealth(sessionID, ctxDir) + if checkErr != nil { + return checkErr + } + if warned { writeSetup.Nudge(cmd, box) internalIo.TouchFile(markerPath) } diff --git a/internal/cli/system/cmd/check_map_staleness/run.go b/internal/cli/system/cmd/check_map_staleness/run.go index 6f79b63c8..70f8cd518 100644 --- a/internal/cli/system/cmd/check_map_staleness/run.go +++ b/internal/cli/system/cmd/check_map_staleness/run.go @@ -15,10 +15,11 @@ import ( "github.com/ActiveMemory/ctx/internal/cli/system/core/check" "github.com/ActiveMemory/ctx/internal/cli/system/core/health" - "github.com/ActiveMemory/ctx/internal/cli/system/core/state" "github.com/ActiveMemory/ctx/internal/config/architecture" 
cfgTime "github.com/ActiveMemory/ctx/internal/config/time" + "github.com/ActiveMemory/ctx/internal/config/warn" internalIo "github.com/ActiveMemory/ctx/internal/io" + logWarn "github.com/ActiveMemory/ctx/internal/log/warn" writeSetup "github.com/ActiveMemory/ctx/internal/write/setup" ) @@ -36,22 +37,23 @@ import ( // Returns: // - error: Always nil (hook errors are non-fatal) func Run(cmd *cobra.Command, stdin *os.File) error { - if !state.Initialized() { - return nil - } - - input, _, paused := check.Preamble(stdin) - if paused { + input, _, ctxDir, stateDir, ok := check.FullPreamble(stdin) + bailSilently := !ok + if bailSilently { return nil } markerPath := filepath.Join( - state.Dir(), architecture.MapStalenessThrottleID, + stateDir, architecture.MapStalenessThrottleID, ) if check.DailyThrottled(markerPath) { return nil } - info := health.ReadMapTracking() + info, readErr := health.ReadMapTracking(ctxDir) + if readErr != nil { + logWarn.Warn(warn.ReadMapTracking, readErr) + return nil + } if info == nil || info.OptedOut { return nil } @@ -74,9 +76,13 @@ func Run(cmd *cobra.Command, stdin *os.File) error { } dateStr := lastRun.Format(cfgTime.DateFormat) - writeSetup.Nudge(cmd, health.EmitMapStalenessWarning( + box, emitErr := health.EmitMapStalenessWarning( input.SessionID, dateStr, moduleCommits, - )) + ) + if emitErr != nil { + return emitErr + } + writeSetup.Nudge(cmd, box) internalIo.TouchFile(markerPath) diff --git a/internal/cli/system/cmd/check_memory_drift/run.go b/internal/cli/system/cmd/check_memory_drift/run.go index c6eebf2aa..7afec9cf3 100644 --- a/internal/cli/system/cmd/check_memory_drift/run.go +++ b/internal/cli/system/cmd/check_memory_drift/run.go @@ -15,11 +15,9 @@ import ( "github.com/ActiveMemory/ctx/internal/assets/read/desc" coreCheck "github.com/ActiveMemory/ctx/internal/cli/system/core/check" "github.com/ActiveMemory/ctx/internal/cli/system/core/nudge" - "github.com/ActiveMemory/ctx/internal/cli/system/core/state" 
"github.com/ActiveMemory/ctx/internal/config/embed/text" "github.com/ActiveMemory/ctx/internal/config/hook" "github.com/ActiveMemory/ctx/internal/memory" - "github.com/ActiveMemory/ctx/internal/rc" ) // Run executes the check-memory-drift hook logic. @@ -31,24 +29,20 @@ import ( // Returns: // - error: Non-nil if the drift check encounters an unrecoverable error func Run(cmd *cobra.Command, stdin *os.File) error { - if !state.Initialized() { - return nil - } - - input, sessionID, paused := coreCheck.Preamble(stdin) - if paused { + input, sessionID, contextDir, stateDir, ok := coreCheck.FullPreamble(stdin) + bailSilently := !ok + if bailSilently { return nil } // Session tombstone: nudge once per session, per session ID tombstone := filepath.Join( - state.Dir(), hook.PrefixMemoryDriftThrottle+sessionID, + stateDir, hook.PrefixMemoryDriftThrottle+sessionID, ) if _, statErr := os.Stat(tombstone); statErr == nil { return nil } - contextDir := rc.ContextDir() projectRoot := filepath.Dir(contextDir) sourcePath, discoverErr := memory.DiscoverPath(projectRoot) @@ -61,7 +55,7 @@ func Run(cmd *cobra.Command, stdin *os.File) error { return nil } - nudge.LoadAndEmit(cmd, + return nudge.LoadAndEmit(cmd, hook.CheckMemoryDrift, hook.VariantNudge, nil, desc.Text(text.DescKeyCheckMemoryDriftContent), desc.Text(text.DescKeyCheckMemoryDriftRelayPrefix), @@ -69,6 +63,4 @@ func Run(cmd *cobra.Command, stdin *os.File) error { desc.Text(text.DescKeyCheckMemoryDriftRelayMessage), input.SessionID, tombstone, ) - - return nil } diff --git a/internal/cli/system/cmd/check_persistence/run.go b/internal/cli/system/cmd/check_persistence/run.go index 372d71483..e6e481fa9 100644 --- a/internal/cli/system/cmd/check_persistence/run.go +++ b/internal/cli/system/cmd/check_persistence/run.go @@ -20,7 +20,6 @@ import ( coreNudge "github.com/ActiveMemory/ctx/internal/cli/system/core/nudge" "github.com/ActiveMemory/ctx/internal/cli/system/core/persistence" coreSession 
"github.com/ActiveMemory/ctx/internal/cli/system/core/session" - "github.com/ActiveMemory/ctx/internal/cli/system/core/state" "github.com/ActiveMemory/ctx/internal/cli/system/core/time" "github.com/ActiveMemory/ctx/internal/config/dir" "github.com/ActiveMemory/ctx/internal/config/embed/text" @@ -28,7 +27,6 @@ import ( "github.com/ActiveMemory/ctx/internal/config/nudge" "github.com/ActiveMemory/ctx/internal/config/stats" "github.com/ActiveMemory/ctx/internal/notify" - "github.com/ActiveMemory/ctx/internal/rc" writeSetup "github.com/ActiveMemory/ctx/internal/write/setup" ) @@ -45,17 +43,12 @@ import ( // Returns: // - error: Always nil (hook errors are non-fatal) func Run(cmd *cobra.Command, stdin *os.File) error { - if !state.Initialized() { + _, sessionID, contextDir, tmpDir, ok := coreCheck.FullPreamble(stdin) + bailSilently := !ok + if bailSilently { return nil } - _, sessionID, paused := coreCheck.Preamble(stdin) - if paused { - return nil - } - - tmpDir := state.Dir() stateFile := filepath.Join(tmpDir, nudge.PersistencePrefix+sessionID) - contextDir := rc.ContextDir() logFile := filepath.Join(contextDir, dir.Logs, nudge.PersistenceLogFile) // Initialize state if needed @@ -140,7 +133,7 @@ func Run(cmd *cobra.Command, stdin *os.File) error { nudge.VarSinceNudge: sinceNudge, }, ) - _ = notify.Send( + sendErr := notify.Send( hook.NotifyChannelNudge, fmt.Sprintf( desc.Text(text.DescKeyRelayPrefixFormat), @@ -152,7 +145,10 @@ func Run(cmd *cobra.Command, stdin *os.File) error { ), sessionID, ref, ) - coreNudge.Relay( + if sendErr != nil { + return sendErr + } + relayErr := coreNudge.Relay( fmt.Sprintf( desc.Text(text.DescKeyRelayPrefixFormat), hook.CheckPersistence, @@ -162,6 +158,9 @@ func Run(cmd *cobra.Command, stdin *os.File) error { ), sessionID, ref, ) + if relayErr != nil { + return relayErr + } ps.LastNudge = ps.Count } else { log.Message( diff --git a/internal/cli/system/cmd/check_reminder/run.go b/internal/cli/system/cmd/check_reminder/run.go index 
511dad668..cf74248ac 100644 --- a/internal/cli/system/cmd/check_reminder/run.go +++ b/internal/cli/system/cmd/check_reminder/run.go @@ -24,6 +24,8 @@ import ( "github.com/ActiveMemory/ctx/internal/config/reminder" cfgTime "github.com/ActiveMemory/ctx/internal/config/time" "github.com/ActiveMemory/ctx/internal/config/token" + "github.com/ActiveMemory/ctx/internal/config/warn" + logWarn "github.com/ActiveMemory/ctx/internal/log/warn" ) // Run executes the check-reminders hook logic. @@ -46,7 +48,12 @@ func Run(cmd *cobra.Command, stdin *os.File) error { // regardless of initialized/paused state. coreProv.Emit(cmd, input.SessionID) - if !state.Initialized() || paused { + initialized, initErr := state.Initialized() + if initErr != nil { + logWarn.Warn(warn.StateInitializedProbe, initErr) + return nil + } + if !initialized || paused { return nil } @@ -86,13 +93,11 @@ func Run(cmd *cobra.Command, stdin *os.File) error { desc.Text(text.DescKeyCheckReminderNudgeFormat), len(due), ) - nudge.LoadAndEmit(cmd, + return nudge.LoadAndEmit(cmd, hook.CheckReminder, hook.VariantReminders, vars, fallback, desc.Text(text.DescKeyCheckReminderRelayPrefix), desc.Text(text.DescKeyCheckReminderBoxTitle), relayMsg, input.SessionID, "", ) - - return nil } diff --git a/internal/cli/system/cmd/check_resource/run.go b/internal/cli/system/cmd/check_resource/run.go index bfa578df6..08cc354e1 100644 --- a/internal/cli/system/cmd/check_resource/run.go +++ b/internal/cli/system/cmd/check_resource/run.go @@ -14,7 +14,6 @@ import ( "github.com/ActiveMemory/ctx/internal/assets/read/desc" coreCheck "github.com/ActiveMemory/ctx/internal/cli/system/core/check" "github.com/ActiveMemory/ctx/internal/cli/system/core/nudge" - "github.com/ActiveMemory/ctx/internal/cli/system/core/state" "github.com/ActiveMemory/ctx/internal/config/embed/text" "github.com/ActiveMemory/ctx/internal/config/hook" "github.com/ActiveMemory/ctx/internal/config/stats" @@ -35,11 +34,9 @@ import ( // Returns: // - error: Always nil 
(hook errors are non-fatal) func Run(cmd *cobra.Command, stdin *os.File) error { - if !state.Initialized() { - return nil - } - input, _, paused := coreCheck.Preamble(stdin) - if paused { + input, _, _, _, ok := coreCheck.FullPreamble(stdin) + bailSilently := !ok + if bailSilently { return nil } @@ -67,7 +64,7 @@ func Run(cmd *cobra.Command, stdin *os.File) error { desc.Text( text.DescKeyCheckResourceFallbackEnd) vars := map[string]any{stats.VarAlertMessages: alertMessages} - nudge.LoadAndEmit(cmd, + return nudge.LoadAndEmit(cmd, hook.CheckResource, hook.VariantAlert, vars, fallback, desc.Text(text.DescKeyCheckResourceRelayPrefix), @@ -75,6 +72,4 @@ func Run(cmd *cobra.Command, stdin *os.File) error { desc.Text(text.DescKeyCheckResourceRelayMessage), input.SessionID, "", ) - - return nil } diff --git a/internal/cli/system/cmd/check_skill_discovery/run.go b/internal/cli/system/cmd/check_skill_discovery/run.go index 76fc855b3..8765a19a0 100644 --- a/internal/cli/system/cmd/check_skill_discovery/run.go +++ b/internal/cli/system/cmd/check_skill_discovery/run.go @@ -16,7 +16,6 @@ import ( coreCheck "github.com/ActiveMemory/ctx/internal/cli/system/core/check" "github.com/ActiveMemory/ctx/internal/cli/system/core/counter" "github.com/ActiveMemory/ctx/internal/cli/system/core/message" - "github.com/ActiveMemory/ctx/internal/cli/system/core/state" "github.com/ActiveMemory/ctx/internal/config/embed/text" cfgHook "github.com/ActiveMemory/ctx/internal/config/hook" "github.com/ActiveMemory/ctx/internal/config/stats" @@ -37,18 +36,12 @@ import ( // Returns: // - error: Always nil (hook errors are non-fatal) func Run(cmd *cobra.Command, stdin *os.File) error { - if !state.Initialized() { + _, sessionID, _, tmpDir, ok := coreCheck.FullPreamble(stdin) + bailSilently := !ok + if bailSilently { return nil } - input, _, paused := coreCheck.Preamble(stdin) - if paused { - return nil - } - - sessionID := input.SessionID - tmpDir := state.Dir() - // One-shot guard: skip if already fired 
this session. guardFile := filepath.Join( tmpDir, diff --git a/internal/cli/system/cmd/check_task_completion/run.go b/internal/cli/system/cmd/check_task_completion/run.go index dc8255588..69a919246 100644 --- a/internal/cli/system/cmd/check_task_completion/run.go +++ b/internal/cli/system/cmd/check_task_completion/run.go @@ -19,7 +19,6 @@ import ( "github.com/ActiveMemory/ctx/internal/cli/system/core/message" coreNudge "github.com/ActiveMemory/ctx/internal/cli/system/core/nudge" coreSession "github.com/ActiveMemory/ctx/internal/cli/system/core/session" - "github.com/ActiveMemory/ctx/internal/cli/system/core/state" "github.com/ActiveMemory/ctx/internal/config/embed/text" "github.com/ActiveMemory/ctx/internal/config/hook" "github.com/ActiveMemory/ctx/internal/config/nudge" @@ -41,20 +40,16 @@ import ( // Returns: // - error: Always nil (hook errors are non-fatal) func Run(cmd *cobra.Command, stdin *os.File) error { - if !state.Initialized() { - return nil - } - input, sessionID, paused := coreCheck.Preamble(stdin) - if paused { - return nil - } - interval := rc.TaskNudgeInterval() if interval <= 0 { return nil } - - counterPath := filepath.Join(state.Dir(), nudge.PrefixTask+sessionID) + input, sessionID, _, stateDir, ok := coreCheck.FullPreamble(stdin) + bailSilently := !ok + if bailSilently { + return nil + } + counterPath := filepath.Join(stateDir, nudge.PrefixTask+sessionID) count := counter.Read(counterPath) count++ @@ -81,10 +76,8 @@ func Run(cmd *cobra.Command, stdin *os.File) error { ref := notify.NewTemplateRef( hook.CheckTaskCompletion, hook.VariantNudge, nil, ) - coreNudge.Relay( + return coreNudge.Relay( fmt.Sprintf(desc.Text(text.DescKeyRelayPrefixFormat), hook.CheckTaskCompletion, nudgeMsg), input.SessionID, ref, ) - - return nil } diff --git a/internal/cli/system/cmd/check_version/run.go b/internal/cli/system/cmd/check_version/run.go index 1bef49f25..2c386837e 100644 --- a/internal/cli/system/cmd/check_version/run.go +++ 
b/internal/cli/system/cmd/check_version/run.go @@ -18,7 +18,6 @@ import ( coreCheck "github.com/ActiveMemory/ctx/internal/cli/system/core/check" "github.com/ActiveMemory/ctx/internal/cli/system/core/message" "github.com/ActiveMemory/ctx/internal/cli/system/core/nudge" - "github.com/ActiveMemory/ctx/internal/cli/system/core/state" coreVersion "github.com/ActiveMemory/ctx/internal/cli/system/core/version" "github.com/ActiveMemory/ctx/internal/config/embed/text" "github.com/ActiveMemory/ctx/internal/config/hook" @@ -41,18 +40,12 @@ import ( // Returns: // - error: Always nil (hook errors are non-fatal) func Run(cmd *cobra.Command, stdin *os.File) error { - if !state.Initialized() { + input, _, _, tmpDir, ok := coreCheck.FullPreamble(stdin) + bailSilently := !ok + if bailSilently { return nil } - - input, _, paused := coreCheck.Preamble(stdin) - if paused { - return nil - } - - tmpDir := state.Dir() markerFile := filepath.Join(tmpDir, version.ThrottleID) - if coreCheck.DailyThrottled(markerFile) { return nil } @@ -116,12 +109,18 @@ func Run(cmd *cobra.Command, stdin *os.File) error { hook.CheckVersion, fmt.Sprintf( desc.Text(text.DescKeyCheckVersionMismatchRelayFormat), binaryVer, pluginVer)) - nudge.EmitAndRelay(versionMsg, input.SessionID, ref) + if err := nudge.EmitAndRelay(versionMsg, input.SessionID, ref); err != nil { + return err + } internalIo.TouchFile(markerFile) - // Key age check: piggyback on the daily version check - writeSetup.Nudge(cmd, coreVersion.CheckKeyAge(input.SessionID)) + // Key age check: piggyback on the daily version check. 
+ keyBox, keyErr := coreVersion.CheckKeyAge(input.SessionID) + if keyErr != nil { + return keyErr + } + writeSetup.Nudge(cmd, keyBox) return nil } diff --git a/internal/cli/system/cmd/context_load_gate/run.go b/internal/cli/system/cmd/context_load_gate/run.go index 59d108c37..1acc96aae 100644 --- a/internal/cli/system/cmd/context_load_gate/run.go +++ b/internal/cli/system/cmd/context_load_gate/run.go @@ -28,9 +28,11 @@ import ( "github.com/ActiveMemory/ctx/internal/config/hook" "github.com/ActiveMemory/ctx/internal/config/load_gate" "github.com/ActiveMemory/ctx/internal/config/token" + "github.com/ActiveMemory/ctx/internal/config/warn" ctxToken "github.com/ActiveMemory/ctx/internal/context/token" "github.com/ActiveMemory/ctx/internal/entity" internalIo "github.com/ActiveMemory/ctx/internal/io" + logWarn "github.com/ActiveMemory/ctx/internal/log/warn" "github.com/ActiveMemory/ctx/internal/rc" writeSetup "github.com/ActiveMemory/ctx/internal/write/setup" ) @@ -51,7 +53,12 @@ import ( // Returns: // - error: Always nil (hook errors are non-fatal) func Run(cmd *cobra.Command, stdin *os.File) error { - if !state.Initialized() { + initialized, initErr := state.Initialized() + if initErr != nil { + logWarn.Warn(warn.StateInitializedProbe, initErr) + return nil + } + if !initialized { return nil } @@ -64,7 +71,11 @@ func Run(cmd *cobra.Command, stdin *os.File) error { return nil } - tmpDir := state.Dir() + tmpDir, dirErr := state.Dir() + if dirErr != nil { + logWarn.Warn(warn.StateDirProbe, dirErr) + return nil + } marker := filepath.Join(tmpDir, load_gate.PrefixCtxLoaded+input.SessionID) if _, statErr := os.Stat(marker); statErr == nil { @@ -79,7 +90,14 @@ func Run(cmd *cobra.Command, stdin *os.File) error { // Runs once per session at startup - fast directory scan. health.AutoPrune(load_gate.AutoPruneStaleDays) - dir := rc.ContextDir() + // Unreachable under normal flow: state.Initialized() above already + // proved ContextDir succeeds. 
Kept defensive so a future ContextDir + // failure mode surfaces loudly instead of silently hanging the gate. + dir, err := rc.ContextDir() + if err != nil { + logWarn.Warn(warn.ContextDirResolve, err) + return nil + } var content strings.Builder var totalTokens int var filesLoaded int @@ -133,11 +151,19 @@ func Run(cmd *cobra.Command, stdin *os.File) error { cmd, coreSession.FormatContext(hook.EventPreToolUse, content.String()), ) - // Webhook: metadata only - never send file content externally + // Webhook: metadata only - never send file content externally. + // Log-first: Relay writes the event log then sends the webhook; + // if either fails, the oversize flag is NOT written; we do not + // want an oversize nudge to fire for a gate event we never + // recorded. webhookMsg := fmt.Sprintf( desc.Text(text.DescKeyContextLoadGateWebhook), filesLoaded, totalTokens) - nudge.Relay(webhookMsg, input.SessionID, nil) + if relayErr := nudge.Relay( + webhookMsg, input.SessionID, nil, + ); relayErr != nil { + return relayErr + } // Oversize nudge: write the flag for check-context-size to pick up load.WriteOversizeFlag(dir, totalTokens, perFile) diff --git a/internal/cli/system/cmd/heartbeat/run.go b/internal/cli/system/cmd/heartbeat/run.go index f959f3692..db662d149 100644 --- a/internal/cli/system/cmd/heartbeat/run.go +++ b/internal/cli/system/cmd/heartbeat/run.go @@ -26,7 +26,9 @@ import ( "github.com/ActiveMemory/ctx/internal/config/heartbeat" "github.com/ActiveMemory/ctx/internal/config/hook" "github.com/ActiveMemory/ctx/internal/config/stats" + "github.com/ActiveMemory/ctx/internal/config/warn" "github.com/ActiveMemory/ctx/internal/log/event" + logWarn "github.com/ActiveMemory/ctx/internal/log/warn" "github.com/ActiveMemory/ctx/internal/notify" "github.com/ActiveMemory/ctx/internal/rc" ) @@ -45,7 +47,12 @@ import ( // Returns: // - error: Always nil (hook errors are non-fatal) func Run(_ *cobra.Command, stdin *os.File) error { - if !state.Initialized() { + initialized, 
initErr := state.Initialized() + if initErr != nil { + logWarn.Warn(warn.StateInitializedProbe, initErr) + return nil + } + if !initialized { return nil } _, sessionID, paused := coreCheck.Preamble(stdin) @@ -53,14 +60,25 @@ func Run(_ *cobra.Command, stdin *os.File) error { return nil } - tmpDir := state.Dir() + tmpDir, dirErr := state.Dir() + if dirErr != nil { + logWarn.Warn(warn.StateDirProbe, dirErr) + return nil + } counterFile := filepath.Join( tmpDir, heartbeat.CounterPrefix+sessionID, ) mtimeFile := filepath.Join( tmpDir, heartbeat.MtimePrefix+sessionID, ) - contextDir := rc.ContextDir() + // Unreachable under normal flow: state.Initialized() above already + // proved ContextDir succeeds. Kept defensive so a future ContextDir + // failure surfaces instead of the heartbeat silently going dark. + contextDir, ctxErr := rc.ContextDir() + if ctxErr != nil { + logWarn.Warn(warn.ContextDirResolve, ctxErr) + return nil + } logFile := filepath.Join(contextDir, dir.Logs, heartbeat.LogFile) // Increment prompt counter. @@ -101,8 +119,22 @@ func Run(_ *cobra.Command, stdin *os.File) error { msg = fmt.Sprintf(desc.Text(text.DescKeyHeartbeatNotifyPlain), count, contextModified) } - _ = notify.Send(hook.NotifyChannelHeartbeat, msg, sessionID, ref) - event.Append(hook.NotifyChannelHeartbeat, msg, sessionID, ref) + // Log-first: if the event log cannot be written, neither the + // webhook nor the operational log line should claim the + // heartbeat happened. See docs/security/reporting.md → + // "Log-First Audit Trail". 
+ appendErr := event.Append( + hook.NotifyChannelHeartbeat, msg, sessionID, ref, + ) + if appendErr != nil { + return appendErr + } + sendErr := notify.Send( + hook.NotifyChannelHeartbeat, msg, sessionID, ref, + ) + if sendErr != nil { + return sendErr + } var logLine string if tokens > 0 { diff --git a/internal/cli/system/cmd/mark_journal/run.go b/internal/cli/system/cmd/mark_journal/run.go index 0e40bc227..4b219db69 100644 --- a/internal/cli/system/cmd/mark_journal/run.go +++ b/internal/cli/system/cmd/mark_journal/run.go @@ -12,6 +12,8 @@ import ( coreJournal "github.com/ActiveMemory/ctx/internal/cli/system/core/journal" "github.com/ActiveMemory/ctx/internal/cli/system/core/state" cFlag "github.com/ActiveMemory/ctx/internal/config/flag" + "github.com/ActiveMemory/ctx/internal/config/warn" + logWarn "github.com/ActiveMemory/ctx/internal/log/warn" writeJournal "github.com/ActiveMemory/ctx/internal/write/mark_journal" ) @@ -28,7 +30,12 @@ import ( // Returns: // - error: Non-nil on state load/save failure or unknown stage func Run(cmd *cobra.Command, filename, stage string) error { - if !state.Initialized() { + initialized, initErr := state.Initialized() + if initErr != nil { + logWarn.Warn(warn.StateInitializedProbe, initErr) + return nil + } + if !initialized { return nil } diff --git a/internal/cli/system/cmd/mark_wrapped_up/run.go b/internal/cli/system/cmd/mark_wrapped_up/run.go index b31b9b359..e9998c253 100644 --- a/internal/cli/system/cmd/mark_wrapped_up/run.go +++ b/internal/cli/system/cmd/mark_wrapped_up/run.go @@ -13,8 +13,10 @@ import ( "github.com/ActiveMemory/ctx/internal/cli/system/core/state" "github.com/ActiveMemory/ctx/internal/config/fs" + "github.com/ActiveMemory/ctx/internal/config/warn" "github.com/ActiveMemory/ctx/internal/config/wrap" ctxIo "github.com/ActiveMemory/ctx/internal/io" + logWarn "github.com/ActiveMemory/ctx/internal/log/warn" "github.com/ActiveMemory/ctx/internal/write/session" ) @@ -29,11 +31,21 @@ import ( // Returns: // - 
error: Non-nil if the marker file cannot be written func Run(cmd *cobra.Command) error { - if !state.Initialized() { + initialized, initErr := state.Initialized() + if initErr != nil { + logWarn.Warn(warn.StateInitializedProbe, initErr) + return nil + } + if !initialized { return nil } - markerPath := filepath.Join(state.Dir(), wrap.Marker) + stateDir, dirErr := state.Dir() + if dirErr != nil { + logWarn.Warn(warn.StateDirProbe, dirErr) + return nil + } + markerPath := filepath.Join(stateDir, wrap.Marker) if writeErr := ctxIo.SafeWriteFile( markerPath, []byte(wrap.Content), fs.PermSecret, diff --git a/internal/cli/system/cmd/pause/run.go b/internal/cli/system/cmd/pause/run.go index 7f3d36b38..46b498b9f 100644 --- a/internal/cli/system/cmd/pause/run.go +++ b/internal/cli/system/cmd/pause/run.go @@ -11,12 +11,10 @@ import ( "github.com/spf13/cobra" - "github.com/ActiveMemory/ctx/internal/cli/system/core/counter" + coreCheck "github.com/ActiveMemory/ctx/internal/cli/system/core/check" "github.com/ActiveMemory/ctx/internal/cli/system/core/nudge" - coreSession "github.com/ActiveMemory/ctx/internal/cli/system/core/session" - "github.com/ActiveMemory/ctx/internal/cli/system/core/state" - cFlag "github.com/ActiveMemory/ctx/internal/config/flag" - "github.com/ActiveMemory/ctx/internal/config/session" + "github.com/ActiveMemory/ctx/internal/config/warn" + logWarn "github.com/ActiveMemory/ctx/internal/log/warn" writePause "github.com/ActiveMemory/ctx/internal/write/pause" ) @@ -33,21 +31,14 @@ import ( // Returns: // - error: Always nil func Run(cmd *cobra.Command, stdin *os.File) error { - if !state.Initialized() { + sessionID, ok := coreCheck.PausePreamble(cmd, stdin) + if !ok { return nil } - - sessionID, _ := cmd.Flags().GetString(cFlag.SessionID) - if sessionID == "" { - input := coreSession.ReadInput(stdin) - sessionID = input.SessionID - } - if sessionID == "" { - sessionID = session.IDUnknown + if pauseErr := nudge.Pause(sessionID); pauseErr != nil { + 
logWarn.Warn(warn.StateDirProbe, pauseErr) + return nil } - - path := nudge.PauseMarkerPath(sessionID) - counter.Write(path, 0) writePause.Confirmed(cmd, sessionID) return nil } diff --git a/internal/cli/system/cmd/post_commit/run.go b/internal/cli/system/cmd/post_commit/run.go index 92c9b2131..d6f0329a0 100644 --- a/internal/cli/system/cmd/post_commit/run.go +++ b/internal/cli/system/cmd/post_commit/run.go @@ -19,7 +19,6 @@ import ( "github.com/ActiveMemory/ctx/internal/cli/system/core/nudge" corePC "github.com/ActiveMemory/ctx/internal/cli/system/core/post_commit" coreSession "github.com/ActiveMemory/ctx/internal/cli/system/core/session" - "github.com/ActiveMemory/ctx/internal/cli/system/core/state" "github.com/ActiveMemory/ctx/internal/config/embed/text" "github.com/ActiveMemory/ctx/internal/config/hook" "github.com/ActiveMemory/ctx/internal/config/regex" @@ -42,11 +41,9 @@ import ( // Returns: // - error: Always nil (hook errors are non-fatal) func Run(cmd *cobra.Command, stdin *os.File) error { - if !state.Initialized() { - return nil - } - input, sessionID, paused := coreCheck.Preamble(stdin) - if paused { + input, sessionID, _, _, ok := coreCheck.FullPreamble(stdin) + bailSilently := !ok + if bailSilently { return nil } @@ -68,25 +65,32 @@ func Run(cmd *cobra.Command, stdin *os.File) error { if msg == "" { return nil } - msg = ctxContext.AppendDir(msg) + msg, appendErr := ctxContext.AppendDir(msg) + if appendErr != nil { + return appendErr + } writeSetup.Context( cmd, coreSession.FormatContext(hook.EventPostToolUse, msg), ) ref := notify.NewTemplateRef(hookName, variant, nil) - nudge.Relay( + if relayErr := nudge.Relay( fmt.Sprintf( desc.Text(text.DescKeyRelayPrefixFormat), hookName, desc.Text(text.DescKeyPostCommitRelayMessage), ), input.SessionID, ref, - ) + ); relayErr != nil { + return relayErr + } - if driftResponse := drift.CheckVersion( - sessionID, - ); driftResponse != "" { + driftResponse, driftErr := drift.CheckVersion(sessionID) + if driftErr != 
nil { + return driftErr + } + if driftResponse != "" { writeSetup.Context(cmd, driftResponse) } diff --git a/internal/cli/system/cmd/qa_reminder/run.go b/internal/cli/system/cmd/qa_reminder/run.go index ce31345ae..b34e55a8f 100644 --- a/internal/cli/system/cmd/qa_reminder/run.go +++ b/internal/cli/system/cmd/qa_reminder/run.go @@ -18,7 +18,6 @@ import ( "github.com/ActiveMemory/ctx/internal/cli/system/core/message" "github.com/ActiveMemory/ctx/internal/cli/system/core/nudge" coreSession "github.com/ActiveMemory/ctx/internal/cli/system/core/session" - "github.com/ActiveMemory/ctx/internal/cli/system/core/state" "github.com/ActiveMemory/ctx/internal/config/embed/text" cfgGit "github.com/ActiveMemory/ctx/internal/config/git" "github.com/ActiveMemory/ctx/internal/config/hook" @@ -39,11 +38,9 @@ import ( // Returns: // - error: Always nil (hook errors are non-fatal) func Run(cmd *cobra.Command, stdin *os.File) error { - if !state.Initialized() { - return nil - } - input, _, paused := coreCheck.Preamble(stdin) - if paused { + input, _, _, _, ok := coreCheck.FullPreamble(stdin) + bailSilently := !ok + if bailSilently { return nil } if !strings.Contains(input.ToolInput.Command, cfgGit.Binary) { @@ -56,14 +53,16 @@ func Run(cmd *cobra.Command, stdin *os.File) error { if msg == "" { return nil } - msg = ctxContext.AppendDir(msg) + msg, appendErr := ctxContext.AppendDir(msg) + if appendErr != nil { + return appendErr + } writeSetup.Context(cmd, coreSession.FormatContext(hook.EventPreToolUse, msg)) ref := notify.NewTemplateRef(hook.QAReminder, hook.VariantGate, nil) - nudge.Relay(fmt.Sprintf(desc.Text(text.DescKeyRelayPrefixFormat), + return nudge.Relay(fmt.Sprintf(desc.Text(text.DescKeyRelayPrefixFormat), hook.QAReminder, desc.Text(text.DescKeyQaReminderRelayMessage)), input.SessionID, ref, ) - return nil } diff --git a/internal/cli/system/cmd/resume/run.go b/internal/cli/system/cmd/resume/run.go index 9e5963bb7..0f8abddd1 100644 --- a/internal/cli/system/cmd/resume/run.go 
+++ b/internal/cli/system/cmd/resume/run.go @@ -11,11 +11,8 @@ import ( "github.com/spf13/cobra" + coreCheck "github.com/ActiveMemory/ctx/internal/cli/system/core/check" "github.com/ActiveMemory/ctx/internal/cli/system/core/nudge" - coreSession "github.com/ActiveMemory/ctx/internal/cli/system/core/session" - "github.com/ActiveMemory/ctx/internal/cli/system/core/state" - cFlag "github.com/ActiveMemory/ctx/internal/config/flag" - "github.com/ActiveMemory/ctx/internal/config/session" "github.com/ActiveMemory/ctx/internal/config/warn" ctxLog "github.com/ActiveMemory/ctx/internal/log/warn" writeSession "github.com/ActiveMemory/ctx/internal/write/session" @@ -33,22 +30,13 @@ import ( // Returns: // - error: Always nil func Run(cmd *cobra.Command, stdin *os.File) error { - if !state.Initialized() { + sessionID, ok := coreCheck.PausePreamble(cmd, stdin) + if !ok { return nil } - - sessionID, _ := cmd.Flags().GetString(cFlag.SessionID) - if sessionID == "" { - input := coreSession.ReadInput(stdin) - sessionID = input.SessionID - } - if sessionID == "" { - sessionID = session.IDUnknown - } - - path := nudge.PauseMarkerPath(sessionID) - if removeErr := os.Remove(path); removeErr != nil { - ctxLog.Warn(warn.Remove, path, removeErr) + if resumeErr := nudge.Resume(sessionID); resumeErr != nil { + ctxLog.Warn(warn.StateDirProbe, resumeErr) + return nil } writeSession.Resumed(cmd, sessionID) return nil diff --git a/internal/cli/system/cmd/session_event/run.go b/internal/cli/system/cmd/session_event/run.go index b61e75891..0f08f902c 100644 --- a/internal/cli/system/cmd/session_event/run.go +++ b/internal/cli/system/cmd/session_event/run.go @@ -14,9 +14,11 @@ import ( coreState "github.com/ActiveMemory/ctx/internal/cli/system/core/state" cfgEvent "github.com/ActiveMemory/ctx/internal/config/event" cfgHook "github.com/ActiveMemory/ctx/internal/config/hook" + "github.com/ActiveMemory/ctx/internal/config/warn" "github.com/ActiveMemory/ctx/internal/entity" errSession 
"github.com/ActiveMemory/ctx/internal/err/session" "github.com/ActiveMemory/ctx/internal/log/event" + logWarn "github.com/ActiveMemory/ctx/internal/log/warn" "github.com/ActiveMemory/ctx/internal/notify" wSession "github.com/ActiveMemory/ctx/internal/write/session" @@ -38,7 +40,12 @@ import ( // Returns: // - error: Non-nil if eventType is invalid func Run(cmd *cobra.Command, eventType, caller string) error { - if !coreState.Initialized() { + initialized, initErr := coreState.Initialized() + if initErr != nil { + logWarn.Warn(warn.StateInitializedProbe, initErr) + return nil + } + if !initialized { return nil } @@ -51,8 +58,21 @@ func Run(cmd *cobra.Command, eventType, caller string) error { ref := entity.NewTemplateRef(cfgHook.SessionEvent, eventType, map[string]any{cfgEvent.VarCaller: caller}) - event.Append(cfgEvent.CategorySession, msg, "", ref) - _ = notify.Send(cfgEvent.CategorySession, msg, "", ref) + // Log-first: the event-log entry IS the authoritative record of + // the session lifecycle. If it cannot be written, neither the + // webhook nor the stdout marker should run; both would claim a + // session event whose audit trail never landed. See + // docs/security/reporting.md → "Log-First Audit Trail". 
+ if appendErr := event.Append( + cfgEvent.CategorySession, msg, "", ref, + ); appendErr != nil { + return appendErr + } + if sendErr := notify.Send( + cfgEvent.CategorySession, msg, "", ref, + ); sendErr != nil { + return sendErr + } wSession.Event(cmd, eventType, caller) return nil diff --git a/internal/cli/system/cmd/specs_nudge/run.go b/internal/cli/system/cmd/specs_nudge/run.go index 2f876d714..a9385edd0 100644 --- a/internal/cli/system/cmd/specs_nudge/run.go +++ b/internal/cli/system/cmd/specs_nudge/run.go @@ -17,7 +17,6 @@ import ( "github.com/ActiveMemory/ctx/internal/cli/system/core/message" "github.com/ActiveMemory/ctx/internal/cli/system/core/nudge" coreSession "github.com/ActiveMemory/ctx/internal/cli/system/core/session" - "github.com/ActiveMemory/ctx/internal/cli/system/core/state" "github.com/ActiveMemory/ctx/internal/config/embed/text" "github.com/ActiveMemory/ctx/internal/config/hook" ctxContext "github.com/ActiveMemory/ctx/internal/context/resolve" @@ -38,11 +37,9 @@ import ( // Returns: // - error: Always nil (hook errors are non-fatal) func Run(cmd *cobra.Command, stdin *os.File) error { - if !state.Initialized() { - return nil - } - input, _, paused := coreCheck.Preamble(stdin) - if paused { + input, _, _, _, ok := coreCheck.FullPreamble(stdin) + bailSilently := !ok + if bailSilently { return nil } fallback := desc.Text(text.DescKeySpecsNudgeFallback) @@ -52,16 +49,18 @@ func Run(cmd *cobra.Command, stdin *os.File) error { if msg == "" { return nil } - msg = ctxContext.AppendDir(msg) + msg, appendErr := ctxContext.AppendDir(msg) + if appendErr != nil { + return appendErr + } writeSetup.Context(cmd, coreSession.FormatContext(hook.EventPreToolUse, msg)) nudgeMsg := desc.Text(text.DescKeySpecsNudgeNudgeMessage) ref := notify.NewTemplateRef(hook.SpecsNudge, hook.VariantNudge, nil) - nudge.Relay( + return nudge.Relay( fmt.Sprintf( desc.Text(text.DescKeyRelayPrefixFormat), hook.SpecsNudge, nudgeMsg, ), input.SessionID, ref, ) - return nil } diff 
--git a/internal/cli/system/core/anchor/doc.go b/internal/cli/system/core/anchor/doc.go new file mode 100644 index 000000000..b5157cc91 --- /dev/null +++ b/internal/cli/system/core/anchor/doc.go @@ -0,0 +1,26 @@ +// / ctx: https://ctx.ist +// ,'`./ do you remember? +// `.,'\ +// \ Copyright 2026-present Context contributors. +// SPDX-License-Identifier: Apache-2.0 + +// Package anchor holds path-comparison helpers for the +// `ctx system check-anchor-drift` hook. +// +// The drift hook compares the parent-shell CTX_DIR (snapshotted +// before the standard `${CLAUDE_PROJECT_DIR:?…}/.context` injection) +// against the Claude-injected CTX_DIR. Naive byte-for-byte +// comparison after [filepath.Clean] over-reports: on macOS, `/tmp` +// is a symlink to `/private/tmp`, so a shell activated under +// `/tmp/foo/.context` and a Claude session whose +// `CLAUDE_PROJECT_DIR` resolves to `/private/tmp/foo` would trip +// a false alarm on every prompt: same physical directory, different +// strings. Any user with a symlinked workspace path runs into the +// same trap. +// +// # Public Surface +// +// - [Equal] reports whether two paths refer to the same directory, +// resolving symlinks before comparison and falling back to +// cleaned-string comparison when resolution fails. +package anchor diff --git a/internal/cli/system/core/anchor/equal.go b/internal/cli/system/core/anchor/equal.go new file mode 100644 index 000000000..3085c1c94 --- /dev/null +++ b/internal/cli/system/core/anchor/equal.go @@ -0,0 +1,44 @@ +// / ctx: https://ctx.ist +// ,'`./ do you remember? +// `.,'\ +// \ Copyright 2026-present Context contributors. +// SPDX-License-Identifier: Apache-2.0 + +package anchor + +import "path/filepath" + +// Equal reports whether a and b refer to the same directory. +// First compares cleaned paths byte-for-byte (the cheap case); +// then falls back to [filepath.EvalSymlinks] resolution to catch +// symlink-equivalent paths like macOS's `/tmp` → `/private/tmp`. 
+// +// Resolution failure on either side defaults to "different," +// which is the correct call: an inherited CTX_DIR pointing at a +// deleted directory is genuine drift even if the injected one +// resolves cleanly. The drift hook's job is to surface real +// misalignment; an over-eager symlink fix that swallowed +// resolution failures would silently hide it. +// +// Parameters: +// - a: first path (typically the parent-shell inherited CTX_DIR). +// - b: second path (typically the Claude-injected CTX_DIR). +// +// Returns: +// - bool: true when the two paths resolve to the same directory. +func Equal(a, b string) bool { + aClean := filepath.Clean(a) + bClean := filepath.Clean(b) + if aClean == bClean { + return true + } + aResolved, aErr := filepath.EvalSymlinks(aClean) + if aErr != nil { + return false + } + bResolved, bErr := filepath.EvalSymlinks(bClean) + if bErr != nil { + return false + } + return aResolved == bResolved +} diff --git a/internal/cli/system/core/archive/archive.go b/internal/cli/system/core/archive/archive.go deleted file mode 100644 index 3ed278cfc..000000000 --- a/internal/cli/system/core/archive/archive.go +++ /dev/null @@ -1,178 +0,0 @@ -// / ctx: https://ctx.ist -// ,'`./ do you remember? -// `.,'\ -// \ Copyright 2026-present Context contributors. -// SPDX-License-Identifier: Apache-2.0 - -package archive - -import ( - "archive/tar" - "io" - "io/fs" - "os" - "path/filepath" - - "github.com/ActiveMemory/ctx/internal/config/warn" - "github.com/ActiveMemory/ctx/internal/entity" - errBackup "github.com/ActiveMemory/ctx/internal/err/backup" - internalIo "github.com/ActiveMemory/ctx/internal/io" - ctxLog "github.com/ActiveMemory/ctx/internal/log/warn" - writeBackup "github.com/ActiveMemory/ctx/internal/write/backup" -) - -// finalize creates the archive, populates the result with size, -// and optionally copies to an SMB share. 
-// -// Parameters: -// - w: writer for diagnostic output (typically stderr) -// - archivePath: output file path for the archive -// - archiveName: archive filename (for SMB destination path) -// - scope: backup scope label (e.g., "project", "global") -// - entries: directories and files to include -// - smb: optional SMB configuration (nil to skip remote copy) -// -// Returns: -// - BackupResult: archive path, size, and optional SMB destination -// - error: non-nil on archive creation or SMB failure -func finalize( - w io.Writer, archivePath, archiveName, scope string, - entries []entity.ArchiveEntry, smb *SMBConfig, -) (entity.BackupResult, error) { - if archiveErr := Create(archivePath, entries, w); archiveErr != nil { - return entity.BackupResult{}, archiveErr - } - - result := entity.BackupResult{Scope: scope, Archive: archivePath} - if info, statErr := os.Stat(archivePath); statErr == nil { - result.Size = info.Size() - } - - if smb != nil { - if mountErr := EnsureSMBMount(smb); mountErr != nil { - return result, mountErr - } - if copyErr := CopyToSMB(smb, archivePath); copyErr != nil { - return result, copyErr - } - result.SMBDest = filepath.Join(smb.GVFSPath, smb.Subdir, archiveName) - } - - return result, nil -} - -// addEntry adds a single ArchiveEntry (file or directory) to the tar writer. -// Optional entries that are not found emit a diagnostic message -// and are skipped. 
-// -// Parameters: -// - tw: tar writer to add the entry to -// - entry: archive entry describing the source and target -// - w: writer for diagnostic output (typically stderr) -// -// Returns: -// - error: non-nil on stat, walk, or tar write failure -func addEntry(tw *tar.Writer, entry entity.ArchiveEntry, w io.Writer) error { - info, statErr := os.Stat(entry.SourcePath) - if os.IsNotExist(statErr) { - if entry.Optional { - writeBackup.SkipEntry(w, entry.Prefix) - return nil - } - return errBackup.SourceNotFound(entry.SourcePath) - } - if statErr != nil { - return statErr - } - - if !info.IsDir() { - return addSingleFile(tw, entry.SourcePath, entry.Prefix, info) - } - - return filepath.WalkDir(entry.SourcePath, - func(path string, d fs.DirEntry, walkErr error) error { - if walkErr != nil { - return walkErr - } - if d.IsDir() && entry.ExcludeDir != "" && d.Name() == entry.ExcludeDir { - return filepath.SkipDir - } - if d.Type()&os.ModeSymlink != 0 { - return nil - } - - rel, relErr := filepath.Rel(entry.SourcePath, path) - if relErr != nil { - return relErr - } - - name := filepath.ToSlash(filepath.Join(entry.Prefix, rel)) - - fileInfo, infoErr := d.Info() - if infoErr != nil { - return infoErr - } - - header, headerErr := tar.FileInfoHeader(fileInfo, "") - if headerErr != nil { - return headerErr - } - header.Name = name - - if writeErr := tw.WriteHeader(header); writeErr != nil { - return writeErr - } - - if d.IsDir() { - return nil - } - return copyFileToTar(tw, path) - }) -} - -// addSingleFile writes a single non-directory file entry into the tar. 
-// -// Parameters: -// - tw: tar writer -// - path: absolute source file path -// - name: name to use inside the archive -// - info: file info for the tar header -// -// Returns: -// - error: non-nil on header or content write failure -func addSingleFile( - tw *tar.Writer, path, name string, info fs.FileInfo, -) error { - header, headerErr := tar.FileInfoHeader(info, "") - if headerErr != nil { - return headerErr - } - header.Name = name - - if writeErr := tw.WriteHeader(header); writeErr != nil { - return writeErr - } - return copyFileToTar(tw, path) -} - -// copyFileToTar reads a file and writes its contents to the tar writer. -// -// Parameters: -// - tw: tar writer -// - path: absolute file path to read -// -// Returns: -// - error: non-nil on open, read, or write failure -func copyFileToTar(tw *tar.Writer, path string) error { - f, openErr := internalIo.SafeOpenUserFile(path) - if openErr != nil { - return openErr - } - defer func() { - if closeErr := f.Close(); closeErr != nil { - ctxLog.Warn(warn.Close, path, closeErr) - } - }() - _, copyErr := io.Copy(tw, f) - return copyErr -} diff --git a/internal/cli/system/core/archive/backup.go b/internal/cli/system/core/archive/backup.go deleted file mode 100644 index 6e83bd6fc..000000000 --- a/internal/cli/system/core/archive/backup.go +++ /dev/null @@ -1,227 +0,0 @@ -// / ctx: https://ctx.ist -// ,'`./ do you remember? -// `.,'\ -// \ Copyright 2026-present Context contributors. 
-// SPDX-License-Identifier: Apache-2.0 - -package archive - -import ( - "archive/tar" - "compress/gzip" - "fmt" - "io" - "os" - "path/filepath" - "time" - - "github.com/ActiveMemory/ctx/internal/assets/read/desc" - "github.com/ActiveMemory/ctx/internal/config/archive" - "github.com/ActiveMemory/ctx/internal/config/dir" - "github.com/ActiveMemory/ctx/internal/config/embed/text" - cfgFs "github.com/ActiveMemory/ctx/internal/config/fs" - cfgTime "github.com/ActiveMemory/ctx/internal/config/time" - "github.com/ActiveMemory/ctx/internal/config/warn" - "github.com/ActiveMemory/ctx/internal/entity" - errBackup "github.com/ActiveMemory/ctx/internal/err/backup" - internalIo "github.com/ActiveMemory/ctx/internal/io" - logWarn "github.com/ActiveMemory/ctx/internal/log/warn" -) - -// Create builds a tar.gz archive from the given entries. -// -// Parameters: -// - archivePath: output file path for the archive -// - entries: directories and files to include -// - w: writer for diagnostic output (typically stderr) -// -// Returns: -// - error: non-nil on file creation or tar writing failure -func Create( - archivePath string, entries []entity.ArchiveEntry, w io.Writer, -) error { - outFile, createErr := internalIo.SafeCreateFile(archivePath, cfgFs.PermFile) - if createErr != nil { - return errBackup.CreateArchive(createErr) - } - defer func() { - if closeErr := outFile.Close(); closeErr != nil { - logWarn.Warn( - warn.Close, archivePath, closeErr, - ) - } - }() - - gzw := gzip.NewWriter(outFile) - defer func() { - if closeErr := gzw.Close(); closeErr != nil { - logWarn.Warn(warn.Close, archive.WriterGzip, closeErr) - } - }() - - tw := tar.NewWriter(gzw) - defer func() { - if closeErr := tw.Close(); closeErr != nil { - logWarn.Warn(warn.Close, archive.WriterTar, closeErr) - } - }() - - for _, entry := range entries { - if addErr := addEntry(tw, entry, w); addErr != nil { - return addErr - } - } - return nil -} - -// BackupProject creates a project-scoped backup archive. 
-// -// Parameters: -// - w: writer for diagnostic output (typically stderr) -// - home: user home directory -// - timestamp: formatted timestamp for the archive filename -// - smb: optional SMB configuration (nil to skip remote copy) -// -// Returns: -// - BackupResult: archive path, size, and optional SMB destination -// - error: non-nil on archive or SMB failure -func BackupProject( - w io.Writer, home, timestamp string, smb *SMBConfig, -) (entity.BackupResult, error) { - cwd, cwdErr := os.Getwd() - if cwdErr != nil { - return entity.BackupResult{}, cwdErr - } - - archiveName := fmt.Sprintf(archive.TplProjectArchive, timestamp) - archivePath := filepath.Join(os.TempDir(), archiveName) - - entries := []entity.ArchiveEntry{ - { - SourcePath: filepath.Join(cwd, dir.Context), - Prefix: dir.Context, - ExcludeDir: dir.JournalSite, - }, - { - SourcePath: filepath.Join(cwd, dir.Claude), - Prefix: dir.Claude, - }, - { - SourcePath: filepath.Join(cwd, dir.Ideas), - Prefix: dir.Ideas, - Optional: true, - }, - {SourcePath: filepath.Join(home, archive.Bashrc), Prefix: archive.Bashrc}, - } - - result, finalizeErr := finalize( - w, archivePath, archiveName, archive.BackupScopeProject, entries, smb, - ) - if finalizeErr != nil { - return result, finalizeErr - } - - // Touch marker file for check-backup-age hook. - markerDir := filepath.Join(home, archive.BackupMarkerDir) - if mkdirErr := internalIo.SafeMkdirAll( - markerDir, cfgFs.PermExec, - ); mkdirErr != nil { - logWarn.Warn(warn.Mkdir, markerDir, mkdirErr) - } - markerPath := filepath.Join(markerDir, archive.BackupMarkerFile) - internalIo.TouchFile(markerPath) - - return result, nil -} - -// BackupGlobal creates a global-scoped backup archive. 
-// -// Parameters: -// - w: writer for diagnostic output (typically stderr) -// - home: user home directory -// - timestamp: formatted timestamp for the archive filename -// - smb: optional SMB configuration (nil to skip remote copy) -// -// Returns: -// - BackupResult: archive path, size, and optional SMB destination -// - error: non-nil on archive or SMB failure -func BackupGlobal( - w io.Writer, home, timestamp string, smb *SMBConfig, -) (entity.BackupResult, error) { - archiveName := fmt.Sprintf(archive.TplGlobalArchive, timestamp) - archivePath := filepath.Join(os.TempDir(), archiveName) - - entries := []entity.ArchiveEntry{ - { - SourcePath: filepath.Join(home, dir.Claude), - Prefix: dir.Claude, - ExcludeDir: archive.BackupExcludeTodos, - }, - { - SourcePath: filepath.Join(home, dir.CtxData), - Prefix: dir.CtxData, - Optional: true, - }, - } - - return finalize( - w, archivePath, archiveName, archive.BackupScopeGlobal, entries, smb, - ) -} - -// CheckSMBMountWarnings checks whether the GVFS mount for the given SMB URL -// exists and appends warning strings if the share is not mounted. -// -// Parameters: -// - smbURL: the SMB share URL from the environment -// - warnings: existing warning slice to append to -// -// Returns: -// - []string: the warnings slice, possibly with SMB mount warnings appended -func CheckSMBMountWarnings(smbURL string, warnings []string) []string { - cfg, cfgErr := ParseSMBConfig(smbURL, "") - if cfgErr != nil { - return warnings - } - - if _, statErr := os.Stat(cfg.GVFSPath); os.IsNotExist(statErr) { - warnings = append(warnings, - fmt.Sprintf(desc.Text(text.DescKeyBackupSMBNotMounted), cfg.Host), - desc.Text(text.DescKeyBackupSMBUnavailable), - ) - } - - return warnings -} - -// CheckBackupMarker checks the backup marker file age and appends warnings -// when the marker is missing or older than config.BackupMaxAgeDays. 
-// -// Parameters: -// - markerPath: absolute path to the backup marker file -// - warnings: existing warning slice to append to -// -// Returns: -// - []string: the warnings slice, possibly with staleness warnings appended -func CheckBackupMarker(markerPath string, warnings []string) []string { - info, statErr := os.Stat(markerPath) - if os.IsNotExist(statErr) { - return append(warnings, - desc.Text(text.DescKeyBackupNoMarker), - desc.Text(text.DescKeyBackupRunHint), - ) - } - if statErr != nil { - return warnings - } - - ageDays := int(time.Since(info.ModTime()).Hours() / cfgTime.HoursPerDay) - if ageDays >= archive.BackupMaxAgeDays { - return append(warnings, - fmt.Sprintf(desc.Text(text.DescKeyBackupStale), ageDays), - desc.Text(text.DescKeyBackupRunHint), - ) - } - - return warnings -} diff --git a/internal/cli/system/core/archive/doc.go b/internal/cli/system/core/archive/doc.go deleted file mode 100644 index af8702431..000000000 --- a/internal/cli/system/core/archive/doc.go +++ /dev/null @@ -1,52 +0,0 @@ -// / ctx: https://ctx.ist -// ,'`./ do you remember? -// `.,'\ -// \ Copyright 2026-present Context contributors. -// SPDX-License-Identifier: Apache-2.0 - -// Package archive creates **compressed tar archives** of ctx -// state for the `ctx backup` family of commands, bundling -// `.context/`, `.claude/`, `ideas/`, and (optionally) the -// global `~/.claude/` directory into a timestamped `.tar.gz` -// for off-machine storage on an SMB share or local disk. -// -// The package is the engine. The user-facing knobs -// (`--scope project|global|all`, `--no-smb`, etc.) live in -// `internal/cli/backup`. -// -// # Public Surface -// -// - **[Create](outPath, entries)**: builds a tar.gz at -// `outPath` containing every entry in `entries`. -// Optional entries that resolve to a missing path -// emit a diagnostic message but do not fail the -// operation (a common case: `ideas/` may not exist -// in fresh projects). 
-// - **[BackupProject](contextDir, outDir)**: wraps -// [Create] with the project-scope entry list: -// `.context/`, `.claude/`, `ideas/`, `.bashrc`-like -// project shell extensions. -// - **[BackupGlobal](outDir)**: global scope; -// `~/.claude/` minus `todos/` (excluded because -// it can be huge and is regenerated by Claude Code). -// - **[CheckSMBMountWarnings](url)**: sanity-checks -// the configured SMB mount and warns about common -// misconfigurations (mount path missing, write -// permission missing, etc.). -// - **[CheckBackupMarker](contextDir)**: reads the -// last-backup timestamp marker so -// `ctx system check-backup-age` can nudge the user. -// -// # Output Naming -// -// Archives are named `ctx--YYYY-MM-DD-HHMMSS.tar.gz` -// for unambiguous chronological sorting in the destination -// directory. -// -// # Concurrency -// -// Filesystem-bound and stateless. Concurrent invocations -// against the same destination would compete for the same -// timestamped filename; single-process is the assumed -// model. -package archive diff --git a/internal/cli/system/core/archive/smb.go b/internal/cli/system/core/archive/smb.go deleted file mode 100644 index cf53d01dc..000000000 --- a/internal/cli/system/core/archive/smb.go +++ /dev/null @@ -1,110 +0,0 @@ -// / ctx: https://ctx.ist -// ,'`./ do you remember? -// `.,'\ -// \ Copyright 2026-present Context contributors. 
-// SPDX-License-Identifier: Apache-2.0 - -package archive - -import ( - "fmt" - "net/url" - "os" - "path/filepath" - - "github.com/ActiveMemory/ctx/internal/assets/read/desc" - "github.com/ActiveMemory/ctx/internal/config/archive" - "github.com/ActiveMemory/ctx/internal/config/embed/text" - "github.com/ActiveMemory/ctx/internal/config/fs" - errBackup "github.com/ActiveMemory/ctx/internal/err/backup" - errFs "github.com/ActiveMemory/ctx/internal/err/fs" - execGio "github.com/ActiveMemory/ctx/internal/exec/gio" - "github.com/ActiveMemory/ctx/internal/io" -) - -// ParseSMBConfig parses an SMB URL and subdirectory into a config struct -// with the derived GVFS mount path. -// -// Parameters: -// - smbURL: SMB share URL (e.g. smb://host/share) -// - subdir: Subdirectory on share (empty uses default) -// -// Returns: -// - *SMBConfig: Parsed config -// - error: Non-nil on invalid URL -func ParseSMBConfig(smbURL, subdir string) (*SMBConfig, error) { - u, parseErr := url.Parse(smbURL) - if parseErr != nil || u.Host == "" { - return nil, errBackup.InvalidSMBURL(smbURL) - } - - host := u.Host - share := u.Path - if len(share) > 0 && share[0] == '/' { - share = share[1:] - } - if share == "" { - return nil, errBackup.SMBMissingShare(smbURL) - } - - if subdir == "" { - subdir = archive.BackupDefaultSubdir - } - - gvfsPath := fmt.Sprintf(desc.Text(text.DescKeyWriteFormatGVFSPath), - os.Getuid(), host, share) - - return &SMBConfig{ - Host: host, - Share: share, - Subdir: subdir, - GVFSPath: gvfsPath, - SourceURL: smbURL, - }, nil -} - -// EnsureSMBMount checks if the GVFS mount exists and attempts gio mount if not. 
-// -// Parameters: -// - cfg: SMB configuration -// -// Returns: -// - error: Non-nil if mount fails -func EnsureSMBMount(cfg *SMBConfig) error { - if _, statErr := os.Stat(cfg.GVFSPath); statErr == nil { - return nil - } - - if mountErr := execGio.Mount(cfg.SourceURL); mountErr != nil { - return errBackup.MountFailed(cfg.SourceURL, mountErr) - } - - return nil -} - -// CopyToSMB copies a local file to the SMB share destination directory. -// -// Parameters: -// - cfg: SMB configuration -// - localPath: Path to the local file to copy -// -// Returns: -// - error: Non-nil on copy failure -func CopyToSMB(cfg *SMBConfig, localPath string) error { - dest := filepath.Join(cfg.GVFSPath, cfg.Subdir) - if mkdirErr := io.SafeMkdirAll(dest, fs.PermExec); mkdirErr != nil { - return errFs.CreateDir(dest, mkdirErr) - } - - data, readErr := io.SafeReadUserFile(localPath) - if readErr != nil { - return errFs.ReadFile(readErr) - } - - destFile := filepath.Join(dest, filepath.Base(localPath)) - if writeErr := io.SafeWriteFile(destFile, data, fs.PermFile); writeErr != nil { - return errBackup.WriteSMB(writeErr) - } - - return nil -} diff --git a/internal/cli/system/core/archive/types.go b/internal/cli/system/core/archive/types.go deleted file mode 100644 index 8219a79ad..000000000 --- a/internal/cli/system/core/archive/types.go +++ /dev/null @@ -1,23 +0,0 @@ -// / ctx: https://ctx.ist -// ,'`./ do you remember? -// `.,'\ -// \ Copyright 2026-present Context contributors. -// SPDX-License-Identifier: Apache-2.0 - -package archive - -// SMBConfig holds parsed SMB share connection details. 
-// -// Fields: -// - Host: SMB server hostname -// - Share: Share name -// - Subdir: Subdirectory within the share -// - GVFSPath: GVFS mount path for the share -// - SourceURL: Original smb:// URL -type SMBConfig struct { - Host string - Share string - Subdir string - GVFSPath string - SourceURL string -} diff --git a/internal/cli/system/core/check/full_preamble.go b/internal/cli/system/core/check/full_preamble.go new file mode 100644 index 000000000..80c134c37 --- /dev/null +++ b/internal/cli/system/core/check/full_preamble.go @@ -0,0 +1,100 @@ +// / ctx: https://ctx.ist +// ,'`./ do you remember? +// `.,'\ +// \ Copyright 2026-present Context contributors. +// SPDX-License-Identifier: Apache-2.0 + +package check + +import ( + "os" + + "github.com/ActiveMemory/ctx/internal/cli/system/core/state" + "github.com/ActiveMemory/ctx/internal/config/warn" + "github.com/ActiveMemory/ctx/internal/entity" + logWarn "github.com/ActiveMemory/ctx/internal/log/warn" + "github.com/ActiveMemory/ctx/internal/rc" +) + +// FullPreamble runs the standard hook prelude: verifies ctx is +// initialized, reads hook input (via [Preamble]), checks the pause +// state, and resolves the context and state directories. Every +// daily-throttled ctx hook opens with this sequence; the helper +// collapses the twenty-plus lines of gate / probe / log boilerplate +// that each hook would otherwise repeat verbatim. +// +// Returning ctxDir alongside stateDir lets hooks that need to +// [filepath.Join] paths under the context root skip a second +// [rc.ContextDir] call: the Initialized gate above already proves +// ContextDir succeeds, so re-checking ErrDirNotDeclared is dead code. +// +// Returns ok=false when the hook should bail silently. The bail +// reasons and how callers see them: +// +// - Uninitialized ctx: silent bail. +// - state.Initialized resolver failure: logs [warn.StateInitializedProbe] +// then bails. +// - Paused session: silent bail. 
+// - state.Dir resolver failure: logs [warn.StateDirProbe] then bails. +// - rc.ContextDir resolver failure after Initialized returned true: +// logs [warn.ContextDirResolve] then bails. Reachable only if a +// future ContextDir error is added beyond ErrDirNotDeclared. +// +// Recommended call shape: alias `!ok` as `bailSilently` so the +// intent reads as a deliberate bail rather than swallowed-error +// suppression at every site. +// +// input, _, ctxDir, stateDir, ok := check.FullPreamble(stdin) +// bailSilently := !ok +// if bailSilently { +// return nil +// } +// +// The returned sessionID is the [Preamble]-normalized value and falls +// back to [cfgSession.IDUnknown] when the hook input omits it. Prefer +// it over input.SessionID when touching state files keyed by session. +// +// The regular [Preamble] stays available for hooks that do not need +// the Initialized gate or a state directory (e.g. check_reminder, +// which emits provenance unconditionally and gates Initialized inline). +// +// Parameters: +// - stdin: Standard input for hook JSON. +// +// Returns: +// - entity.HookInput: Parsed hook input (zero value when ok=false). +// - string: Normalized session ID (IDUnknown when missing). +// - string: Absolute context directory; always usable when ok=true. +// - string: Absolute state directory; always usable when ok=true. +// - bool: true when the caller should proceed. 
+func FullPreamble( + stdin *os.File, +) (entity.HookInput, string, string, string, bool) { + initialized, initErr := state.Initialized() + if initErr != nil { + logWarn.Warn(warn.StateInitializedProbe, initErr) + return entity.HookInput{}, "", "", "", false + } + if !initialized { + return entity.HookInput{}, "", "", "", false + } + + ctxDir, ctxErr := rc.ContextDir() + if ctxErr != nil { + logWarn.Warn(warn.ContextDirResolve, ctxErr) + return entity.HookInput{}, "", "", "", false + } + + input, sessionID, paused := Preamble(stdin) + if paused { + return entity.HookInput{}, "", "", "", false + } + + stateDir, dirErr := state.Dir() + if dirErr != nil { + logWarn.Warn(warn.StateDirProbe, dirErr) + return entity.HookInput{}, "", "", "", false + } + + return input, sessionID, ctxDir, stateDir, true +} diff --git a/internal/cli/system/core/check/pause_preamble.go b/internal/cli/system/core/check/pause_preamble.go new file mode 100644 index 000000000..db1219178 --- /dev/null +++ b/internal/cli/system/core/check/pause_preamble.go @@ -0,0 +1,62 @@ +// / ctx: https://ctx.ist +// ,'`./ do you remember? +// `.,'\ +// \ Copyright 2026-present Context contributors. +// SPDX-License-Identifier: Apache-2.0 + +package check + +import ( + "os" + + "github.com/spf13/cobra" + + coreSession "github.com/ActiveMemory/ctx/internal/cli/system/core/session" + "github.com/ActiveMemory/ctx/internal/cli/system/core/state" + cFlag "github.com/ActiveMemory/ctx/internal/config/flag" + cfgSession "github.com/ActiveMemory/ctx/internal/config/session" + "github.com/ActiveMemory/ctx/internal/config/warn" + logWarn "github.com/ActiveMemory/ctx/internal/log/warn" +) + +// PausePreamble resolves the shared preamble for the pause and resume +// hooks: gate on [state.Initialized], then resolve the session ID from +// the --session-id flag, stdin JSON, and finally the IDUnknown +// fallback. 
Returns ok=false when the hook should bail; a probe +// warning is logged internally when the resolver reports a non-benign +// failure, so callers only need to check ok. +// +// The regular [Preamble] can't be reused here because pause / resume +// accept --session-id on the CLI (they're also callable directly by +// the user, not just by hook JSON) and they never read the pause +// counter (being asked to pause when already paused is a no-op, not a +// gate). +// +// Parameters: +// - cmd: Cobra command for flag access. +// - stdin: Standard input for hook JSON fallback. +// +// Returns: +// - string: Resolved session identifier. +// - bool: true when the caller should proceed; false when the hook +// should bail silently. +func PausePreamble(cmd *cobra.Command, stdin *os.File) (string, bool) { + initialized, initErr := state.Initialized() + if initErr != nil { + logWarn.Warn(warn.StateInitializedProbe, initErr) + return "", false + } + if !initialized { + return "", false + } + + sessionID, _ := cmd.Flags().GetString(cFlag.SessionID) + if sessionID == "" { + input := coreSession.ReadInput(stdin) + sessionID = input.SessionID + } + if sessionID == "" { + sessionID = cfgSession.IDUnknown + } + return sessionID, true +} diff --git a/internal/cli/system/core/check/wrapup.go b/internal/cli/system/core/check/wrapup.go index 91e6e3992..ff0b0041b 100644 --- a/internal/cli/system/core/check/wrapup.go +++ b/internal/cli/system/core/check/wrapup.go @@ -18,12 +18,20 @@ import ( // WrappedUpRecently checks whether the wrap-up marker exists and is // less than the configured expiry old. // -// Returns true if nudges should be suppressed. +// Returns false when the state directory cannot be resolved: hooks +// that gate on this are already downstream of [state.Initialized], +// where the resolver failure surfaced once; here we fail-closed +// (assume not wrapped up, let nudges fire) rather than silently +// suppress everything. 
// // Returns: // - bool: True if wrap-up marker is fresh func WrappedUpRecently() bool { - markerPath := filepath.Join(state.Dir(), wrap.Marker) + stateDir, dirErr := state.Dir() + if dirErr != nil { + return false + } + markerPath := filepath.Join(stateDir, wrap.Marker) info, statErr := os.Stat(markerPath) if statErr != nil { diff --git a/internal/cli/system/core/drift/version_drift.go b/internal/cli/system/core/drift/version_drift.go index c84f8ec8e..219ec0074 100644 --- a/internal/cli/system/core/drift/version_drift.go +++ b/internal/cli/system/core/drift/version_drift.go @@ -32,24 +32,29 @@ import ( // // Returns: // - string: JSON hook response to print, or empty string if no drift -func CheckVersion(sessionID string) string { +// - error: propagated from [nudge.Relay] when the drift-relay event +// cannot be logged or its webhook cannot be sent. Callers should +// not emit the hook response when this is non-nil: printing a +// drift warning whose audit trail failed would claim a check +// happened without the log to prove it. 
+func CheckVersion(sessionID string) (string, error) { fileVer := ReadVersionFile() if fileVer == "" { - return "" + return "", nil } pluginVer, pluginErr := claude.PluginVersion() if pluginErr != nil || pluginVer == "" { - return "" + return "", nil } marketVer := ReadMarketplaceVersion() if marketVer == "" { - return "" + return "", nil } if fileVer == pluginVer && pluginVer == marketVer { - return "" + return "", nil } vars := map[string]any{ @@ -63,17 +68,21 @@ func CheckVersion(sessionID string) string { ) msg := message.Load(hook.VersionDrift, hook.VariantNudge, vars, fallback) if msg == "" { - return "" + return "", nil } response := coreSession.FormatContext(hook.EventPostToolUse, msg) ref := notify.NewTemplateRef(hook.VersionDrift, hook.VariantNudge, vars) - nudge.Relay(fmt.Sprintf(desc.Text(text.DescKeyRelayPrefixFormat), + relayMsg := fmt.Sprintf( + desc.Text(text.DescKeyRelayPrefixFormat), hook.VersionDrift, desc.Text(text.DescKeyVersionDriftRelayMessage), - ), sessionID, ref) + ) + if relayErr := nudge.Relay(relayMsg, sessionID, ref); relayErr != nil { + return "", relayErr + } - return response + return response, nil } // ReadVersionFile reads and trims the VERSION file from the project root. diff --git a/internal/cli/system/core/health/map_staleness.go b/internal/cli/system/core/health/map_staleness.go index ffe6e205f..d50c8a2f3 100644 --- a/internal/cli/system/core/health/map_staleness.go +++ b/internal/cli/system/core/health/map_staleness.go @@ -9,6 +9,7 @@ package health import ( "encoding/json" "fmt" + "os" "strings" "time" @@ -25,26 +26,42 @@ import ( execGit "github.com/ActiveMemory/ctx/internal/exec/git" "github.com/ActiveMemory/ctx/internal/io" "github.com/ActiveMemory/ctx/internal/notify" - "github.com/ActiveMemory/ctx/internal/rc" ) // ReadMapTracking reads and parses the map-tracking.json file from the // context directory. 
// +// ctxDir is supplied by the caller (typically a FullPreamble-gated +// hook) so this function does not re-resolve it; a second resolution +// would be dead code today and would pair an ambiguous (nil, err) +// return with the genuine "no tracking yet" result. +// +// Returns (nil, nil) when the tracking file is simply absent: ordinary +// "nothing to track yet" state. A read or parse failure is propagated +// so the caller can distinguish "no tracking yet" from "tracking data +// is corrupt". +// +// Parameters: +// - ctxDir: absolute path to the context directory +// // Returns: -// - *MapTrackingInfo: parsed tracking info, or nil if not found or invalid -func ReadMapTracking() *MapTrackingInfo { - data, readErr := io.SafeReadFile(rc.ContextDir(), architecture.MapTracking) +// - *MapTrackingInfo: parsed tracking info, or nil if absent +// - error: non-nil on I/O failure or JSON parse failure +func ReadMapTracking(ctxDir string) (*MapTrackingInfo, error) { + data, readErr := io.SafeReadFile(ctxDir, architecture.MapTracking) if readErr != nil { - return nil + if os.IsNotExist(readErr) { + return nil, nil + } + return nil, readErr } var info MapTrackingInfo if jsonErr := json.Unmarshal(data, &info); jsonErr != nil { - return nil + return nil, jsonErr } - return &info + return &info, nil } // CountModuleCommits counts git commits touching internal/ @@ -83,10 +100,13 @@ func CountModuleCommits(since string) int { // - moduleCommits: number of commits touching modules since last refresh // // Returns: -// - string: formatted nudge box, or empty string if silenced +// - string: formatted nudge box, or empty string if silenced. +// - error: propagated from [nudge.EmitAndRelay] so callers can +// honor the log-first principle: if the relay audit entry or +// webhook fails, the nudge box should not be printed. 
func EmitMapStalenessWarning( sessionID, dateStr string, moduleCommits int, -) string { +) (string, error) { fallback := fmt.Sprintf( desc.Text(text.DescKeyCheckMapStalenessFallback), dateStr, moduleCommits, @@ -97,7 +117,7 @@ func EmitMapStalenessWarning( architecture.VarModuleCount: moduleCommits, }, fallback) if content == "" { - return "" + return "", nil } box := message.NudgeBox( @@ -115,6 +135,8 @@ func EmitMapStalenessWarning( hook.CheckMapStaleness, desc.Text(text.DescKeyCheckMapStalenessRelayMessage), ) - nudge.EmitAndRelay(notifyMsg, sessionID, ref) - return box + if err := nudge.EmitAndRelay(notifyMsg, sessionID, ref); err != nil { + return "", err + } + return box, nil } diff --git a/internal/cli/system/core/health/prune.go b/internal/cli/system/core/health/prune.go index 3cf73bc48..e196d527b 100644 --- a/internal/cli/system/core/health/prune.go +++ b/internal/cli/system/core/health/prune.go @@ -30,8 +30,21 @@ import ( // Returns: // - int: Number of files pruned func AutoPrune(days int) int { - dir := state.Dir() + // Best-effort: this runs from context_load_gate as fire-and-forget + // and must never block session startup. Any state.Dir failure + // (including the ErrDirNotDeclared bail signal) is swallowed + // uniformly. ErrDirNotDeclared is unreachable here because + // context_load_gate already ran state.Initialized; the check + // stays defensive in case a future caller invokes AutoPrune + // outside the gate. + dir, dirErr := state.Dir() + if dirErr != nil { + return 0 + } + // Same best-effort rationale: a transient read failure should not + // stall session startup. Stale files accumulate for one session + // and get pruned on the next gate invocation. 
entries, readErr := os.ReadDir(dir) if readErr != nil { return 0 diff --git a/internal/cli/system/core/hubsync/sync.go b/internal/cli/system/core/hubsync/sync.go index 73e64a713..3f4963e2e 100644 --- a/internal/cli/system/core/hubsync/sync.go +++ b/internal/cli/system/core/hubsync/sync.go @@ -8,6 +8,7 @@ package hubsync import ( "context" + "errors" "fmt" "os" "path/filepath" @@ -18,17 +19,36 @@ import ( "github.com/ActiveMemory/ctx/internal/config/embed/text" cfgHub "github.com/ActiveMemory/ctx/internal/config/hub" "github.com/ActiveMemory/ctx/internal/hub" - "github.com/ActiveMemory/ctx/internal/rc" ) // Connected reports whether a hub connection config exists. // +// ctxDir is supplied by the caller (typically a FullPreamble-gated +// hook) so this function does not re-resolve it; a second resolution +// would be dead code today and would pair an ambiguous (false, err) +// return with the genuine "no hub configured" result. +// +// Returns (false, nil) when the encrypted connect file is absent: +// ordinary "no hub configured" state. A stat failure other than +// not-exist is propagated so the caller can distinguish "no +// connection" from "we could not check." 
+// +// Parameters: +// - ctxDir: absolute path to the context directory +// // Returns: // - bool: true if .context/.connect.enc exists -func Connected() bool { - path := filepath.Join(rc.ContextDir(), cfgHub.FileConnect) +// - error: non-nil on stat failure other than not-exist +func Connected(ctxDir string) (bool, error) { + path := filepath.Join(ctxDir, cfgHub.FileConnect) _, statErr := os.Stat(path) - return statErr == nil + if statErr != nil { + if errors.Is(statErr, os.ErrNotExist) { + return false, nil + } + return false, statErr + } + return true, nil } // Sync pulls new entries from the hub and writes them to diff --git a/internal/cli/system/core/journal/mark.go b/internal/cli/system/core/journal/mark.go index 38c2c75b4..d338a4b8c 100644 --- a/internal/cli/system/core/journal/mark.go +++ b/internal/cli/system/core/journal/mark.go @@ -26,7 +26,10 @@ import ( // - CheckResult: Stage value // - error: Non-nil on state load failure, unknown stage, or unset stage func CheckStage(filename, stage string) (CheckResult, error) { - journalDir := ctxResolve.JournalDir() + journalDir, dirErr := ctxResolve.JournalDir() + if dirErr != nil { + return CheckResult{}, dirErr + } jState, loadErr := state.Load(journalDir) if loadErr != nil { return CheckResult{}, errJournal.LoadStateFailed(loadErr) @@ -65,7 +68,10 @@ func CheckStage(filename, stage string) (CheckResult, error) { // Returns: // - error: Non-nil on unknown stage or state load/save failure func MarkStage(filename, stage string) error { - journalDir := ctxResolve.JournalDir() + journalDir, dirErr := ctxResolve.JournalDir() + if dirErr != nil { + return dirErr + } jState, loadErr := state.Load(journalDir) if loadErr != nil { return errJournal.LoadStateFailed(loadErr) diff --git a/internal/cli/system/core/knowledge/knowledge.go b/internal/cli/system/core/knowledge/knowledge.go index c667f979b..e574c6f37 100644 --- a/internal/cli/system/core/knowledge/knowledge.go +++ 
b/internal/cli/system/core/knowledge/knowledge.go @@ -115,14 +115,17 @@ func FormatWarnings(findings []finding) string { // // Returns: // - string: formatted nudge box, or empty string if silenced -func EmitWarning(sessionID, fileWarnings string) string { +// - error: propagated from [nudge.EmitAndRelay] so callers can +// honor the log-first principle: if the relay audit entry or +// webhook fails, the nudge box should not be printed. +func EmitWarning(sessionID, fileWarnings string) (string, error) { fallback := fileWarnings + token.NewlineLF + desc.Text( text.DescKeyCheckKnowledgeFallback, ) content := message.Load(hook.CheckKnowledge, hook.VariantWarning, map[string]any{knowledge.VarFileWarnings: fileWarnings}, fallback) if content == "" { - return "" + return "", nil } box := message.NudgeBox( @@ -134,37 +137,51 @@ func EmitWarning(sessionID, fileWarnings string) string { map[string]any{knowledge.VarFileWarnings: fileWarnings}) notifyMsg := fmt.Sprintf(desc.Text(text.DescKeyRelayPrefixFormat), hook.CheckKnowledge, desc.Text(text.DescKeyCheckKnowledgeRelayMessage)) - nudge.EmitAndRelay(notifyMsg, sessionID, ref) - return box + if err := nudge.EmitAndRelay(notifyMsg, sessionID, ref); err != nil { + return "", err + } + return box, nil } // CheckHealth runs the full knowledge health check: scans files, // formats warnings, and builds output if any thresholds are exceeded. // +// ctxDir is supplied by the caller (typically a FullPreamble-gated +// hook) so this function does not re-resolve it; a second resolution +// would be dead code today and would ambiguously pair (false, err) +// with the genuine "no warnings found" return value. 
+// // Parameters: // - sessionID: session identifier for notifications +// - ctxDir: absolute path to the context directory // // Returns: // - string: formatted nudge box, or empty string if no warnings // - bool: true if warnings were found -func CheckHealth(sessionID string) (string, bool) { +// - error: propagated from [EmitWarning] so callers can honour the +// log-first principle and skip printing the box when the relay +// audit entry could not be written. +func CheckHealth(sessionID, ctxDir string) (string, bool, error) { lrnThreshold := rc.EntryCountLearnings() decThreshold := rc.EntryCountDecisions() convThreshold := rc.ConventionLineCount() // All disabled - nothing to check if lrnThreshold == 0 && decThreshold == 0 && convThreshold == 0 { - return "", false + return "", false, nil } findings := ScanFiles( - rc.ContextDir(), decThreshold, lrnThreshold, convThreshold, + ctxDir, decThreshold, lrnThreshold, convThreshold, ) if len(findings) == 0 { - return "", false + return "", false, nil } fileWarnings := FormatWarnings(findings) - box := EmitWarning(sessionID, fileWarnings) - return box, true + box, emitErr := EmitWarning(sessionID, fileWarnings) + if emitErr != nil { + return "", false, emitErr + } + return box, true, nil } diff --git a/internal/cli/system/core/log/log.go b/internal/cli/system/core/log/log.go index 2946b79bc..28356468f 100644 --- a/internal/cli/system/core/log/log.go +++ b/internal/cli/system/core/log/log.go @@ -27,6 +27,14 @@ import ( // Rotates the log when it exceeds config.HookLogMaxBytes, keeping one // previous generation (.1 suffix) - same pattern as eventlog. // +// Message is a verbose operational logger for hooks: it records what +// a hook did for later debugging, not an authoritative audit trail. +// Unlike [event.Append], a failed write here is not a correctness +// problem for downstream; callers do not gate side effects on the +// line landing. 
Write errors are therefore logged to stderr via +// [log/warn.Warn] instead of propagated, preserving the void +// signature and the "never break the editor" contract for hooks. +// // Parameters: // - logFile: Absolute path to the log file // - sessionID: Session identifier (truncated to 8 chars) @@ -48,7 +56,11 @@ func Message(logFile, sessionID, msg string) { line := fmt.Sprintf(desc.Text(text.DescKeyWriteLogLineFormat), time.Now().Format(cfgTime.DateTimePreciseFmt), short, msg) - internalIo.AppendBytes(logFile, []byte(line), fs.PermSecret) + if appendErr := internalIo.AppendBytes( + logFile, []byte(line), fs.PermSecret, + ); appendErr != nil { + ctxLog.Warn(warn.Write, logFile, appendErr) + } } // Rotate checks the log file size and rotates if it exceeds diff --git a/internal/cli/system/core/message/message.go b/internal/cli/system/core/message/message.go index 47f8c69e2..19573bd5a 100644 --- a/internal/cli/system/core/message/message.go +++ b/internal/cli/system/core/message/message.go @@ -7,6 +7,7 @@ package message import ( + "errors" "path/filepath" "strings" @@ -15,8 +16,11 @@ import ( "github.com/ActiveMemory/ctx/internal/config/dir" "github.com/ActiveMemory/ctx/internal/config/file" "github.com/ActiveMemory/ctx/internal/config/token" + "github.com/ActiveMemory/ctx/internal/config/warn" ctxContext "github.com/ActiveMemory/ctx/internal/context/resolve" + errCtx "github.com/ActiveMemory/ctx/internal/err/context" "github.com/ActiveMemory/ctx/internal/io" + logWarn "github.com/ActiveMemory/ctx/internal/log/warn" "github.com/ActiveMemory/ctx/internal/rc" ) @@ -43,9 +47,23 @@ func Load(hk, variant string, vars map[string]any, fallback string) string { filename := variant + file.ExtTxt // 1. 
User override in .context/ - overrideDir := filepath.Join(rc.ContextDir(), dir.HooksMessages, hk) - if data, readErr := io.SafeReadFile(overrideDir, filename); readErr == nil { - return renderTemplate(string(data), vars, fallback) + // + // When no context dir is declared there is no place to look + // for a user override, so we fall through to the embedded + // default. Reading the template itself is best-effort: any + // read error just means "no override", same outcome. A non- + // declaration resolver failure gets logged because it points + // at a real regression (bad .ctxrc, permission issue) that + // would otherwise silently disable overrides. + ctxDir, ctxErr := rc.ContextDir() + switch { + case ctxErr == nil: + overrideDir := filepath.Join(ctxDir, dir.HooksMessages, hk) + if data, readErr := io.SafeReadFile(overrideDir, filename); readErr == nil { + return renderTemplate(string(data), vars, fallback) + } + case !errors.Is(ctxErr, errCtx.ErrDirNotDeclared): + logWarn.Warn(warn.ContextDirResolve, ctxErr) } // 2. Embedded default @@ -97,7 +115,15 @@ func NudgeBox(relayPrefix, title, content string) string { box.Top + title + token.Space + strings.Repeat(box.BorderFill, pad) + token.NewlineLF msg += BoxLines(content) - if line := ctxContext.DirLine(); line != "" { + // Rendering-layer swallow: [NudgeBox] returns only a string, so + // there is no channel to propagate a DirLine resolver error. The + // noisy-TUI log inside DirLine already surfaces any unexpected + // failure, and in hook contexts (the usual caller chain) + // FullPreamble already gated ContextDir, so the error path is + // unreachable in practice. If the line is empty or the resolver + // errored, skip the footer. 
+ line, _ := ctxContext.DirLine() + if line != "" { msg += box.LinePrefix + line + token.NewlineLF } msg += box.Bottom diff --git a/internal/cli/system/core/message/message_cmd.go b/internal/cli/system/core/message/message_cmd.go index eee0b5b14..4bc0a06f3 100644 --- a/internal/cli/system/core/message/message_cmd.go +++ b/internal/cli/system/core/message/message_cmd.go @@ -7,6 +7,7 @@ package message import ( + "errors" "fmt" "os" "path/filepath" @@ -45,17 +46,28 @@ func FormatTemplateVars(info *messages.HookMessageInfo) string { // OverridePath returns the user override file path for a hook/variant. // +// Any resolver error (including [errCtx.ErrDirNotDeclared]) is +// propagated. The previous empty-string return silently produced a +// CWD-relative path when joined by callers, which was exactly the +// "silent write to wrong location" class of bug this branch aims to +// eliminate. +// // Parameters: // - hook: hook name // - variant: template variant name // // Returns: // - string: full filesystem path to the override file -func OverridePath(hook, variant string) string { +// - error: non-nil when the context directory cannot be resolved +func OverridePath(hook, variant string) (string, error) { + ctxDir, err := rc.ContextDir() + if err != nil { + return "", err + } return filepath.Join( - rc.ContextDir(), dir.HooksMessages, + ctxDir, dir.HooksMessages, hook, variant+file.ExtTxt, - ) + ), nil } // HasOverride checks whether a user override file exists. 
@@ -66,7 +78,19 @@ func OverridePath(hook, variant string) string { // // Returns: // - bool: true if an override file exists -func HasOverride(hook, variant string) bool { - _, statErr := os.Stat(OverridePath(hook, variant)) - return statErr == nil +// - error: non-nil when the context directory cannot be resolved +// or when the override file cannot be stat'd for a reason other +// than not-exist (permission, I/O) +func HasOverride(hook, variant string) (bool, error) { + path, err := OverridePath(hook, variant) + if err != nil { + return false, err + } + if _, statErr := os.Stat(path); statErr != nil { + if errors.Is(statErr, os.ErrNotExist) { + return false, nil + } + return false, statErr + } + return true, nil } diff --git a/internal/cli/system/core/nudge/context_size.go b/internal/cli/system/core/nudge/context_size.go index 8bbdf2c53..b2f0679e6 100644 --- a/internal/cli/system/core/nudge/context_size.go +++ b/internal/cli/system/core/nudge/context_size.go @@ -29,6 +29,8 @@ import ( // Parameters: // - logFile: absolute path to the log file // - sessionID: session identifier +// - ctxDir: absolute path to the context directory (forwarded to +// [oversizeContent] so it does not re-resolve) // - count: current prompt count // - tokens: token usage count // - pct: context window usage percentage @@ -36,10 +38,13 @@ import ( // // Returns: // - string: formatted nudge box, or empty string if silenced +// - error: propagated from [EmitAndRelay] so callers can honour +// the log-first principle and skip printing the box when the +// relay audit entry could not be written. 
func EmitCheckpoint( - logFile, sessionID string, + logFile, sessionID, ctxDir string, count, tokens, pct, windowSize int, -) string { +) (string, error) { fallback := desc.Text(text.DescKeyCheckContextSizeCheckpointFallback) content := message.Load( hook.CheckContextSize, hook.VariantCheckpoint, @@ -52,13 +57,17 @@ func EmitCheckpoint( count, ), ) - return "" + return "", nil } // Append optional token usage and oversize nudge to content if tokens > 0 { content += token.NewlineLF + TokenUsageLine(tokens, pct, windowSize) } - if extra := oversizeContent(); extra != "" { + extra, oversizeErr := oversizeContent(ctxDir) + if oversizeErr != nil { + return "", oversizeErr + } + if extra != "" { content += token.NewlineLF + extra } box := message.NudgeBox( @@ -82,8 +91,10 @@ func EmitCheckpoint( count, ), ) - EmitAndRelay(checkpointMsg, sessionID, ref) - return box + if err := EmitAndRelay(checkpointMsg, sessionID, ref); err != nil { + return "", err + } + return box, nil } // EmitWindowWarning builds an independent context window warning (>80%). @@ -97,10 +108,13 @@ func EmitCheckpoint( // // Returns: // - string: formatted nudge box, or empty string if silenced +// - error: propagated from [EmitAndRelay] so callers can honor +// the log-first principle and skip printing the box when the +// relay audit entry could not be written. 
func EmitWindowWarning( logFile, sessionID string, count, tokens, pct int, -) string { +) (string, error) { fallback := fmt.Sprintf( desc.Text(text.DescKeyCheckContextSizeWindowFallback), pct, coreSession.FormatTokenCount(tokens), @@ -117,7 +131,7 @@ func EmitWindowWarning( count, pct, ), ) - return "" + return "", nil } box := message.NudgeBox( desc.Text(text.DescKeyCheckContextSizeRelayPrefix), @@ -138,8 +152,10 @@ func EmitWindowWarning( windowMsg := fmt.Sprintf(desc.Text(text.DescKeyRelayPrefixFormat), hook.CheckContextSize, fmt.Sprintf(desc.Text(text.DescKeyCheckContextSizeWindowRelayFormat), pct)) - EmitAndRelay(windowMsg, sessionID, ref) - return box + if err := EmitAndRelay(windowMsg, sessionID, ref); err != nil { + return "", err + } + return box, nil } // EmitBillingWarning builds a one-shot warning when token usage crosses the @@ -154,16 +170,24 @@ func EmitWindowWarning( // // Returns: // - string: formatted nudge box, or empty string if silenced or already fired +// - error: propagated from [EmitAndRelay] so callers can honour the +// log-first principle. The one-shot "warned" marker is touched +// only on successful emit, so a failed relay will retry next +// invocation rather than silently burn the one-shot chance. func EmitBillingWarning( logFile, sessionID string, count, tokens, threshold int, -) string { +) (string, error) { + stateDir, dirErr := state.Dir() + if dirErr != nil { + return "", dirErr + } // One-shot guard: skip if already warned this session. 
warnedFile := filepath.Join( - state.Dir(), stats.ContextSizeBillingWarnedPrefix+sessionID, + stateDir, stats.ContextSizeBillingWarnedPrefix+sessionID, ) if _, statErr := os.Stat(warnedFile); statErr == nil { - return "" // already fired + return "", nil // already fired } fallback := fmt.Sprintf(desc.Text(text.DescKeyCheckContextSizeBillingFallback), @@ -182,7 +206,7 @@ func EmitBillingWarning( ), ) io.TouchFile(warnedFile) // silenced counts as fired - return "" + return "", nil } box := message.NudgeBox( @@ -190,7 +214,6 @@ func EmitBillingWarning( desc.Text(text.DescKeyCheckContextSizeBillingBoxTitle), content) - io.TouchFile(warnedFile) // one-shot: mark as fired log.Message( logFile, sessionID, fmt.Sprintf( desc.Text(text.DescKeyCheckContextSizeBillingLogFormat), @@ -211,6 +234,9 @@ func EmitBillingWarning( coreSession.FormatTokenCount(threshold), ), ) - EmitAndRelay(billingMsg, sessionID, ref) - return box + if err := EmitAndRelay(billingMsg, sessionID, ref); err != nil { + return "", err + } + io.TouchFile(warnedFile) // one-shot: mark as fired only on success + return box, nil } diff --git a/internal/cli/system/core/nudge/oversize.go b/internal/cli/system/core/nudge/oversize.go index 70cad312c..ecb364bd5 100644 --- a/internal/cli/system/core/nudge/oversize.go +++ b/internal/cli/system/core/nudge/oversize.go @@ -7,6 +7,7 @@ package nudge import ( + "errors" "fmt" "os" "path/filepath" @@ -19,25 +20,46 @@ import ( "github.com/ActiveMemory/ctx/internal/config/hook" "github.com/ActiveMemory/ctx/internal/config/regex" "github.com/ActiveMemory/ctx/internal/config/stats" - "github.com/ActiveMemory/ctx/internal/config/warn" "github.com/ActiveMemory/ctx/internal/io" - ctxLog "github.com/ActiveMemory/ctx/internal/log/warn" - "github.com/ActiveMemory/ctx/internal/rc" ) -// oversizeContent checks for an injection-oversize flag file and returns -// the raw nudge content if present. Deletes the flag after reading (one-shot). 
+// oversizeContent checks for an injection-oversize flag file and +// returns the raw nudge content if present. Deletes the flag after +// reading (one-shot). +// +// ctxDir is supplied by the caller (FullPreamble-equivalent gate +// above) so this helper does not re-resolve it; a second resolution +// would be dead code today and would pair an ambiguous (zero, err) +// return with the legitimate "nothing to do" result. +// +// Parameters: +// - ctxDir: absolute path to the context directory // // Returns: -// - string: raw oversize nudge content, or empty string if no flag -func oversizeContent() string { - baseDir := filepath.Join(rc.ContextDir(), dir.State) +// - string: raw oversize nudge content, or empty string when +// there is no flag to report or the template silences itself. +// - error: non-nil when the flag file cannot be read (permission, +// I/O) or cannot be removed after reading. Legitimate "nothing +// to do" paths return ("", nil): flag file absent +// (os.ErrNotExist). A remove failure returns ("", err) rather +// than (content, err): if we cannot clear the one-shot flag we +// must not emit the nudge either, otherwise the flag re-fires on +// every subsequent invocation and the operator sees a nudge +// storm. Log-first principle: don't emit a user-visible nudge +// whose persistence cleanup we could not verify. +func oversizeContent(ctxDir string) (string, error) { + baseDir := filepath.Join(ctxDir, dir.State) flagPath := filepath.Join(baseDir, stats.ContextSizeInjectionOversizeFlag) data, readErr := io.SafeReadFile( baseDir, stats.ContextSizeInjectionOversizeFlag, ) if readErr != nil { - return "" + if errors.Is(readErr, os.ErrNotExist) { + // No flag on disk ⇒ nothing to report; legitimate. + return "", nil + } + // Permission denied, I/O failure: surface. 
+ return "", readErr } tokenCount := extractOversizeTokens(data) @@ -46,17 +68,14 @@ func oversizeContent() string { ) content := message.Load(hook.CheckContextSize, hook.VariantOversize, map[string]any{stats.VarTokenCount: tokenCount}, fallback) - if content == "" { - if removeErr := os.Remove(flagPath); removeErr != nil { - ctxLog.Warn(warn.Remove, flagPath, removeErr) - } - return "" - } + // One-shot: remove the flag regardless of whether the template + // silenced itself, so a silenced template does not leave the + // flag lingering and re-firing every invocation. if removeErr := os.Remove(flagPath); removeErr != nil { - ctxLog.Warn(warn.Remove, flagPath, removeErr) + return "", removeErr } - return content + return content, nil } // extractOversizeTokens parses the token count from an injection-oversize diff --git a/internal/cli/system/core/nudge/pause.go b/internal/cli/system/core/nudge/pause.go index 350712d67..227e6a809 100644 --- a/internal/cli/system/core/nudge/pause.go +++ b/internal/cli/system/core/nudge/pause.go @@ -31,12 +31,20 @@ import ( // // Returns: // - string: Absolute path to the pause marker file -func PauseMarkerPath(sessionID string) string { - return filepath.Join(state.Dir(), hook.PrefixPauseMarker+sessionID) +// - error: non-nil when the state directory cannot be resolved +func PauseMarkerPath(sessionID string) (string, error) { + stateDir, dirErr := state.Dir() + if dirErr != nil { + return "", dirErr + } + return filepath.Join(stateDir, hook.PrefixPauseMarker+sessionID), nil } // Paused checks if the session is paused. If paused, increments the -// turn counter and returns the current count. Returns 0 if not paused. +// turn counter and returns the current count. Returns 0 if not paused +// or if the state directory cannot be resolved (silent bail keeps the +// calling hook lean; the resolver-failure warning fires once at the +// [state.Initialized] gate those hooks check first). 
// // Parameters: // - sessionID: Session identifier @@ -44,7 +52,10 @@ func PauseMarkerPath(sessionID string) string { // Returns: // - int: Turn count if paused, 0 if not paused func Paused(sessionID string) int { - path := PauseMarkerPath(sessionID) + path, pathErr := PauseMarkerPath(sessionID) + if pathErr != nil { + return 0 + } data, readErr := io.SafeReadUserFile(path) if readErr != nil { return 0 @@ -78,8 +89,16 @@ func PausedMessage(turns int) string { // // Parameters: // - sessionID: Session identifier -func Pause(sessionID string) { - counter.Write(PauseMarkerPath(sessionID), 0) +// +// Returns: +// - error: non-nil when the state directory cannot be resolved +func Pause(sessionID string) error { + path, pathErr := PauseMarkerPath(sessionID) + if pathErr != nil { + return pathErr + } + counter.Write(path, 0) + return nil } // Resume removes the session pause marker. Exported for use by the @@ -87,9 +106,16 @@ func Pause(sessionID string) { // // Parameters: // - sessionID: Session identifier -func Resume(sessionID string) { - p := PauseMarkerPath(sessionID) +// +// Returns: +// - error: non-nil when the state directory cannot be resolved +func Resume(sessionID string) error { + p, pathErr := PauseMarkerPath(sessionID) + if pathErr != nil { + return pathErr + } if removeErr := os.Remove(p); removeErr != nil { ctxLog.Warn(warn.Remove, p, removeErr) } + return nil } diff --git a/internal/cli/system/core/nudge/relay.go b/internal/cli/system/core/nudge/relay.go index 930cfdf49..713bf6e2d 100644 --- a/internal/cli/system/core/nudge/relay.go +++ b/internal/cli/system/core/nudge/relay.go @@ -19,34 +19,63 @@ import ( writeSetup "github.com/ActiveMemory/ctx/internal/write/setup" ) -// Relay sends a relay notification and appends the same event to the -// local event log. This is the standard two-sink pattern used by most -// hooks after emitting output. 
+// Relay appends a relay event to the local event log and, only on +// success, sends the relay webhook notification. The order is +// deliberate: the log is the authoritative record; the webhook is a +// side effect that must not claim an event happened unless it was +// first recorded. See docs/security/reporting.md → +// "Log-First Audit Trail". // // Parameters: // - msg: human-readable event description // - sessionID: current session identifier // - ref: template reference for filtering/aggregation (may be nil) -func Relay(msg, sessionID string, ref *entity.TemplateRef) { - _ = notify.Send(hook.NotifyChannelRelay, msg, sessionID, ref) - event.Append(hook.NotifyChannelRelay, msg, sessionID, ref) +// +// Returns: +// - error: non-nil when the event-log append fails (webhook is +// skipped in that case) or when the webhook send itself fails +// (log was written). Callers propagate to surface real failures +// rather than pretend the notification succeeded. +func Relay(msg, sessionID string, ref *entity.TemplateRef) error { + if appendErr := event.Append( + hook.NotifyChannelRelay, msg, sessionID, ref, + ); appendErr != nil { + return appendErr + } + return notify.Send(hook.NotifyChannelRelay, msg, sessionID, ref) } // EmitAndRelay sends both a nudge and a relay notification, then -// appends the relay event to the local event log. Used by hooks that -// emit both notification types with the same message. +// appends the relay event to the local event log. +// +// The nudge webhook has no corresponding event-log channel today, +// so log-first ordering cannot apply to the nudge leg; this is a +// known gap. A future refactor may add a nudge channel to +// [event.Append]; until then the nudge webhook can fire even if the +// later relay log fails. The relay leg itself follows [Relay]'s +// log-first ordering. 
// // Parameters: // - msg: human-readable event description // - sessionID: current session identifier // - ref: template reference for filtering/aggregation (may be nil) -func EmitAndRelay(msg, sessionID string, ref *entity.TemplateRef) { - _ = notify.Send(hook.NotifyChannelNudge, msg, sessionID, ref) - Relay(msg, sessionID, ref) +// +// Returns: +// - error: non-nil when the nudge send, the relay log, or the +// relay webhook fails. A nudge failure short-circuits the relay +// so we do not send half a story. +func EmitAndRelay(msg, sessionID string, ref *entity.TemplateRef) error { + if sendErr := notify.Send( + hook.NotifyChannelNudge, msg, sessionID, ref, + ); sendErr != nil { + return sendErr + } + return Relay(msg, sessionID, ref) } -// LoadAndEmit loads a hook message template and, if non-empty, emits the -// standard nudge box + relay notification + throttle marker sequence. +// LoadAndEmit loads a hook message template and, if non-empty, emits +// the standard nudge box + relay notification + throttle marker +// sequence. // // Parameters: // - cmd: Cobra command for output @@ -59,30 +88,39 @@ func EmitAndRelay(msg, sessionID string, ref *entity.TemplateRef) { // - relayMessage: human-readable relay suffix // - sessionID: current session identifier // - markerPath: throttle file to touch (empty string skips) +// +// Returns: +// - error: propagated from [Emit] when the message is non-empty; +// nil when the template resolved to an empty message (intentional +// silence). 
func LoadAndEmit( cmd *cobra.Command, hookName, variant string, vars map[string]any, fallback, relayPrefix, boxTitle, relayMessage, sessionID, markerPath string, -) { +) error { content := message.Load(hookName, variant, vars, fallback) if content == "" { - return + return nil } - Emit(cmd, content, + return Emit(cmd, content, relayPrefix, boxTitle, hookName, variant, relayMessage, sessionID, vars, markerPath, ) } -// Emit is the standard hook tail: print nudge box, send -// nudge+relay notifications, and touch the throttle marker. +// Emit is the standard hook tail: print nudge box, send nudge+relay +// notifications, and touch the throttle marker. +// +// The throttle marker is only touched on a successful relay: marking +// a hook as recently-emitted when the emit actually failed would +// suppress retries on a real problem. // // Parameters: // - cmd: Cobra command for output // - content: nudge box content (from Load) -// - relayPrefix: relay prefix text (e.g., "check-backup-age") +// - relayPrefix: relay prefix text (e.g., "check-ceremony") // - boxTitle: nudge box title // - hookName: hook name for notifications // - variant: hook variant for template ref @@ -90,17 +128,25 @@ func LoadAndEmit( // - sessionID: current session identifier // - vars: template variables for the template ref (may be nil) // - markerPath: throttle file to touch (empty string skips) +// +// Returns: +// - error: propagated from [Relay] (log or webhook failure). 
func Emit( cmd *cobra.Command, content, relayPrefix, boxTitle, hookName, variant, relayMessage, sessionID string, vars map[string]any, markerPath string, -) { +) error { writeSetup.Nudge(cmd, message.NudgeBox(relayPrefix, boxTitle, content)) ref := entity.NewTemplateRef(hookName, variant, vars) - Relay(hookName+token.ColonSpace+relayMessage, sessionID, ref) + if relayErr := Relay( + hookName+token.ColonSpace+relayMessage, sessionID, ref, + ); relayErr != nil { + return relayErr + } if markerPath != "" { internalIo.TouchFile(markerPath) } + return nil } diff --git a/internal/cli/system/core/session/session.go b/internal/cli/system/core/session/session.go index 184d16442..301c32300 100644 --- a/internal/cli/system/core/session/session.go +++ b/internal/cli/system/core/session/session.go @@ -122,8 +122,12 @@ func ReadID(stdin *os.File) string { // Returns: // - int: Latest context window usage percentage (0-100), or 0 if unknown func LatestPct(sessionID string) int { + stateDir, dirErr := state.Dir() + if dirErr != nil { + return 0 + } path := filepath.Join( - state.Dir(), + stateDir, cfgStats.FilePrefix+sessionID+file.ExtJSONL, ) data, readErr := internalIo.SafeReadUserFile(path) @@ -149,21 +153,32 @@ func LatestPct(sessionID string) int { // WriteStats appends a JSONL line to .context/state/stats-{sessionID}.jsonl. // The file is designed for `tail -f` monitoring of token usage across prompts. -// Best-effort: errors are silently ignored. +// Errors are propagated; see Returns for the rationale. // // Parameters: // - sessionID: Session identifier // - stats: Stats entry to write -func WriteStats(sessionID string, stats entity.Stats) { +// +// Returns: +// - error: non-nil when marshaling or the append fails. Stats are +// an audit trail of per-session token usage; surfacing a write +// failure lets callers honour the log-first principle (do not +// claim success for a session action whose stats entry never +// landed). 
+func WriteStats(sessionID string, stats entity.Stats) error { + stateDir, dirErr := state.Dir() + if dirErr != nil { + return dirErr + } path := filepath.Join( - state.Dir(), + stateDir, cfgStats.FilePrefix+sessionID+file.ExtJSONL, ) data, marshalErr := json.Marshal(stats) if marshalErr != nil { - return + return marshalErr } data = append(data, token.NewlineLF[0]) - internalIo.AppendBytes(path, data, fs.PermSecret) + return internalIo.AppendBytes(path, data, fs.PermSecret) } diff --git a/internal/cli/system/core/session/session_token.go b/internal/cli/system/core/session/session_token.go index 943ef5489..2e0514761 100644 --- a/internal/cli/system/core/session/session_token.go +++ b/internal/cli/system/core/session/session_token.go @@ -71,7 +71,11 @@ func ReadTokenInfo(sessionID string) (entity.TokenInfo, error) { // - error: Non-nil only on unexpected errors func FindJSONLPath(sessionID string) (string, error) { // Check cache first - cacheFile := filepath.Join(state.Dir(), stats.JsonlPathCachePrefix+sessionID) + stateDir, dirErr := state.Dir() + if dirErr != nil { + return "", dirErr + } + cacheFile := filepath.Join(stateDir, stats.JsonlPathCachePrefix+sessionID) if data, readErr := internalIo.SafeReadUserFile(cacheFile); readErr == nil { cached := strings.TrimSpace(string(data)) if cached != "" { diff --git a/internal/cli/system/core/state/state.go b/internal/cli/system/core/state/state.go index 1e7b588ed..c9777a14f 100644 --- a/internal/cli/system/core/state/state.go +++ b/internal/cli/system/core/state/state.go @@ -7,32 +7,56 @@ package state import ( + "errors" "path/filepath" "github.com/ActiveMemory/ctx/internal/config/dir" "github.com/ActiveMemory/ctx/internal/config/fs" - "github.com/ActiveMemory/ctx/internal/config/warn" ctxContext "github.com/ActiveMemory/ctx/internal/context/validate" + errCtx "github.com/ActiveMemory/ctx/internal/err/context" ctxIo "github.com/ActiveMemory/ctx/internal/io" - ctxLog "github.com/ActiveMemory/ctx/internal/log/warn" 
"github.com/ActiveMemory/ctx/internal/rc" ) // Dir returns the project-scoped runtime state directory -// (.context/state/). Ensures the directory exists on each call. MkdirAll -// is a no-op when the directory is already present. +// (`/state/`). Ensures the directory exists on each call; +// MkdirAll is a no-op when the directory is already present. +// +// **Always returns an error when the path is empty.** Specifically, +// when CTX_DIR is not declared, Dir returns +// ("", [errCtx.ErrDirNotDeclared]) so callers that gate on +// `dirErr != nil` are uniformly safe. Defensive callers that need +// to special-case the legitimate-absence path can match with +// `errors.Is(dirErr, errCtx.ErrDirNotDeclared)`. +// +// The contract was tightened from the earlier ("", nil) form because +// that form silently invited `filepath.Join("", rel)` traps: +// callers that only checked `dirErr != nil` would join to a +// CWD-relative path and write to the wrong location. Returning an +// explicit error makes the empty-path case unrepresentable in a +// "looks fine" branch. // // Returns: -// - string: Absolute path to the state directory -func Dir() string { +// - string: Absolute path to the state directory; always non-empty +// when the error is nil. +// - error: [errCtx.ErrDirNotDeclared] when CTX_DIR is unset, +// resolver errors otherwise, mkdir failures otherwise. +func Dir() (string, error) { if dirOverride != "" { - return dirOverride + return dirOverride, nil + } + ctxDir, err := rc.ContextDir() + if err != nil { + // Propagate every resolver error (including + // ErrDirNotDeclared) so callers can match on it via + // errors.Is when they need to special-case the absence. 
+ return "", err } - d := filepath.Join(rc.ContextDir(), dir.State) + d := filepath.Join(ctxDir, dir.State) if mkdirErr := ctxIo.SafeMkdirAll(d, fs.PermRestrictedDir); mkdirErr != nil { - ctxLog.Warn(warn.Mkdir, d, mkdirErr) + return "", mkdirErr } - return d + return d, nil } // dirOverride allows tests to redirect Dir() to a temp directory. @@ -51,8 +75,22 @@ func SetDirForTest(d string) { // via "ctx init". Hooks should no-op when this returns false to avoid // creating a partial state (e.g., logs/) before initialization. // +// Returns (false, nil) when the context directory is not declared: there +// is no directory to inspect, which is a legitimate "not initialized" +// answer. Any other resolver failure is propagated so callers can +// distinguish "properly not initialized" from "we could not tell" and +// surface the failure instead of letting hooks silently stop firing. +// // Returns: // - bool: True if the context directory is initialized -func Initialized() bool { - return ctxContext.Initialized(rc.ContextDir()) +// - error: non-nil on resolver failure (other than not-declared) +func Initialized() (bool, error) { + ctxDir, err := rc.ContextDir() + if err != nil { + if errors.Is(err, errCtx.ErrDirNotDeclared) { + return false, nil + } + return false, err + } + return ctxContext.Initialized(ctxDir), nil } diff --git a/internal/cli/system/core/version/version.go b/internal/cli/system/core/version/version.go index f136df106..e2bb8de25 100644 --- a/internal/cli/system/core/version/version.go +++ b/internal/cli/system/core/version/version.go @@ -59,18 +59,24 @@ func ParseMajorMinor(ver string) (major, minor int, ok bool) { // // Returns: // - string: formatted nudge box (with leading newline), or empty string -func CheckKeyAge(sessionID string) string { - kp := rc.KeyPath() +// - error: propagated from [nudge.EmitAndRelay] so callers can honour +// the log-first principle: if the relay audit entry or webhook +// fails, the nudge box should not be printed. 
+func CheckKeyAge(sessionID string) (string, error) { + kp, kpErr := rc.KeyPath() + if kpErr != nil { + return "", kpErr + } info, statErr := os.Stat(kp) if statErr != nil { - return "" // no key: nothing to check + return "", nil // no key: nothing to check } ageDays := int(time.Since(info.ModTime()).Hours() / cfgTime.HoursPerDay) threshold := rc.KeyRotationDays() if ageDays < threshold { - return "" + return "", nil } keyFallback := fmt.Sprintf( @@ -79,7 +85,7 @@ func CheckKeyAge(sessionID string) string { keyContent := message.Load(hook.CheckVersion, hook.VariantKeyRotation, map[string]any{version.VarKeyAgeDays: ageDays}, keyFallback) if keyContent == "" { - return "" + return "", nil } boxTitle := desc.Text(text.DescKeyCheckVersionKeyBoxTitle) @@ -96,6 +102,8 @@ func CheckKeyAge(sessionID string) string { desc.Text(text.DescKeyCheckVersionKeyRelayFormat), ageDays, ), ) - nudge.EmitAndRelay(keyNotifyMsg, sessionID, keyRef) - return box + if err := nudge.EmitAndRelay(keyNotifyMsg, sessionID, keyRef); err != nil { + return "", err + } + return box, nil } diff --git a/internal/cli/system/doc.go b/internal/cli/system/doc.go index abac87399..7ff1799bc 100644 --- a/internal/cli/system/doc.go +++ b/internal/cli/system/doc.go @@ -37,6 +37,7 @@ // commands output JSON with a "decision" field. 
// // UserPromptSubmit hooks (hidden): +// - check-anchor-drift: stale CTX_DIR vs project anchor warning // - check-context-size: adaptive prompt counter // - check-persistence: context file mtime watcher // - check-ceremony: session ceremony reminder @@ -49,13 +50,11 @@ // - check-reminder: session reminder surfacing // - check-freshness: constant staleness check // - check-hub-sync: auto-sync Hub entries -// - check-backup-age: backup staleness check // - check-skill-discovery: skill tip nudge // - heartbeat: token telemetry and billing check // // PreToolUse hooks (hidden): // - block-non-path-ctx: blocks non-PATH ctx calls -// - block-dangerous-command: blocks dangerous patterns // - context-load-gate: context injection with cooldown // - qa-reminder: lint/test before done reminder // - specs-nudge: save plans to specs/ reminder diff --git a/internal/cli/system/system.go b/internal/cli/system/system.go index 659d419a8..51ff4e9b2 100644 --- a/internal/cli/system/system.go +++ b/internal/cli/system/system.go @@ -10,10 +10,9 @@ import ( "github.com/spf13/cobra" "github.com/ActiveMemory/ctx/internal/cli/parent" - "github.com/ActiveMemory/ctx/internal/cli/system/cmd/block_dangerous_command" "github.com/ActiveMemory/ctx/internal/cli/system/cmd/block_non_path_ctx" sysBootstrap "github.com/ActiveMemory/ctx/internal/cli/system/cmd/bootstrap" - "github.com/ActiveMemory/ctx/internal/cli/system/cmd/check_backup_age" + "github.com/ActiveMemory/ctx/internal/cli/system/cmd/check_anchor_drift" "github.com/ActiveMemory/ctx/internal/cli/system/cmd/check_ceremony" "github.com/ActiveMemory/ctx/internal/cli/system/cmd/check_context_size" "github.com/ActiveMemory/ctx/internal/cli/system/cmd/check_freshness" @@ -44,10 +43,10 @@ import ( // Cmd returns the "ctx system" parent command. // // Hosts hidden Claude Code hook plumbing and agent-only commands. 
-// User-facing maintenance commands (backup, prune, sysinfo, usage) -// are top-level; hook-facing commands (event, message, notify, -// pause, resume) live under "ctx hook". Both groups are registered -// in internal/bootstrap/group.go. Bootstrap remains here as +// User-facing maintenance commands (prune, sysinfo, usage) are +// top-level; hook-facing commands (event, message, notify, pause, +// resume) live under "ctx hook". Both groups are registered in +// internal/bootstrap/group.go. Bootstrap remains here as // agent-only plumbing. // // Hook subcommands implement Claude Code hook logic as native Go @@ -57,10 +56,9 @@ import ( // - *cobra.Command: Parent command with hook plumbing subcommands func Cmd() *cobra.Command { return parent.Cmd(cmd.DescKeySystem, cmd.UseSystem, - block_dangerous_command.Cmd(), sysBootstrap.Cmd(), block_non_path_ctx.Cmd(), - check_backup_age.Cmd(), + check_anchor_drift.Cmd(), check_ceremony.Cmd(), check_context_size.Cmd(), check_freshness.Cmd(), diff --git a/internal/cli/task/cmd/complete/run.go b/internal/cli/task/cmd/complete/run.go index 7141b09aa..6ee02787e 100644 --- a/internal/cli/task/cmd/complete/run.go +++ b/internal/cli/task/cmd/complete/run.go @@ -38,7 +38,11 @@ func Run(cmd *cobra.Command, args []string) error { ref := fmt.Sprintf( cfgTrace.RefFormat, cfgTrace.RefTypeTask, matchedNum, ) - _ = trace.Record(ref, state.Dir()) + stateDir, dirErr := state.Dir() + if dirErr != nil { + return dirErr + } + _ = trace.Record(ref, stateDir) return nil } diff --git a/internal/cli/task/cmd/snapshot/run.go b/internal/cli/task/cmd/snapshot/run.go index 9f362d742..1417c236b 100644 --- a/internal/cli/task/cmd/snapshot/run.go +++ b/internal/cli/task/cmd/snapshot/run.go @@ -37,8 +37,16 @@ import ( // Returns: // - error: Non-nil if TASKS.md doesn't exist or file operations fail func Run(cmd *cobra.Command, args []string) error { - tasksPath := path.File() - archivePath := path.ArchiveDir() + tasksPath, pathErr := path.File() + if pathErr 
!= nil { + cmd.SilenceUsage = true + return pathErr + } + archivePath, archiveErr := path.ArchiveDir() + if archiveErr != nil { + cmd.SilenceUsage = true + return archiveErr + } // Check if TASKS.md exists if _, statErr := os.Stat(tasksPath); os.IsNotExist(statErr) { diff --git a/internal/cli/task/core/archive/archive.go b/internal/cli/task/core/archive/archive.go index 2a49a26ef..1c15bca30 100644 --- a/internal/cli/task/core/archive/archive.go +++ b/internal/cli/task/core/archive/archive.go @@ -30,7 +30,10 @@ import ( // - Result: Parsed archive plan // - error: Non-nil if TASKS.md doesn't exist or can't be read func Plan() (Result, error) { - tasksPath := path.File() + tasksPath, pathErr := path.File() + if pathErr != nil { + return Result{}, pathErr + } nl := token.NewlineLF if _, statErr := os.Stat(tasksPath); os.IsNotExist(statErr) { @@ -87,7 +90,10 @@ func Execute(r Result) (string, error) { return "", writeErr } - tasksPath := path.File() + tasksPath, pathErr := path.File() + if pathErr != nil { + return "", pathErr + } if updateErr := io.SafeWriteFile( tasksPath, []byte(r.NewTasksBody), fs.PermFile, ); updateErr != nil { diff --git a/internal/cli/task/core/complete/complete.go b/internal/cli/task/core/complete/complete.go index 34c58d5c6..0b822493f 100644 --- a/internal/cli/task/core/complete/complete.go +++ b/internal/cli/task/core/complete/complete.go @@ -36,7 +36,11 @@ import ( // operations fail func Complete(query, contextDir string) (string, int, error) { if contextDir == "" { - contextDir = rc.ContextDir() + declared, ctxErr := rc.ContextDir() + if ctxErr != nil { + return "", 0, ctxErr + } + contextDir = declared } filePath := filepath.Join(contextDir, ctx.Task) diff --git a/internal/cli/task/core/path/path.go b/internal/cli/task/core/path/path.go index 67837f5b7..84194cba3 100644 --- a/internal/cli/task/core/path/path.go +++ b/internal/cli/task/core/path/path.go @@ -18,14 +18,24 @@ import ( // // Returns: // - string: Full path to .context/TASKS.md 
-func File() string { - return filepath.Join(rc.ContextDir(), ctx.Task) +// - error: non-nil when the context directory is not declared +func File() (string, error) { + ctxDir, err := rc.ContextDir() + if err != nil { + return "", err + } + return filepath.Join(ctxDir, ctx.Task), nil } // ArchiveDir returns the path to the archive directory. // // Returns: // - string: Full path to .context/archive/ -func ArchiveDir() string { - return filepath.Join(rc.ContextDir(), dir.Archive) +// - error: non-nil when the context directory is not declared +func ArchiveDir() (string, error) { + ctxDir, err := rc.ContextDir() + if err != nil { + return "", err + } + return filepath.Join(ctxDir, dir.Archive), nil } diff --git a/internal/cli/task/task_test.go b/internal/cli/task/task_test.go index 9a49ed2e2..20ea15437 100644 --- a/internal/cli/task/task_test.go +++ b/internal/cli/task/task_test.go @@ -20,6 +20,7 @@ import ( "github.com/ActiveMemory/ctx/internal/config/ctx" "github.com/ActiveMemory/ctx/internal/config/dir" "github.com/ActiveMemory/ctx/internal/rc" + "github.com/ActiveMemory/ctx/internal/testutil/testctx" ) // TestTasksCommands tests the tasks subcommands. 
@@ -36,6 +37,8 @@ func TestTasksCommands(t *testing.T) { } defer func() { _ = os.Chdir(origDir) }() + testctx.Declare(t, tmpDir) + // First init initCmd := initialize.Cmd() initCmd.SetArgs([]string{}) @@ -98,6 +101,8 @@ func setupTaskDir(t *testing.T) string { rc.Reset() }) + testctx.Declare(t, tmpDir) + initCmd := initialize.Cmd() initCmd.SetArgs([]string{}) if err := initCmd.Execute(); err != nil { @@ -163,18 +168,24 @@ func TestCountPendingTasks(t *testing.T) { func TestTasksFilePath(t *testing.T) { setupTaskDir(t) - path := path.File() - if !strings.Contains(path, ctx.Task) { - t.Errorf("File() = %q, want to contain %q", path, ctx.Task) + p, err := path.File() + if err != nil { + t.Fatalf("File: %v", err) + } + if !strings.Contains(p, ctx.Task) { + t.Errorf("File() = %q, want to contain %q", p, ctx.Task) } } func TestArchiveDirPath(t *testing.T) { setupTaskDir(t) - path := path.ArchiveDir() - if !strings.Contains(path, dir.Archive) { - t.Errorf("ArchiveDir() = %q, want to contain %q", path, dir.Archive) + p, err := path.ArchiveDir() + if err != nil { + t.Fatalf("ArchiveDir: %v", err) + } + if !strings.Contains(p, dir.Archive) { + t.Errorf("ArchiveDir() = %q, want to contain %q", p, dir.Archive) } } @@ -186,13 +197,11 @@ func TestSnapshotCommand_NoTasks(t *testing.T) { } t.Cleanup(func() { _ = os.Chdir(origDir) - rc.Reset() }) // Create .context but no TASKS.md - rc.Reset() - rc.OverrideContextDir(dir.Context) - if err := os.MkdirAll(dir.Context, 0750); err != nil { + ctxDir := testctx.Declare(t, tmpDir) + if err := os.MkdirAll(ctxDir, 0750); err != nil { t.Fatal(err) } @@ -247,12 +256,10 @@ func TestArchiveCommand_NoTasks(t *testing.T) { } t.Cleanup(func() { _ = os.Chdir(origDir) - rc.Reset() }) - rc.Reset() - rc.OverrideContextDir(dir.Context) - if err := os.MkdirAll(dir.Context, 0750); err != nil { + ctxDir := testctx.Declare(t, tmpDir) + if err := os.MkdirAll(ctxDir, 0750); err != nil { t.Fatal(err) } diff --git a/internal/cli/trace/cmd/collect/run.go 
b/internal/cli/trace/cmd/collect/run.go index 8602c93d2..833a2037a 100644 --- a/internal/cli/trace/cmd/collect/run.go +++ b/internal/cli/trace/cmd/collect/run.go @@ -22,7 +22,11 @@ import ( // Returns: // - error: non-nil on execution failure func Run(cmd *cobra.Command) error { - contextDir := rc.ContextDir() + contextDir, err := rc.RequireContextDir() + if err != nil { + cmd.SilenceUsage = true + return err + } refs := trace.Collect(contextDir) trailer := trace.FormatTrailer(refs) writeTrace.Trailer(cmd, trailer) diff --git a/internal/cli/trace/cmd/file/run.go b/internal/cli/trace/cmd/file/run.go index aaf6a4c44..d6ff47577 100644 --- a/internal/cli/trace/cmd/file/run.go +++ b/internal/cli/trace/cmd/file/run.go @@ -32,7 +32,11 @@ import ( // Returns: // - error: non-nil on execution failure func Run(cmd *cobra.Command, pathArg string, last int) error { - contextDir := rc.ContextDir() + contextDir, err := rc.RequireContextDir() + if err != nil { + cmd.SilenceUsage = true + return err + } traceDir := filepath.Join(contextDir, dir.Trace) filePath := coreFile.ParsePathArg(pathArg) diff --git a/internal/cli/trace/cmd/show/run.go b/internal/cli/trace/cmd/show/run.go index fc2aef80c..e6807a7ff 100644 --- a/internal/cli/trace/cmd/show/run.go +++ b/internal/cli/trace/cmd/show/run.go @@ -32,7 +32,11 @@ import ( // Returns: // - error: non-nil on execution failure func Run(cmd *cobra.Command, args []string, last int, jsonOutput bool) error { - contextDir := rc.ContextDir() + contextDir, err := rc.RequireContextDir() + if err != nil { + cmd.SilenceUsage = true + return err + } traceDir := filepath.Join(contextDir, dir.Trace) if last > 0 { diff --git a/internal/cli/trace/cmd/tag/run.go b/internal/cli/trace/cmd/tag/run.go index a84d7fa8d..21f33190a 100644 --- a/internal/cli/trace/cmd/tag/run.go +++ b/internal/cli/trace/cmd/tag/run.go @@ -41,7 +41,12 @@ func Run(cmd *cobra.Command, commitRef, note string) error { return errTrace.ResolveCommit(commitRef, resolveErr) } - traceDir 
:= filepath.Join(rc.ContextDir(), dir.Trace) + ctxDir, ctxErr := rc.RequireContextDir() + if ctxErr != nil { + cmd.SilenceUsage = true + return ctxErr + } + traceDir := filepath.Join(ctxDir, dir.Trace) entry := trace.OverrideEntry{ Commit: hash, diff --git a/internal/cli/trace/core/collect/collect.go b/internal/cli/trace/core/collect/collect.go index 3a837b240..efa928942 100644 --- a/internal/cli/trace/core/collect/collect.go +++ b/internal/cli/trace/core/collect/collect.go @@ -31,7 +31,10 @@ import ( // Returns: // - error: non-nil on execution failure func RecordCommit(commitHash string) error { - contextDir := rc.ContextDir() + contextDir, ctxErr := rc.ContextDir() + if ctxErr != nil { + return ctxErr + } // Read refs from the commit trailer, the single source of truth. // This matches exactly what was injected by the prepare-commit-msg hook. diff --git a/internal/cli/trace/trace_test.go b/internal/cli/trace/trace_test.go index efdac20e4..086c035ee 100644 --- a/internal/cli/trace/trace_test.go +++ b/internal/cli/trace/trace_test.go @@ -14,6 +14,7 @@ import ( "testing" "github.com/ActiveMemory/ctx/internal/cli/initialize" + "github.com/ActiveMemory/ctx/internal/testutil/testctx" "github.com/ActiveMemory/ctx/internal/trace" ) @@ -26,6 +27,8 @@ func TestTraceTagAndShow(t *testing.T) { } defer func() { _ = os.Chdir(origDir) }() + testctx.Declare(t, tmpDir) + // Init git repo run(t, "git", "init") run(t, "git", "config", "user.email", "test@test.com") diff --git a/internal/cli/usage/run.go b/internal/cli/usage/run.go index 74b4f10c7..bdb2c0b92 100644 --- a/internal/cli/usage/run.go +++ b/internal/cli/usage/run.go @@ -33,7 +33,12 @@ func Run(cmd *cobra.Command) error { last, _ := cmd.Flags().GetInt(cFlag.Last) jsonOut, _ := cmd.Flags().GetBool(cFlag.JSON) - d := filepath.Join(rc.ContextDir(), dir.State) + ctxDir, ctxErr := rc.RequireContextDir() + if ctxErr != nil { + cmd.SilenceUsage = true + return ctxErr + } + d := filepath.Join(ctxDir, dir.State) entries, readErr 
:= coreStats.ReadDir(d, session) if readErr != nil { diff --git a/internal/cli/watch/cmd/root/run.go b/internal/cli/watch/cmd/root/run.go index ee1540462..a4bea8875 100644 --- a/internal/cli/watch/cmd/root/run.go +++ b/internal/cli/watch/cmd/root/run.go @@ -17,6 +17,7 @@ import ( "github.com/ActiveMemory/ctx/internal/err/initialize" errJournal "github.com/ActiveMemory/ctx/internal/err/journal" internalIo "github.com/ActiveMemory/ctx/internal/io" + "github.com/ActiveMemory/ctx/internal/rc" "github.com/ActiveMemory/ctx/internal/write/watch" ) @@ -35,7 +36,15 @@ import ( // - error: Non-nil if the context directory is missing, the log file cannot // be opened, or stream processing fails func Run(cmd *cobra.Command, logPath string, dryRun bool) error { - if !validate.Exists("") { + if _, ctxErr := rc.RequireContextDir(); ctxErr != nil { + cmd.SilenceUsage = true + return ctxErr + } + exists, existsErr := validate.Exists("") + if existsErr != nil { + return existsErr + } + if !exists { return initialize.ContextNotInitialized() } diff --git a/internal/cli/watch/core/apply/complete.go b/internal/cli/watch/core/apply/complete.go index cff7a0daf..0f37ef9b5 100644 --- a/internal/cli/watch/core/apply/complete.go +++ b/internal/cli/watch/core/apply/complete.go @@ -38,7 +38,11 @@ func completeTask(query string) error { return errTask.NoneSpecified() } - filePath := filepath.Join(rc.ContextDir(), ctx.Task) + ctxDir, ctxErr := rc.ContextDir() + if ctxErr != nil { + return ctxErr + } + filePath := filepath.Join(ctxDir, ctx.Task) nl := token.NewlineLF content, readErr := io.SafeReadUserFile(filepath.Clean(filePath)) diff --git a/internal/cli/watch/core/apply/complete_test.go b/internal/cli/watch/core/apply/complete_test.go index 61f721664..121480322 100644 --- a/internal/cli/watch/core/apply/complete_test.go +++ b/internal/cli/watch/core/apply/complete_test.go @@ -12,6 +12,7 @@ import ( "testing" "github.com/ActiveMemory/ctx/internal/cli/initialize" + 
"github.com/ActiveMemory/ctx/internal/testutil/testctx" ) // TestCompleteTaskNoMatch tests complete with no matching task. @@ -28,6 +29,8 @@ func TestCompleteTaskNoMatch(t *testing.T) { } defer func() { _ = os.Chdir(origDir) }() + testctx.Declare(t, tmpDir) + // Initialize context initCmd := initialize.Cmd() initCmd.SetArgs([]string{}) diff --git a/internal/cli/watch/core/core_test.go b/internal/cli/watch/core/core_test.go index a586303b0..bdf21af9a 100644 --- a/internal/cli/watch/core/core_test.go +++ b/internal/cli/watch/core/core_test.go @@ -19,6 +19,7 @@ import ( "github.com/ActiveMemory/ctx/internal/config/ctx" "github.com/ActiveMemory/ctx/internal/config/entry" "github.com/ActiveMemory/ctx/internal/rc" + "github.com/ActiveMemory/ctx/internal/testutil/testctx" "github.com/spf13/cobra" ) @@ -36,6 +37,8 @@ func TestApplyUpdate(t *testing.T) { } defer func() { _ = os.Chdir(origDir) }() + testctx.Declare(t, tmpDir) + // Initialize context initCmd := initialize.Cmd() initCmd.SetArgs([]string{}) @@ -141,7 +144,11 @@ func TestApplyUpdate(t *testing.T) { } // Verify content was added - filePath := filepath.Join(rc.ContextDir(), tt.checkFile) + ctxDir, ctxErr := rc.ContextDir() + if ctxErr != nil { + t.Fatalf("ContextDir: %v", ctxErr) + } + filePath := filepath.Join(ctxDir, tt.checkFile) content, err := os.ReadFile(filepath.Clean(filePath)) if err != nil { t.Fatalf("failed to read %s: %v", tt.checkFile, err) @@ -167,6 +174,8 @@ func TestApplyCompleteUpdate(t *testing.T) { } defer func() { _ = os.Chdir(origDir) }() + testctx.Declare(t, tmpDir) + // Initialize context initCmd := initialize.Cmd() initCmd.SetArgs([]string{}) @@ -175,7 +184,11 @@ func TestApplyCompleteUpdate(t *testing.T) { } // Add a task to complete - tasksPath := filepath.Join(rc.ContextDir(), ctx.Task) + ctxDir, ctxErr := rc.ContextDir() + if ctxErr != nil { + t.Fatalf("ContextDir: %v", ctxErr) + } + tasksPath := filepath.Join(ctxDir, ctx.Task) tasksContent := `# Tasks ## Next Up @@ -222,6 +235,8 @@ 
func TestProcessStream(t *testing.T) { } defer func() { _ = os.Chdir(origDir) }() + testctx.Declare(t, tmpDir) + // Initialize context initCmd := initialize.Cmd() initCmd.SetArgs([]string{}) @@ -245,7 +260,11 @@ More output } // Verify task was written - tasksPath := filepath.Join(rc.ContextDir(), ctx.Task) + ctxDir2, ctxErr2 := rc.ContextDir() + if ctxErr2 != nil { + t.Fatalf("ContextDir: %v", ctxErr2) + } + tasksPath := filepath.Join(ctxDir2, ctx.Task) content, err := os.ReadFile(filepath.Clean(tasksPath)) if err != nil { t.Fatalf("failed to read tasks: %v", err) @@ -269,6 +288,8 @@ func TestProcessStreamWithAttributes(t *testing.T) { } defer func() { _ = os.Chdir(origDir) }() + testctx.Declare(t, tmpDir) + // Initialize context initCmd := initialize.Cmd() initCmd.SetArgs([]string{}) @@ -295,7 +316,11 @@ func TestProcessStreamWithAttributes(t *testing.T) { } // Verify learning was written with structured fields - learningsPath := filepath.Join(rc.ContextDir(), ctx.Learning) + ctxDir3, ctxErr3 := rc.ContextDir() + if ctxErr3 != nil { + t.Fatalf("ContextDir: %v", ctxErr3) + } + learningsPath := filepath.Join(ctxDir3, ctx.Learning) content, err := os.ReadFile(filepath.Clean(learningsPath)) if err != nil { t.Fatalf("failed to read learnings: %v", err) @@ -361,10 +386,9 @@ func TestProcessStream_DryRunMode(t *testing.T) { } t.Cleanup(func() { _ = os.Chdir(origDir) - rc.Reset() }) - rc.Reset() + testctx.Declare(t, tmpDir) // Initialize context initCmd := initialize.Cmd() @@ -405,6 +429,8 @@ func TestProcessStream_FailedApply(t *testing.T) { _ = os.Chdir(origDir) }) + testctx.Declare(t, tmpDir) + // Initialize context initCmd := initialize.Cmd() initCmd.SetArgs([]string{}) @@ -442,6 +468,8 @@ func TestProcessStream_MultipleUpdates(t *testing.T) { _ = os.Chdir(origDir) }) + testctx.Declare(t, tmpDir) + initCmd := initialize.Cmd() initCmd.SetArgs([]string{}) if err := initCmd.Execute(); err != nil { @@ -479,6 +507,8 @@ func TestProcessStream_DecisionWithAttributes(t 
*testing.T) { _ = os.Chdir(origDir) }) + testctx.Declare(t, tmpDir) + initCmd := initialize.Cmd() initCmd.SetArgs([]string{}) if err := initCmd.Execute(); err != nil { @@ -502,7 +532,11 @@ func TestProcessStream_DecisionWithAttributes(t *testing.T) { } // Verify decision was written - decPath := filepath.Join(rc.ContextDir(), ctx.Decision) + ctxDir4, ctxErr4 := rc.ContextDir() + if ctxErr4 != nil { + t.Fatalf("ContextDir: %v", ctxErr4) + } + decPath := filepath.Join(ctxDir4, ctx.Decision) content, err := os.ReadFile(filepath.Clean(decPath)) if err != nil { t.Fatal(err) @@ -526,6 +560,8 @@ func TestProcessStream_NoUpdates(t *testing.T) { _ = os.Chdir(origDir) }) + testctx.Declare(t, tmpDir) + initCmd := initialize.Cmd() initCmd.SetArgs([]string{}) if err := initCmd.Execute(); err != nil { @@ -603,6 +639,8 @@ func TestProcessStream_CompleteUpdate(t *testing.T) { _ = os.Chdir(origDir) }) + testctx.Declare(t, tmpDir) + initCmd := initialize.Cmd() initCmd.SetArgs([]string{}) if err := initCmd.Execute(); err != nil { @@ -610,7 +648,11 @@ func TestProcessStream_CompleteUpdate(t *testing.T) { } // Write a task to complete - tasksPath := filepath.Join(rc.ContextDir(), ctx.Task) + ctxDir5, ctxErr5 := rc.ContextDir() + if ctxErr5 != nil { + t.Fatalf("ContextDir: %v", ctxErr5) + } + tasksPath := filepath.Join(ctxDir5, ctx.Task) tasksContent := "# Tasks\n\n- [ ] Implement login\n- [ ] Write tests\n" if err := os.WriteFile(tasksPath, []byte(tasksContent), 0600); err != nil { t.Fatal(err) diff --git a/internal/cli/watch/watch_test.go b/internal/cli/watch/watch_test.go index 5c4aac9aa..52f4ca73e 100644 --- a/internal/cli/watch/watch_test.go +++ b/internal/cli/watch/watch_test.go @@ -15,7 +15,9 @@ import ( "github.com/ActiveMemory/ctx/internal/cli/initialize" "github.com/ActiveMemory/ctx/internal/config/ctx" + "github.com/ActiveMemory/ctx/internal/config/env" "github.com/ActiveMemory/ctx/internal/rc" + "github.com/ActiveMemory/ctx/internal/testutil/testctx" ) func 
TestRunWatch_NoContext(t *testing.T) { @@ -25,6 +27,7 @@ func TestRunWatch_NoContext(t *testing.T) { t.Fatal(err) } t.Cleanup(func() { _ = os.Chdir(origDir) }) + t.Setenv(env.CtxDir, "") cmd := Cmd() var buf bytes.Buffer @@ -36,8 +39,12 @@ func TestRunWatch_NoContext(t *testing.T) { if err == nil { t.Fatal("expected error when no .context/ exists") } - if !strings.Contains(err.Error(), "ctx init") { - t.Errorf("error = %q, want 'ctx init' suggestion", err.Error()) + // Under the explicit-context-dir model, the top-level gate is + // rc.RequireContextDir which surfaces a multi-line actionable + // message. The previous 'ctx init' suggestion belonged to the + // old initialize.ContextNotInitialized fallback. + if !strings.Contains(err.Error(), "no context directory") { + t.Errorf("error = %q, want 'no context directory' message", err.Error()) } } @@ -52,7 +59,7 @@ func TestRunWatch_WithLogFile(t *testing.T) { rc.Reset() }) - rc.Reset() + testctx.Declare(t, tmpDir) // Initialize context initCmd := initialize.Cmd() @@ -83,7 +90,11 @@ More output } // Verify task was written - tasksPath := filepath.Join(rc.ContextDir(), ctx.Task) + ctxDir, ctxErr := rc.ContextDir() + if ctxErr != nil { + t.Fatalf("ContextDir: %v", ctxErr) + } + tasksPath := filepath.Join(ctxDir, ctx.Task) content, err := os.ReadFile(filepath.Clean(tasksPath)) if err != nil { t.Fatal(err) @@ -104,7 +115,7 @@ func TestRunWatch_DryRun(t *testing.T) { rc.Reset() }) - rc.Reset() + testctx.Declare(t, tmpDir) // Initialize context initCmd := initialize.Cmd() @@ -152,7 +163,7 @@ func TestRunWatch_InvalidLogFile(t *testing.T) { rc.Reset() }) - rc.Reset() + testctx.Declare(t, tmpDir) // Initialize context initCmd := initialize.Cmd() diff --git a/internal/config/README.md b/internal/config/README.md index fbfd84bc3..76e974547 100644 --- a/internal/config/README.md +++ b/internal/config/README.md @@ -1,4 +1,4 @@ -# internal/config — Constants Package Structure +# internal/config: Constants Package Structure ## Why 
60+ Sub-Packages? @@ -68,23 +68,23 @@ Each contains a `doc.go` and 1-3 files of `const`/`var` definitions. ### Multi-file thematic -- **`regex/`** — 14 files of compiled `regexp.MustCompile()` objects, +- **`regex/`**: 14 files of compiled `regexp.MustCompile()` objects, organized by domain (fence, task, entry, markdown, etc.) -- **`file/`** — extensions, ignore patterns, names, limits -- **`dep/`, `doctor/`** — multi-file domain constants +- **`file/`**: extensions, ignore patterns, names, limits +- **`dep/`, `doctor/`**: multi-file domain constants ### Hierarchical (nested sub-packages) -- **`embed/`** — user-facing text, organized in 3 tiers: - - `embed/cmd/` — command Short/Long descriptions (22 files) - - `embed/flag/` — flag description keys (~10 files) - - `embed/text/` — output text DescKey constants (~100 files) +- **`embed/`**: user-facing text, organized in 3 tiers: + - `embed/cmd/`: command Short/Long descriptions (22 files) + - `embed/flag/`: flag description keys (~10 files) + - `embed/text/`: output text DescKey constants (~100 files) -- **`mcp/`** — MCP protocol constants, split into 12 sub-packages: +- **`mcp/`**: MCP protocol constants, split into 12 sub-packages: `cfg/`, `event/`, `field/`, `governance/`, `method/`, `mime/`, `notify/`, `prompt/`, `resource/`, `schema/`, `server/`, `tool/` -- **`memory/`** — memory bridge constants +- **`memory/`**: memory bridge constants ## How To Find the Right Package @@ -129,11 +129,11 @@ go list ./internal/config/... ## config/ vs entity/ for Types String-typed enums (`type IssueType string`) and their const -values live in `config/` — the same place all other string +values live in `config/`: the same place all other string constants live. The type annotation adds compile-time safety but does not change where the definition belongs. 
-**When to promote to `entity/`:** When the type grows behavior — +**When to promote to `entity/`:** When the type grows behavior: method receivers, interface participation, or business logic. A type with `func (t IssueType) Severity() int` has outgrown `config/` and belongs in `entity/`. @@ -141,7 +141,7 @@ type with `func (t IssueType) Severity() int` has outgrown | Stage | Home | Example | |------------------------------|--------------------|---------------------------------------------------| | Pure value enum | `config//` | `type IssueType string` with const values | -| Cross-package value enum | `config//` | Same — `config/` is already importable everywhere | +| Cross-package value enum | `config//` | Same; `config/` is already importable everywhere | | Type with methods | `entity/` | `func (t IssueType) Severity() int` | | Type implementing interfaces | `entity/` | `var _ fmt.Stringer = IssueType("")` | diff --git a/internal/config/archive/archive.go b/internal/config/archive/archive.go index 26586bf9c..fb8452c9b 100644 --- a/internal/config/archive/archive.go +++ b/internal/config/archive/archive.go @@ -20,11 +20,3 @@ const ( // SnapshotTimeFormat is the compact timestamp layout for snapshot filenames. SnapshotTimeFormat = "2006-01-02-1504" ) - -// Backup archive writer identifiers for error reporting. -const ( - // WriterGzip identifies the gzip compression writer. - WriterGzip = "gzip" - // WriterTar identifies the tar archive writer. - WriterTar = "tar" -) diff --git a/internal/config/archive/backup.go b/internal/config/archive/backup.go deleted file mode 100644 index da716f595..000000000 --- a/internal/config/archive/backup.go +++ /dev/null @@ -1,49 +0,0 @@ -// / ctx: https://ctx.ist -// ,'`./ do you remember? -// `.,'\ -// \ Copyright 2026-present Context contributors. -// SPDX-License-Identifier: Apache-2.0 - -package archive - -// Backup configuration. -const ( - // BackupDefaultSubdir is the default subdirectory on the SMB share. 
- BackupDefaultSubdir = "ctx-sessions" - // BackupMarkerFile is the state file touched on a successful project backup. - BackupMarkerFile = "ctx-last-backup" - // BackupScopeProject backs up only the project context. - BackupScopeProject = "project" - // BackupScopeGlobal backs up only global Claude data. - BackupScopeGlobal = "global" - // BackupScopeAll backs up both project and global. - BackupScopeAll = "all" - // TplProjectArchive is the filename template for project archives. - // Argument: timestamp. - TplProjectArchive = "ctx-backup-%s.tar.gz" - // TplGlobalArchive is the filename template for global archives. - // Argument: timestamp. - TplGlobalArchive = "claude-global-backup-%s.tar.gz" - // BackupTimestampFormat is the compact timestamp layout for backup filenames. - BackupTimestampFormat = "20060102-150405" - // BackupExcludeTodos is the directory name excluded from global backups. - BackupExcludeTodos = "todos" - // BackupMarkerDir is the XDG state directory for the backup marker. - BackupMarkerDir = ".local/state" - // BackupMaxAgeDays is the threshold in days before a backup - // is considered stale. - BackupMaxAgeDays = 2 - // BackupThrottleID is the state file name for daily - // throttle of backup age checks. - BackupThrottleID = "backup-reminded" - // Bashrc is the user's bash configuration file. - Bashrc = ".bashrc" -) - -// GIO mount command constants. -const ( - // GioBinary is the GIO executable name. - GioBinary = "gio" - // GioMount is the gio mount subcommand. - GioMount = "mount" -) diff --git a/internal/config/archive/doc.go b/internal/config/archive/doc.go index 0f355ce0f..4e9ed9733 100644 --- a/internal/config/archive/doc.go +++ b/internal/config/archive/doc.go @@ -4,19 +4,11 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package archive centralizes constants for task archival, -// project backups, and snapshot management. 
+// Package archive centralizes constants for task archival and +// snapshot management. // -// ctx archives completed tasks into dated markdown files -// and backs up entire project contexts to SMB shares as -// compressed tarballs. This package defines file-naming -// patterns, backup scopes, staleness thresholds, and -// template variables used by both subsystems. -// -// # Task Snapshots -// -// When a user archives tasks, ctx creates a timestamped -// markdown file in .context/archive/: +// When a user archives tasks, ctx creates a timestamped markdown +// file in .context/archive/: // // - [ScopeTasks] identifies the task archive scope. // - [SnapshotFilenameFormat] and [SnapshotTimeFormat] @@ -25,39 +17,12 @@ // - [TplFilename] and [DateSep] control the general // archive filename template and header formatting. // -// # Backup System -// -// Full project backups are tar.gz archives written to an -// SMB share: -// -// - [BackupScopeProject], [BackupScopeGlobal], and -// [BackupScopeAll] control what gets backed up. -// - [TplProjectArchive] and [TplGlobalArchive] name the -// output files with timestamps. -// - [BackupDefaultSubdir] sets the target subdirectory -// on the SMB share (ctx-sessions). -// - [BackupMaxAgeDays] triggers a nudge when the last -// backup is older than 2 days. -// - [BackupMarkerFile] and [BackupMarkerDir] track the -// last successful backup timestamp. -// - [BackupThrottleID] ensures the staleness nudge -// fires at most once per day. -// -// # Task Parsing -// -// [SubTaskMinIndent] defines the minimum indentation (2 -// spaces) for a line to be treated as a subtask rather -// than a top-level task during archive parsing. -// -// # Writer Identifiers -// -// [WriterGzip] and [WriterTar] label the compression and -// archival stages for structured error reporting. 
-// -// # Why Centralized +// [SubTaskMinIndent] defines the minimum indentation (2 spaces) +// for a line to be treated as a subtask rather than a top-level +// task during archive parsing. // -// Filename templates, scopes, and staleness thresholds -// are shared between the archive command, the backup -// skill, and the staleness hook. Centralizing them -// prevents drift and makes the naming scheme auditable. +// Filename templates, scopes, and date formatting are shared +// between the task archive command and the tidy/compact helpers. +// Centralizing them prevents drift and makes the naming scheme +// auditable. package archive diff --git a/internal/config/archive/var.go b/internal/config/archive/var.go deleted file mode 100644 index 1aae7994b..000000000 --- a/internal/config/archive/var.go +++ /dev/null @@ -1,13 +0,0 @@ -// / ctx: https://ctx.ist -// ,'`./ do you remember? -// `.,'\ -// \ Copyright 2026-present Context contributors. -// SPDX-License-Identifier: Apache-2.0 - -package archive - -// Template variable keys for backup hooks. -const ( - // VarWarnings is the template variable for backup warning messages. - VarWarnings = "Warnings" -) diff --git a/internal/config/embed/cmd/backup.go b/internal/config/embed/cmd/backup.go deleted file mode 100644 index 667f8b74f..000000000 --- a/internal/config/embed/cmd/backup.go +++ /dev/null @@ -1,19 +0,0 @@ -// / ctx: https://ctx.ist -// ,'`./ do you remember? -// `.,'\ -// \ Copyright 2026-present Context contributors. -// SPDX-License-Identifier: Apache-2.0 - -package cmd - -// Use string for the top-level backup command. -const ( - // UseBackup is the cobra Use string for the backup command. - UseBackup = "backup" -) - -// DescKey for the top-level backup command. -const ( - // DescKeyBackup is the description key for the backup command. 
- DescKeyBackup = "backup" -) diff --git a/internal/config/embed/cmd/base.go b/internal/config/embed/cmd/base.go index 707c2f303..6836ffd88 100644 --- a/internal/config/embed/cmd/base.go +++ b/internal/config/embed/cmd/base.go @@ -8,6 +8,10 @@ package cmd // Use strings for cobra command registration. const ( + // UseActivate is the cobra Use string for the activate command. + UseActivate = "activate" + // UseDeactivate is the cobra Use string for the deactivate command. + UseDeactivate = "deactivate" // UseAdd is the cobra Use string for the add command. UseAdd = "add [content]" // UseAgent is the cobra Use string for the agent command. @@ -70,6 +74,10 @@ const ( // DescKeys for base commands. const ( + // DescKeyActivate is the description key for the activate command. + DescKeyActivate = "activate" + // DescKeyDeactivate is the description key for the deactivate command. + DescKeyDeactivate = "deactivate" // DescKeyAdd is the description key for the add command. DescKeyAdd = "add" // DescKeyAgent is the description key for the agent command. diff --git a/internal/config/embed/cmd/system.go b/internal/config/embed/cmd/system.go index f9229b9d6..5be94b276 100644 --- a/internal/config/embed/cmd/system.go +++ b/internal/config/embed/cmd/system.go @@ -10,8 +10,8 @@ package cmd // // The ctx system namespace hosts hook plumbing plus the // agent-only `bootstrap` command. Other user-facing maintenance -// commands (backup, event, message, prune, resource, stats) have -// been promoted to top-level commands; their Use constants live in +// commands (event, message, prune, resource, stats) have been +// promoted to top-level commands; their Use constants live in // their own per-command files in this package. // // `bootstrap` is intentionally NOT promoted to top-level; it is @@ -19,15 +19,12 @@ package cmd // under `ctx system` keeps `ctx --help` focused on user-facing // commands. The canonical invocation is `ctx system bootstrap`. 
const ( - // UseSystemBlockDangerousCommand is the cobra Use string for the system - // block dangerous command command. - UseSystemBlockDangerousCommand = "block-dangerous-command" // UseSystemBlockNonPathCtx is the cobra Use string for the system block non // path ctx command. UseSystemBlockNonPathCtx = "block-non-path-ctx" - // UseSystemCheckBackupAge is the cobra Use string for the system check backup - // age command. - UseSystemCheckBackupAge = "check-backup-age" + // UseSystemCheckAnchorDrift is the cobra Use string for the system check + // anchor drift command. + UseSystemCheckAnchorDrift = "check-anchor-drift" // UseSystemCheckCeremony is the cobra Use string for the system check // ceremony command. UseSystemCheckCeremony = "check-ceremony" @@ -106,15 +103,12 @@ const ( const ( // DescKeySystem is the description key for the system command. DescKeySystem = "system" - // DescKeySystemBlockDangerousCommand is the description key for the system - // block dangerous command command. - DescKeySystemBlockDangerousCommand = "system.blockdangerouscommand" // DescKeySystemBlockNonPathCtx is the description key for the system block // non path ctx command. DescKeySystemBlockNonPathCtx = "system.blocknonpathctx" - // DescKeySystemCheckBackupAge is the description key for the system check - // backup age command. - DescKeySystemCheckBackupAge = "system.checkbackupage" + // DescKeySystemCheckAnchorDrift is the description key for the system + // check anchor drift command. + DescKeySystemCheckAnchorDrift = "system.checkanchordrift" // DescKeySystemCheckCeremony is the description key for the system check // ceremony command. DescKeySystemCheckCeremony = "system.checkceremony" diff --git a/internal/config/embed/flag/activate.go b/internal/config/embed/flag/activate.go new file mode 100644 index 000000000..ce61c75b0 --- /dev/null +++ b/internal/config/embed/flag/activate.go @@ -0,0 +1,14 @@ +// / ctx: https://ctx.ist +// ,'`./ do you remember? 
+// `.,'\ +// \ Copyright 2026-present Context contributors. +// SPDX-License-Identifier: Apache-2.0 + +package flag + +// DescKeys for activate / deactivate subcommand flags. +const ( + // DescKeyActivateShell is the description key for the --shell + // flag used by both `ctx activate` and `ctx deactivate`. + DescKeyActivateShell = "activate.shell" +) diff --git a/internal/config/embed/flag/backup.go b/internal/config/embed/flag/backup.go deleted file mode 100644 index fb42391be..000000000 --- a/internal/config/embed/flag/backup.go +++ /dev/null @@ -1,15 +0,0 @@ -// / ctx: https://ctx.ist -// ,'`./ do you remember? -// `.,'\\ -// \ Copyright 2026-present Context contributors. -// SPDX-License-Identifier: Apache-2.0 - -package flag - -// DescKeys for backup command flags. -const ( - // DescKeyBackupJson is the description key for the backup json flag. - DescKeyBackupJson = "backup.json" - // DescKeyBackupScope is the description key for the backup scope flag. - DescKeyBackupScope = "backup.scope" -) diff --git a/internal/config/embed/flag/flag.go b/internal/config/embed/flag/flag.go index 5e9a3aeeb..e1d45e098 100644 --- a/internal/config/embed/flag/flag.go +++ b/internal/config/embed/flag/flag.go @@ -8,15 +8,10 @@ package flag // DescKeys for shared flag descriptions. const ( - // DescKeyAllowOutsideCwd is the description key for the allow outside cwd - // flag. - DescKeyAllowOutsideCwd = "allow-outside-cwd" // DescKeyChangesSince is the description key for the changes since flag. DescKeyChangesSince = "changes.since" // DescKeyCompactArchive is the description key for the compact archive flag. DescKeyCompactArchive = "compact.archive" - // DescKeyContextDir is the description key for the context dir flag. - DescKeyContextDir = "context-dir" // DescKeyDoctorJson is the description key for the doctor json flag. 
DescKeyDoctorJson = "doctor.json" // DescKeyTriggerTestPath is the description key for the trigger test path diff --git a/internal/config/embed/text/backup.go b/internal/config/embed/text/backup.go deleted file mode 100644 index 7bcfd8ae7..000000000 --- a/internal/config/embed/text/backup.go +++ /dev/null @@ -1,47 +0,0 @@ -// / ctx: https://ctx.ist -// ,'`./ do you remember? -// `.,'\\ -// \ Copyright 2026-present Context contributors. -// SPDX-License-Identifier: Apache-2.0 - -package text - -// DescKeys for backup operations. -const ( - // DescKeyBackupBoxTitle is the text key for backup box title messages. - DescKeyBackupBoxTitle = "backup.box-title" - // DescKeyBackupNoMarker is the text key for backup no marker messages. - DescKeyBackupNoMarker = "backup.no-marker" - // DescKeyBackupRelayMessage is the text key for backup relay message messages. - DescKeyBackupRelayMessage = "backup.relay-message" - // DescKeyBackupRelayPrefix is the text key for backup relay prefix messages. - DescKeyBackupRelayPrefix = "backup.relay-prefix" - // DescKeyBackupRunHint is the text key for backup run hint messages. - DescKeyBackupRunHint = "backup.run-hint" - // DescKeyBackupSMBNotMounted is the text key for backup smb not mounted - // messages. - DescKeyBackupSMBNotMounted = "backup.smb-not-mounted" - // DescKeyBackupSMBUnavailable is the text key for backup smb unavailable - // messages. - DescKeyBackupSMBUnavailable = "backup.smb-unavailable" - // DescKeyBackupStale is the text key for backup stale messages. - DescKeyBackupStale = "backup.stale" -) - -// DescKeys for backup result write output. -const ( - // DescKeyWriteBackupResult is the text key for write backup result messages. - DescKeyWriteBackupResult = "write.backup-result" - // DescKeyWriteBackupSMBDest is the text key for write backup smb dest - // messages. - DescKeyWriteBackupSMBDest = "write.backup-smb-dest" -) - -// DescKeys for snapshot write output. 
-const ( - // DescKeyWriteSnapshotSaved is the text key for write snapshot saved messages. - DescKeyWriteSnapshotSaved = "write.snapshot-saved" - // DescKeyWriteSnapshotUpdated is the text key for write snapshot updated - // messages. - DescKeyWriteSnapshotUpdated = "write.snapshot-updated" -) diff --git a/internal/config/embed/text/block.go b/internal/config/embed/text/block.go index 7aadf5bd5..bb598aebf 100644 --- a/internal/config/embed/text/block.go +++ b/internal/config/embed/text/block.go @@ -14,15 +14,6 @@ const ( // DescKeyBlockConstitutionSuffix is the text key for block constitution // suffix messages. DescKeyBlockConstitutionSuffix = "block.constitution-suffix" - // DescKeyBlockMidSudo is the text key for block mid sudo messages. - DescKeyBlockMidSudo = "block.mid-sudo" - // DescKeyBlockMidGitPush is the text key for block mid git push messages. - DescKeyBlockMidGitPush = "block.mid-git-push" - // DescKeyBlockCpToBin is the text key for block cp to bin messages. - DescKeyBlockCpToBin = "block.cp-to-bin" - // DescKeyBlockInstallToLocalBin is the text key for block install to local - // bin messages. - DescKeyBlockInstallToLocalBin = "block.install-to-local-bin" // DescKeyBlockDotSlash is the text key for block dot slash messages. DescKeyBlockDotSlash = "block.dot-slash" // DescKeyBlockGoRun is the text key for block go run messages. diff --git a/internal/config/embed/text/check_anchor.go b/internal/config/embed/text/check_anchor.go new file mode 100644 index 000000000..6edfdb3eb --- /dev/null +++ b/internal/config/embed/text/check_anchor.go @@ -0,0 +1,25 @@ +// / ctx: https://ctx.ist +// ,'`./ do you remember? +// `.,'\ +// \ Copyright 2026-present Context contributors. +// SPDX-License-Identifier: Apache-2.0 + +package text + +// DescKeys for the check-anchor-drift hook (single-source-anchor +// model: specs/single-source-context-anchor.md). +const ( + // DescKeyCheckAnchorDriftBoxTitle is the text key for the + // anchor-drift nudge box title. 
+ DescKeyCheckAnchorDriftBoxTitle = "check-anchor-drift.box-title" + // DescKeyCheckAnchorDriftContent is the text key for the + // anchor-drift nudge body. Two %s placeholders: the inherited + // CTX_DIR and the Claude-injected CTX_DIR. + DescKeyCheckAnchorDriftContent = "check-anchor-drift.content" + // DescKeyCheckAnchorDriftRelayMessage is the text key for the + // short relay-channel message. + DescKeyCheckAnchorDriftRelayMessage = "check-anchor-drift.relay-message" + // DescKeyCheckAnchorDriftRelayPrefix is the text key for the + // VERBATIM-relay prefix line. + DescKeyCheckAnchorDriftRelayPrefix = "check-anchor-drift.relay-prefix" +) diff --git a/internal/config/embed/text/doctor.go b/internal/config/embed/text/doctor.go index bfb3a6b4a..6357eba85 100644 --- a/internal/config/embed/text/doctor.go +++ b/internal/config/embed/text/doctor.go @@ -8,6 +8,16 @@ package text // DescKeys for doctor diagnostics. const ( + // DescKeyDoctorCheckDidNotRun is the text key for the failure + // result emitted by the doctor runner when a check returns an + // error it could not handle itself. + DescKeyDoctorCheckDidNotRun = "doctor.check-did-not-run" + // DescKeyDoctorCheckDidNotRunCascade is emitted once for the + // first context-dependent check that fails with + // [errCtx.ErrDirNotDeclared]; later dependent checks are + // silently skipped so the report shows one loud line instead + // of the same message N times. + DescKeyDoctorCheckDidNotRunCascade = "doctor.check-did-not-run-cascade" // DescKeyDoctorContextFileFormat is the text key for doctor context file // format messages. DescKeyDoctorContextFileFormat = "doctor.context-file.format" diff --git a/internal/config/embed/text/err_activate.go b/internal/config/embed/text/err_activate.go new file mode 100644 index 000000000..ea2a1c647 --- /dev/null +++ b/internal/config/embed/text/err_activate.go @@ -0,0 +1,15 @@ +// / ctx: https://ctx.ist +// ,'`./ do you remember? 
+// `.,'\ +// \ Copyright 2026-present Context contributors. +// SPDX-License-Identifier: Apache-2.0 + +package text + +// DescKeys for ctx activate subcommand errors. +const ( + // DescKeyErrActivateNoCandidates is the text key used when + // `ctx activate` finds zero .context/ candidates on the upward + // path from CWD. + DescKeyErrActivateNoCandidates = "err.activate.no-candidates" +) diff --git a/internal/config/embed/text/err_backup.go b/internal/config/embed/text/err_backup.go index 55d422b00..cae88a157 100644 --- a/internal/config/embed/text/err_backup.go +++ b/internal/config/embed/text/err_backup.go @@ -6,47 +6,22 @@ package text -// DescKeys for backup operations errors. +// DescKeys for the surviving err/backup constructors (see +// internal/err/backup). The former ctx-backup-specific keys were +// removed when the command was deprecated; these four remain +// because init's `.bak` writer, task archival, and bootstrap all +// still use the package. const ( - // DescKeyErrBackupBackupGlobal is the text key for err backup backup global - // messages. - DescKeyErrBackupBackupGlobal = "err.backup.backup-global" - // DescKeyErrBackupBackupProject is the text key for err backup backup project - // messages. - DescKeyErrBackupBackupProject = "err.backup.backup-project" - // DescKeyErrBackupBackupSMBConfig is the text key for err backup backup smb - // config messages. - DescKeyErrBackupBackupSMBConfig = "err.backup.backup-smb-config" - // DescKeyErrBackupContextDirNotFound is the text key for err backup context - // dir not found messages. + // DescKeyErrBackupContextDirNotFound is the text key for the + // "context directory not found" bootstrap error. DescKeyErrBackupContextDirNotFound = "err.backup.context-dir-not-found" - // DescKeyErrBackupCreateArchive is the text key for err backup create archive - // messages. 
- DescKeyErrBackupCreateArchive = "err.backup.create-archive" - // DescKeyErrBackupCreateArchiveDir is the text key for err backup create - // archive dir messages. + // DescKeyErrBackupCreateArchiveDir is the text key for task + // archive directory creation failures. DescKeyErrBackupCreateArchiveDir = "err.backup.create-archive-dir" - // DescKeyErrBackupCreateBackup is the text key for err backup create backup - // messages. + // DescKeyErrBackupCreateBackup is the text key for the `.bak` + // file creation failure (ctx init --force). DescKeyErrBackupCreateBackup = "err.backup.create-backup" - // DescKeyErrBackupInvalidBackupScope is the text key for err backup invalid - // backup scope messages. - DescKeyErrBackupInvalidBackupScope = "err.backup.invalid-backup-scope" - // DescKeyErrBackupInvalidSMBURL is the text key for err backup invalid smburl - // messages. - DescKeyErrBackupInvalidSMBURL = "err.backup.invalid-smb-url" - // DescKeyErrBackupMountFailed is the text key for err backup mount failed - // messages. - DescKeyErrBackupMountFailed = "err.backup.mount-failed" - // DescKeyErrBackupSMBMissingShare is the text key for err backup smb missing - // share messages. - DescKeyErrBackupSMBMissingShare = "err.backup.smb-missing-share" - // DescKeyErrBackupSourceNotFound is the text key for err backup source not - // found messages. - DescKeyErrBackupSourceNotFound = "err.backup.source-not-found" - // DescKeyErrBackupWriteArchive is the text key for err backup write archive - // messages. + // DescKeyErrBackupWriteArchive is the text key for task archive + // write failures. DescKeyErrBackupWriteArchive = "err.backup.write-archive" - // DescKeyErrBackupWriteSMB is the text key for err backup write smb messages. 
- DescKeyErrBackupWriteSMB = "err.backup.write-smb" ) diff --git a/internal/config/embed/text/err_fs.go b/internal/config/embed/text/err_fs.go index 9c6e2a94a..4f0504da3 100644 --- a/internal/config/embed/text/err_fs.go +++ b/internal/config/embed/text/err_fs.go @@ -8,9 +8,6 @@ package text // DescKeys for filesystem operations errors. const ( - // DescKeyErrFsBoundaryViolation is the text key for err fs boundary violation - // messages. - DescKeyErrFsBoundaryViolation = "err.fs.boundary-violation" // DescKeyErrFsCreateDir is the text key for err fs create dir messages. DescKeyErrFsCreateDir = "err.fs.create-dir" // DescKeyErrFsDirNotFound is the text key for err fs dir not found messages. @@ -74,6 +71,27 @@ const ( // DescKeyErrContextDirNotFound is the text key for err context dir not found // messages. DescKeyErrContextDirNotFound = "err.context.dir-not-found" + // DescKeyErrContextNotDeclaredZero is the text key used when CTX_DIR + // is not set and no .context/ candidate is visible from CWD. + DescKeyErrContextNotDeclaredZero = "err.context.not-declared-zero" + // DescKeyErrContextNotDeclaredOne is the text key used when CTX_DIR + // is not set and exactly one .context/ candidate is visible from CWD. + DescKeyErrContextNotDeclaredOne = "err.context.not-declared-one" + // DescKeyErrContextNotDeclaredMany is the text key used when CTX_DIR + // is not set and two or more .context/ candidates are visible from CWD. + DescKeyErrContextNotDeclaredMany = "err.context.not-declared-many" + // DescKeyErrContextRelativeNotAllowed is the text key for the + // "CTX_DIR must be absolute" rejection. + DescKeyErrContextRelativeNotAllowed = "err.context.relative-not-allowed" + // DescKeyErrContextNonCanonicalBasename is the text key for the + // "CTX_DIR basename must be .context" rejection. 
+ DescKeyErrContextNonCanonicalBasename = "err.context.non-canonical-basename" + // DescKeyErrContextDirNotADirectory is the text key for the + // "CTX_DIR points at a file, not a directory" rejection. + DescKeyErrContextDirNotADirectory = "err.context.dir-not-a-directory" + // DescKeyErrContextDirStat is the text key for stat failures + // other than not-exist (permission denied, I/O error). + DescKeyErrContextDirStat = "err.context.dir-stat" ) // DescKeys for filesystem write output. diff --git a/internal/config/embed/text/err_validate.go b/internal/config/embed/text/err_validate.go index c8dabfdd6..06606a798 100644 --- a/internal/config/embed/text/err_validate.go +++ b/internal/config/embed/text/err_validate.go @@ -14,9 +14,6 @@ const ( // DescKeyErrValidateContextFileSymlink is the text key for err validate // context file symlink messages. DescKeyErrValidateContextFileSymlink = "err.validate.context-file-symlink" - // DescKeyErrValidateContextOutsideRoot is the text key for err validate - // context outside root messages. - DescKeyErrValidateContextOutsideRoot = "err.validate.context-outside-root" // DescKeyErrValidateInvalidSelection is the text key for err validate invalid // selection messages. DescKeyErrValidateInvalidSelection = "err.validate.invalid-selection" diff --git a/internal/config/embed/text/format.go b/internal/config/embed/text/format.go index edd5e1605..f2716952e 100644 --- a/internal/config/embed/text/format.go +++ b/internal/config/embed/text/format.go @@ -16,9 +16,6 @@ const ( DescKeyWriteFormatBytesUnit = "write.format-bytes-unit" // DescKeyWriteFormatBytes is the text key for write format bytes messages. DescKeyWriteFormatBytes = "write.format-bytes" - // DescKeyWriteFormatGVFSPath is the text key for write format gvfs path - // messages. - DescKeyWriteFormatGVFSPath = "write.format-gvfs-path" // DescKeyWriteFormatDurationDay is the text key for write format duration day // messages. 
DescKeyWriteFormatDurationDay = "write.format-duration-day" @@ -61,9 +58,6 @@ const ( // DescKeys for miscellaneous format write output. const ( - // DescKeyWriteBackupSkipEntry is the text key for write backup skip entry - // messages. - DescKeyWriteBackupSkipEntry = "write.backup-skip-entry" // DescKeyWriteWikilinkListItem is the text key for write wikilink list item // messages. DescKeyWriteWikilinkListItem = "write.wikilink-list-item" diff --git a/internal/config/embed/text/initialize.go b/internal/config/embed/text/initialize.go index be0d4d7ed..786c7789c 100644 --- a/internal/config/embed/text/initialize.go +++ b/internal/config/embed/text/initialize.go @@ -69,6 +69,18 @@ const ( // DescKeyWriteInitWorkflowTips is the text key for write init workflow tips // messages. DescKeyWriteInitWorkflowTips = "write.init-workflow-tips" + // DescKeyWriteInitActivateHint is the text key for the + // activation hint printed after `ctx init` finishes, telling + // the user how to bind CTX_DIR for their shell. Template + // expects a single %s argument for the absolute context path + // (used in the `export CTX_DIR=%s` line). + DescKeyWriteInitActivateHint = "write.init-activate-hint" + // DescKeyWriteInitAnatomyPreamble is the text key for the + // "what is .context/" primer prepended to GETTING_STARTED.md + // at the project root. Names the project-root contract, + // the basename guard, and the one-.context-per-project rule. + // No format arguments. + DescKeyWriteInitAnatomyPreamble = "write.init-anatomy-preamble" // DescKeyWriteInitNoChanges is the text key for write init no changes // messages. 
DescKeyWriteInitNoChanges = "write.init-no-changes" diff --git a/internal/config/embed/text/restore.go b/internal/config/embed/text/restore.go index 7f7c397fa..1a175e8fc 100644 --- a/internal/config/embed/text/restore.go +++ b/internal/config/embed/text/restore.go @@ -35,4 +35,10 @@ const ( // DescKeyWriteRestoreRestoredHeader is the text key for write restore // restored header messages. DescKeyWriteRestoreRestoredHeader = "write.restore-restored-header" + // DescKeyWriteSnapshotSaved is the text key for the first-time + // golden snapshot save confirmation. + DescKeyWriteSnapshotSaved = "write.snapshot-saved" + // DescKeyWriteSnapshotUpdated is the text key for the subsequent + // golden snapshot update confirmation. + DescKeyWriteSnapshotUpdated = "write.snapshot-updated" ) diff --git a/internal/config/env/doc.go b/internal/config/env/doc.go index 3cd6fb4d3..a4cc1f89b 100644 --- a/internal/config/env/doc.go +++ b/internal/config/env/doc.go @@ -23,10 +23,6 @@ // - CtxTokenBudget: overrides the default token // budget for context window sizing // ($CTX_TOKEN_BUDGET) -// - BackupSMBURL: SMB share URL for backup -// operations ($CTX_BACKUP_SMB_URL) -// - BackupSMBSubdir: subdirectory within the SMB -// share for backups ($CTX_BACKUP_SMB_SUBDIR) // - SessionID: active AI session identifier used // by ctx trace ($CTX_SESSION_ID) // - SkipPathCheck: skips PATH validation during @@ -49,8 +45,8 @@ // # Why Centralized // // Environment variable names are referenced by the init -// command, bootstrap logic, backup routines, and test -// helpers. Centralizing them here prevents naming drift -// and makes it trivial to add new variables with -// consistent documentation. +// command, bootstrap logic, and test helpers. +// Centralizing them here prevents naming drift and makes +// it trivial to add new variables with consistent +// documentation. 
package env diff --git a/internal/config/env/env.go b/internal/config/env/env.go index 08d61bb50..ce3c43ac6 100644 --- a/internal/config/env/env.go +++ b/internal/config/env/env.go @@ -12,16 +12,25 @@ package env const ( // Home is the environment variable for the user's home directory. Home = "HOME" - // CtxDir is the environment variable for overriding the context directory. + // Shell is the environment variable that names the user's login + // shell (e.g. "/bin/bash"). Read by `ctx activate` / + // `ctx deactivate` to auto-detect the emitter dialect. + Shell = "SHELL" + // CtxDir is the environment variable that declares the context + // directory. Single-source-anchor model: + // specs/single-source-context-anchor.md. CtxDir = "CTX_DIR" + // CtxDirInherited is the diagnostic-only sibling of CtxDir set by + // the check-anchor-drift hook line so the hook can compare the + // parent shell's pre-injection CTX_DIR against the + // CLAUDE_PROJECT_DIR-anchored CTX_DIR. Not read by the resolver + // or any operating command; consumed only by + // `ctx system check-anchor-drift`. + CtxDirInherited = "CTX_DIR_INHERITED" // CtxTokenBudget is the environment variable for overriding // the token budget. //nolint:gosec // G101: env var name, not a credential CtxTokenBudget = "CTX_TOKEN_BUDGET" - // BackupSMBURL is the environment variable for the SMB share URL. - BackupSMBURL = "CTX_BACKUP_SMB_URL" - // BackupSMBSubdir is the environment variable for the SMB share subdirectory. - BackupSMBSubdir = "CTX_BACKUP_SMB_SUBDIR" // SessionID is the environment variable for the active AI session ID. // Used by ctx trace for context linking. 
SessionID = "CTX_SESSION_ID" diff --git a/internal/config/flag/doc.go b/internal/config/flag/doc.go index ef2cdece1..748159f1e 100644 --- a/internal/config/flag/doc.go +++ b/internal/config/flag/doc.go @@ -17,10 +17,8 @@ // // # Global Flags // -// - ContextDir ("context-dir"): override the -// .context/ directory path -// - AllowOutsideCwd ("allow-outside-cwd"): permit -// operations outside the working directory +// - Tool ("tool"): override the active AI tool +// identifier (e.g. claude, cursor, kiro). // // PrefixLong ("--") is the long-flag prefix used in // error messages and help text formatting. diff --git a/internal/config/flag/flag.go b/internal/config/flag/flag.go index 0d67a6772..bfe90fcb2 100644 --- a/internal/config/flag/flag.go +++ b/internal/config/flag/flag.go @@ -6,15 +6,17 @@ package flag -// Global CLI flag names. -const ( - ContextDir = "context-dir" - AllowOutsideCwd = "allow-outside-cwd" -) - // PrefixLong is a CLI flag prefix for display formatting. const PrefixLong = "--" +// Activate / deactivate command flag names. +const ( + // Shell selects the shell dialect for `ctx activate` and + // `ctx deactivate` emitters. When unset, the command + // auto-detects from $SHELL, falling back to bash. + Shell = "shell" +) + // Add command flag names: used for both flag registration and error display. const ( Application = "application" diff --git a/internal/config/git/git.go b/internal/config/git/git.go index c6270c141..197388574 100644 --- a/internal/config/git/git.go +++ b/internal/config/git/git.go @@ -9,9 +9,6 @@ package git // Binary is the git executable name. const Binary = "git" -// DotDir is the name of the git metadata directory (or file in worktrees). -const DotDir = ".git" - // Subcommand names passed as the first argument to git. 
const ( Branch = "branch" diff --git a/internal/config/hook/hook.go b/internal/config/hook/hook.go index 9ce6ca2e1..9099a69cc 100644 --- a/internal/config/hook/hook.go +++ b/internal/config/hook/hook.go @@ -9,12 +9,11 @@ package hook // Hook name constants: used for Load, NewTemplateRef, notify.Send, // and log.Append to avoid magic strings. const ( - // BlockDangerousCommand is the hook name for blocking dangerous commands. - BlockDangerousCommand = "block-dangerous-command" // BlockNonPathCtx is the hook name for blocking non-PATH ctx invocations. BlockNonPathCtx = "block-non-path-ctx" - // CheckBackupAge is the hook name for backup staleness checks. - CheckBackupAge = "check-backup-age" + // CheckAnchorDrift is the hook name for the stale-anchor sanity hook + // added by specs/single-source-context-anchor.md. + CheckAnchorDrift = "check-anchor-drift" // CheckCeremony is the hook name for ceremony usage checks. CheckCeremony = "check-ceremony" // CheckContextSize is the hook name for context window size checks. diff --git a/internal/config/hook/variant.go b/internal/config/hook/variant.go index 935df5435..84f45137c 100644 --- a/internal/config/hook/variant.go +++ b/internal/config/hook/variant.go @@ -9,14 +9,6 @@ package hook // Hook variant constants: template selectors passed to Load and // NewTemplateRef to choose the appropriate message for each trigger type. const ( - // VariantMidSudo selects the mid-command sudo block message. - VariantMidSudo = "mid-sudo" - // VariantMidGitPush selects the mid-command git push block message. - VariantMidGitPush = "mid-git-push" - // VariantCpToBin selects the cp/mv to bin block message. - VariantCpToBin = "cp-to-bin" - // VariantInstallToLocalBin selects the install to ~/.local/bin block message. - VariantInstallToLocalBin = "install-to-local-bin" // VariantDotSlash selects the relative path (./ctx) block message. VariantDotSlash = "dot-slash" // VariantGoRun selects the go run block message. 
diff --git a/internal/config/rc/messages.go b/internal/config/rc/messages.go new file mode 100644 index 000000000..39d012b6e --- /dev/null +++ b/internal/config/rc/messages.go @@ -0,0 +1,46 @@ +// / ctx: https://ctx.ist +// ,'`./ do you remember? +// `.,'\ +// \ Copyright 2026-present Context contributors. +// SPDX-License-Identifier: Apache-2.0 + +package rc + +// Error message constants for rc sentinel errors. These are used +// only for errors.Is matching; user-facing wrapping goes through +// err/context constructors that format tailored messages. +const ( + // ErrMsgDirNotDeclared is the sentinel message for the + // "context directory has not been declared" error. + ErrMsgDirNotDeclared = "context directory not declared" + // ErrMsgRelativeNotAllowed is the sentinel message for the + // "CTX_DIR must be absolute" rejection. + ErrMsgRelativeNotAllowed = "context directory must be absolute" + // ErrMsgNonCanonicalBasename is the sentinel message for the + // "CTX_DIR basename must be .context" rejection. + ErrMsgNonCanonicalBasename = "context directory has non-canonical basename" + // ErrMsgContextDirNotFound is the sentinel message for the + // "declared CTX_DIR does not exist" rejection. + ErrMsgContextDirNotFound = "context directory not found: " + // ErrMsgContextDirNotADirectory is the sentinel message for the + // "CTX_DIR is a file, not a directory" rejection. + ErrMsgContextDirNotADirectory = "context directory is not a directory" + // ErrMsgContextDirStat is the sentinel message for stat failures + // other than not-exist (permission denied, I/O error). + ErrMsgContextDirStat = "context directory stat failed" +) + +// Format strings for sentinel-wrapping in err/context constructors. +// Centralized here so the magic-string audit (which exempts +// internal/config) does not flag them at the call site. +const ( + // FmtWrapColon wraps a sentinel and a tailored message: + // fmt.Errorf(FmtWrapColon, ErrFoo, "tailored detail") + // ↦ ": tailored detail". 
+ FmtWrapColon = "%w: %s" + // FmtWrapBare appends the tailored detail directly to the + // sentinel without a separator. Used when the sentinel message + // already ends with whatever separator the caller wants + // (e.g., a trailing space-colon for "context directory not found: "). + FmtWrapBare = "%w%s" +) diff --git a/internal/config/regex/cmd.go b/internal/config/regex/cmd.go index c541a4e97..94fffe620 100644 --- a/internal/config/regex/cmd.go +++ b/internal/config/regex/cmd.go @@ -8,61 +8,6 @@ package regex import "regexp" -// MidSudo matches mid-command sudo after && || ; -var MidSudo = regexp.MustCompile(`(;|&&|\|\|)\s*sudo\s`) - -// GitPush matches `git push` invocations across common shell shapes. -// -// Covered entry points (prefix anchor `[^|(`+"`"+`\n]`): -// - Bare `git push` at start of the command -// - After statement separators: `;`, `&&`, `||`, `|`, `&` -// - Subshells and command substitution: `(git push)`, `$(git push)`, -// backtick-wrapped `git push` -// - New lines in multi-line command input -// -// Covered prefixes (the `(\S+\s+)*` group before `git`): -// - Environment variable assignments: `GIT_DIR=/foo git push` -// - Command wrappers: `time git push`, `nice git push`, `nohup git push` -// -// Covered flag shapes between `git` and `push` (the `(\s+\S+)*` group): -// - Short flags with values: `-C /path`, `-c key=value` -// - Short boolean flags: `-p`, `-P`, `-h`, `-v` -// - Long flags (boolean or `=value`): `--git-dir=PATH`, `--no-pager`, -// `--bare`, `--work-tree=PATH`, etc. -// -// Trailing anchor `([^a-zA-Z0-9._/-]|$)`: matches any shell terminator -// (whitespace, `)`, backtick, `;`, `|`, `&`, `>`, `<`, quote, newline, -// end-of-string) but rejects ref-name continuations like `push-to-remote` -// or `push_branch` so `git push-to-remote` (an imagined alias) does not -// false-positive as a push subcommand. -// -// Known blind spots: -// - False-positives on literal `push` as an argument in other -// subcommands, e.g. 
`git log push` when `push` is a branch name. -// Accepted as a safer-than-sorry trade-off for a push guard: -// over-blocking is recoverable, under-blocking is not. -// - Does not match through `eval` or `sh -c` quoting, e.g. -// `eval "git push"` or `sh -c "git push"`. Parsing through arbitrary -// shell quoting is undecidable in the general case. -// - Shell aliases (`alias p=push; git p`) are invisible to static -// regex matching. -// -// Uses Go's RE2 engine, so `(\S+\s+)*` is linear-time despite its -// nested-quantifier appearance. Do not port this regex to a PCRE -// engine without reviewing backtracking behavior. -var GitPush = regexp.MustCompile( - `(^|[;&|(` + "`" + `\n]\s*)(\S+\s+)*git(\s+\S+)*\s+push([^a-zA-Z0-9._/-]|$)`, -) - -// CpMvToBin matches cp/mv to bin directories. -var CpMvToBin = regexp.MustCompile( - `(cp|mv)\s+\S+\s+` + - `(/usr/local/bin|/usr/bin|~/go/bin|~/.local/bin` + - `|/home/\S+/go/bin|/home/\S+/.local/bin)`) - -// InstallToLocalBin matches cp/install to ~/.local/bin. -var InstallToLocalBin = regexp.MustCompile(`(cp|install)\s.*~/\.local/bin`) - // GitCommit matches git commit commands. var GitCommit = regexp.MustCompile(`git\s+commit`) diff --git a/internal/config/regex/cmd_test.go b/internal/config/regex/cmd_test.go deleted file mode 100644 index 81e37086b..000000000 --- a/internal/config/regex/cmd_test.go +++ /dev/null @@ -1,93 +0,0 @@ -// / ctx: https://ctx.ist -// ,'`./ do you remember? -// `.,'\ -// \ Copyright 2026-present Context contributors. 
-// SPDX-License-Identifier: Apache-2.0 - -package regex - -import "testing" - -func TestGitPush(t *testing.T) { - cases := []struct { - name string - input string - want bool - }{ - // Positive: bare - {"bare", "git push", true}, - {"bare_with_args", "git push origin main", true}, - {"bare_with_force", "git push --force origin main", true}, - - // Positive: after statement separators - {"after_semicolon", "cd foo; git push", true}, - {"after_and", "make && git push", true}, - {"after_and_no_space", "make &&git push", true}, - {"after_or", "foo || git push", true}, - {"after_pipe", "echo x | git push", true}, - {"after_bg", "sleep 1 & git push", true}, - - // Positive: subshells and command substitution - {"subshell", "(git push)", true}, - {"command_sub_dollar", "$(git push)", true}, - {"command_sub_backtick", "`git push`", true}, - - // Positive: newline-separated multi-line - {"newline", "git status\ngit push origin main", true}, - - // Positive: env var prefix - {"env_var", "GIT_DIR=/tmp/foo git push", true}, - {"multi_env", "GIT_DIR=/x GIT_SSH_COMMAND=ssh git push", true}, - - // Positive: command wrappers - {"time_wrapper", "time git push", true}, - {"nice_wrapper", "nice git push", true}, - {"nohup_wrapper", "nohup git push", true}, - - // Positive: git top-level flags - {"dash_c_path", "git -C /path push", true}, - {"dash_c_config", "git -c push.default=simple push", true}, - {"long_git_dir", "git --git-dir=/path push", true}, - {"long_work_tree", "git --work-tree=/path push", true}, - {"long_no_pager", "git --no-pager push", true}, - {"long_bare", "git --bare push", true}, - {"short_paginate", "git -p push", true}, - {"short_no_pager", "git -P push", true}, - {"mixed_flags", "git -C /path --no-pager push origin", true}, - {"flags_other_order", "nice git --no-pager -C /path push", true}, - - // Negative: not a push - {"empty", "", false}, - {"no_git", "echo hello", false}, - {"other_subcommand", "git status", false}, - {"git_pull", "git pull origin main", 
false}, - {"git_log", "git log --oneline", false}, - {"git_log_with_grep_push", "git log --grep=push", false}, - - // Negative: ref-name starting with push (tail anchor rejects) - {"push_hyphen", "git push-to-remote", false}, - {"push_underscore", "git push_branch", false}, - {"push_slash", "git push/foo", false}, - {"push_dot", "git push.default", false}, - - // Negative: not the `git` program - {"mygit", "mygit push", false}, - {"gitpush_joined", "gitpush", false}, - {"git_push_joined", "gitpush origin", false}, - - // Accepted false positives: `push` as a literal arg after another - // subcommand. Documented trade-off — over-blocking is preferred - // to under-blocking for a push guard. - {"false_positive_log_push", "git log push", true}, - {"false_positive_commit_msg_push", "git commit -m push", true}, - } - - for _, c := range cases { - t.Run(c.name, func(t *testing.T) { - got := GitPush.MatchString(c.input) - if got != c.want { - t.Errorf("GitPush.MatchString(%q) = %v, want %v", c.input, got, c.want) - } - }) - } -} diff --git a/internal/config/shell/doc.go b/internal/config/shell/doc.go new file mode 100644 index 000000000..9f3d732be --- /dev/null +++ b/internal/config/shell/doc.go @@ -0,0 +1,17 @@ +// / ctx: https://ctx.ist +// ,'`./ do you remember? +// `.,'\ +// \ Copyright 2026-present Context contributors. +// SPDX-License-Identifier: Apache-2.0 + +// Package shell holds shared string constants used by `ctx activate` +// and `ctx deactivate` when emitting shell-specific statements via +// `eval "$(ctx activate)"`. +// +// Keeping the literal identifiers (`bash`, `zsh`, `sh`), POSIX +// export/unset format strings, and single-quote escape sequences in +// internal/config/ satisfies the magic-string audit (non-config +// literals are convention violations) and consolidates the list of +// supported dialects in one place so adding fish / nushell / +// powershell becomes a single-file change. 
+package shell diff --git a/internal/config/shell/shell.go b/internal/config/shell/shell.go new file mode 100644 index 000000000..4cc951263 --- /dev/null +++ b/internal/config/shell/shell.go @@ -0,0 +1,67 @@ +// / ctx: https://ctx.ist +// ,'`./ do you remember? +// `.,'\ +// \ Copyright 2026-present Context contributors. +// SPDX-License-Identifier: Apache-2.0 + +// Package shell holds shared string constants used by `ctx activate` +// and `ctx deactivate` when emitting shell-specific statements via +// `eval "$(ctx activate)"`. +// +// Keeping these in internal/config/ satisfies the magic-string audit +// (any non-config magic literal is a convention violation) and +// consolidates the list of supported dialects in one place so adding +// fish / nushell / powershell is a single-file change. +package shell + +// Supported shell dialect identifiers (lowercase, matches +// filepath.Base($SHELL) for Unix shells). +const ( + // Bash is the POSIX-family shell identifier for GNU Bash. + Bash = "bash" + // Zsh is the POSIX-family shell identifier for Z shell. + Zsh = "zsh" + // Sh is the POSIX-family shell identifier for /bin/sh. + Sh = "sh" +) + +// Emit formats for POSIX-compatible shells (bash/zsh/sh share one +// export/unset syntax; other shells are future work). +const ( + // FormatPOSIXExport is the format string for emitting + // `export KEY=VALUE\n`; expects (key, quotedValue). + FormatPOSIXExport = "export %s=%s\n" + // FormatPOSIXUnset is the format string for emitting + // `unset KEY\n`; expects (key). + FormatPOSIXUnset = "unset %s\n" + // FormatStaleReplaceComment is the format used by `ctx activate` + // to surface a stale CTX_DIR being replaced. Expects + // (envName, oldValue, exportLine). The leading comment hash is + // inert in `eval` output, so it is informational only. 
+ FormatStaleReplaceComment = "# ctx: replacing stale %s=%s\n%s" + // FormatAlsoVisibleAdvisory is the format used by `ctx activate` + // to surface additional .context/ candidates further up the + // path when more than one is visible. The innermost wins + // (selected); each additional candidate gets one of these + // lines written to **stderr** so it actually reaches the user + // during the standard `eval "$(ctx activate)"` invocation + // (`eval` captures stdout but not stderr). Expects + // (additionalPath). + FormatAlsoVisibleAdvisory = "ctx: also visible upward: %s\n" + // FormatActivatedAtAdvisory is the format used by `ctx activate` + // to surface the bound .context/ path on stderr. Always + // printed (single-candidate too) so the user always sees what + // just got bound, not just an empty terminal. Pairs with + // [FormatAlsoVisibleAdvisory] when multiple candidates exist. + // Expects (selectedPath). + FormatActivatedAtAdvisory = "ctx: activated at: %s\n" +) + +// Single-quote characters used by the POSIX-shell quoting helper. +const ( + // SingleQuote wraps values in bash/zsh single-quoted strings. + SingleQuote = "'" + // SingleQuoteEscaped is the canonical POSIX-shell escape for an + // embedded single quote: `'\''` (close, escape, reopen). + SingleQuoteEscaped = `'\''` +) diff --git a/internal/config/warn/warn.go b/internal/config/warn/warn.go index 930b97a57..32ee069f3 100644 --- a/internal/config/warn/warn.go +++ b/internal/config/warn/warn.go @@ -46,6 +46,64 @@ const ( // JSONEncode is the JSON-safe error for encoding failures. JSONEncode = `{"error": "json encode: %v"}` + + // ContextDirResolve is the stderr format for unexpected + // rc.ContextDir failures in hook paths that must not propagate. + // The declared-vs-undeclared split is matched with errors.Is at + // each call site; this constant is used only when that match + // fails, which should never happen with the current single-error + // return but catches future regressions loudly. 
+ ContextDirResolve = "resolve context dir: %v" + + // RCNoContextDir is the stderr message emitted by rc.load when + // it observes ErrDirNotDeclared. Exempt commands (init, + // activate, doctor, hub *, etc.) legitimately reach this state; + // they call accessors and want defaults. Operating commands + // should never reach it because [bootstrap/cmd.go]'s + // PersistentPreRunE gate calls RequireContextDir first. The + // warning is the breadcrumb that catches a missed-gate + // regression: an operating command added without the gate + // would silently get default config (token_budget = 8000, + // auto_archive = true, etc.) regardless of what the user's + // .ctxrc says, with no diagnostic. This message makes the + // silence visible so the call site can be evaluated. + RCNoContextDir = "rc.RC: no CTX_DIR declared; " + + "defaults applied " + + "(investigate calling command if unexpected)" + + // ReadMapTracking is the stderr format for map-tracking.json + // read / parse failures in the check-map-staleness hook. The + // hook can't fail the user's tool call, so it logs and returns + // nil; the log line keeps the failure visible instead of having + // the staleness check silently stop firing. + ReadMapTracking = "read map tracking: %v" + + // CheckKnowledge is the stderr format for check-knowledge hook + // failures downstream of rc.ContextDir resolution. Same shape + // as ReadMapTracking: hook surfaces the error rather than + // silently going dark. + CheckKnowledge = "check knowledge: %v" + + // HubConnectedProbe is the stderr format for failures inside + // [hubsync.Connected] beyond "no context dir declared" and + // "connect file missing." Surfacing the error keeps operators + // from wondering why the hub silently stopped syncing after a + // broken .ctxrc or permissions regression. 
+ HubConnectedProbe = "probe hub connection: %v" + + // StateInitializedProbe is the stderr format for failures + // inside [state.Initialized] beyond "no context dir declared." + // Hooks bail on false either way, but a visible warning shows + // operators why the hook stopped firing instead of letting the + // failure vanish into the gap between "initialized" and "not." + StateInitializedProbe = "probe state initialized: %v" + + // StateDirProbe is the stderr format for failures inside + // [state.Dir] beyond "no context dir declared." Callers use + // the returned path as a filepath.Join base; a warning here + // explains why the state directory resolution went sideways + // before the caller surfaces an empty-path error. + StateDirProbe = "probe state dir: %v" ) // Warn context identifiers for index generation. diff --git a/internal/context/load/loader.go b/internal/context/load/loader.go index 05230c65e..df68a8a6f 100644 --- a/internal/context/load/loader.go +++ b/internal/context/load/loader.go @@ -34,7 +34,11 @@ import ( // - error: NotFoundError if directory doesn't exist, or other IO errors func Do(dir string) (*entity.Context, error) { if dir == "" { - dir = rc.ContextDir() + declared, ctxErr := rc.ContextDir() + if ctxErr != nil { + return nil, ctxErr + } + dir = declared } // Check if the directory exists @@ -50,8 +54,8 @@ func Do(dir string) (*entity.Context, error) { } // Reject context directories that contain symlinks (M-2 defense). 
- if err := validate.Symlinks(dir); err != nil { - return nil, err + if symlinkErr := validate.Symlinks(dir); symlinkErr != nil { + return nil, symlinkErr } ctx := &entity.Context{ diff --git a/internal/context/load/loader_test.go b/internal/context/load/loader_test.go index 5d2c03141..0754403c7 100644 --- a/internal/context/load/loader_test.go +++ b/internal/context/load/loader_test.go @@ -67,7 +67,10 @@ func TestExists(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - result := validate.Exists(tt.dir) + result, existsErr := validate.Exists(tt.dir) + if existsErr != nil { + t.Fatalf("Exists(%q) error = %v", tt.dir, existsErr) + } if result != tt.expected { t.Errorf("Exists(%q) = %v, want %v", tt.dir, result, tt.expected) } diff --git a/internal/context/resolve/resolve.go b/internal/context/resolve/resolve.go index 3b612e805..266e7e8d4 100644 --- a/internal/context/resolve/resolve.go +++ b/internal/context/resolve/resolve.go @@ -7,12 +7,16 @@ package resolve import ( + "errors" "fmt" "path/filepath" "github.com/ActiveMemory/ctx/internal/assets/read/desc" "github.com/ActiveMemory/ctx/internal/config/dir" "github.com/ActiveMemory/ctx/internal/config/embed/text" + "github.com/ActiveMemory/ctx/internal/config/warn" + errCtx "github.com/ActiveMemory/ctx/internal/err/context" + logWarn "github.com/ActiveMemory/ctx/internal/log/warn" "github.com/ActiveMemory/ctx/internal/rc" ) @@ -21,36 +25,64 @@ import ( // // Returns: // - string: Absolute path to the journal directory -func JournalDir() string { - return filepath.Join(rc.ContextDir(), dir.Journal) +// - error: non-nil when the context directory is not declared +func JournalDir() (string, error) { + ctxDir, err := rc.ContextDir() + if err != nil { + return "", err + } + return filepath.Join(ctxDir, dir.Journal), nil } // DirLine returns a one-line context directory identifier. -// Returns an empty string if the directory cannot be resolved. 
+// +// Emits a warn log on any non-ErrDirNotDeclared resolver error. This +// loudness is intentional: the primary caller is an AI agent whose +// incorrect invocations must be visible to the human reading the +// TUI. Do not silence this; do not move the log to a caller that +// might filter it. The error is also returned so non-rendering +// callers can propagate rather than rely solely on the log channel. // // Returns: -// - string: "Context: " line, or empty string if unresolved -func DirLine() string { - d := rc.ContextDir() - if d == "" { - return "" +// - string: "Context: " line on success; "" on any error +// - error: propagated from [rc.ContextDir] unchanged +func DirLine() (string, error) { + d, err := rc.ContextDir() + if err != nil { + if !errors.Is(err, errCtx.ErrDirNotDeclared) { + logWarn.Warn(warn.ContextDirResolve, err) + } + return "", err } - return fmt.Sprintf(desc.Text(text.DescKeyWriteContextDirLabel), d) + return fmt.Sprintf(desc.Text(text.DescKeyWriteContextDirLabel), d), nil } -// AppendDir appends a bracketed context directory footer to msg -// if a context directory is available. Returns msg unchanged otherwise. +// AppendDir appends a bracketed context directory footer to msg. +// +// Emits a warn log on any non-ErrDirNotDeclared resolver error. This +// loudness is intentional: the primary caller is an AI agent whose +// incorrect invocations must be visible to the human reading the +// TUI. Do not silence this; do not move the log to a caller that +// might filter it. The error is also returned so callers can +// propagate instead of rendering an un-annotated message when the +// context directory is unexpectedly unavailable. 
// // Parameters: // - msg: Base message to append the directory footer to // // Returns: -// - string: Message with appended "[Context: ]", or msg unchanged -func AppendDir(msg string) string { - if line := DirLine(); line != "" { - return msg + fmt.Sprintf( - desc.Text(text.DescKeyWriteContextDirBracket), rc.ContextDir(), - ) +// - string: Message with appended "[Context: ]" on success; +// msg unchanged on any error +// - error: propagated from [rc.ContextDir] unchanged +func AppendDir(msg string) (string, error) { + d, err := rc.ContextDir() + if err != nil { + if !errors.Is(err, errCtx.ErrDirNotDeclared) { + logWarn.Warn(warn.ContextDirResolve, err) + } + return msg, err } - return msg + return msg + fmt.Sprintf( + desc.Text(text.DescKeyWriteContextDirBracket), d, + ), nil } diff --git a/internal/context/validate/doc.go b/internal/context/validate/doc.go index 678e7c2fa..14f6dc9f8 100644 --- a/internal/context/validate/doc.go +++ b/internal/context/validate/doc.go @@ -26,10 +26,18 @@ // string is passed, it falls back to the configured // context directory from the rc package. // -// if validate.Exists("") { +// exists, err := validate.Exists("") +// if err != nil { +// return err +// } +// if exists { // // default context dir exists // } -// if validate.Exists("/custom/path") { +// exists, err = validate.Exists("/custom/path") +// if err != nil { +// return err +// } +// if exists { // // custom path exists // } // diff --git a/internal/context/validate/validate.go b/internal/context/validate/validate.go index f9c30ae4e..3ee21d1fa 100644 --- a/internal/context/validate/validate.go +++ b/internal/context/validate/validate.go @@ -7,6 +7,7 @@ package validate import ( + "errors" "os" "path/filepath" @@ -33,17 +34,34 @@ func Initialized(contextDir string) bool { // Exists checks whether a context directory exists. // -// If dir is empty, it uses the configured context directory. +// If dir is empty, it uses the configured context directory. 
A missing +// context declaration is a "false, nil" result (no configured dir, so +// nothing to check); a resolver failure for any other reason and a stat +// failure that is not "does not exist" are both propagated so callers +// can distinguish "the directory is not there" from "we could not find +// out." // // Parameters: // - dir: path to check, or empty string for default // // Returns: // - bool: true if the path exists and is a directory -func Exists(dir string) bool { +// - error: non-nil on resolver failure (other than not-declared) or +// stat failure (other than not-exist) +func Exists(dir string) (bool, error) { if dir == "" { - dir = rc.ContextDir() + declared, err := rc.ContextDir() + if err != nil { + return false, err + } + dir = declared } info, statErr := os.Stat(dir) - return statErr == nil && info.IsDir() + if statErr != nil { + if errors.Is(statErr, os.ErrNotExist) { + return false, nil + } + return false, statErr + } + return info.IsDir(), nil } diff --git a/internal/drift/check.go b/internal/drift/check.go index ba6eb8ba9..ff96dade9 100644 --- a/internal/drift/check.go +++ b/internal/drift/check.go @@ -9,6 +9,7 @@ package drift import ( "fmt" "os" + "path/filepath" "strings" "time" @@ -129,30 +130,30 @@ func checkStaleness(ctx *entity.Context, report *Report) { // checkConstitution performs heuristic checks for constitution violations. // -// Currently, it scans the working directory for files that may contain secrets -// (e.g., .env, credentials, api_key) and flags them as violations. +// Scans the project root (the parent of the declared context directory) +// for files that may contain secrets (e.g. `.env`, `credentials`, +// `api_key`). Under the explicit-context-dir model the project root is +// always `filepath.Dir(rc.ContextDir())` rather than the caller's CWD, +// so `ctx drift` run from a subdirectory still audits the right tree. 
// // Parameters: // - ctx: Loaded context (currently unused, reserved for future checks) // - report: Report to append violations to (modified in place) func checkConstitution(_ *entity.Context, report *Report) { - // Basic heuristic checks for constitution violations - // Check for potential secrets in common config files - secretPatterns := token.SecretPatterns - // Look for common secret file patterns in the working directory - cwd, cwdErr := os.Getwd() - if cwdErr != nil { + ctxDir, ctxErr := rc.ContextDir() + if ctxErr != nil { report.Warnings = append(report.Warnings, Issue{ - Message: fmt.Sprintf(warn.Getwd, cwdErr), + Message: ctxErr.Error(), }) return } - entries, readErr := os.ReadDir(cwd) + projectRoot := filepath.Dir(ctxDir) + entries, readErr := os.ReadDir(projectRoot) if readErr != nil { report.Warnings = append(report.Warnings, Issue{ - Message: fmt.Sprintf(warn.Readdir, cwd, readErr), + Message: fmt.Sprintf(warn.Readdir, projectRoot, readErr), }) return } diff --git a/internal/drift/check_ext.go b/internal/drift/check_ext.go index d8aaa04ae..bf690f6e9 100644 --- a/internal/drift/check_ext.go +++ b/internal/drift/check_ext.go @@ -131,11 +131,19 @@ func checkSyncStaleness(report *Report) { return } - cwd, cwdErr := os.Getwd() - if cwdErr != nil { - report.Passed = append(report.Passed, cfgDrift.CheckSyncStaleness) + // Tool-native outputs are written to the project root, which + // under the explicit-context-dir model is the parent of the + // declared context directory. Using CWD here broke checks when + // `ctx drift` was invoked from a subdirectory (spec: + // specs/explicit-context-dir.md). + ctxDir, ctxErr := rc.ContextDir() + if ctxErr != nil { + report.Warnings = append(report.Warnings, Issue{ + Message: ctxErr.Error(), + }) return } + projectRoot := filepath.Dir(ctxDir) found := false // Check each syncable tool. 
@@ -144,7 +152,7 @@ func checkSyncStaleness(report *Report) { cfgHook.ToolKiro, } for _, tool := range syncTools { - stale := steering.StaleFiles(steeringDir, cwd, tool) + stale := steering.StaleFiles(steeringDir, projectRoot, tool) for _, name := range stale { report.Warnings = append(report.Warnings, Issue{ File: name, diff --git a/internal/drift/check_ext_test.go b/internal/drift/check_ext_test.go index 40e20f37f..5bcf5f699 100644 --- a/internal/drift/check_ext_test.go +++ b/internal/drift/check_ext_test.go @@ -15,6 +15,7 @@ import ( cfgDrift "github.com/ActiveMemory/ctx/internal/config/drift" "github.com/ActiveMemory/ctx/internal/rc" "github.com/ActiveMemory/ctx/internal/steering" + "github.com/ActiveMemory/ctx/internal/testutil/testctx" ) // **Validates: Requirements 19.7** @@ -90,7 +91,7 @@ func TestCheckSteeringTools(t *testing.T) { defer func() { _ = os.Chdir(origDir) }() writeCtxRC(t, tmpDir, fmt.Sprintf("steering:\n dir: %s\n", steeringDir)) - rc.Reset() + testctx.Declare(t, tmpDir) defer rc.Reset() report := &Report{ @@ -284,6 +285,8 @@ func TestCheckSyncStaleness(t *testing.T) { defer func() { _ = os.Chdir(origDir) }() writeCtxRC(t, tmpDir, fmt.Sprintf("steering:\n dir: %s\n", steeringDir)) + // Declare CTX_DIR so rc.ContextDir() resolves (no walk-up). 
+ t.Setenv("CTX_DIR", filepath.Join(tmpDir, ".context")) rc.Reset() defer rc.Reset() @@ -351,7 +354,7 @@ func TestCheckRCTool(t *testing.T) { defer func() { _ = os.Chdir(origDir) }() writeCtxRC(t, tmpDir, tt.rcContent) - rc.Reset() + testctx.Declare(t, tmpDir) defer rc.Reset() report := &Report{ diff --git a/internal/drift/detector_test.go b/internal/drift/detector_test.go index a32345a07..67a78a744 100644 --- a/internal/drift/detector_test.go +++ b/internal/drift/detector_test.go @@ -18,6 +18,7 @@ import ( "github.com/ActiveMemory/ctx/internal/entity" "github.com/ActiveMemory/ctx/internal/io" "github.com/ActiveMemory/ctx/internal/rc" + "github.com/ActiveMemory/ctx/internal/testutil/testctx" ) func TestReportStatus(t *testing.T) { @@ -93,6 +94,8 @@ func TestDetect(t *testing.T) { } }(origDir) + testctx.Declare(t, tmpDir) + // Create a .context directory with test files ctxDir := filepath.Join(tmpDir, ".context") if mkErr := os.Mkdir(ctxDir, 0750); mkErr != nil { diff --git a/internal/entity/notify.go b/internal/entity/notify.go index 4db844cfe..f45182f95 100644 --- a/internal/entity/notify.go +++ b/internal/entity/notify.go @@ -6,6 +6,8 @@ package entity +import "time" + // TemplateRef identifies the hook template and variables that produced a // notification, allowing receivers to filter, re-render, or aggregate // without parsing opaque rendered text. @@ -52,3 +54,32 @@ type NotifyPayload struct { Timestamp string `json:"timestamp"` Project string `json:"project"` } + +// NewNotifyPayload constructs a NotifyPayload, stamping the current +// UTC time in RFC 3339 format. The constructor is pure: callers +// resolve the project name (typically via the CWD-with-fallback +// pattern under log/event and notify) and pass it in, keeping the +// entity package free of I/O and logging. 
+// +// Parameters: +// - event: event type (loop, nudge, relay, heartbeat) +// - message: rendered notification text +// - sessionID: Claude Code session ID ("" is valid) +// - projectName: resolved project name (fallback already applied) +// - detail: template reference for re-rendering; nil is valid +// +// Returns: +// - NotifyPayload: ready to serialize for the event log or webhook +func NewNotifyPayload( + event, message, sessionID, projectName string, + detail *TemplateRef, +) NotifyPayload { + return NotifyPayload{ + Event: event, + Message: message, + Detail: detail, + SessionID: sessionID, + Timestamp: time.Now().UTC().Format(time.RFC3339), + Project: projectName, + } +} diff --git a/internal/entity/system.go b/internal/entity/system.go index f0e29e4fe..18ade9f5a 100644 --- a/internal/entity/system.go +++ b/internal/entity/system.go @@ -6,32 +6,6 @@ package entity -// ArchiveEntry describes a directory or file to include in a backup archive. -type ArchiveEntry struct { - // SourcePath is the absolute path to the directory or file. - SourcePath string - // Prefix is the path prefix inside the tar archive. - Prefix string - // ExcludeDir is a directory name to skip (e.g. "journal-site"). - ExcludeDir string - // Optional means a missing source is not an error. - Optional bool -} - -// BackupResult holds the outcome of a single archive creation. -// -// Fields: -// - Scope: Backup scope (project, global) -// - Archive: Local archive file path -// - Size: Archive file size in bytes -// - SMBDest: SMB destination path (empty if not copied) -type BackupResult struct { - Scope string `json:"scope"` - Archive string `json:"archive"` - Size int64 `json:"size"` - SMBDest string `json:"smb_dest,omitempty"` -} - // FileTokenEntry tracks per-file token counts during context injection. 
// // Fields: diff --git a/internal/entry/write.go b/internal/entry/write.go index b881d5669..3ebca95fe 100644 --- a/internal/entry/write.go +++ b/internal/entry/write.go @@ -45,7 +45,11 @@ func Write(params entity.EntryParams) error { contextDir := params.ContextDir if contextDir == "" { - contextDir = rc.ContextDir() + declared, ctxErr := rc.ContextDir() + if ctxErr != nil { + return ctxErr + } + contextDir = declared } filePath := filepath.Join(contextDir, fileName) diff --git a/internal/err/activate/activate.go b/internal/err/activate/activate.go new file mode 100644 index 000000000..d45012a84 --- /dev/null +++ b/internal/err/activate/activate.go @@ -0,0 +1,23 @@ +// / ctx: https://ctx.ist +// ,'`./ do you remember? +// `.,'\ +// \ Copyright 2026-present Context contributors. +// SPDX-License-Identifier: Apache-2.0 + +package activate + +import ( + "errors" + + "github.com/ActiveMemory/ctx/internal/assets/read/desc" + "github.com/ActiveMemory/ctx/internal/config/embed/text" +) + +// NoCandidates returns the error used when `ctx activate` +// finds zero .context/ directories on the upward path from CWD. +// +// Returns: +// - error: multi-line message prompting `ctx init`. +func NoCandidates() error { + return errors.New(desc.Text(text.DescKeyErrActivateNoCandidates)) +} diff --git a/internal/err/activate/doc.go b/internal/err/activate/doc.go new file mode 100644 index 000000000..b39e1d0b5 --- /dev/null +++ b/internal/err/activate/doc.go @@ -0,0 +1,21 @@ +// / ctx: https://ctx.ist +// ,'`./ do you remember? +// `.,'\ +// \ Copyright 2026-present Context contributors. +// SPDX-License-Identifier: Apache-2.0 + +// Package activate provides error factories for the `ctx activate` +// subcommand. The factories cover the five failure modes surfaced +// by the command: +// +// - [NoCandidates]: scan from CWD found zero .context/ dirs. +// - [Ambiguous]: scan found multiple and refuses to pick. +// - [InvalidPath]: explicit path cannot be stat()ed. 
+// - [NotDirectory]: explicit path exists but is not a directory. +// - [NotContext]: explicit path is a directory but lacks any +// canonical context file (CONSTITUTION.md or TASKS.md). +// +// Messages are loaded via desc.Text using the DescKey constants in +// internal/config/embed/text/err_activate.go so they stay editable +// without code changes. +package activate diff --git a/internal/err/backup/backup.go b/internal/err/backup/backup.go index 8fe139b50..6d2cb3b43 100644 --- a/internal/err/backup/backup.go +++ b/internal/err/backup/backup.go @@ -13,7 +13,8 @@ import ( "github.com/ActiveMemory/ctx/internal/config/embed/text" ) -// Create wraps a failure to create a backup file. +// Create wraps a failure to create a backup (.bak) file during +// ctx init --force. // // Parameters: // - name: backup filename that could not be created @@ -27,21 +28,8 @@ func Create(name string, cause error) error { ) } -// CreateArchive wraps an archive creation failure. -// -// Parameters: -// - cause: the underlying error -// -// Returns: -// - error: "create archive file: " -func CreateArchive(cause error) error { - return fmt.Errorf( - desc.Text(text.DescKeyErrBackupCreateArchive), - cause, - ) -} - -// CreateArchiveDir wraps a failure to create the archive directory. +// CreateArchiveDir wraps a failure to create the archive directory +// under .context/archive/ during task archival. // // Parameters: // - cause: the underlying OS error @@ -54,7 +42,8 @@ func CreateArchiveDir(cause error) error { cause) } -// WriteArchive wraps a failure to write an archive file. +// WriteArchive wraps a failure to write an archive file during +// task archival. // // Parameters: // - cause: the underlying OS error @@ -68,128 +57,8 @@ func WriteArchive(cause error) error { ) } -// SMBConfig wraps an SMB configuration parse failure. 
-// -// Parameters: -// - cause: the underlying error -// -// Returns: -// - error: "parse SMB config: " -func SMBConfig(cause error) error { - return fmt.Errorf( - desc.Text(text.DescKeyErrBackupBackupSMBConfig), - cause, - ) -} - -// Project wraps a project backup failure. -// -// Parameters: -// - cause: the underlying error -// -// Returns: -// - error: "project backup: " -func Project(cause error) error { - return fmt.Errorf( - desc.Text(text.DescKeyErrBackupBackupProject), - cause, - ) -} - -// Global wraps a global backup failure. -// -// Parameters: -// - cause: the underlying error -// -// Returns: -// - error: "global backup: " -func Global(cause error) error { - return fmt.Errorf( - desc.Text(text.DescKeyErrBackupBackupGlobal), cause, - ) -} - -// InvalidScope returns an error for an unrecognized backup scope value. -// -// Parameters: -// - scope: the invalid scope string -// -// Returns: -// - error: "invalid scope '': must be project, global, or all" -func InvalidScope(scope string) error { - return fmt.Errorf( - desc.Text(text.DescKeyErrBackupInvalidBackupScope), scope, - ) -} - -// SourceNotFound returns an error when a backup source path is missing. -// -// Parameters: -// - path: the missing source path -// -// Returns: -// - error: "source not found: " -func SourceNotFound(path string) error { - return fmt.Errorf( - desc.Text(text.DescKeyErrBackupSourceNotFound), path, - ) -} - -// InvalidSMBURL returns an error for a malformed SMB URL. -// -// Parameters: -// - url: the invalid SMB URL -// -// Returns: -// - error: "invalid SMB URL: " -func InvalidSMBURL(url string) error { - return fmt.Errorf( - desc.Text(text.DescKeyErrBackupInvalidSMBURL), url, - ) -} - -// SMBMissingShare returns an error when an SMB URL has no share name. 
-// -// Parameters: -// - url: the SMB URL missing a share name -// -// Returns: -// - error: "SMB URL missing share name: " -func SMBMissingShare(url string) error { - return fmt.Errorf( - desc.Text(text.DescKeyErrBackupSMBMissingShare), url, - ) -} - -// MountFailed wraps a failure to mount an SMB share. -// -// Parameters: -// - source: the SMB source URL -// - cause: the underlying mount error -// -// Returns: -// - error: "failed to mount : " -func MountFailed(source string, cause error) error { - return fmt.Errorf( - desc.Text(text.DescKeyErrBackupMountFailed), source, cause, - ) -} - -// WriteSMB wraps a failure to write to an SMB share. -// -// Parameters: -// - cause: the underlying write error -// -// Returns: -// - error: "write to SMB: " -func WriteSMB(cause error) error { - return fmt.Errorf( - desc.Text(text.DescKeyErrBackupWriteSMB), cause, - ) -} - -// ContextDirNotFound returns an error when the context -// directory does not exist. +// ContextDirNotFound returns an error when the context directory +// does not exist. // // Parameters: // - dir: the missing context directory path. diff --git a/internal/err/backup/doc.go b/internal/err/backup/doc.go index db43e6caf..01ccb8471 100644 --- a/internal/err/backup/doc.go +++ b/internal/err/backup/doc.go @@ -4,35 +4,18 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package backup defines the **typed error constructors** -// the backup subsystem returns. Every failure that can -// happen during `ctx backup` flows through one of these -// constructors so the call site upstream sees a -// sentinel-able error and the renderer downstream knows -// which user-facing text to surface. +// Package backup provides error constructors for two narrow file +// operations that still live under a historical "backup" label: // -// # Why Typed Errors +// - [Create]: wraps `.bak` file creation during `ctx init --force`. 
+// - [CreateArchiveDir], [WriteArchive]: wrap task-archive directory +// and file write failures under `.context/archive/`. +// - [ContextDirNotFound]: the bootstrap-path "context dir missing" +// error. // -// Three reasons: -// -// - **Stability**: error categories are part of -// the public API; adding a constructor is an -// intentional change a reviewer can see. -// - **Routing**: the write-side -// ([internal/write/backup]) maps error types to -// localized text via [internal/assets/read/desc]. -// - **Wrapping**: every constructor wraps its -// underlying cause via `%w` so callers can -// `errors.Is` / `errors.As` against system -// errors when needed. -// -// # Public Surface -// -// Constructors (one per failure mode): [Create], -// [CreateArchive], [CreateArchiveDir], -// [WriteArchive], [SMBConfig]. -// -// # Concurrency -// -// Pure constructors. Concurrent callers never race. +// The former `ctx backup` command (SMB-driven full-project backup) +// was removed; see docs/operations/runbooks/backup-strategy.md for +// the replacement guidance. The package name is kept to avoid +// churning the non-backup callers that still use these generic +// constructors. package backup diff --git a/internal/err/context/context.go b/internal/err/context/context.go index 11d49a9a7..6bc42bcf6 100644 --- a/internal/err/context/context.go +++ b/internal/err/context/context.go @@ -7,12 +7,151 @@ package context import ( + "errors" "fmt" + "strings" "github.com/ActiveMemory/ctx/internal/assets/read/desc" + cfgDir "github.com/ActiveMemory/ctx/internal/config/dir" "github.com/ActiveMemory/ctx/internal/config/embed/text" + cfgRc "github.com/ActiveMemory/ctx/internal/config/rc" + "github.com/ActiveMemory/ctx/internal/config/token" ) +// ErrDirNotDeclared is the sentinel returned by rc.ContextDir when +// CTX_DIR is unset or empty. 
Callers that can legitimately proceed +// without a declared context directory (init, activate, deactivate, +// bootstrap) check with errors.Is; everyone else should propagate +// the error or call rc.RequireContextDir for a user-facing message +// (see NotDeclared below). +// +// The message lives in config/rc (not resolved through desc.Text) +// because sentinel values are initialized at package load time, +// before the embedded YAML lookup is populated. Callers that print +// this to users should wrap it via NotDeclared; the sentinel itself +// is for errors.Is comparisons, not for display. +var ErrDirNotDeclared = errors.New(cfgRc.ErrMsgDirNotDeclared) + +// ErrRelativeNotAllowed is the sentinel returned when CTX_DIR is +// declared as a relative path. Absolute-only is a hardline: a +// relative CTX_DIR would resolve differently in every cwd, exactly +// the silent cwd-dependency this resolver is meant to eliminate. +// +// Wrap via [RelativeNotAllowed] for user-facing messages so the +// offending value is shown. +var ErrRelativeNotAllowed = errors.New(cfgRc.ErrMsgRelativeNotAllowed) + +// ErrNonCanonicalBasename is the sentinel returned when CTX_DIR's +// basename is not the canonical [cfgDir.Context]. It catches the +// common footgun `export CTX_DIR=$(pwd)` (project root instead of +// the `.context` subdirectory) on first use rather than letting init +// deposit canonical files into the project root. +// +// Wrap via [NonCanonicalBasename] for user-facing messages. +var ErrNonCanonicalBasename = errors.New(cfgRc.ErrMsgNonCanonicalBasename) + +// ErrContextDirNotFound is the sentinel returned by +// rc.RequireContextDir when CTX_DIR is shape-valid but the directory +// does not exist on disk. Distinct from [ErrDirNotDeclared], which +// fires before any filesystem check. 
+// +// Construct via [Missing]; the legacy [NotFoundError] type also +// carries this sentinel through its [NotFoundError.Is] method, so +// callers using either pattern can compare with [errors.Is]. +var ErrContextDirNotFound = errors.New(cfgRc.ErrMsgContextDirNotFound) + +// ErrContextDirNotADirectory is the sentinel returned when CTX_DIR +// points at an existing path that is not a directory (typically a +// regular file). Symlinks pointing at directories pass. +var ErrContextDirNotADirectory = errors.New(cfgRc.ErrMsgContextDirNotADirectory) + +// ErrContextDirStat is the sentinel returned when [os.Stat] on +// CTX_DIR fails for a reason other than not-exist (permission +// denied, I/O error). Wrap via [StatFailed] to attach the +// underlying cause. +var ErrContextDirStat = errors.New(cfgRc.ErrMsgContextDirStat) + +// RelativeNotAllowed wraps [ErrRelativeNotAllowed] with the +// offending value so the user sees what they declared. +// +// Parameters: +// - raw: the rejected CTX_DIR value +// +// Returns: +// - error: wrapping [ErrRelativeNotAllowed] for [errors.Is] matches +func RelativeNotAllowed(raw string) error { + return fmt.Errorf(cfgRc.FmtWrapColon, + ErrRelativeNotAllowed, + fmt.Sprintf(desc.Text(text.DescKeyErrContextRelativeNotAllowed), raw), + ) +} + +// NonCanonicalBasename wraps [ErrNonCanonicalBasename] with the +// offending basename so the user sees how their declaration deviated +// from the canonical `.context`. +// +// Parameters: +// - base: the rejected basename (e.g., "tmp", "myctx") +// +// Returns: +// - error: wrapping [ErrNonCanonicalBasename] for [errors.Is] matches +func NonCanonicalBasename(base string) error { + return fmt.Errorf(cfgRc.FmtWrapColon, + ErrNonCanonicalBasename, + fmt.Sprintf( + desc.Text(text.DescKeyErrContextNonCanonicalBasename), + cfgDir.Context, base, + ), + ) +} + +// Missing wraps [ErrContextDirNotFound] with the missing path so +// the user sees which directory was expected. 
+// +// Parameters: +// - path: absolute path that does not exist +// +// Returns: +// - error: wrapping [ErrContextDirNotFound] for [errors.Is] matches +func Missing(path string) error { + return fmt.Errorf(cfgRc.FmtWrapBare, + ErrContextDirNotFound, + path, + ) +} + +// NotADir wraps [ErrContextDirNotADirectory] with the offending +// path so the user sees what was rejected. +// +// Parameters: +// - path: absolute path that exists but is not a directory +// +// Returns: +// - error: wrapping [ErrContextDirNotADirectory] for [errors.Is] +func NotADir(path string) error { + return fmt.Errorf(cfgRc.FmtWrapColon, + ErrContextDirNotADirectory, + fmt.Sprintf(desc.Text(text.DescKeyErrContextDirNotADirectory), path), + ) +} + +// StatFailed wraps [ErrContextDirStat] with the path and the +// underlying [os.Stat] failure. +// +// Parameters: +// - path: absolute path that failed to stat +// - cause: the underlying stat error +// +// Returns: +// - error: wrapping both [ErrContextDirStat] and the underlying +// cause; supports [errors.Is] for either +func StatFailed(path string, cause error) error { + return fmt.Errorf(cfgRc.FmtWrapColon, + ErrContextDirStat, + fmt.Errorf(desc.Text(text.DescKeyErrContextDirStat), path, cause), + ) +} + // NotFoundError is returned when the context directory does not exist. type NotFoundError struct { Dir string @@ -26,42 +165,40 @@ func (e *NotFoundError) Error() string { return desc.Text(text.DescKeyErrContextDirNotFound) + e.Dir } -// NotFound returns a NotFoundError for the given directory. +// Is reports whether target matches the not-found sentinel. Lets +// callers using errors.Is(err, ErrContextDirNotFound) match instances +// of [NotFoundError] without rewriting them. 
// // Parameters: -// - dir: path to the missing context directory +// - target: error to compare against // // Returns: -// - *NotFoundError: typed error for errors.As matching -func NotFound(dir string) *NotFoundError { - return &NotFoundError{Dir: dir} +// - bool: true when target is the not-found sentinel +func (e *NotFoundError) Is(target error) bool { + return target == ErrContextDirNotFound } -// OutsideRoot returns an error when .context/ resolves outside the -// project root. +// NotFound returns a NotFoundError for the given directory. // // Parameters: -// - dir: the context directory path -// - root: the project root path +// - path: path to the missing context directory // // Returns: -// - error: "context directory resolves outside project root " -func OutsideRoot(dir, root string) error { - return fmt.Errorf( - desc.Text(text.DescKeyErrValidateContextOutsideRoot), dir, root, - ) +// - *NotFoundError: typed error for errors.As matching +func NotFound(path string) *NotFoundError { + return &NotFoundError{Dir: path} } // DirSymlink returns an error when .context/ is a symlink. // // Parameters: -// - dir: the context directory path +// - path: the context directory path // // Returns: -// - error: "context directory is a symlink" -func DirSymlink(dir string) error { +// - error: "context directory is a symlink" +func DirSymlink(path string) error { return fmt.Errorf( - desc.Text(text.DescKeyErrValidateContextDirSymlink), dir, + desc.Text(text.DescKeyErrValidateContextDirSymlink), path, ) } @@ -78,3 +215,50 @@ func FileSymlink(file string) error { desc.Text(text.DescKeyErrValidateContextFileSymlink), file, ) } + +// NotDeclared returns the standard "no context directory specified" +// error used by rc.RequireContextDir when CTX_DIR has not been +// declared. 
+// +// The returned message is tailored by how many .context/ candidates +// are visible from the caller's CWD, so users get a next-step hint +// specific to their situation: +// +// - zero candidates: suggest `ctx init`. +// - one candidate: name it as the likely target and suggest +// `eval "$(ctx activate)"`. +// - many candidates: list all of them and refer the user to +// `ctx activate` from a more specific cwd. +// +// The scan that produces candidates is read-only (rc.ScanCandidates) +// and never binds anything; resolution itself stays explicit. +// +// Parameters: +// - candidates: absolute paths of every visible .context/ +// directory, ordered innermost-first. Empty/nil when none. +// +// Returns: +// - error: a multi-line, actionable message ready to be returned +// from a Cobra Run function. +func NotDeclared(candidates []string) error { + switch len(candidates) { + case 0: + return errors.New(desc.Text(text.DescKeyErrContextNotDeclaredZero)) + case 1: + return fmt.Errorf( + desc.Text(text.DescKeyErrContextNotDeclaredOne), + candidates[0], + ) + default: + var b strings.Builder + for _, p := range candidates { + b.WriteString(token.Indent2) + b.WriteString(p) + b.WriteString(token.NewlineLF) + } + return fmt.Errorf( + desc.Text(text.DescKeyErrContextNotDeclaredMany), + strings.TrimRight(b.String(), token.NewlineLF), + ) + } +} diff --git a/internal/err/context/doc.go b/internal/err/context/doc.go index 31e034b4a..1bf5413a0 100644 --- a/internal/err/context/doc.go +++ b/internal/err/context/doc.go @@ -19,10 +19,8 @@ // implements the error interface and supports // errors.As matching. Constructor: [NotFound]. // - **Security validation**: the directory or a -// file inside it is a symlink, or the directory -// resolves outside the project root. -// Constructors: [OutsideRoot], [DirSymlink], -// [FileSymlink]. +// file inside it is a symlink. Constructors: +// [DirSymlink], [FileSymlink]. 
// // # Typed Error: NotFoundError // diff --git a/internal/err/fs/fs.go b/internal/err/fs/fs.go index 6a718fb42..90752c320 100644 --- a/internal/err/fs/fs.go +++ b/internal/err/fs/fs.go @@ -206,20 +206,6 @@ func CreateDir(dir string, cause error) error { ) } -// BoundaryViolation wraps a boundary validation error with a hint -// to use --allow-outside-cwd. -// -// Parameters: -// - cause: the underlying validation error -// -// Returns: -// - error: "\nUse --allow-outside-cwd to override this check" -func BoundaryViolation(cause error) error { - return fmt.Errorf( - desc.Text(text.DescKeyErrFsBoundaryViolation), cause, - ) -} - // ReadFile wraps a file read failure. // // Parameters: diff --git a/internal/exec/gio/doc.go b/internal/exec/gio/doc.go deleted file mode 100644 index d1806b615..000000000 --- a/internal/exec/gio/doc.go +++ /dev/null @@ -1,32 +0,0 @@ -// / ctx: https://ctx.ist -// ,'`./ do you remember? -// `.,'\ -// \ Copyright 2026-present Context contributors. -// SPDX-License-Identifier: Apache-2.0 - -// Package gio wraps GNOME GIO command execution for -// mounting network shares. -// -// # Mounting -// -// Mount runs the "gio mount" command with the given -// URL. This is used during backup operations to mount -// SMB shares before copying context files. -// -// err := gio.Mount("smb://host/share") -// -// # Dependencies -// -// The gio binary must be installed on the system. -// Mount returns an error if gio is not found in PATH -// or if the mount operation fails. The binary name -// and subcommand are sourced from the archive config -// package. -// -// # Security -// -// The URL argument comes from user configuration -// (not from untrusted input). The exec.Command call -// is annotated with a gosec nolint directive to -// acknowledge this. 
-package gio diff --git a/internal/exec/gio/mount.go b/internal/exec/gio/mount.go deleted file mode 100644 index 12ca22790..000000000 --- a/internal/exec/gio/mount.go +++ /dev/null @@ -1,27 +0,0 @@ -// / ctx: https://ctx.ist -// ,'`./ do you remember? -// `.,'\ -// \ Copyright 2026-present Context contributors. -// SPDX-License-Identifier: Apache-2.0 - -package gio - -import ( - "os/exec" - - "github.com/ActiveMemory/ctx/internal/config/archive" -) - -// Mount runs `gio mount` with the given URL. -// -// Parameters: -// - url: mount target (e.g. smb://host/share) -// -// Returns: -// - error: non-nil if gio is not found or the mount fails -func Mount(url string) error { - //nolint:gosec // G204: url is from user config - return exec.Command( - archive.GioBinary, archive.GioMount, url, - ).Run() -} diff --git a/internal/flagbind/doc.go b/internal/flagbind/doc.go index 139f8ce9f..8550decb2 100644 --- a/internal/flagbind/doc.go +++ b/internal/flagbind/doc.go @@ -37,8 +37,8 @@ // flag with shorthand. // - [StringArrayFlagP] registers repeatable string // flags (--tag x --tag y). -// - [PersistentBoolFlag], [PersistentStringFlag] -// register persistent flags inherited by children. +// - [PersistentBoolFlag] registers a persistent bool +// flag inherited by children. // - [LastJSON] registers the --last/--json pair for // list-style commands. // diff --git a/internal/flagbind/flag.go b/internal/flagbind/flag.go index ee0ed96e5..f5aa6067a 100644 --- a/internal/flagbind/flag.go +++ b/internal/flagbind/flag.go @@ -164,38 +164,6 @@ func DurationFlag( ) } -// PersistentBoolFlag registers a persistent boolean -// flag with no shorthand, defaulting to false. 
-// -// Parameters: -// - c: Cobra command to register on -// - p: Pointer to the bool variable -// - name: Flag name constant -// - descKey: YAML DescKey for the flag description -func PersistentBoolFlag( - c *cobra.Command, p *bool, name, descKey string, -) { - c.PersistentFlags().BoolVar( - p, name, false, desc.Flag(descKey), - ) -} - -// PersistentStringFlag registers a persistent string -// flag with no shorthand, defaulting to empty string. -// -// Parameters: -// - c: Cobra command to register on -// - p: Pointer to the string variable -// - name: Flag name constant -// - descKey: YAML DescKey for the flag description -func PersistentStringFlag( - c *cobra.Command, p *string, name, descKey string, -) { - c.PersistentFlags().StringVar( - p, name, "", desc.Flag(descKey), - ) -} - // StringArrayFlagP registers a string array flag with a shorthand // letter. The flag can be repeated: --tag x --tag y. // diff --git a/internal/io/append.go b/internal/io/append.go index cd8f3858d..a86fe48d1 100644 --- a/internal/io/append.go +++ b/internal/io/append.go @@ -6,33 +6,38 @@ package io -import ( - "os" - - cfgWarn "github.com/ActiveMemory/ctx/internal/config/warn" - logWarn "github.com/ActiveMemory/ctx/internal/log/warn" -) +import "os" // AppendBytes opens path in append mode, writes data, and closes. -// Errors are logged to stderr via log/warn; this is a best-effort -// operation for JSONL event logs and session stats where failures -// should not interrupt the caller. +// Returns the first non-nil error encountered among open, write, and +// close. Callers decide whether to propagate, log, or absorb. +// +// Previously this helper logged errors to stderr and returned void +// (best-effort), which conflated "the write succeeded" with "the +// write failed but you'll only know if you scroll stderr". 
Audit +// trails that depend on the append landing (event.Append, stat +// rollups) need the error to propagate so callers can honour a +// log-first ordering: if the record can't be written, downstream +// side effects should not pretend the event happened. // // Parameters: // - path: file path to append to (created if missing) // - data: bytes to append // - perm: file permission bits for creation -func AppendBytes(path string, data []byte, perm os.FileMode) { +// +// Returns: +// - error: non-nil on open, write, or close failure. When write +// succeeds but close fails, the close error is returned so +// disk-flush / fsync problems surface. +func AppendBytes(path string, data []byte, perm os.FileMode) error { f, openErr := SafeAppendFile(path, perm) if openErr != nil { - return + return openErr } - defer func() { - if closeErr := f.Close(); closeErr != nil { - logWarn.Warn(cfgWarn.Close, path, closeErr) - } - }() - if _, writeErr := f.Write(data); writeErr != nil { - logWarn.Warn(cfgWarn.Write, path, writeErr) + _, writeErr := f.Write(data) + closeErr := f.Close() + if writeErr != nil { + return writeErr } + return closeErr } diff --git a/internal/journal/parser/markdown_test.go b/internal/journal/parser/markdown_test.go index ed272a70a..7a6438c3f 100644 --- a/internal/journal/parser/markdown_test.go +++ b/internal/journal/parser/markdown_test.go @@ -12,7 +12,7 @@ import ( "testing" "github.com/ActiveMemory/ctx/internal/config/session" - "github.com/ActiveMemory/ctx/internal/rc" + "github.com/ActiveMemory/ctx/internal/testutil/testctx" ) func TestMarkdownSessionParser_Tool(t *testing.T) { @@ -236,8 +236,7 @@ func TestIsSessionHeader_CustomPrefix(t *testing.T) { } t.Cleanup(func() { _ = os.Chdir(origDir) }) - rc.Reset() - t.Cleanup(rc.Reset) + testctx.Declare(t, ctxrcDir) tests := []struct { name string diff --git a/internal/log/event/event.go b/internal/log/event/event.go index 0be6255f5..ae2798e1b 100644 --- a/internal/log/event/event.go +++ 
b/internal/log/event/event.go @@ -10,7 +10,6 @@ import ( "encoding/json" "os" "path/filepath" - "time" "github.com/ActiveMemory/ctx/internal/config/fs" "github.com/ActiveMemory/ctx/internal/config/project" @@ -24,56 +23,82 @@ import ( // Append writes a single event to the log file. // -// Noop when event logging is disabled in .ctxrc. Creates the state -// directory if it does not exist. Rotates the log when it exceeds -// EventLogMaxBytes. All errors are silently ignored: event logging -// must never break hook execution. +// # Log-First Principle +// +// The event log is the authoritative record of "what this hook did". +// Any hook path that emits an observable side effect (webhook, stdout +// marker, state mutation) must call Append FIRST and gate the side +// effect on the log landing. If the log write fails, the side effect +// must not fire: claiming success for an event we never recorded is +// the kind of silent drift this function used to produce before it +// returned an error. See docs/security/reporting.md → +// "Log-First Audit Trail" for the rationale and call-site pattern. +// +// Noop (nil) when event logging is disabled in .ctxrc. Creates the +// state directory if it does not exist. Rotates the log when it +// exceeds EventLogMaxBytes. // // Parameters: // - event: Event type (e.g., "relay", "nudge") // - message: Human-readable description // - sessionID: Claude session ID (may be empty) // - detail: Optional template reference (may be nil) -func Append(event, message, sessionID string, detail *entity.TemplateRef) { +// +// Returns: +// - error: non-nil on path resolution, state-dir creation, rotation, +// marshal, or append failure. Callers are expected to propagate +// this error and skip any downstream webhook / state / stdout +// side effects that would pretend the event happened. 
The Getwd +// failure path is the one intentional exception: it falls back +// to [project.FallbackName] and only warns to stderr, because +// the event itself is still recorded, just with a less specific +// project field. A missing CWD is never a reason to drop an +// event entry. +func Append( + event, message, sessionID string, + detail *entity.TemplateRef, +) error { if !rc.EventLog() { - return + return nil } - logPath := logFilePath() + logPath, pathErr := logFilePath() + if pathErr != nil { + return pathErr + } // Ensure state directory exists. stateDir := filepath.Dir(logPath) if mkErr := io.SafeMkdirAll(stateDir, fs.PermExec); mkErr != nil { - return + return mkErr } // Check rotation before appending. - rotate(logPath) + if rotateErr := rotate(logPath); rotateErr != nil { + return rotateErr + } projectName := project.FallbackName if cwd, cwdErr := os.Getwd(); cwdErr == nil { projectName = filepath.Base(cwd) } else { + // Documented fallback: record the event with a generic + // project name rather than dropping the entry entirely. logWarn.Warn(warn.Getwd, cwdErr) } - payload := entity.NotifyPayload{ - Event: event, - Message: message, - Detail: detail, - SessionID: sessionID, - Timestamp: time.Now().UTC().Format(time.RFC3339), - Project: projectName, - } + payload := entity.NewNotifyPayload( + event, message, sessionID, projectName, detail, + ) line, marshalErr := json.Marshal(payload) if marshalErr != nil { - return + return marshalErr } newline := token.NewlineLF[0] line = append(line, newline) - io.AppendBytes(logPath, line, fs.PermFile) + return io.AppendBytes(logPath, line, fs.PermFile) } // Query reads events from the log, applying filters. @@ -92,7 +117,10 @@ func Query(opts entity.EventQueryOpts) ([]entity.NotifyPayload, error) { // Read the rotated file first (older events) if requested. 
if opts.IncludeRotated { - prev := prevLogFilePath() + prev, prevErr := prevLogFilePath() + if prevErr != nil { + return []entity.NotifyPayload{}, nil + } events, readErr := readLogFile(prev) if readErr != nil { return nil, readErr @@ -101,7 +129,10 @@ func Query(opts entity.EventQueryOpts) ([]entity.NotifyPayload, error) { } // Read current log file. - current := logFilePath() + current, currentErr := logFilePath() + if currentErr != nil { + return []entity.NotifyPayload{}, nil + } events, readErr := readLogFile(current) if readErr != nil { return nil, readErr diff --git a/internal/log/event/event_test.go b/internal/log/event/event_test.go index 36d16ea99..5c65cf476 100644 --- a/internal/log/event/event_test.go +++ b/internal/log/event/event_test.go @@ -19,6 +19,7 @@ import ( "github.com/ActiveMemory/ctx/internal/config/fs" "github.com/ActiveMemory/ctx/internal/entity" "github.com/ActiveMemory/ctx/internal/rc" + "github.com/ActiveMemory/ctx/internal/testutil/testctx" ) // setupTestDir creates a temporary directory, configures rc to use it, @@ -27,10 +28,8 @@ func setupTestDir(t *testing.T, enableLog bool) string { t.Helper() tmpDir := t.TempDir() - rc.Reset() - rc.OverrideContextDir(filepath.Join(tmpDir, dir.Context)) - - // Write .ctxrc to control event_log. + // Write .ctxrc at the project root (the parent of the .context/ + // that testctx will declare). rcContent := "event_log: false\n" if enableLog { rcContent = "event_log: true\n" @@ -41,12 +40,11 @@ func setupTestDir(t *testing.T, enableLog bool) string { t.Fatalf("failed to write .ctxrc: %v", writeErr) } - // Change to temp dir so rc loads the .ctxrc. 
origDir, _ := os.Getwd() if chErr := os.Chdir(tmpDir); chErr != nil { t.Fatalf("failed to chdir: %v", chErr) } - rc.Reset() // force reload with new cwd + testctx.Declare(t, tmpDir) t.Cleanup(func() { _ = os.Chdir(origDir) @@ -60,7 +58,9 @@ func TestAppend_Disabled(t *testing.T) { tmpDir := setupTestDir(t, false) logPath := filepath.Join(tmpDir, dir.Context, dir.State, event.FileLog) - Append("relay", "test message", "session-1", nil) + if err := Append("relay", "test message", "session-1", nil); err != nil { + t.Fatalf("Append: %v", err) + } if _, statErr := os.Stat(logPath); !os.IsNotExist(statErr) { t.Error("Append() created log file when event_log is disabled") @@ -72,7 +72,9 @@ func TestAppend_Basic(t *testing.T) { logPath := filepath.Join(tmpDir, dir.Context, dir.State, event.FileLog) detail := entity.NewTemplateRef("qa-reminder", "gate", nil) - Append("relay", "QA gate reminder", "session-1", detail) + if err := Append("relay", "QA gate reminder", "session-1", detail); err != nil { + t.Fatalf("Append: %v", err) + } data, readErr := os.ReadFile(logPath) //nolint:gosec // test file if readErr != nil { @@ -110,7 +112,9 @@ func TestAppend_CreatesStateDir(t *testing.T) { t.Fatal("state dir should not exist before AppendEvent") } - Append("nudge", "test", "", nil) + if err := Append("nudge", "test", "", nil); err != nil { + t.Fatalf("Append: %v", err) + } if _, statErr := os.Stat(stateDir); os.IsNotExist(statErr) { t.Error("Append() did not create state directory") @@ -138,7 +142,9 @@ func TestAppend_Rotation(t *testing.T) { } // AppendEvent should trigger rotation. - Append("relay", "after rotation", "", nil) + if err := Append("relay", "after rotation", "", nil); err != nil { + t.Fatalf("Append: %v", err) + } // Previous file should exist with the big content. 
if _, statErr := os.Stat(prevPath); os.IsNotExist(statErr) { @@ -185,7 +191,9 @@ func TestAppend_RotationOverwrite(t *testing.T) { t.Fatalf("failed to write big log: %v", writeErr) } - Append("relay", "new event", "", nil) + if err := Append("relay", "new event", "", nil); err != nil { + t.Fatalf("Append: %v", err) + } // The .1 file should now contain the rotated content, // not "old rotated content". @@ -213,12 +221,18 @@ func TestQuery_NoFile(t *testing.T) { func TestQuery_FilterHook(t *testing.T) { setupTestDir(t, true) - Append("relay", "qa gate", "s1", - entity.NewTemplateRef("qa-reminder", "gate", nil)) - Append("relay", "context load", "s1", - entity.NewTemplateRef("context-load-gate", "inject", nil)) - Append("nudge", "ceremonies", "s1", - entity.NewTemplateRef("check-ceremony", "both", nil)) + if err := Append("relay", "qa gate", "s1", + entity.NewTemplateRef("qa-reminder", "gate", nil)); err != nil { + t.Fatalf("Append: %v", err) + } + if err := Append("relay", "context load", "s1", + entity.NewTemplateRef("context-load-gate", "inject", nil)); err != nil { + t.Fatalf("Append: %v", err) + } + if err := Append("nudge", "ceremonies", "s1", + entity.NewTemplateRef("check-ceremony", "both", nil)); err != nil { + t.Fatalf("Append: %v", err) + } events, queryErr := Query(entity.EventQueryOpts{Hook: "qa-reminder"}) if queryErr != nil { @@ -235,9 +249,15 @@ func TestQuery_FilterHook(t *testing.T) { func TestQuery_FilterSession(t *testing.T) { setupTestDir(t, true) - Append("relay", "session one", "s1", nil) - Append("relay", "session two", "s2", nil) - Append("relay", "session one again", "s1", nil) + if err := Append("relay", "session one", "s1", nil); err != nil { + t.Fatalf("Append: %v", err) + } + if err := Append("relay", "session two", "s2", nil); err != nil { + t.Fatalf("Append: %v", err) + } + if err := Append("relay", "session one again", "s1", nil); err != nil { + t.Fatalf("Append: %v", err) + } events, queryErr := Query(entity.EventQueryOpts{Session: 
"s1"}) if queryErr != nil { @@ -252,7 +272,9 @@ func TestQuery_Last(t *testing.T) { setupTestDir(t, true) for i := 0; i < 20; i++ { - Append("relay", "event", "", nil) + if err := Append("relay", "event", "", nil); err != nil { + t.Fatalf("Append: %v", err) + } } events, queryErr := Query(entity.EventQueryOpts{Last: 5}) @@ -283,7 +305,9 @@ func TestQuery_IncludeRotated(t *testing.T) { } // Write event to current file. - Append("relay", "new event", "", nil) + if err := Append("relay", "new event", "", nil); err != nil { + t.Fatalf("Append: %v", err) + } // Without --all, only current events. events, _ := Query(entity.EventQueryOpts{}) diff --git a/internal/log/event/ops.go b/internal/log/event/ops.go index d5257314c..15a0afd2b 100644 --- a/internal/log/event/ops.go +++ b/internal/log/event/ops.go @@ -7,34 +7,49 @@ package event import ( + "errors" "os" "github.com/ActiveMemory/ctx/internal/config/event" - "github.com/ActiveMemory/ctx/internal/config/warn" - logWarn "github.com/ActiveMemory/ctx/internal/log/warn" ) // rotate checks the current log file size and renames it to the // previous-generation path when it exceeds [event.LogMaxBytes]. -// Best-effort: all errors are silently ignored so rotation never -// blocks event logging. +// +// Returns nil when there is nothing to do (log missing or under the +// size threshold), or when rotation succeeds. Any other failure is +// propagated so callers honour the log-first principle: if the log +// cannot be rotated, [Append] cannot safely continue writing and +// downstream side effects must not fire. // // Parameters: // - logPath: absolute path to the current event log -func rotate(logPath string) { +// +// Returns: +// - error: [os.ErrNotExist] from the Stat or Remove path is treated +// as "nothing to rotate" / "nothing to clean up" and returns nil. +// Any other stat, path, rename, or remove failure is surfaced. 
+func rotate(logPath string) error { info, statErr := os.Stat(logPath) if statErr != nil { - return // file doesn't exist yet, nothing to rotate + if errors.Is(statErr, os.ErrNotExist) { + return nil // nothing to rotate yet + } + return statErr } if info.Size() < int64(event.LogMaxBytes) { - return + return nil } - prevPath := prevLogFilePath() - if removeErr := os.Remove(prevPath); removeErr != nil { - logWarn.Warn(warn.Remove, prevPath, removeErr) + prevPath, prevErr := prevLogFilePath() + if prevErr != nil { + return prevErr } - if renameErr := os.Rename(logPath, prevPath); renameErr != nil { - logWarn.Warn(warn.Rename, logPath, renameErr) + if removeErr := os.Remove(prevPath); removeErr != nil { + if !errors.Is(removeErr, os.ErrNotExist) { + return removeErr + } + // ErrNotExist is fine: no previous generation to remove. } + return os.Rename(logPath, prevPath) } diff --git a/internal/log/event/path.go b/internal/log/event/path.go index 2951947eb..7f360103e 100644 --- a/internal/log/event/path.go +++ b/internal/log/event/path.go @@ -18,14 +18,24 @@ import ( // // Returns: // - string: path under the active context directory -func logFilePath() string { - return filepath.Join(rc.ContextDir(), dir.State, event.FileLog) +// - error: non-nil when the context directory is not declared +func logFilePath() (string, error) { + ctxDir, err := rc.ContextDir() + if err != nil { + return "", err + } + return filepath.Join(ctxDir, dir.State, event.FileLog), nil } // prevLogFilePath returns the absolute path to the rotated event log. 
// // Returns: // - string: path under the active context directory -func prevLogFilePath() string { - return filepath.Join(rc.ContextDir(), dir.State, event.FileLogPrev) +// - error: non-nil when the context directory is not declared +func prevLogFilePath() (string, error) { + ctxDir, err := rc.ContextDir() + if err != nil { + return "", err + } + return filepath.Join(ctxDir, dir.State, event.FileLogPrev), nil } diff --git a/internal/mcp/README.md b/internal/mcp/README.md index c0976e1e6..757ee6155 100644 --- a/internal/mcp/README.md +++ b/internal/mcp/README.md @@ -1,4 +1,4 @@ -# internal/mcp — MCP Server +# internal/mcp: MCP Server JSON-RPC 2.0 server exposing ctx context to any MCP-compatible AI tool over stdin/stdout. See `doc.go` for the full resource, @@ -44,14 +44,14 @@ mcp/ Three files, always: -1. **Define** in `server/def/tool/tool.go` — add entry to `Defs` +1. **Define** in `server/def/tool/tool.go`: add entry to `Defs` array with name, description, and `InputSchema` (JSON Schema for parameters) -2. **Implement** in `handler/tool.go` — add method on `Handler` +2. **Implement** in `handler/tool.go`: add method on `Handler` with signature `func (h *Handler) ToolName(args...) (string, error)` -3. **Route** in `server/route/tool/tool.go` — add case in the +3. **Route** in `server/route/tool/tool.go`: add case in the dispatch switch calling your handler method, wrap result with `out.ToolResult()` @@ -59,36 +59,36 @@ Three files, always: Same pattern, three files: -1. **Define** in `server/def/prompt/prompt.go` — add entry to +1. **Define** in `server/def/prompt/prompt.go`: add entry to `Defs` array with name, description, and arguments -2. **Build** in `server/route/prompt/prompt.go` — add builder +2. **Build** in `server/route/prompt/prompt.go`: add builder function returning `[]proto.PromptMessage` -3. **Route** in `server/route/prompt/dispatch.go` — add case in +3. 
**Route** in `server/route/prompt/dispatch.go`: add case in the dispatch switch ## How To Add a New Resource -1. **Register** in `server/catalog/data.go` — add URI-to-file +1. **Register** in `server/catalog/data.go`: add URI-to-file mapping -2. **Handle** in `server/resource/resource.go` — if it needs +2. **Handle** in `server/resource/resource.go`: if it needs special assembly (like the agent packet), add a reader function ## Key Design Decisions -- **handler/ has no JSON-RPC coupling** — all tool methods take +- **handler/ has no JSON-RPC coupling**: all tool methods take typed args and return `(string, error)`. Protocol translation happens in server/route/. This makes handler/ testable without stdin/stdout. -- **Single-threaded main loop** — one request at a time. Poller +- **Single-threaded main loop**: one request at a time. Poller runs in a background goroutine. Thread safety via mutex on stdout writer only. -- **Governance is advisory** — session state tracks tool calls and +- **Governance is advisory**: session state tracks tool calls and nudges (drift check, persist reminder) but never blocks execution. -- **Protocol version** — 2024-11-05. Capabilities advertised: +- **Protocol version**: 2024-11-05. Capabilities advertised: resources (subscribe=true), tools, prompts. diff --git a/internal/mcp/handler/tool.go b/internal/mcp/handler/tool.go index 0952631e6..694995482 100644 --- a/internal/mcp/handler/tool.go +++ b/internal/mcp/handler/tool.go @@ -33,7 +33,6 @@ import ( "github.com/ActiveMemory/ctx/internal/mcp/handler/task" "github.com/ActiveMemory/ctx/internal/mcp/server/stat" "github.com/ActiveMemory/ctx/internal/tidy" - "github.com/ActiveMemory/ctx/internal/validate" ) // Status loads context and returns a status summary. 
@@ -88,17 +87,11 @@ func Status(d *entity.MCPDeps) (string, error) { // // Returns: // - string: confirmation message with entry type and target file -// - error: boundary, validation, or write error +// - error: validation or write error func Add( d *entity.MCPDeps, entryType, content string, opts entity.EntryOpts, ) (string, error) { - if boundaryErr := validate.Boundary( - d.ContextDir, - ); boundaryErr != nil { - return "", boundaryErr - } - if writeErr := entry.ValidateAndWrite(entity.EntryParams{ Type: entryType, Content: content, @@ -131,14 +124,8 @@ func Add( // // Returns: // - string: confirmation message with completed task text -// - error: boundary or completion error +// - error: completion error func Complete(d *entity.MCPDeps, query string) (string, error) { - if boundaryErr := validate.Boundary( - d.ContextDir, - ); boundaryErr != nil { - return "", boundaryErr - } - completedTask, _, completeErr := taskComplete.Complete( query, d.ContextDir, ) @@ -295,16 +282,11 @@ func Recall( // // Returns: // - string: confirmation with file name and review status -// - error: boundary, validation, or write error +// - error: validation or write error func WatchUpdate( d *entity.MCPDeps, entryType, content string, opts entity.EntryOpts, ) (string, error) { - boundaryErr := validate.Boundary(d.ContextDir) - if boundaryErr != nil { - return "", boundaryErr - } - // Handle the "complete" type as a special case. 
if entryType == cfgEntry.Complete { completedTask, _, completeErr := taskComplete.Complete( @@ -367,14 +349,8 @@ func WatchUpdate( // // Returns: // - string: summary of moved tasks and cleaned sections -// - error: boundary, context load, or write error +// - error: context load or write error func Compact(d *entity.MCPDeps, archive bool) (string, error) { - if boundaryErr := validate.Boundary( - d.ContextDir, - ); boundaryErr != nil { - return "", boundaryErr - } - ctx, loadErr := load.Do(d.ContextDir) if loadErr != nil { return "", loadErr diff --git a/internal/mcp/server/server_test.go b/internal/mcp/server/server_test.go index e1aceb694..010599fe0 100644 --- a/internal/mcp/server/server_test.go +++ b/internal/mcp/server/server_test.go @@ -20,6 +20,7 @@ import ( cfgSchema "github.com/ActiveMemory/ctx/internal/config/mcp/schema" "github.com/ActiveMemory/ctx/internal/mcp/proto" mcpIO "github.com/ActiveMemory/ctx/internal/mcp/server/io" + "github.com/ActiveMemory/ctx/internal/rc" ) func newTestServer(t *testing.T) (*Server, string) { @@ -40,6 +41,12 @@ func newTestServer(t *testing.T) (*Server, string) { if err := os.MkdirAll(contextDir, 0o755); err != nil { t.Fatalf("mkdir: %v", err) } + // Tools dispatched through the MCP server call rc.ContextDir() + // for paths under .context/; declare it so they resolve without + // the "context directory not declared" error. 
+ t.Setenv("CTX_DIR", contextDir) + rc.Reset() + t.Cleanup(rc.Reset) files := map[string]string{ ctx.Constitution: "# Constitution\n\n- Rule 1: Never break things\n", ctx.Task: "# Tasks\n\n- [ ] Build MCP server\n- [ ] Write tests\n", diff --git a/internal/memory/promote_test.go b/internal/memory/promote_test.go index 3d92dc897..8f61eb587 100644 --- a/internal/memory/promote_test.go +++ b/internal/memory/promote_test.go @@ -15,7 +15,7 @@ import ( "github.com/ActiveMemory/ctx/internal/config/ctx" "github.com/ActiveMemory/ctx/internal/config/dir" "github.com/ActiveMemory/ctx/internal/config/entry" - "github.com/ActiveMemory/ctx/internal/rc" + "github.com/ActiveMemory/ctx/internal/testutil/testctx" ) // setupContextDir creates a minimal .context/ for promotion tests. @@ -24,7 +24,7 @@ func setupContextDir(t *testing.T) (string, func()) { workDir := t.TempDir() origDir, _ := os.Getwd() _ = os.Chdir(workDir) - rc.Reset() + testctx.Declare(t, workDir) contextDir := filepath.Join(workDir, dir.Context) if mkErr := os.MkdirAll(contextDir, 0o755); mkErr != nil { diff --git a/internal/notify/notify.go b/internal/notify/notify.go index 62c8ea4a7..b350a8b47 100644 --- a/internal/notify/notify.go +++ b/internal/notify/notify.go @@ -27,15 +27,32 @@ import ( // LoadWebhook reads and decrypts the webhook URL from .context/.notify.enc. // -// Returns ("", nil) if either the key file or encrypted file is missing -// (silent noop: webhook not configured). +// Returns ("", nil) when: +// - the key file is missing (key was never generated), +// - the encrypted file is missing (webhook never configured). +// +// Any resolver or I/O failure is propagated (including +// [errCtx.ErrDirNotDeclared]) so callers can distinguish +// "no context dir" from "no webhook configured" rather than +// being forced to treat them identically. [Send] treats any error +// as "no webhook, silently skip"; interactive callers (e.g. 
+// `ctx notify test`) can use [errors.Is] to surface a clearer +// message when the project is not set up yet. // // Returns: // - string: the decrypted webhook URL, or "" if not configured -// - error: non-nil only if decryption fails (missing files are silent) +// - error: non-nil on any resolver failure or decryption failure; +// missing key / encrypted file are silent func LoadWebhook() (string, error) { - kp := rc.KeyPath() - encPath := filepath.Join(rc.ContextDir(), cfgCrypto.NotifyEnc) + kp, kpErr := rc.KeyPath() + if kpErr != nil { + return "", kpErr + } + ctxDir, pathErr := rc.ContextDir() + if pathErr != nil { + return "", pathErr + } + encPath := filepath.Join(ctxDir, cfgCrypto.NotifyEnc) key, loadErr := crypto.LoadKey(kp) if loadErr != nil { @@ -71,8 +88,15 @@ func LoadWebhook() (string, error) { // Returns: // - error: non-nil if key generation, encryption, or file write fails func SaveWebhook(url string) error { - kp := rc.KeyPath() - encPath := filepath.Join(rc.ContextDir(), cfgCrypto.NotifyEnc) + kp, kpErr := rc.KeyPath() + if kpErr != nil { + return kpErr + } + ctxDir, ctxErr := rc.ContextDir() + if ctxErr != nil { + return ctxErr + } + encPath := filepath.Join(ctxDir, cfgCrypto.NotifyEnc) key, loadErr := crypto.LoadKey(kp) if loadErr != nil { @@ -152,14 +176,9 @@ func Send(event, message, sessionID string, detail *entity.TemplateRef) error { logWarn.Warn(cfgWarn.Getwd, cwdErr) } - payload := entity.NotifyPayload{ - Event: event, - Message: message, - Detail: detail, - SessionID: sessionID, - Timestamp: time.Now().UTC().Format(time.RFC3339), - Project: projectName, - } + payload := entity.NewNotifyPayload( + event, message, sessionID, projectName, detail, + ) body, marshalErr := json.Marshal(payload) if marshalErr != nil { diff --git a/internal/notify/notify_test.go b/internal/notify/notify_test.go index ab2ad11b5..1738129c3 100644 --- a/internal/notify/notify_test.go +++ b/internal/notify/notify_test.go @@ -17,6 +17,7 @@ import ( 
"github.com/ActiveMemory/ctx/internal/config/crypto" "github.com/ActiveMemory/ctx/internal/entity" "github.com/ActiveMemory/ctx/internal/rc" + "github.com/ActiveMemory/ctx/internal/testutil/testctx" ) func setupTestDir(t *testing.T) (string, func()) { @@ -26,8 +27,7 @@ func setupTestDir(t *testing.T) (string, func()) { _ = os.Chdir(tempDir) _ = os.MkdirAll(filepath.Join(tempDir, ".context"), 0o750) - // Point rc to this temp dir's .context - rc.Reset() + testctx.Declare(t, tempDir) return tempDir, func() { _ = os.Chdir(origDir) diff --git a/internal/rc/candidates.go b/internal/rc/candidates.go new file mode 100644 index 000000000..983f4f048 --- /dev/null +++ b/internal/rc/candidates.go @@ -0,0 +1,50 @@ +// / ctx: https://ctx.ist +// ,'`./ do you remember? +// `.,'\ +// \ Copyright 2026-present Context contributors. +// SPDX-License-Identifier: Apache-2.0 + +package rc + +import ( + "os" + "path/filepath" + + "github.com/ActiveMemory/ctx/internal/config/dir" +) + +// ScanCandidates walks upward from start collecting every directory +// whose basename matches the canonical context directory name +// (`.context`). The scan is read-only: it does not resolve, bind, or +// select a context directory. It exists so error messages and the +// `ctx activate` subcommand can share the same candidate enumeration +// without reintroducing walk-up resolution elsewhere. +// +// The scan always uses the canonical `.context` basename, independent +// of any `.ctxrc` configuration. Under the explicit-declaration model, +// a custom name is only ever reached via an explicit --context-dir or +// CTX_DIR, so a rename-aware scan would be surplus machinery. +// +// Parameters: +// - start: directory to begin the upward walk from; typically the +// current working directory returned by os.Getwd. +// +// Returns: +// - []string: absolute paths of every matching directory found, +// ordered innermost-first (closest to start first). Empty when +// no candidates are visible on the upward path. 
+func ScanCandidates(start string) []string { + var out []string + cur := start + for { + path := filepath.Join(cur, dir.Context) + if info, err := os.Stat(path); err == nil && info.IsDir() { + out = append(out, path) + } + parent := filepath.Dir(cur) + if parent == cur { + return out + } + cur = parent + } +} diff --git a/internal/rc/doc.go b/internal/rc/doc.go index f7433e40d..42f683a91 100644 --- a/internal/rc/doc.go +++ b/internal/rc/doc.go @@ -4,61 +4,52 @@ // \ Copyright 2026-present Context contributors. // SPDX-License-Identifier: Apache-2.0 -// Package rc loads, caches, and exposes the runtime -// configuration every other ctx package depends on. -// It is the single source of truth for context -// directory location, token budget, encryption -// settings, and the dozens of other knobs that shape -// ctx behavior. +// Package rc loads, caches, and exposes the runtime configuration +// every other ctx package depends on. It is the single source of +// truth for context directory location, token budget, encryption +// settings, and the dozens of other knobs that shape ctx behavior. // -// # Configuration Sources (Resolution Order) +// # Context-Directory Resolution (explicit-only) // -// 1. CLI overrides: set via ctx --context-dir -// (highest priority, stored in rcOverrideDir). -// 2. Environment variables: CTX_DIR, -// CTX_TOKEN_BUDGET override .ctxrc fields. -// 3. .ctxrc (YAML): read once at process start -// by [load]. Parse errors are logged via -// [internal/write/rc.ParseWarning] and defaults -// are kept; a malformed .ctxrc never aborts ctx. -// 4. Defaults: every field has a hardcoded default -// in [Default] (8000 token budget, 7-day archive, -// 200k context window, etc.). +// Under the explicit-context-dir model +// (spec: specs/explicit-context-dir.md), rc does NOT walk the +// filesystem looking for a .context/ directory. Every non-exempt +// command must declare the target explicitly. 
// -// The result is the singleton [CtxRC] returned by -// [RC], memoized via sync.Once so YAML is parsed at -// most once per process. +// [ContextDir] returns the declared path or an error: // -// # Context-Directory Resolution +// 1. The CTX_DIR environment variable is the single declaration +// channel; [ContextDir] observes no other configuration +// source. +// 2. Otherwise an ErrDirNotDeclared error is returned. Exempt +// callers (ctx init, activate, deactivate, system bootstrap) +// handle the undeclared case themselves; every other command +// should call +// [RequireContextDir] instead, which returns a tailored error +// whose message depends on how many .context/ candidates are +// visible from CWD. // -// [ContextDir] resolves the .context/ path: +// [ScanCandidates] is a read-only upward scan used by the +// `ctx activate` subcommand and by [RequireContextDir]'s error +// formatter. It does not resolve, bind, or select a directory. // -// 1. CLI override (rcOverrideDir): return absolute. -// 2. Configured absolute path: return as-is. -// 3. Upward walk from CWD ([walkForContextDir]): -// find the first ancestor containing a matching -// directory, bounded by the git root. -// 4. Fallback: filepath.Join(cwd, name) so that -// ctx init can create a fresh .context/. +// # Configuration File (.ctxrc) // -// # Key Accessors +// Once [ContextDir] is declared, [load] reads `.ctxrc` from +// `filepath.Dir(ContextDir())`: the project root, which by contract +// is the parent of [ContextDir]. CWD has no say. When no context +// directory is declared, `.ctxrc` is not read at all and defaults +// apply.
// -// - [TokenBudget], [ContextWindow]: budgets -// - [AutoArchive], [ArchiveAfterDays]: lifecycle -// - [ScratchpadEncrypt], [KeyPath], -// [KeyRotationDays]: encryption -// - [ClassifyRules], [SpecSignalWords]: memory -// - [HooksEnabled], [HooksDir], [HookTimeout] -- -// hook system -// - [SteeringDir]: steering layer -// - [Tool], [ActiveProfile]: tool and profile -// - [Validate]: strict YAML validation with -// unknown-field warnings +// Environment overrides (CTX_TOKEN_BUDGET) are applied after the +// YAML merge so users can tune per-session without editing the +// file. +// +// The singleton [CtxRC] returned by [RC] is memoized via +// sync.Once so YAML is parsed at most once per process. // // # Concurrency // -// [RC] serializes initialization through rcOnce. -// Read accessors hold an RLock; the only writer is -// the test-only [Reset]. CLI override mutation goes -// through a brief Lock(). +// [RC] serializes initialization through rcOnce. Read accessors +// hold an RLock; the only writer is the test-only [Reset]. CLI +// override mutation goes through a brief Lock(). package rc diff --git a/internal/rc/load.go b/internal/rc/load.go index c9f47efa8..801af3ad7 100644 --- a/internal/rc/load.go +++ b/internal/rc/load.go @@ -7,37 +7,85 @@ package rc import ( + "errors" "os" + "path/filepath" "strconv" "gopkg.in/yaml.v3" "github.com/ActiveMemory/ctx/internal/config/env" "github.com/ActiveMemory/ctx/internal/config/file" + cfgWarn "github.com/ActiveMemory/ctx/internal/config/warn" + errCtx "github.com/ActiveMemory/ctx/internal/err/context" ctxIo "github.com/ActiveMemory/ctx/internal/io" + logWarn "github.com/ActiveMemory/ctx/internal/log/warn" writeRC "github.com/ActiveMemory/ctx/internal/write/rc" ) -// load loads configuration from the .ctxrc file and applies env -// overrides. +// load builds the runtime configuration under the +// single-source-anchor model +// (spec: specs/single-source-context-anchor.md). 
+// +// Lookup rules: +// +// - When a context directory has been declared via CTX_DIR, +// `.ctxrc` is read from +// `filepath.Dir(ContextDir()) + "/.ctxrc"`: the project root, +// which by contract is the parent of [ContextDir]. CWD has no +// say. This is the "configuration belongs to the project root" +// rule. +// - When no context directory is declared, `.ctxrc` is not read +// at all: there is no project to configure. Defaults apply. +// - Environment overrides (CTX_TOKEN_BUDGET) are applied after the +// YAML merge so users can tune per-session without editing the +// file. // // Returns: -// - *CtxRC: Configuration with file values and env overrides applied +// - *CtxRC: Configuration with file values (when .ctxrc is +// readable) and environment overrides applied. func load() *CtxRC { cfg := Default() - // Try to load .ctxrc from the current directory - data, readErr := ctxIo.SafeReadUserFile(file.CtxRC) - if readErr == nil { - if yamlErr := yaml.Unmarshal(data, cfg); yamlErr != nil { - writeRC.ParseWarning(file.CtxRC, yamlErr) + rcPath, pathErr := ctxrcPath() + switch { + case pathErr == nil: + data, readErr := ctxIo.SafeReadUserFile(rcPath) + if readErr == nil { + if yamlErr := yaml.Unmarshal(data, cfg); yamlErr != nil { + writeRC.ParseWarning(rcPath, yamlErr) + } } + case errors.Is(pathErr, errCtx.ErrDirNotDeclared): + // CTX_DIR not declared. **Expected** for exempt commands + // (ctx init, activate, deactivate, doctor, version, + // hub *, etc.) that legitimately call accessors before + // any project exists; defaults are the right answer for + // them. **Unexpected** for operating commands, which + // should have been gated by [bootstrap/cmd.go]'s + // PersistentPreRunE call to RequireContextDir before + // reaching any RC accessor. + // + // If an operating command ever slips past that gate, this + // branch would silently hand back default config + // (token_budget = 8000, auto_archive = true, etc.) 
and + // the user's .ctxrc settings would be invisibly ignored. + // Emit a stderr breadcrumb so the silence is visible: + // loud enough to surface during a missed-gate regression + // in dev / CI, quiet enough to ignore in legitimate + // exempt flows. Defaults still apply so the command can + // keep running. + logWarn.Warn(cfgWarn.RCNoContextDir) + default: + // Unexpected resolver failure (relative path, + // non-canonical basename, etc.). Surface loudly rather + // than swallowing; defaults still apply so commands that + // do not require a project can still boot. Same noisy-TUI + // principle documented on resolve.DirLine / + // resolve.AppendDir. + logWarn.Warn(cfgWarn.ContextDirResolve, pathErr) } - // Apply environment variable overrides - if envDir := os.Getenv(env.CtxDir); envDir != "" { - cfg.ContextDir = envDir - } if envBudget := os.Getenv(env.CtxTokenBudget); envBudget != "" { budget, parseErr := strconv.Atoi(envBudget) if parseErr == nil && budget > 0 { @@ -47,3 +95,20 @@ func load() *CtxRC { return cfg } + +// ctxrcPath returns the absolute path to the `.ctxrc` file adjacent +// to the declared context directory. +// +// Returns: +// - string: Absolute path to .ctxrc on success; "" on error. +// - error: errCtx.ErrDirNotDeclared when no context directory has +// been declared; any other resolver error from ContextDir is +// propagated unchanged so the caller decides policy rather than +// this helper silently returning an empty path. +func ctxrcPath() (string, error) { + ctxDir, err := ContextDir() + if err != nil { + return "", err + } + return filepath.Join(filepath.Dir(ctxDir), file.CtxRC), nil +} diff --git a/internal/rc/lock.go b/internal/rc/lock.go index c219675c9..84870c2a5 100644 --- a/internal/rc/lock.go +++ b/internal/rc/lock.go @@ -8,15 +8,13 @@ package rc import "sync" -// rc, rcOnce, rcOverrideDir, and rcMu hold the singleton runtime -// configuration loaded once from .ctxrc via sync.Once. 
+// rc, rcOnce, and rcMu hold the singleton runtime configuration +// loaded once from .ctxrc via sync.Once. var ( // rc holds the singleton runtime configuration. rc *CtxRC // rcOnce guards one-time configuration loading. rcOnce sync.Once - // rcOverrideDir overrides the config search directory. - rcOverrideDir string - // rcMu protects concurrent access to rc and rcOverrideDir. + // rcMu protects concurrent access to rc. rcMu sync.RWMutex ) diff --git a/internal/rc/rc.go b/internal/rc/rc.go index c29b6c799..7d0052a7f 100644 --- a/internal/rc/rc.go +++ b/internal/rc/rc.go @@ -7,15 +7,18 @@ package rc import ( + "os" "path/filepath" "sync" "github.com/ActiveMemory/ctx/internal/config/ctx" "github.com/ActiveMemory/ctx/internal/config/dir" cfgEntry "github.com/ActiveMemory/ctx/internal/config/entry" + "github.com/ActiveMemory/ctx/internal/config/env" cfgMemory "github.com/ActiveMemory/ctx/internal/config/memory" "github.com/ActiveMemory/ctx/internal/config/parser" "github.com/ActiveMemory/ctx/internal/crypto" + errCtx "github.com/ActiveMemory/ctx/internal/err/context" ) // Default returns a new CtxRC with hardcoded default values. @@ -25,7 +28,6 @@ import ( // (8000 token budget, 7-day archive, etc.) func Default() *CtxRC { return &CtxRC{ - ContextDir: dir.Context, TokenBudget: DefaultTokenBudget, PriorityOrder: nil, // nil means use config.ReadOrder AutoArchive: true, @@ -40,10 +42,16 @@ func Default() *CtxRC { } } -// RC returns the loaded configuration, initializing it on the first call. +// RC returns the loaded configuration, initializing it on the first +// call. // -// It loads from .ctxrc if present, then applies environment overrides. -// The result is cached for subsequent calls. +// Under the single-source-anchor resolution model +// (spec: specs/single-source-context-anchor.md), `.ctxrc` is read +// from `filepath.Dir(ContextDir())/.ctxrc`: the project root, which +// by contract is the parent of [ContextDir]. CWD has no say. 
When +// no context directory is declared, `.ctxrc` is not read and +// defaults apply. Environment overrides (CTX_TOKEN_BUDGET) are +// applied afterward. The result is cached for subsequent calls. // // Returns: // - *CtxRC: The loaded and cached configuration @@ -54,34 +62,59 @@ func RC() *CtxRC { return rc } -// ContextDir returns the configured context directory as an absolute path. -// -// Resolution order: -// 1. CLI override (rcOverrideDir): returned as absolute, no walk. -// 2. Configured absolute path (.ctxrc or env var): returned as-is. -// 3. Upward walk from CWD: the first ancestor containing an existing -// directory whose basename matches the configured name wins. -// 4. Fallback: filepath.Join(cwd, configuredName) as absolute. Preserves -// ctx init's ability to create a new context directory at CWD. -// -// The walk allows commands and hooks invoked from project subdirectories -// to resolve the project-root context dir instead of creating stray state -// files inside the subdirectory. The walk result is cached for the life -// of the process; tests can call Reset to invalidate the cache. -// -// Returns: -// - string: Absolute path to the context directory -func ContextDir() string { - rcMu.RLock() - override := rcOverrideDir - rcMu.RUnlock() - if override != "" { - if abs, err := filepath.Abs(override); err == nil { - return abs - } - return override +// ContextDir returns the context directory as a cleaned absolute +// path after validating its declaration *shape*. +// +// This is the **declaration shape** validator: it observes [env.CtxDir] +// and checks the value is set, absolute, and canonically named. It +// performs **no filesystem syscalls**. Diagnostic callers that must +// describe declared state without erroring on broken state (for +// example, the `check-anchor-drift` hook) use this directly. +// +// Operating callers that need a usable directory should call +// [RequireContextDir] instead; it adds the boundary stat/IsDir +// checks. 
Mixing the two is a convention violation: an operating +// caller getting a shape-valid but non-existent path here would +// surface as a confusing downstream error +// (`open .../TASKS.md: no such file or directory`) instead of the +// friendly tailored not-found error from [RequireContextDir]. +// +// Rejection conditions, in order: +// +// 1. Unset/empty: [errCtx.ErrDirNotDeclared]. +// 2. Relative path (not [filepath.IsAbs]): +// [errCtx.ErrRelativeNotAllowed]. Absolute-only is a hardline: +// `filepath.Abs` *would* absolutize via cwd, exactly the silent +// cwd-dependency this resolver is meant to eliminate. +// 3. Cleaned basename != [dir.Context]: [errCtx.ErrNonCanonicalBasename]. +// Catches the common footgun `export CTX_DIR=$(pwd)` (project +// root instead of the `.context` subdirectory) on first use +// rather than letting init deposit canonical files in the +// project root. +// +// [filepath.Clean] runs unconditionally to normalize separators, +// dot segments, and trailing slashes, but the input itself must be +// absolute. Symlinks are not resolved: the basename guard checks +// the *declared* name, not the symlink target name. +// +// Returns: +// - string: cleaned absolute path when declared and shape-valid; +// "" on error. +// - error: [errCtx.ErrDirNotDeclared] / [errCtx.ErrRelativeNotAllowed] +// / [errCtx.ErrNonCanonicalBasename] depending on what failed. +func ContextDir() (string, error) { + raw := os.Getenv(env.CtxDir) + if raw == "" { + return "", errCtx.ErrDirNotDeclared + } + if !filepath.IsAbs(raw) { + return "", errCtx.RelativeNotAllowed(raw) } - return walkForContextDir(RC().ContextDir) + abs := filepath.Clean(raw) + if filepath.Base(abs) != dir.Context { + return "", errCtx.NonCanonicalBasename(filepath.Base(abs)) + } + return abs, nil } // TokenBudget returns the configured default token budget. @@ -218,14 +251,30 @@ func NotifyEvents() []string { // KeyPath returns the resolved encryption key file path. 
// -// Priority: key_path in .ctxrc (explicit) > project-local +// Under the explicit-context-dir model the caller must have a +// declared context directory. The previous implementation silently +// handed "" to [crypto.ResolveKeyPath] when ContextDir failed, which +// either filepath.Join'd a CWD-relative `.ctx.key` path or fell +// through to the global `~/.ctx/.ctx.key`: exactly the class of +// silent-wrong-location / wrong-key-rotation bug this branch aims +// to eliminate. The error is propagated instead so callers handle +// the absence of a project rather than rotating encryption against +// a surprise key. // -// (.context/.ctx.key) > global (~/.ctx/.ctx.key). +// Within ResolveKeyPath the existing priority still applies: +// key_path in .ctxrc (explicit) > project-local +// (.context/.ctx.key) > global (~/.ctx/.ctx.key). // // Returns: // - string: Resolved path to the encryption key file -func KeyPath() string { - return crypto.ResolveKeyPath(ContextDir(), RC().KeyPathOverride) +// - error: [errCtx.ErrDirNotDeclared] or any other ContextDir +// resolver failure, propagated unchanged +func KeyPath() (string, error) { + ctxDir, err := ContextDir() + if err != nil { + return "", err + } + return crypto.ResolveKeyPath(ctxDir, RC().KeyPathOverride), nil } // KeyRotationDays returns the configured key rotation threshold in days. @@ -479,28 +528,6 @@ func HooksEnabled() bool { return true } -// AllowOutsideCwd returns whether boundary validation should be skipped. -// -// Returns false (default) when the field is not set in .ctxrc. -// -// Returns: -// - bool: True if the context directory is allowed outside the project root -func AllowOutsideCwd() bool { - return RC().AllowOutsideCwd -} - -// OverrideContextDir sets a CLI-provided override for the context directory. -// -// This takes precedence over all other configuration sources. 
-// -// Parameters: -// - ctxDir: Directory path to use as an override -func OverrideContextDir(ctxDir string) { - rcMu.Lock() - defer rcMu.Unlock() - rcOverrideDir = ctxDir -} - // Reset clears the cached configuration, forcing // reload on the next access. func Reset() { @@ -508,7 +535,6 @@ func Reset() { defer rcMu.Unlock() rcOnce = sync.Once{} rc = nil - rcOverrideDir = "" } // FilePriority returns the priority of a context file. diff --git a/internal/rc/rc_test.go b/internal/rc/rc_test.go index d91a5fc71..15fa567dd 100644 --- a/internal/rc/rc_test.go +++ b/internal/rc/rc_test.go @@ -7,6 +7,7 @@ package rc import ( + "errors" "os" "path/filepath" "testing" @@ -14,14 +15,44 @@ import ( "github.com/ActiveMemory/ctx/internal/config/ctx" "github.com/ActiveMemory/ctx/internal/config/dir" "github.com/ActiveMemory/ctx/internal/config/env" + errCtx "github.com/ActiveMemory/ctx/internal/err/context" ) +// declareContext sets up a tempDir layout with a .context/ directory +// and a .ctxrc at the project root (the parent of CTX_DIR), declares +// CTX_DIR via t.Setenv, and resets the rc singleton. The helper +// matches the single-source-anchor resolution model +// (spec: specs/single-source-context-anchor.md): .ctxrc is read from +// filepath.Dir(ContextDir())/.ctxrc, not CWD. +// +// Parameters: +// - t: test handle for Setenv/TempDir/Cleanup wiring. +// - content: YAML body to write into .ctxrc; empty for "no file". +// +// Returns: +// - string: absolute path of the declared .context/ directory. 
+func declareContext(t *testing.T, content string) string { + t.Helper() + tempDir := t.TempDir() + ctxDir := filepath.Join(tempDir, dir.Context) + if mkErr := os.MkdirAll(ctxDir, 0700); mkErr != nil { + t.Fatalf("mkdir .context: %v", mkErr) + } + if content != "" { + rcPath := filepath.Join(tempDir, ".ctxrc") + if wrErr := os.WriteFile(rcPath, []byte(content), 0600); wrErr != nil { + t.Fatalf("write .ctxrc: %v", wrErr) + } + } + t.Setenv(env.CtxDir, ctxDir) + Reset() + t.Cleanup(Reset) + return ctxDir +} + func TestDefaultRC(t *testing.T) { rc := Default() - if rc.ContextDir != dir.Context { - t.Errorf("ContextDir = %q, want %q", rc.ContextDir, dir.Context) - } if rc.TokenBudget != DefaultTokenBudget { t.Errorf("TokenBudget = %d, want %d", rc.TokenBudget, DefaultTokenBudget) } @@ -39,49 +70,40 @@ func TestDefaultRC(t *testing.T) { } } +// TestGetRC_NoFile: no CTX_DIR declared and no .ctxrc anywhere → +// defaults apply. func TestGetRC_NoFile(t *testing.T) { - // Change to temp directory with no .ctxrc tempDir := t.TempDir() - origDir, _ := os.Getwd() - _ = os.Chdir(tempDir) - defer func() { _ = os.Chdir(origDir) }() + t.Chdir(tempDir) + // Ensure no env leak from other tests. + t.Setenv(env.CtxDir, "") Reset() + t.Cleanup(Reset) rc := RC() - if rc.ContextDir != dir.Context { - t.Errorf("ContextDir = %q, want %q", rc.ContextDir, dir.Context) - } if rc.TokenBudget != DefaultTokenBudget { t.Errorf("TokenBudget = %d, want %d", rc.TokenBudget, DefaultTokenBudget) } + if !rc.AutoArchive { + t.Error("AutoArchive = false, want true (default)") + } } +// TestGetRC_WithFile: CTX_DIR declared, .ctxrc adjacent → values +// picked up. 
func TestGetRC_WithFile(t *testing.T) { - tempDir := t.TempDir() - origDir, _ := os.Getwd() - _ = os.Chdir(tempDir) - defer func() { _ = os.Chdir(origDir) }() - - // Create .ctxrc file - rcContent := `context_dir: custom-context -token_budget: 4000 + declareContext(t, `token_budget: 4000 priority_order: - TASKS.md - DECISIONS.md auto_archive: false archive_after_days: 14 -` - _ = os.WriteFile(filepath.Join(tempDir, ".ctxrc"), []byte(rcContent), 0600) - - Reset() +`) rc := RC() - if rc.ContextDir != "custom-context" { - t.Errorf("ContextDir = %q, want %q", rc.ContextDir, "custom-context") - } if rc.TokenBudget != 4000 { t.Errorf("TokenBudget = %d, want %d", rc.TokenBudget, 4000) } @@ -96,97 +118,355 @@ archive_after_days: 14 } } -func TestGetRC_EnvOverrides(t *testing.T) { - tempDir := t.TempDir() - origDir, _ := os.Getwd() - _ = os.Chdir(tempDir) - defer func() { _ = os.Chdir(origDir) }() - - // Create .ctxrc file - rcContent := `context_dir: file-context -token_budget: 4000 -` - _ = os.WriteFile(filepath.Join(tempDir, ".ctxrc"), []byte(rcContent), 0600) - - // Set environment variables (t.Setenv auto-restores after test) - t.Setenv(env.CtxDir, "env-context") +// TestGetRC_TokenBudgetEnvOverride: CTX_TOKEN_BUDGET beats .ctxrc. +func TestGetRC_TokenBudgetEnvOverride(t *testing.T) { + declareContext(t, `token_budget: 4000`) t.Setenv(env.CtxTokenBudget, "2000") - Reset() rc := RC() - - // Env should override file - if rc.ContextDir != "env-context" { - t.Errorf( - "ContextDir = %q, want %q (env override)", - rc.ContextDir, "env-context", - ) - } if rc.TokenBudget != 2000 { t.Errorf("TokenBudget = %d, want %d (env override)", rc.TokenBudget, 2000) } } -func TestGetContextDir_CLIOverride(t *testing.T) { - tempDir := t.TempDir() - origDir, _ := os.Getwd() - _ = os.Chdir(tempDir) - defer func() { _ = os.Chdir(origDir) }() +// TestContextDir_RejectsUnset: CTX_DIR unset → ErrDirNotDeclared. 
+func TestContextDir_RejectsUnset(t *testing.T) { + t.Setenv(env.CtxDir, "") + Reset() + t.Cleanup(Reset) - // Create .ctxrc file - rcContent := `context_dir: file-context` - _ = os.WriteFile(filepath.Join(tempDir, ".ctxrc"), []byte(rcContent), 0600) + got, err := ContextDir() + if !errors.Is(err, errCtx.ErrDirNotDeclared) { + t.Errorf("ContextDir() err = %v, want ErrDirNotDeclared", err) + } + if got != "" { + t.Errorf("ContextDir() = %q, want \"\"", got) + } +} - // Set env override (t.Setenv auto-restores after test) - t.Setenv(env.CtxDir, "env-context") +// TestContextDir_RejectsEmpty: CTX_DIR set to empty string is +// treated as unset. Spec contract: declared-or-not, no +// in-between. +func TestContextDir_RejectsEmpty(t *testing.T) { + t.Setenv(env.CtxDir, "") + Reset() + t.Cleanup(Reset) + + _, err := ContextDir() + if !errors.Is(err, errCtx.ErrDirNotDeclared) { + t.Errorf("ContextDir() err = %v, want ErrDirNotDeclared", err) + } +} +// TestContextDir_RejectsRelative_DotContext: critical regression +// guard against silent cwd-dependency. Without IsAbs check, +// CTX_DIR=.context would be cwd-absolutized via filepath.Abs and +// pass the basename guard, defeating the resolver. +func TestContextDir_RejectsRelative_DotContext(t *testing.T) { + t.Setenv(env.CtxDir, ".context") Reset() + t.Cleanup(Reset) + + got, err := ContextDir() + if !errors.Is(err, errCtx.ErrRelativeNotAllowed) { + t.Errorf("ContextDir() err = %v, want ErrRelativeNotAllowed", err) + } + if got != "" { + t.Errorf("ContextDir() = %q, want \"\"", got) + } +} - // CLI override takes precedence over all - OverrideContextDir("cli-context") - defer Reset() +// TestContextDir_RejectsRelative_DotSlashContext: another shape of +// relative path, same expected error. +func TestContextDir_RejectsRelative_DotSlashContext(t *testing.T) { + t.Setenv(env.CtxDir, "./.context") + Reset() + t.Cleanup(Reset) - got := ContextDir() - // Contract: ContextDir() always returns an absolute path. 
- // A relative CLI override is resolved against the current working - // directory. - wantAbs, _ := filepath.Abs("cli-context") - if got != wantAbs { - t.Errorf("ContextDir() = %q, want %q (CLI override)", got, wantAbs) + _, err := ContextDir() + if !errors.Is(err, errCtx.ErrRelativeNotAllowed) { + t.Errorf("ContextDir() err = %v, want ErrRelativeNotAllowed", err) } } -func TestGetTokenBudget(t *testing.T) { +// TestContextDir_RejectsRelative_DotDot: dot-dot relative path +// also rejected. +func TestContextDir_RejectsRelative_DotDot(t *testing.T) { + t.Setenv(env.CtxDir, "../foo/.context") + Reset() + t.Cleanup(Reset) + + _, err := ContextDir() + if !errors.Is(err, errCtx.ErrRelativeNotAllowed) { + t.Errorf("ContextDir() err = %v, want ErrRelativeNotAllowed", err) + } +} + +// TestContextDir_RejectsNonCanonicalBasename: catches the common +// `export CTX_DIR=$(pwd)` footgun on first use rather than +// letting init deposit canonical files in the project root. +func TestContextDir_RejectsNonCanonicalBasename(t *testing.T) { + t.Setenv(env.CtxDir, "/tmp/notdotcontext") + Reset() + t.Cleanup(Reset) + + _, err := ContextDir() + if !errors.Is(err, errCtx.ErrNonCanonicalBasename) { + t.Errorf("ContextDir() err = %v, want ErrNonCanonicalBasename", err) + } + if err != nil && !contains(err.Error(), "notdotcontext") { + t.Errorf("err message %q should include offending basename", err.Error()) + } +} + +// TestContextDir_RejectsRoot: filepath.Base("/") returns "/", not +// ".context", so root path is rejected by the basename guard. +func TestContextDir_RejectsRoot(t *testing.T) { + t.Setenv(env.CtxDir, "/") + Reset() + t.Cleanup(Reset) + + _, err := ContextDir() + if !errors.Is(err, errCtx.ErrNonCanonicalBasename) { + t.Errorf("ContextDir() err = %v, want ErrNonCanonicalBasename", err) + } +} + +// TestContextDir_AcceptsCanonical: canonical absolute `.context` +// path is the happy path. 
+func TestContextDir_AcceptsCanonical(t *testing.T) { + t.Setenv(env.CtxDir, "/tmp/.context") + Reset() + t.Cleanup(Reset) + + got, err := ContextDir() + if err != nil { + t.Fatalf("ContextDir() err = %v, want nil", err) + } + if got != "/tmp/.context" { + t.Errorf("ContextDir() = %q, want %q", got, "/tmp/.context") + } +} + +// TestContextDir_NormalizesTrailingSlash: filepath.Clean strips +// trailing slash; basename guard still passes. +func TestContextDir_NormalizesTrailingSlash(t *testing.T) { + t.Setenv(env.CtxDir, "/tmp/.context/") + Reset() + t.Cleanup(Reset) + + got, err := ContextDir() + if err != nil { + t.Fatalf("ContextDir() err = %v, want nil", err) + } + if got != "/tmp/.context" { + t.Errorf("ContextDir() = %q, want %q", got, "/tmp/.context") + } +} + +// TestContextDir_NormalizesDotSegments: filepath.Clean +// canonicalizes dot segments. +func TestContextDir_NormalizesDotSegments(t *testing.T) { + t.Setenv(env.CtxDir, "/tmp/./.context") + Reset() + t.Cleanup(Reset) + + got, err := ContextDir() + if err != nil { + t.Fatalf("ContextDir() err = %v, want nil", err) + } + if got != "/tmp/.context" { + t.Errorf("ContextDir() = %q, want %q", got, "/tmp/.context") + } +} + +// TestContextDir_AcceptsSymlinkNamedDotContext: a symlink whose +// basename is `.context` (regardless of where it points) passes +// the basename guard. The resolver checks the *declared* name, +// not the symlink target name. 
+func TestContextDir_AcceptsSymlinkNamedDotContext(t *testing.T) { tempDir := t.TempDir() - origDir, _ := os.Getwd() - _ = os.Chdir(tempDir) - defer func() { _ = os.Chdir(origDir) }() + target := filepath.Join(tempDir, "actual-target") + if err := os.MkdirAll(target, 0700); err != nil { + t.Fatalf("mkdir target: %v", err) + } + link := filepath.Join(tempDir, dir.Context) + if err := os.Symlink(target, link); err != nil { + t.Skipf("symlink unsupported: %v", err) + } + t.Setenv(env.CtxDir, link) + Reset() + t.Cleanup(Reset) + got, err := ContextDir() + if err != nil { + t.Fatalf("ContextDir() err = %v, want nil", err) + } + if got != link { + t.Errorf("ContextDir() = %q, want %q (declared symlink path)", got, link) + } +} + +// contains is a small helper for substring checks in error +// messages. Avoids pulling strings.Contains everywhere. +func contains(haystack, needle string) bool { + for i := 0; i+len(needle) <= len(haystack); i++ { + if haystack[i:i+len(needle)] == needle { + return true + } + } + return false +} + +// TestContextDir_Unset: no env declaration → errCtx.ErrDirNotDeclared. +// Under the single-source-anchor model, unset is a valid signal used by +// exempt commands and rc.RequireContextDir's error path. +func TestContextDir_Unset(t *testing.T) { + tempDir := t.TempDir() + t.Chdir(tempDir) + t.Setenv(env.CtxDir, "") Reset() + t.Cleanup(Reset) - // Default value - budget := TokenBudget() - if budget != DefaultTokenBudget { - t.Errorf("TokenBudget() = %d, want %d", budget, DefaultTokenBudget) + got, err := ContextDir() + if err == nil { + t.Errorf("ContextDir() err = nil, want errCtx.ErrDirNotDeclared") + } + if got != "" { + t.Errorf("ContextDir() = %q, want \"\" (unset)", got) } } -func TestGetRC_InvalidYAML(t *testing.T) { +// TestContextDir_EnvOnly: CTX_DIR env set with canonical absolute +// `.context` path → resolves to that path. 
+func TestContextDir_EnvOnly(t *testing.T) { tempDir := t.TempDir() - origDir, _ := os.Getwd() - _ = os.Chdir(tempDir) - defer func() { _ = os.Chdir(origDir) }() + target := filepath.Join(tempDir, dir.Context) + _ = os.MkdirAll(target, 0700) + t.Setenv(env.CtxDir, target) + Reset() + t.Cleanup(Reset) + + got, err := ContextDir() + if err != nil { + t.Fatalf("ContextDir() err = %v, want nil", err) + } + if !filepath.IsAbs(got) { + t.Errorf("ContextDir() = %q, want absolute path", got) + } + gotResolved, _ := filepath.EvalSymlinks(got) + wantResolved, _ := filepath.EvalSymlinks(target) + if gotResolved != wantResolved { + t.Errorf("ContextDir() = %q, want %q (env)", gotResolved, wantResolved) + } +} - // Create invalid .ctxrc file - _ = os.WriteFile( - filepath.Join(tempDir, ".ctxrc"), - []byte("invalid: [yaml: content"), 0600, - ) +// TestRequireContextDir_Declared: a declared CTX_DIR yields the +// path and no error. +func TestRequireContextDir_Declared(t *testing.T) { + ctxDir := declareContext(t, "") + + got, err := RequireContextDir() + if err != nil { + t.Fatalf("RequireContextDir() err = %v, want nil", err) + } + gotResolved, _ := filepath.EvalSymlinks(got) + wantResolved, _ := filepath.EvalSymlinks(ctxDir) + if gotResolved != wantResolved { + t.Errorf("RequireContextDir() = %q, want %q", gotResolved, wantResolved) + } +} +// TestRequireContextDir_Undeclared: no override, no env → error +// with a tailored, non-empty message. 
+func TestRequireContextDir_Undeclared(t *testing.T) { + tempDir := t.TempDir() + t.Chdir(tempDir) + t.Setenv(env.CtxDir, "") Reset() + t.Cleanup(Reset) - // Should return defaults on invalid YAML + got, err := RequireContextDir() + if err == nil { + t.Fatalf("RequireContextDir() err = nil, want non-nil") + } + if got != "" { + t.Errorf("RequireContextDir() path = %q, want \"\" on error", got) + } + if msg := err.Error(); msg == "" { + t.Error("RequireContextDir() returned empty error message") + } +} + +// TestScanCandidates_NoMatches: empty tree → empty slice. +func TestScanCandidates_NoMatches(t *testing.T) { + tempDir := t.TempDir() + t.Chdir(tempDir) + + got := ScanCandidates(tempDir) + if len(got) != 0 { + t.Errorf("ScanCandidates() = %v, want []", got) + } +} + +// TestScanCandidates_SelfMatch: .context/ exists at start dir → +// one candidate, same path. +func TestScanCandidates_SelfMatch(t *testing.T) { + tempDir := t.TempDir() + ctxPath := filepath.Join(tempDir, dir.Context) + _ = os.MkdirAll(ctxPath, 0700) + + got := ScanCandidates(tempDir) + if len(got) != 1 { + t.Fatalf("ScanCandidates() len = %d, want 1", len(got)) + } + + wantResolved, _ := filepath.EvalSymlinks(ctxPath) + gotResolved, _ := filepath.EvalSymlinks(got[0]) + if gotResolved != wantResolved { + t.Errorf("ScanCandidates()[0] = %q, want %q", gotResolved, wantResolved) + } +} + +// TestScanCandidates_ManyAncestors: nested .context/ dirs upward +// are all returned, innermost first. 
+func TestScanCandidates_ManyAncestors(t *testing.T) { + tempDir := t.TempDir() + inner := filepath.Join(tempDir, "inner", "deep") + innerCtx := filepath.Join(tempDir, "inner", dir.Context) + outerCtx := filepath.Join(tempDir, dir.Context) + + for _, d := range []string{inner, innerCtx, outerCtx} { + if mkErr := os.MkdirAll(d, 0700); mkErr != nil { + t.Fatalf("mkdir %s: %v", d, mkErr) + } + } + + got := ScanCandidates(inner) + if len(got) < 2 { + t.Fatalf("ScanCandidates() len = %d, want >= 2", len(got)) + } + + // Innermost first: the first candidate must be in the parent of + // the start dir (i.e., inner/.context). + innerResolved, _ := filepath.EvalSymlinks(innerCtx) + gotInner, _ := filepath.EvalSymlinks(got[0]) + if gotInner != innerResolved { + t.Errorf("ScanCandidates()[0] = %q, want %q (innermost)", gotInner, innerResolved) + } +} + +func TestGetTokenBudget(t *testing.T) { + declareContext(t, "") + budget := TokenBudget() + if budget != DefaultTokenBudget { + t.Errorf("TokenBudget() = %d, want %d", budget, DefaultTokenBudget) + } +} + +func TestGetRC_InvalidYAML(t *testing.T) { + declareContext(t, "invalid: [yaml: content") rc := RC() if rc.TokenBudget != DefaultTokenBudget { t.Errorf( @@ -197,40 +477,21 @@ func TestGetRC_InvalidYAML(t *testing.T) { } func TestGetRC_PartialConfig(t *testing.T) { - tempDir := t.TempDir() - origDir, _ := os.Getwd() - _ = os.Chdir(tempDir) - defer func() { _ = os.Chdir(origDir) }() - - // Create .ctxrc with only some fields - rcContent := `token_budget: 5000` - _ = os.WriteFile(filepath.Join(tempDir, ".ctxrc"), []byte(rcContent), 0600) - - Reset() - + declareContext(t, `token_budget: 5000`) rc := RC() - - // Specified value should be used if rc.TokenBudget != 5000 { t.Errorf("TokenBudget = %d, want %d", rc.TokenBudget, 5000) } - // Unspecified values should use defaults - if rc.ContextDir != dir.Context { - t.Errorf("ContextDir = %q, want %q (default)", rc.ContextDir, dir.Context) + if rc.ArchiveAfterDays != 
DefaultArchiveAfterDays { + t.Errorf("ArchiveAfterDays = %d, want default", rc.ArchiveAfterDays) } } func TestGetRC_InvalidEnvBudget(t *testing.T) { - tempDir := t.TempDir() - origDir, _ := os.Getwd() - _ = os.Chdir(tempDir) - defer func() { _ = os.Chdir(origDir) }() - + declareContext(t, "") t.Setenv(env.CtxTokenBudget, "not-a-number") - Reset() - // Invalid env should be ignored, use default rc := RC() if rc.TokenBudget != DefaultTokenBudget { t.Errorf( @@ -240,51 +501,42 @@ func TestGetRC_InvalidEnvBudget(t *testing.T) { } } -func TestGetRC_Singleton(t *testing.T) { - tempDir := t.TempDir() - origDir, _ := os.Getwd() - _ = os.Chdir(tempDir) - defer func() { _ = os.Chdir(origDir) }() - +func TestGetRC_NegativeEnvBudget(t *testing.T) { + declareContext(t, "") + t.Setenv(env.CtxTokenBudget, "-100") Reset() + rc := RC() + if rc.TokenBudget != DefaultTokenBudget { + t.Errorf( + "TokenBudget = %d, want %d (default on negative env)", + rc.TokenBudget, DefaultTokenBudget, + ) + } +} + +func TestGetRC_Singleton(t *testing.T) { + declareContext(t, "") rc1 := RC() rc2 := RC() - if rc1 != rc2 { t.Error("RC() should return same instance") } } func TestPriorityOrder(t *testing.T) { - tempDir := t.TempDir() - origDir, _ := os.Getwd() - _ = os.Chdir(tempDir) - defer func() { _ = os.Chdir(origDir) }() - - Reset() - - // Default has nil PriorityOrder - order := PriorityOrder() - if order != nil { + declareContext(t, "") + if order := PriorityOrder(); order != nil { t.Errorf("PriorityOrder() = %v, want nil", order) } } func TestPriorityOrder_Custom(t *testing.T) { - tempDir := t.TempDir() - origDir, _ := os.Getwd() - _ = os.Chdir(tempDir) - defer func() { _ = os.Chdir(origDir) }() - - rcContent := `priority_order: + declareContext(t, `priority_order: - TASKS.md - DECISIONS.md - LEARNINGS.md -` - _ = os.WriteFile(filepath.Join(tempDir, ".ctxrc"), []byte(rcContent), 0600) - - Reset() +`) order := PriorityOrder() if len(order) != 3 { @@ -296,43 +548,21 @@ func 
TestPriorityOrder_Custom(t *testing.T) { } func TestAutoArchive(t *testing.T) { - tempDir := t.TempDir() - origDir, _ := os.Getwd() - _ = os.Chdir(tempDir) - defer func() { _ = os.Chdir(origDir) }() - - Reset() - - // Default is true + declareContext(t, "") if !AutoArchive() { t.Error("AutoArchive() = false, want true") } } func TestAutoArchive_Disabled(t *testing.T) { - tempDir := t.TempDir() - origDir, _ := os.Getwd() - _ = os.Chdir(tempDir) - defer func() { _ = os.Chdir(origDir) }() - - rcContent := `auto_archive: false` - _ = os.WriteFile(filepath.Join(tempDir, ".ctxrc"), []byte(rcContent), 0600) - - Reset() - + declareContext(t, `auto_archive: false`) if AutoArchive() { t.Error("AutoArchive() = true, want false") } } func TestArchiveAfterDays(t *testing.T) { - tempDir := t.TempDir() - origDir, _ := os.Getwd() - _ = os.Chdir(tempDir) - defer func() { _ = os.Chdir(origDir) }() - - Reset() - + declareContext(t, "") days := ArchiveAfterDays() if days != DefaultArchiveAfterDays { t.Errorf("ArchiveAfterDays() = %d, want %d", days, DefaultArchiveAfterDays) @@ -340,16 +570,7 @@ func TestArchiveAfterDays(t *testing.T) { } func TestArchiveAfterDays_Custom(t *testing.T) { - tempDir := t.TempDir() - origDir, _ := os.Getwd() - _ = os.Chdir(tempDir) - defer func() { _ = os.Chdir(origDir) }() - - rcContent := `archive_after_days: 30` - _ = os.WriteFile(filepath.Join(tempDir, ".ctxrc"), []byte(rcContent), 0600) - - Reset() - + declareContext(t, `archive_after_days: 30`) days := ArchiveAfterDays() if days != 30 { t.Errorf("ArchiveAfterDays() = %d, want %d", days, 30) @@ -357,196 +578,70 @@ func TestArchiveAfterDays_Custom(t *testing.T) { } func TestScratchpadEncrypt_Default(t *testing.T) { - tempDir := t.TempDir() - origDir, _ := os.Getwd() - _ = os.Chdir(tempDir) - defer func() { _ = os.Chdir(origDir) }() - - Reset() - - // Default (nil pointer) should return true + declareContext(t, "") if !ScratchpadEncrypt() { t.Error("ScratchpadEncrypt() = false, want true (default)") } } 
func TestScratchpadEncrypt_Explicit(t *testing.T) { - tempDir := t.TempDir() - origDir, _ := os.Getwd() - _ = os.Chdir(tempDir) - defer func() { _ = os.Chdir(origDir) }() - - rcContent := `scratchpad_encrypt: false` - _ = os.WriteFile(filepath.Join(tempDir, ".ctxrc"), []byte(rcContent), 0600) - - Reset() - + declareContext(t, `scratchpad_encrypt: false`) if ScratchpadEncrypt() { t.Error("ScratchpadEncrypt() = true, want false") } } func TestScratchpadEncrypt_ExplicitTrue(t *testing.T) { - tempDir := t.TempDir() - origDir, _ := os.Getwd() - _ = os.Chdir(tempDir) - defer func() { _ = os.Chdir(origDir) }() - - rcContent := `scratchpad_encrypt: true` - _ = os.WriteFile(filepath.Join(tempDir, ".ctxrc"), []byte(rcContent), 0600) - - Reset() - + declareContext(t, `scratchpad_encrypt: true`) if !ScratchpadEncrypt() { t.Error("ScratchpadEncrypt() = false, want true") } } func TestFilePriority_DefaultOrder(t *testing.T) { - tempDir := t.TempDir() - origDir, _ := os.Getwd() - _ = os.Chdir(tempDir) - defer func() { _ = os.Chdir(origDir) }() + declareContext(t, "") - Reset() - - // CONSTITUTION.md should be first in default ReadOrder - p := FilePriority(ctx.Constitution) - if p != 1 { + if p := FilePriority(ctx.Constitution); p != 1 { t.Errorf("FilePriority(%q) = %d, want 1", ctx.Constitution, p) } - - // TASKS.md should be second - p = FilePriority(ctx.Task) - if p != 2 { + if p := FilePriority(ctx.Task); p != 2 { t.Errorf("FilePriority(%q) = %d, want 2", ctx.Task, p) } - - // Unknown file gets 100 - p = FilePriority("UNKNOWN.md") - if p != 100 { + if p := FilePriority("UNKNOWN.md"); p != 100 { t.Errorf("FilePriority(%q) = %d, want 100", "UNKNOWN.md", p) } } func TestFilePriority_CustomOrder(t *testing.T) { - tempDir := t.TempDir() - origDir, _ := os.Getwd() - _ = os.Chdir(tempDir) - defer func() { _ = os.Chdir(origDir) }() - - rcContent := `priority_order: + declareContext(t, `priority_order: - DECISIONS.md - TASKS.md -` - _ = os.WriteFile(filepath.Join(tempDir, ".ctxrc"), 
[]byte(rcContent), 0600) - - Reset() +`) - // DECISIONS.md should be first in custom order - p := FilePriority(ctx.Decision) - if p != 1 { + if p := FilePriority(ctx.Decision); p != 1 { t.Errorf("FilePriority(%q) = %d, want 1", ctx.Decision, p) } - - // TASKS.md should be second - p = FilePriority(ctx.Task) - if p != 2 { + if p := FilePriority(ctx.Task); p != 2 { t.Errorf("FilePriority(%q) = %d, want 2", ctx.Task, p) } - - // File not in custom order gets 100 - p = FilePriority("UNKNOWN.md") - if p != 100 { + if p := FilePriority("UNKNOWN.md"); p != 100 { t.Errorf("FilePriority(%q) = %d, want 100", "UNKNOWN.md", p) } } -func TestContextDir_NoOverride(t *testing.T) { - tempDir := t.TempDir() - origDir, _ := os.Getwd() - _ = os.Chdir(tempDir) - defer func() { _ = os.Chdir(origDir) }() - - Reset() - - got := ContextDir() - - // Contract: when no .context/ exists upward, ContextDir() falls - // back to filepath.Join(cwd, dir.Context) as an absolute path. - wantResolved, _ := filepath.EvalSymlinks(tempDir) - gotParent, _ := filepath.EvalSymlinks(filepath.Dir(got)) - - if gotParent != wantResolved { - t.Errorf("ContextDir() parent = %q, want %q", gotParent, wantResolved) - } - if filepath.Base(got) != dir.Context { - t.Errorf( - "ContextDir() base = %q, want %q", - filepath.Base(got), dir.Context, - ) - } -} - -func TestAllowOutsideCwd_Default(t *testing.T) { - tempDir := t.TempDir() - origDir, _ := os.Getwd() - _ = os.Chdir(tempDir) - defer func() { _ = os.Chdir(origDir) }() - - Reset() - - // Default is false - if AllowOutsideCwd() { - t.Error("AllowOutsideCwd() = true, want false (default)") - } -} - -func TestAllowOutsideCwd_Enabled(t *testing.T) { - tempDir := t.TempDir() - origDir, _ := os.Getwd() - _ = os.Chdir(tempDir) - defer func() { _ = os.Chdir(origDir) }() - - rcContent := `allow_outside_cwd: true` - _ = os.WriteFile(filepath.Join(tempDir, ".ctxrc"), []byte(rcContent), 0600) - - Reset() - - if !AllowOutsideCwd() { - t.Error("AllowOutsideCwd() = false, want 
true") - } -} - func TestNotifyEvents_Default(t *testing.T) { - tempDir := t.TempDir() - origDir, _ := os.Getwd() - _ = os.Chdir(tempDir) - defer func() { _ = os.Chdir(origDir) }() - - Reset() - - // Default (nil Notify) returns nil - events := NotifyEvents() - if events != nil { + declareContext(t, "") + if events := NotifyEvents(); events != nil { t.Errorf("NotifyEvents() = %v, want nil", events) } } func TestNotifyEvents_Configured(t *testing.T) { - tempDir := t.TempDir() - origDir, _ := os.Getwd() - _ = os.Chdir(tempDir) - defer func() { _ = os.Chdir(origDir) }() - - rcContent := `notify: + declareContext(t, `notify: events: - loop - nudge -` - _ = os.WriteFile(filepath.Join(tempDir, ".ctxrc"), []byte(rcContent), 0600) - - Reset() +`) events := NotifyEvents() if len(events) != 2 || events[0] != "loop" || events[1] != "nudge" { @@ -555,72 +650,35 @@ func TestNotifyEvents_Configured(t *testing.T) { } func TestKeyRotationDays_Default(t *testing.T) { - tempDir := t.TempDir() - origDir, _ := os.Getwd() - _ = os.Chdir(tempDir) - defer func() { _ = os.Chdir(origDir) }() - - Reset() - - days := KeyRotationDays() - if days != DefaultKeyRotationDays { + declareContext(t, "") + if days := KeyRotationDays(); days != DefaultKeyRotationDays { t.Errorf("KeyRotationDays() = %d, want %d", days, DefaultKeyRotationDays) } } func TestKeyRotationDays_Custom(t *testing.T) { - tempDir := t.TempDir() - origDir, _ := os.Getwd() - _ = os.Chdir(tempDir) - defer func() { _ = os.Chdir(origDir) }() - - rcContent := `key_rotation_days: 30 -` - _ = os.WriteFile(filepath.Join(tempDir, ".ctxrc"), []byte(rcContent), 0600) - - Reset() - - days := KeyRotationDays() - if days != 30 { + declareContext(t, `key_rotation_days: 30 +`) + if days := KeyRotationDays(); days != 30 { t.Errorf("KeyRotationDays() = %d, want %d", days, 30) } } func TestKeyRotationDays_LegacyNotify(t *testing.T) { - tempDir := t.TempDir() - origDir, _ := os.Getwd() - _ = os.Chdir(tempDir) - defer func() { _ = os.Chdir(origDir) 
}() - - rcContent := `notify: + declareContext(t, `notify: key_rotation_days: 45 -` - _ = os.WriteFile(filepath.Join(tempDir, ".ctxrc"), []byte(rcContent), 0600) - - Reset() - - days := KeyRotationDays() - if days != 45 { +`) + if days := KeyRotationDays(); days != 45 { t.Errorf("KeyRotationDays() = %d, want %d (legacy notify fallback)", days, 45) } } func TestKeyRotationDays_TopLevelTakesPrecedence(t *testing.T) { - tempDir := t.TempDir() - origDir, _ := os.Getwd() - _ = os.Chdir(tempDir) - defer func() { _ = os.Chdir(origDir) }() - - rcContent := `key_rotation_days: 60 + declareContext(t, `key_rotation_days: 60 notify: key_rotation_days: 45 -` - _ = os.WriteFile(filepath.Join(tempDir, ".ctxrc"), []byte(rcContent), 0600) - - Reset() - - days := KeyRotationDays() - if days != 60 { +`) + if days := KeyRotationDays(); days != 60 { t.Errorf( "KeyRotationDays() = %d, want %d (top-level takes precedence)", days, 60, @@ -629,13 +687,7 @@ notify: } func TestSessionPrefixes_Default(t *testing.T) { - tempDir := t.TempDir() - origDir, _ := os.Getwd() - _ = os.Chdir(tempDir) - defer func() { _ = os.Chdir(origDir) }() - - Reset() - + declareContext(t, "") prefixes := SessionPrefixes() if len(prefixes) != 1 || prefixes[0] != "Session:" { t.Errorf("SessionPrefixes() = %v, want [Session:]", prefixes) @@ -643,44 +695,22 @@ func TestSessionPrefixes_Default(t *testing.T) { } func TestSessionPrefixes_Custom(t *testing.T) { - tempDir := t.TempDir() - origDir, _ := os.Getwd() - _ = os.Chdir(tempDir) - defer func() { _ = os.Chdir(origDir) }() - - rcContent := "session_prefixes:\n" + - " - \"Session:\"\n" + - " - \"セッション:\"\n" + - " - \"Sesión:\"\n" - _ = os.WriteFile(filepath.Join(tempDir, ".ctxrc"), []byte(rcContent), 0600) - - Reset() + declareContext(t, "session_prefixes:\n"+ + " - \"Session:\"\n"+ + " - \"セッション:\"\n"+ + " - \"Sesión:\"\n") prefixes := SessionPrefixes() if len(prefixes) != 3 { t.Fatalf("SessionPrefixes() len = %d, want 3", len(prefixes)) } - if prefixes[0] != 
"Session:" { - t.Errorf("SessionPrefixes()[0] = %q, want %q", prefixes[0], "Session:") - } - if prefixes[1] != "セッション:" { - t.Errorf("SessionPrefixes()[1] = %q, want %q", prefixes[1], "セッション:") - } - if prefixes[2] != "Sesión:" { - t.Errorf("SessionPrefixes()[2] = %q, want %q", prefixes[2], "Sesión:") + if prefixes[0] != "Session:" || prefixes[1] != "セッション:" || prefixes[2] != "Sesión:" { + t.Errorf("SessionPrefixes() = %v", prefixes) } } func TestSessionPrefixes_EmptyFallsBackToDefault(t *testing.T) { - tempDir := t.TempDir() - origDir, _ := os.Getwd() - _ = os.Chdir(tempDir) - defer func() { _ = os.Chdir(origDir) }() - - rcContent := "session_prefixes: []\n" - _ = os.WriteFile(filepath.Join(tempDir, ".ctxrc"), []byte(rcContent), 0600) - - Reset() + declareContext(t, "session_prefixes: []\n") prefixes := SessionPrefixes() if len(prefixes) != 1 || prefixes[0] != "Session:" { @@ -691,270 +721,80 @@ func TestSessionPrefixes_EmptyFallsBackToDefault(t *testing.T) { } } -func TestGetRC_NegativeEnvBudget(t *testing.T) { - tempDir := t.TempDir() - origDir, _ := os.Getwd() - _ = os.Chdir(tempDir) - defer func() { _ = os.Chdir(origDir) }() - - t.Setenv(env.CtxTokenBudget, "-100") - - Reset() - - // Negative budget should be ignored (budget > 0 check) - rc := RC() - if rc.TokenBudget != DefaultTokenBudget { - t.Errorf( - "TokenBudget = %d, want %d (default on negative env)", - rc.TokenBudget, DefaultTokenBudget, - ) - } -} - -// --- Hooks & Steering RC field tests --- -// Validates: Requirements 19.8 - func TestTool_Default(t *testing.T) { - tempDir := t.TempDir() - origDir, _ := os.Getwd() - _ = os.Chdir(tempDir) - defer func() { _ = os.Chdir(origDir) }() - - Reset() - - // Default is empty string when not configured - tool := Tool() - if tool != "" { + declareContext(t, "") + if tool := Tool(); tool != "" { t.Errorf("Tool() = %q, want %q", tool, "") } } func TestTool_Configured(t *testing.T) { - tempDir := t.TempDir() - origDir, _ := os.Getwd() - _ = os.Chdir(tempDir) - 
defer func() { _ = os.Chdir(origDir) }() - - rcContent := `tool: kiro` - _ = os.WriteFile(filepath.Join(tempDir, ".ctxrc"), []byte(rcContent), 0600) - - Reset() - - tool := Tool() - if tool != "kiro" { + declareContext(t, `tool: kiro`) + if tool := Tool(); tool != "kiro" { t.Errorf("Tool() = %q, want %q", tool, "kiro") } } func TestSteeringDir_Default(t *testing.T) { - tempDir := t.TempDir() - origDir, _ := os.Getwd() - _ = os.Chdir(tempDir) - defer func() { _ = os.Chdir(origDir) }() - - Reset() - - dir := SteeringDir() - if dir != DefaultSteeringDir { - t.Errorf("SteeringDir() = %q, want %q", dir, DefaultSteeringDir) + declareContext(t, "") + if d := SteeringDir(); d != DefaultSteeringDir { + t.Errorf("SteeringDir() = %q, want %q", d, DefaultSteeringDir) } } func TestSteeringDir_Configured(t *testing.T) { - tempDir := t.TempDir() - origDir, _ := os.Getwd() - _ = os.Chdir(tempDir) - defer func() { _ = os.Chdir(origDir) }() - - rcContent := `steering: + declareContext(t, `steering: dir: custom/steering -` - _ = os.WriteFile(filepath.Join(tempDir, ".ctxrc"), []byte(rcContent), 0600) - - Reset() - - dir := SteeringDir() - if dir != "custom/steering" { - t.Errorf("SteeringDir() = %q, want %q", dir, "custom/steering") +`) + if d := SteeringDir(); d != "custom/steering" { + t.Errorf("SteeringDir() = %q, want %q", d, "custom/steering") } } func TestHooksDir_Default(t *testing.T) { - tempDir := t.TempDir() - origDir, _ := os.Getwd() - _ = os.Chdir(tempDir) - defer func() { _ = os.Chdir(origDir) }() - - Reset() - - dir := HooksDir() - if dir != DefaultHooksDir { - t.Errorf("HooksDir() = %q, want %q", dir, DefaultHooksDir) + declareContext(t, "") + if d := HooksDir(); d != DefaultHooksDir { + t.Errorf("HooksDir() = %q, want %q", d, DefaultHooksDir) } } func TestHooksDir_Configured(t *testing.T) { - tempDir := t.TempDir() - origDir, _ := os.Getwd() - _ = os.Chdir(tempDir) - defer func() { _ = os.Chdir(origDir) }() - - rcContent := `hooks: + declareContext(t, `hooks: dir: 
custom/hooks -` - _ = os.WriteFile(filepath.Join(tempDir, ".ctxrc"), []byte(rcContent), 0600) - - Reset() - - dir := HooksDir() - if dir != "custom/hooks" { - t.Errorf("HooksDir() = %q, want %q", dir, "custom/hooks") +`) + if d := HooksDir(); d != "custom/hooks" { + t.Errorf("HooksDir() = %q, want %q", d, "custom/hooks") } } func TestHookTimeout_Default(t *testing.T) { - tempDir := t.TempDir() - origDir, _ := os.Getwd() - _ = os.Chdir(tempDir) - defer func() { _ = os.Chdir(origDir) }() - - Reset() - - timeout := HookTimeout() - if timeout != DefaultHookTimeout { + declareContext(t, "") + if timeout := HookTimeout(); timeout != DefaultHookTimeout { t.Errorf("HookTimeout() = %d, want %d", timeout, DefaultHookTimeout) } } func TestHookTimeout_Configured(t *testing.T) { - tempDir := t.TempDir() - origDir, _ := os.Getwd() - _ = os.Chdir(tempDir) - defer func() { _ = os.Chdir(origDir) }() - - rcContent := `hooks: + declareContext(t, `hooks: timeout: 30 -` - _ = os.WriteFile(filepath.Join(tempDir, ".ctxrc"), []byte(rcContent), 0600) - - Reset() - - timeout := HookTimeout() - if timeout != 30 { +`) + if timeout := HookTimeout(); timeout != 30 { t.Errorf("HookTimeout() = %d, want %d", timeout, 30) } } func TestHooksEnabled_Default(t *testing.T) { - tempDir := t.TempDir() - origDir, _ := os.Getwd() - _ = os.Chdir(tempDir) - defer func() { _ = os.Chdir(origDir) }() - - Reset() - - // Default (nil Hooks pointer) should return true + declareContext(t, "") if !HooksEnabled() { t.Error("HooksEnabled() = false, want true (default)") } } func TestHooksEnabled_ExplicitFalse(t *testing.T) { - tempDir := t.TempDir() - origDir, _ := os.Getwd() - _ = os.Chdir(tempDir) - defer func() { _ = os.Chdir(origDir) }() - - rcContent := `hooks: + declareContext(t, `hooks: enabled: false -` - _ = os.WriteFile(filepath.Join(tempDir, ".ctxrc"), []byte(rcContent), 0600) - - Reset() - +`) if HooksEnabled() { t.Error("HooksEnabled() = true, want false") } } - -func TestContextDir_UpwardWalkFromSubdir(t 
*testing.T) { - tempDir := t.TempDir() - - // Project root layout: - // /project/.git/ - // /project/.context/ - // /project/deep/nested/ - projectRoot := filepath.Join(tempDir, "project") - gitPath := filepath.Join(projectRoot, ".git") - contextPath := filepath.Join(projectRoot, dir.Context) - deepSubdir := filepath.Join(projectRoot, "deep", "nested") - - for _, d := range []string{gitPath, contextPath, deepSubdir} { - if mkErr := os.MkdirAll(d, 0700); mkErr != nil { - t.Fatalf("mkdir %s: %v", d, mkErr) - } - } - - origDir, _ := os.Getwd() - _ = os.Chdir(deepSubdir) - defer func() { _ = os.Chdir(origDir) }() - - Reset() - - got := ContextDir() - - // Resolve symlinks so /tmp vs /private/tmp on macOS compares equal. - wantResolved, _ := filepath.EvalSymlinks(contextPath) - gotResolved, _ := filepath.EvalSymlinks(got) - - if gotResolved != wantResolved { - t.Errorf( - "ContextDir() from subdir = %q, want %q", - gotResolved, wantResolved, - ) - } - - // Explicit regression guard: the returned path must NOT be the - // stray-dir fallback that the bug would have produced. - strayPath := filepath.Join(deepSubdir, dir.Context) - strayResolved, _ := filepath.EvalSymlinks(filepath.Dir(strayPath)) - if gotResolved == filepath.Join(strayResolved, dir.Context) { - t.Errorf( - "ContextDir() resolved to stray subdir path %q — "+ - "upward walk regressed", - got, - ) - } -} - -func TestContextDir_FallbackWhenNotFound(t *testing.T) { - tempDir := t.TempDir() - - origDir, _ := os.Getwd() - _ = os.Chdir(tempDir) - defer func() { _ = os.Chdir(origDir) }() - - Reset() - - got := ContextDir() - - // Fallback path: filepath.Join(cwd, dir.Context), absolute. 
- wantResolved, _ := filepath.EvalSymlinks(tempDir) - gotDir, _ := filepath.EvalSymlinks(filepath.Dir(got)) - - if gotDir != wantResolved { - t.Errorf( - "ContextDir() fallback parent = %q, want %q", - gotDir, wantResolved, - ) - } - if filepath.Base(got) != dir.Context { - t.Errorf( - "ContextDir() fallback base = %q, want %q", - filepath.Base(got), dir.Context, - ) - } - if !filepath.IsAbs(got) { - t.Errorf("ContextDir() fallback %q is not absolute", got) - } -} diff --git a/internal/rc/require.go b/internal/rc/require.go new file mode 100644 index 000000000..315b164a5 --- /dev/null +++ b/internal/rc/require.go @@ -0,0 +1,94 @@ +// / ctx: https://ctx.ist +// ,'`./ do you remember? +// `.,'\ +// \ Copyright 2026-present Context contributors. +// SPDX-License-Identifier: Apache-2.0 + +package rc + +import ( + "errors" + "os" + + errCtx "github.com/ActiveMemory/ctx/internal/err/context" +) + +// RequireContextDir returns the declared context directory after +// validating both its declaration shape (via [ContextDir]) and that +// the path actually exists on disk as a directory. +// +// This is the **operating-command boundary**: every non-exempt +// command calls it at the start of its Run function (or via +// [PersistentPreRunE]). Diagnostic and exempt callers (init, +// activate, bootstrap, hooks like check-anchor-drift) must use +// [ContextDir] directly so they observe declared state without +// erroring on broken state. +// +// Convention: operating callers use this; only diagnostic / exempt +// callers may use raw [ContextDir]. Without that rule, operating +// callers would receive shape-valid but non-existent paths and +// surface confusing downstream errors instead of the friendly +// tailored not-found message. +// +// Rejection conditions: +// +// 1. CTX_DIR truly unset ([errCtx.ErrDirNotDeclared]) is rewrapped +// as [errCtx.NotDeclared] tailored to how many .context/ +// candidates are visible from CWD. 
The user said "I haven't +// told you anything yet"; the message offers a next step. +// 2. CTX_DIR set to a relative or non-canonical-basename value +// ([errCtx.ErrRelativeNotAllowed] / [errCtx.ErrNonCanonicalBasename]) +// is propagated unchanged. The user told us a specific value; +// the diagnostic should name what's wrong with that value +// ("must be absolute, got '...'", "basename must be '.context', +// got 'tmp'") rather than pretend nothing was declared. +// 3. Path does not exist: [errCtx.ErrContextDirNotFound] (wrapped +// via [errCtx.Missing]). +// 4. Stat failed for a reason other than not-exist (permission +// denied, I/O error): [errCtx.ErrContextDirStat] (wrapped via +// [errCtx.StatFailed]). +// 5. Path exists but is not a directory: +// [errCtx.ErrContextDirNotADirectory]. +// +// Exempt commands (ctx init, ctx activate, ctx deactivate, +// ctx version, ctx help, ctx system bootstrap) must not call this +// helper; they handle the unset case themselves, either by creating +// the directory (init), walking to emit shell integration (activate), +// or reporting resolution state for diagnostics (bootstrap). +// +// Returns: +// - string: absolute path to the declared context directory. +// - error: non-nil with a multi-line actionable message when the +// context directory has not been declared, does not exist, or +// does not name a directory; the error is already formatted +// for direct return from a Cobra Run function. +func RequireContextDir() (string, error) { + path, err := ContextDir() + if err != nil { + // Discriminate by error kind: only truly-unset gets the + // tailored multi-line "no context directory specified" + // message with candidate hints. 
Relative-path and + // non-canonical-basename errors are propagated with their + // precise "what's wrong with the value you gave us" + // message; collapsing them into the unset form would tell + // the user "you didn't declare it" when they did declare + // it (just to the wrong shape): exactly the silent / + // confusing diagnostic the spec was meant to eliminate. + if errors.Is(err, errCtx.ErrDirNotDeclared) { + cwd, _ := os.Getwd() + return "", errCtx.NotDeclared(ScanCandidates(cwd)) + } + return "", err + } + info, statErr := os.Stat(path) + if statErr != nil { + if errors.Is(statErr, os.ErrNotExist) { + return "", errCtx.Missing(path) + } + return "", errCtx.StatFailed(path, statErr) + } + if !info.IsDir() { + return "", errCtx.NotADir(path) + } + return path, nil +} diff --git a/internal/rc/require_test.go b/internal/rc/require_test.go new file mode 100644 index 000000000..aa4cb5967 --- /dev/null +++ b/internal/rc/require_test.go @@ -0,0 +1,197 @@ +// / ctx: https://ctx.ist +// ,'`./ do you remember? +// `.,'\ +// \ Copyright 2026-present Context contributors. +// SPDX-License-Identifier: Apache-2.0 + +package rc + +import ( + "errors" + "os" + "path/filepath" + "runtime" + "strings" + "testing" + + "github.com/ActiveMemory/ctx/internal/config/dir" + "github.com/ActiveMemory/ctx/internal/config/env" + errCtx "github.com/ActiveMemory/ctx/internal/err/context" +) + +// TestRequireContextDir_PathDoesNotExist: shape-valid declaration +// pointing at a path that doesn't exist on disk → ErrContextDirNotFound. 
+func TestRequireContextDir_PathDoesNotExist(t *testing.T) { + t.Setenv(env.CtxDir, "/nonexistent-test-dir/.context") + Reset() + t.Cleanup(Reset) + + got, err := RequireContextDir() + if !errors.Is(err, errCtx.ErrContextDirNotFound) { + t.Errorf("RequireContextDir() err = %v, want ErrContextDirNotFound", + err) + } + if got != "" { + t.Errorf("RequireContextDir() = %q, want \"\"", got) + } +} + +// TestRequireContextDir_PathIsAFile: CTX_DIR points at an existing +// regular file → ErrContextDirNotADirectory. +func TestRequireContextDir_PathIsAFile(t *testing.T) { + tempDir := t.TempDir() + filePath := filepath.Join(tempDir, dir.Context) + if err := os.WriteFile(filePath, []byte("not a dir"), 0o600); err != nil { + t.Fatalf("write: %v", err) + } + t.Setenv(env.CtxDir, filePath) + Reset() + t.Cleanup(Reset) + + _, err := RequireContextDir() + if !errors.Is(err, errCtx.ErrContextDirNotADirectory) { + t.Errorf("RequireContextDir() err = %v, want ErrContextDirNotADirectory", + err) + } +} + +// TestRequireContextDir_StatPermissionDenied: stat fails for a +// reason other than not-exist → ErrContextDirStat. Skipped on +// platforms where chmod 000 doesn't block stat (Windows) or where +// the test runs as root. +func TestRequireContextDir_StatPermissionDenied(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("permission semantics differ on windows") + } + if os.Geteuid() == 0 { + t.Skip("root bypasses permission checks") + } + tempDir := t.TempDir() + parent := filepath.Join(tempDir, "locked") + if err := os.MkdirAll(parent, 0o700); err != nil { + t.Fatalf("mkdir: %v", err) + } + target := filepath.Join(parent, dir.Context) + if err := os.MkdirAll(target, 0o700); err != nil { + t.Fatalf("mkdir target: %v", err) + } + if err := os.Chmod(parent, 0); err != nil { + t.Fatalf("chmod: %v", err) + } + t.Cleanup(func() { + // Restore rwx so t.TempDir's recursive cleanup can + // remove the directory. 
gosec G302 flags 0o700 as too + // permissive for files; it is fine for an in-test + // directory chmod that needs read+write+execute for + // cleanup to succeed. + _ = os.Chmod(parent, 0o700) //nolint:gosec // dir needs rwx for cleanup + }) + + t.Setenv(env.CtxDir, target) + Reset() + t.Cleanup(Reset) + + _, err := RequireContextDir() + if err == nil { + t.Fatal("RequireContextDir() err = nil, want non-nil") + } + // Either ErrContextDirNotFound or ErrContextDirStat depending on + // the underlying syscall: macOS often returns ENOENT through a + // chmod-0 parent because lookup short-circuits, while Linux + // typically surfaces EACCES. Both are acceptable diagnostics for + // the user. + if !errors.Is(err, errCtx.ErrContextDirStat) && + !errors.Is(err, errCtx.ErrContextDirNotFound) { + t.Errorf( + "RequireContextDir() err = %v, want ErrContextDirStat or ErrContextDirNotFound", + err) + } +} + +// TestRequireContextDir_HappyPath: existing dir, canonical name → +// returns absolute path, nil error. +func TestRequireContextDir_HappyPath(t *testing.T) { + tempDir := t.TempDir() + target := filepath.Join(tempDir, dir.Context) + if err := os.MkdirAll(target, 0o700); err != nil { + t.Fatalf("mkdir: %v", err) + } + t.Setenv(env.CtxDir, target) + Reset() + t.Cleanup(Reset) + + got, err := RequireContextDir() + if err != nil { + t.Fatalf("RequireContextDir() err = %v, want nil", err) + } + gotResolved, _ := filepath.EvalSymlinks(got) + wantResolved, _ := filepath.EvalSymlinks(target) + if gotResolved != wantResolved { + t.Errorf("RequireContextDir() = %q, want %q", gotResolved, wantResolved) + } +} + +// TestRequireContextDir_DelegatesShapeChecks: ContextDir shape +// errors flow through with their precise meaning preserved. 
Only +// the truly-unset case gets rewrapped as the tailored +// "no context directory specified" message with candidate hints; +// relative and non-canonical-basename errors propagate unchanged so +// the user sees what's wrong with the value they declared instead +// of "you didn't declare it" when they actually did. +func TestRequireContextDir_DelegatesShapeChecks(t *testing.T) { + cases := []struct { + name string + val string + wantSentinel error + wantMsgContain string + }{ + { + name: "unset", + val: "", + wantSentinel: errCtx.ErrDirNotDeclared, + wantMsgContain: "no context directory specified", + }, + { + name: "relative", + val: "relative-path", + wantSentinel: errCtx.ErrRelativeNotAllowed, + wantMsgContain: "absolute", + }, + { + name: "non-canonical", + val: "/tmp/notdotcontext", + wantSentinel: errCtx.ErrNonCanonicalBasename, + wantMsgContain: "notdotcontext", + }, + } + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + t.Setenv(env.CtxDir, c.val) + Reset() + t.Cleanup(Reset) + + got, err := RequireContextDir() + if err == nil { + t.Fatalf("RequireContextDir() err = nil, want non-nil for %q", + c.val) + } + if got != "" { + t.Errorf("RequireContextDir() = %q, want \"\"", got) + } + // "unset" gets rewrapped into a tailored message that no + // longer wraps the original sentinel. The other two + // shape errors propagate the sentinel unchanged. 
+ if c.name != "unset" && !errors.Is(err, c.wantSentinel) { + t.Errorf("RequireContextDir() err = %v, want errors.Is matching %v", + err, c.wantSentinel) + } + if msg := err.Error(); msg == "" { + t.Error("RequireContextDir() returned empty error message") + } + if !strings.Contains(err.Error(), c.wantMsgContain) { + t.Errorf("RequireContextDir() msg = %q; want substring %q", + err.Error(), c.wantMsgContain) + } + }) + } +} diff --git a/internal/rc/testmain_test.go b/internal/rc/testmain_test.go new file mode 100644 index 000000000..381eac0e7 --- /dev/null +++ b/internal/rc/testmain_test.go @@ -0,0 +1,22 @@ +// / ctx: https://ctx.ist +// ,'`./ do you remember? +// `.,'\ +// \ Copyright 2026-present Context contributors. +// SPDX-License-Identifier: Apache-2.0 + +package rc + +import ( + "os" + "testing" + + "github.com/ActiveMemory/ctx/internal/assets/read/lookup" +) + +// TestMain initializes the embedded text-asset lookup so that error +// factories (internal/err/context.NotDeclared, etc.) resolve their +// DescKey-based messages instead of returning empty strings. +func TestMain(m *testing.M) { + lookup.Init() + os.Exit(m.Run()) +} diff --git a/internal/rc/types.go b/internal/rc/types.go index df327c610..ea96fe453 100644 --- a/internal/rc/types.go +++ b/internal/rc/types.go @@ -11,14 +11,11 @@ import cfgMemory "github.com/ActiveMemory/ctx/internal/config/memory" // CtxRC represents the configuration from the .ctxrc file. 
// // Fields: -// - ContextDir: Name of the context directory (default ".context") // - TokenBudget: Default token budget for context assembly (default 8000) // - PriorityOrder: Custom file loading priority order // - AutoArchive: Whether to auto-archive completed tasks (default true) // - ArchiveAfterDays: Days before archiving completed tasks (default 7) // - ScratchpadEncrypt: Whether to encrypt the scratchpad (default true) -// - AllowOutsideCwd: Skip boundary validation for -// external context dirs (default false) // - InjectionTokenWarn: Token threshold for oversize // injection warning (default 15000, 0 = disabled) // - ContextWindow: Context window size in tokens for @@ -63,13 +60,11 @@ import cfgMemory "github.com/ActiveMemory/ctx/internal/config/memory" type CtxRC struct { Profile string `yaml:"profile"` Tool string `yaml:"tool"` - ContextDir string `yaml:"context_dir"` TokenBudget int `yaml:"token_budget"` PriorityOrder []string `yaml:"priority_order"` AutoArchive bool `yaml:"auto_archive"` ArchiveAfterDays int `yaml:"archive_after_days"` ScratchpadEncrypt *bool `yaml:"scratchpad_encrypt"` - AllowOutsideCwd bool `yaml:"allow_outside_cwd"` EntryCountLearnings int `yaml:"entry_count_learnings"` EntryCountDecisions int `yaml:"entry_count_decisions"` ConventionLineCount int `yaml:"convention_line_count"` diff --git a/internal/rc/validate_test.go b/internal/rc/validate_test.go index abeff5bdb..748c32006 100644 --- a/internal/rc/validate_test.go +++ b/internal/rc/validate_test.go @@ -81,15 +81,13 @@ func TestValidate_EmptyFile(t *testing.T) { } func TestValidate_FullValidConfig(t *testing.T) { - data := []byte(`context_dir: .context -token_budget: 8000 + data := []byte(`token_budget: 8000 priority_order: - TASKS.md - DECISIONS.md auto_archive: true archive_after_days: 7 scratchpad_encrypt: true -allow_outside_cwd: false entry_count_learnings: 30 entry_count_decisions: 20 convention_line_count: 200 diff --git a/internal/rc/walk.go b/internal/rc/walk.go deleted 
file mode 100644 index 0755cd017..000000000 --- a/internal/rc/walk.go +++ /dev/null @@ -1,118 +0,0 @@ -// / ctx: https://ctx.ist -// ,'`./ do you remember? -// `.,'\ -// \ Copyright 2026-present Context contributors. -// SPDX-License-Identifier: Apache-2.0 - -package rc - -import ( - "os" - "path/filepath" - "strings" - - cfgGit "github.com/ActiveMemory/ctx/internal/config/git" -) - -// walkForContextDir walks upward from the current working directory -// looking for an existing directory whose basename matches name. -// -// When a candidate is found above CWD, it is validated against the -// git root (if any). If the candidate falls outside the git root, -// it belongs to a different project and is discarded; the git root -// is used as the anchor instead. -// -// Absolute configured names skip the walk entirely. When no matching -// directory is found upward, returns the context directory anchored -// to the git root (if found) or filepath.Join(cwd, name) as an -// absolute path so that ctx init can create a fresh context directory -// at the current location. -// -// Parameters: -// - name: Configured context directory name (may be relative or absolute) -// -// Returns: -// - string: Absolute path to the resolved context directory -func walkForContextDir(name string) string { - if filepath.IsAbs(name) { - return name - } - - cwd, cwdErr := os.Getwd() - if cwdErr != nil { - return name - } - - // Walk upward looking for an existing context directory. - var candidate string - cur := cwd - for { - path := filepath.Join(cur, name) - if info, statErr := os.Stat(path); statErr == nil && info.IsDir() { - candidate = path - break - } - parent := filepath.Dir(cur) - if parent == cur { - break - } - cur = parent - } - - gitRoot := findGitRoot(cwd) - - // No candidate found: anchor to git root or CWD. 
- if candidate == "" { - if gitRoot != "" { - return filepath.Join(gitRoot, name) - } - return filepath.Join(cwd, name) - } - - // Candidate found in CWD itself; always valid. - candidateParent := filepath.Dir(candidate) - if candidateParent == cwd { - return candidate - } - - // Candidate found above CWD: validate against git root. - if gitRoot == "" { - // No git root to confirm ownership; don't trust the ancestor. - return filepath.Join(cwd, name) - } - - // Check whether the candidate is within the git root. - // Append separator to avoid "/foo/bar" matching "/foo/b". - root := gitRoot + string(os.PathSeparator) - if candidateParent == gitRoot || strings.HasPrefix(candidateParent, root) { - return candidate - } - - // Candidate is outside the git root; belongs to a different project. - // Anchor to the git root instead. - return filepath.Join(gitRoot, name) -} - -// findGitRoot walks upward from start looking for a .git entry -// (directory or file, to support worktrees). Returns the parent -// directory of the .git entry, or "" if none is found. -// -// Parameters: -// - start: Directory to start searching from -// -// Returns: -// - string: Absolute path to the git root, or "" if not found -func findGitRoot(start string) string { - cur := start - for { - gitPath := filepath.Join(cur, cfgGit.DotDir) - if _, statErr := os.Stat(gitPath); statErr == nil { - return cur - } - parent := filepath.Dir(cur) - if parent == cur { - return "" - } - cur = parent - } -} diff --git a/internal/rc/walk_test.go b/internal/rc/walk_test.go deleted file mode 100644 index 6c2b3198d..000000000 --- a/internal/rc/walk_test.go +++ /dev/null @@ -1,295 +0,0 @@ -// / ctx: https://ctx.ist -// ,'`./ do you remember? -// `.,'\ -// \ Copyright 2026-present Context contributors. 
-// SPDX-License-Identifier: Apache-2.0 - -package rc - -import ( - "os" - "path/filepath" - "testing" - - "github.com/ActiveMemory/ctx/internal/config/dir" -) - -func TestWalkForContextDir_GitAnchor(t *testing.T) { - // Parent workspace has .context, child project has .git but no .context. - // Walk should discard parent's .context and anchor to child's git root. - // - // workspace/ - // .context/ ← parent's context (should be ignored) - // child-project/ - // .git/ ← child's git root - // src/ ← CWD - tmp := t.TempDir() - workspace := filepath.Join(tmp, "workspace") - parentCtx := filepath.Join(workspace, dir.Context) - childProject := filepath.Join(workspace, "child-project") - childGit := filepath.Join(childProject, ".git") - childSrc := filepath.Join(childProject, "src") - - for _, d := range []string{parentCtx, childGit, childSrc} { - if err := os.MkdirAll(d, 0700); err != nil { - t.Fatalf("mkdir %s: %v", d, err) - } - } - - origDir, _ := os.Getwd() - _ = os.Chdir(childSrc) - defer func() { _ = os.Chdir(origDir) }() - - got := walkForContextDir(dir.Context) - - // Should anchor to child-project, not use parent's .context. - wantResolved, _ := filepath.EvalSymlinks(filepath.Join(childProject, dir.Context)) - gotResolved, _ := filepath.EvalSymlinks(got) - - if gotResolved != wantResolved { - t.Errorf("walkForContextDir() = %q, want %q", gotResolved, wantResolved) - } -} - -func TestWalkForContextDir_NoGit(t *testing.T) { - // No .git anywhere, parent has .context. - // Walk should fall through to cwd/.context. 
- // - // workspace/ - // .context/ ← parent's context (no git to confirm) - // child/ ← CWD - tmp := t.TempDir() - workspace := filepath.Join(tmp, "workspace") - parentCtx := filepath.Join(workspace, dir.Context) - child := filepath.Join(workspace, "child") - - for _, d := range []string{parentCtx, child} { - if err := os.MkdirAll(d, 0700); err != nil { - t.Fatalf("mkdir %s: %v", d, err) - } - } - - origDir, _ := os.Getwd() - _ = os.Chdir(child) - defer func() { _ = os.Chdir(origDir) }() - - got := walkForContextDir(dir.Context) - - wantResolved, _ := filepath.EvalSymlinks(filepath.Join(child, dir.Context)) - gotResolved, _ := filepath.EvalSymlinks(got) - - if gotResolved != wantResolved { - t.Errorf("walkForContextDir() = %q, want %q", gotResolved, wantResolved) - } -} - -func TestWalkForContextDir_SameGitRoot(t *testing.T) { - // .context and CWD share the same git root. - // Walk should return the found .context. - // - // project/ - // .git/ - // .context/ - // src/deep/ ← CWD - tmp := t.TempDir() - project := filepath.Join(tmp, "project") - projectGit := filepath.Join(project, ".git") - projectCtx := filepath.Join(project, dir.Context) - deep := filepath.Join(project, "src", "deep") - - for _, d := range []string{projectGit, projectCtx, deep} { - if err := os.MkdirAll(d, 0700); err != nil { - t.Fatalf("mkdir %s: %v", d, err) - } - } - - origDir, _ := os.Getwd() - _ = os.Chdir(deep) - defer func() { _ = os.Chdir(origDir) }() - - got := walkForContextDir(dir.Context) - - wantResolved, _ := filepath.EvalSymlinks(projectCtx) - gotResolved, _ := filepath.EvalSymlinks(got) - - if gotResolved != wantResolved { - t.Errorf("walkForContextDir() = %q, want %q", gotResolved, wantResolved) - } -} - -func TestWalkForContextDir_GitWorktreeFile(t *testing.T) { - // .git is a file (worktree), not a directory. - // Should still be detected as git root. 
- // - // project/ - // .git ← file (worktree marker) - // .context/ - // src/ ← CWD - tmp := t.TempDir() - project := filepath.Join(tmp, "project") - projectCtx := filepath.Join(project, dir.Context) - src := filepath.Join(project, "src") - - for _, d := range []string{projectCtx, src} { - if err := os.MkdirAll(d, 0700); err != nil { - t.Fatalf("mkdir %s: %v", d, err) - } - } - - // Create .git as a file (like git worktrees do). - gitFile := filepath.Join(project, ".git") - if err := os.WriteFile(gitFile, []byte("gitdir: /some/other/path\n"), 0600); err != nil { - t.Fatalf("write .git file: %v", err) - } - - origDir, _ := os.Getwd() - _ = os.Chdir(src) - defer func() { _ = os.Chdir(origDir) }() - - got := walkForContextDir(dir.Context) - - wantResolved, _ := filepath.EvalSymlinks(projectCtx) - gotResolved, _ := filepath.EvalSymlinks(got) - - if gotResolved != wantResolved { - t.Errorf("walkForContextDir() = %q, want %q", gotResolved, wantResolved) - } -} - -func TestWalkForContextDir_NothingFound_GitRoot(t *testing.T) { - // No .context anywhere, but .git exists. - // Walk should anchor to git root. - // - // project/ - // .git/ - // src/ ← CWD - tmp := t.TempDir() - project := filepath.Join(tmp, "project") - projectGit := filepath.Join(project, ".git") - src := filepath.Join(project, "src") - - for _, d := range []string{projectGit, src} { - if err := os.MkdirAll(d, 0700); err != nil { - t.Fatalf("mkdir %s: %v", d, err) - } - } - - origDir, _ := os.Getwd() - _ = os.Chdir(src) - defer func() { _ = os.Chdir(origDir) }() - - got := walkForContextDir(dir.Context) - - wantResolved, _ := filepath.EvalSymlinks(filepath.Join(project, dir.Context)) - gotResolved, _ := filepath.EvalSymlinks(got) - - if gotResolved != wantResolved { - t.Errorf("walkForContextDir() = %q, want %q", gotResolved, wantResolved) - } -} - -func TestWalkForContextDir_CWDHasContext(t *testing.T) { - // .context exists in CWD — should always use it regardless of git. 
- // - // workspace/ - // .context/ - // child/ - // .context/ ← CWD has its own - tmp := t.TempDir() - workspace := filepath.Join(tmp, "workspace") - parentCtx := filepath.Join(workspace, dir.Context) - child := filepath.Join(workspace, "child") - childCtx := filepath.Join(child, dir.Context) - - for _, d := range []string{parentCtx, childCtx} { - if err := os.MkdirAll(d, 0700); err != nil { - t.Fatalf("mkdir %s: %v", d, err) - } - } - - origDir, _ := os.Getwd() - _ = os.Chdir(child) - defer func() { _ = os.Chdir(origDir) }() - - got := walkForContextDir(dir.Context) - - wantResolved, _ := filepath.EvalSymlinks(childCtx) - gotResolved, _ := filepath.EvalSymlinks(got) - - if gotResolved != wantResolved { - t.Errorf("walkForContextDir() = %q, want %q", gotResolved, wantResolved) - } -} - -func TestWalkForContextDir_NestedGitRepos(t *testing.T) { - // Inner git repo (like a submodule) should use inner git root, - // rejecting outer project's .context. - // - // outer/ - // .git/ - // .context/ - // vendor/inner/ - // .git/ ← inner git root - // src/ ← CWD - tmp := t.TempDir() - outer := filepath.Join(tmp, "outer") - outerGit := filepath.Join(outer, ".git") - outerCtx := filepath.Join(outer, dir.Context) - inner := filepath.Join(outer, "vendor", "inner") - innerGit := filepath.Join(inner, ".git") - innerSrc := filepath.Join(inner, "src") - - for _, d := range []string{outerGit, outerCtx, innerGit, innerSrc} { - if err := os.MkdirAll(d, 0700); err != nil { - t.Fatalf("mkdir %s: %v", d, err) - } - } - - origDir, _ := os.Getwd() - _ = os.Chdir(innerSrc) - defer func() { _ = os.Chdir(origDir) }() - - got := walkForContextDir(dir.Context) - - // Should anchor to inner git root, not use outer's .context. 
- wantResolved, _ := filepath.EvalSymlinks(filepath.Join(inner, dir.Context)) - gotResolved, _ := filepath.EvalSymlinks(got) - - if gotResolved != wantResolved { - t.Errorf("walkForContextDir() = %q, want %q", gotResolved, wantResolved) - } -} - -func TestFindGitRoot_Found(t *testing.T) { - tmp := t.TempDir() - project := filepath.Join(tmp, "project") - gitDir := filepath.Join(project, ".git") - deep := filepath.Join(project, "a", "b", "c") - - for _, d := range []string{gitDir, deep} { - if err := os.MkdirAll(d, 0700); err != nil { - t.Fatalf("mkdir %s: %v", d, err) - } - } - - got := findGitRoot(deep) - wantResolved, _ := filepath.EvalSymlinks(project) - gotResolved, _ := filepath.EvalSymlinks(got) - - if gotResolved != wantResolved { - t.Errorf("findGitRoot() = %q, want %q", gotResolved, wantResolved) - } -} - -func TestFindGitRoot_NotFound(t *testing.T) { - tmp := t.TempDir() - noGit := filepath.Join(tmp, "no-git", "deep") - if err := os.MkdirAll(noGit, 0700); err != nil { - t.Fatalf("mkdir: %v", err) - } - - got := findGitRoot(noGit) - if got != "" { - t.Errorf("findGitRoot() = %q, want empty", got) - } -} diff --git a/internal/testutil/testctx/doc.go b/internal/testutil/testctx/doc.go new file mode 100644 index 000000000..e7879e3a1 --- /dev/null +++ b/internal/testutil/testctx/doc.go @@ -0,0 +1,23 @@ +// / ctx: https://ctx.ist +// ,'`./ do you remember? +// `.,'\ +// \ Copyright 2026-present Context contributors. +// SPDX-License-Identifier: Apache-2.0 + +// Package testctx provides helpers for exercising ctx commands in +// tests under the explicit-context-dir resolution model (spec: +// specs/explicit-context-dir.md). +// +// Under that model [rc.ContextDir] returns "" unless the caller has +// declared a context directory via --context-dir or CTX_DIR. 
Tests +// that chain multiple ctx commands in the same process (e.g., +// `ctx init` followed by `ctx add`) must therefore declare CTX_DIR +// before any non-exempt command runs, and must reset rc state between +// test cases so process-global overrides do not leak. +// +// [Declare] is the one-stop helper: it points CTX_DIR at +// `/.context`, resets rc, and registers an end-of-test reset +// via `t.Cleanup`. Callers still need to run `ctx init` (or +// materialize .context/ themselves); Declare only wires the +// environment. +package testctx diff --git a/internal/testutil/testctx/testctx.go b/internal/testutil/testctx/testctx.go new file mode 100644 index 000000000..daae95fb7 --- /dev/null +++ b/internal/testutil/testctx/testctx.go @@ -0,0 +1,55 @@ +// / ctx: https://ctx.ist +// ,'`./ do you remember? +// `.,'\ +// \ Copyright 2026-present Context contributors. +// SPDX-License-Identifier: Apache-2.0 + +package testctx + +import ( + "path/filepath" + "testing" + + "github.com/ActiveMemory/ctx/internal/config/dir" + "github.com/ActiveMemory/ctx/internal/config/env" + "github.com/ActiveMemory/ctx/internal/rc" +) + +// Declare wires CTX_DIR to /.context, redirects HOME to +// tempDir so user-home writes (e.g. ~/.claude/settings.json) stay +// inside the temp tree, resets rc state, and returns the absolute +// path that CTX_DIR now points to. +// +// HOME isolation matters because `ctx init` reads and writes +// ~/.claude/settings.json. Without isolation, parallel `go test +// ./...` packages all read-modify-write the same real file and race. +// +// Typical pattern: +// +// tmpDir := t.TempDir() +// t.Chdir(tmpDir) +// ctxPath := testctx.Declare(t, tmpDir) +// _ = initialize.Cmd().Execute() // materialize .context/ +// // subsequent ctx commands in the same process resolve to ctxPath +// +// Declare does NOT create the directory; that is the caller's +// responsibility, typically via `ctx init`. 
Tests that only need the +// environment declared (without materializing .context/) can skip the +// init step. +// +// Parameters: +// - t: test handle (required for t.Setenv / t.Cleanup). +// - tempDir: absolute path to the per-test temp directory, usually +// the value returned by t.TempDir(). +// +// Returns: +// - string: absolute path `/.context`. +func Declare(t *testing.T, tempDir string) string { + t.Helper() + ctxDir := filepath.Join(tempDir, dir.Context) + t.Setenv(env.CtxDir, ctxDir) + t.Setenv(env.Home, tempDir) + rc.Reset() + t.Cleanup(rc.Reset) + return ctxDir +} diff --git a/internal/tidy/archive.go b/internal/tidy/archive.go index 89c570fdd..656918e58 100644 --- a/internal/tidy/archive.go +++ b/internal/tidy/archive.go @@ -36,7 +36,11 @@ import ( // - string: Path to the written archive file // - error: If creating the archive directory or writing fails func WriteArchive(prefix, heading, content string) (string, error) { - archiveDir := filepath.Join(rc.ContextDir(), dir.Archive) + ctxDir, ctxErr := rc.ContextDir() + if ctxErr != nil { + return "", ctxErr + } + archiveDir := filepath.Join(ctxDir, dir.Archive) if mkErr := io.SafeMkdirAll(archiveDir, fs.PermExec); mkErr != nil { return "", errBackup.CreateArchiveDir(mkErr) } diff --git a/internal/validate/path.go b/internal/validate/path.go index 6e93fb7ff..e275b6180 100644 --- a/internal/validate/path.go +++ b/internal/validate/path.go @@ -9,74 +9,10 @@ package validate import ( "os" "path/filepath" - "runtime" - "strings" - "github.com/ActiveMemory/ctx/internal/config/env" errCtx "github.com/ActiveMemory/ctx/internal/err/context" - errFs "github.com/ActiveMemory/ctx/internal/err/fs" ) -// Boundary checks that dir resolves to a path within the current -// working directory. Returns an error if the resolved path escapes the -// project root. 
-// -// Parameters: -// - dir: Directory path to validate -// -// Returns: -// - error: Non-nil if the path escapes the project root -func Boundary(dir string) error { - cwd, cwdErr := os.Getwd() - if cwdErr != nil { - return errFs.BoundaryViolation(cwdErr) - } - - absDir, absErr := filepath.Abs(dir) - if absErr != nil { - return errFs.BoundaryViolation(absErr) - } - - // Resolve symlinks in both paths so traversal via symlinked parents - // is caught. - resolvedCwd, resolveErr := filepath.EvalSymlinks(cwd) - if resolveErr != nil { - return errFs.BoundaryViolation(resolveErr) - } - - resolvedDir, dirResolveErr := filepath.EvalSymlinks(absDir) - if dirResolveErr != nil { - // If the target doesn't exist yet (e.g. before init), fall back - // to the absolute path for the prefix check. - resolvedDir = filepath.Clean(absDir) - } - - // On Windows, path comparisons must be case-insensitive because - // filepath.EvalSymlinks resolves to actual disk casing while - // os.Getwd preserves the casing from the caller (e.g. VS Code - // passes a lowercase drive letter via fsPath). - equal := func(a, b string) bool { return a == b } - hasPrefix := strings.HasPrefix - if runtime.GOOS == env.OSWindows { - equal = strings.EqualFold - hasPrefix = func(s, prefix string) bool { - return len(s) >= len(prefix) && strings.EqualFold(s[:len(prefix)], prefix) - } - } - - // Ensure the resolved dir is equal to or nested under the project root. - // Append os.PathSeparator to avoid "/foo/bar" matching "/foo/b". - // On Windows, use case-insensitive comparison since NTFS paths are - // case-insensitive but EvalSymlinks normalizes casing only for the - // existing cwd, not the non-existent target, creating a mismatch. - root := resolvedCwd + string(os.PathSeparator) - if !equal(resolvedDir, resolvedCwd) && !hasPrefix(resolvedDir, root) { - return errCtx.OutsideRoot(dir, resolvedCwd) - } - - return nil -} - // Symlinks checks whether dir itself or any of its immediate children // are symlinks. 
Returns an error describing the first symlink found. // diff --git a/internal/validate/path_test.go b/internal/validate/path_test.go index a5a341230..5a2ec28df 100644 --- a/internal/validate/path_test.go +++ b/internal/validate/path_test.go @@ -9,79 +9,9 @@ package validate import ( "os" "path/filepath" - "runtime" - "strings" "testing" - - "github.com/ActiveMemory/ctx/internal/config/env" ) -func TestBoundary(t *testing.T) { - cwd, err := os.Getwd() - if err != nil { - t.Fatal(err) - } - - tests := []struct { - name string - dir string - wantErr bool - }{ - {"relative inside cwd", ".context", false}, - {"absolute inside cwd", filepath.Join(cwd, ".context"), false}, - {"deeply nested", filepath.Join(cwd, "a", "b", "c"), false}, - {"cwd itself", cwd, false}, - {"dot", ".", false}, - {"escapes cwd", "../../etc", true}, - {"absolute outside cwd", "/tmp/evil", true}, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - err := Boundary(tt.dir) - if (err != nil) != tt.wantErr { - t.Errorf("Boundary(%q) error = %v, wantErr %v", - tt.dir, err, tt.wantErr) - } - }) - } -} - -func TestBoundaryCaseInsensitive(t *testing.T) { - if runtime.GOOS != env.OSWindows { - t.Skip("case-insensitive path test only applies to Windows") - } - - // On Windows, EvalSymlinks normalizes casing to the filesystem's - // canonical form. When .context/ doesn't exist yet the fallback - // preserves the original cwd casing. The prefix check must be - // case-insensitive to avoid false "outside cwd" errors. - tmp := t.TempDir() - - // Change cwd to a case-mangled version of the temp dir. - // TempDir returns canonical casing; flip it. 
- mangled := strings.ToUpper(tmp) - if mangled == tmp { - mangled = strings.ToLower(tmp) - } - - orig, err := os.Getwd() - if err != nil { - t.Fatal(err) - } - defer func() { _ = os.Chdir(orig) }() - - if err := os.Chdir(mangled); err != nil { - t.Skipf("cannot chdir to case-mangled path %q: %v", mangled, err) - } - - // .context doesn't exist — this is the exact scenario that caused the - // false positive on Windows. - if err := Boundary(".context"); err != nil { - t.Errorf("Boundary(.context) with case-mangled cwd: %v", err) - } -} - func TestCheckSymlinks(t *testing.T) { t.Run("regular directory passes", func(t *testing.T) { dir := t.TempDir() @@ -136,45 +66,3 @@ func TestCheckSymlinks(t *testing.T) { } }) } - -func TestBoundary_WindowsCaseInsensitive(t *testing.T) { - if runtime.GOOS != env.OSWindows { - t.Skip("Windows-only test") - } - - // Simulate the VS Code plugin scenario: CWD has a lowercase drive letter - // but EvalSymlinks resolves to the actual (uppercase) casing. - // When .context doesn't exist yet (first init), the fallback path - // preserves the lowercase letter, causing a case mismatch. 
- cwd, err := os.Getwd() - if err != nil { - t.Fatal(err) - } - - // Swap the drive letter case to simulate VS Code's fsPath - if len(cwd) >= 2 && cwd[1] == ':' { - var swapped string - if cwd[0] >= 'A' && cwd[0] <= 'Z' { - swapped = strings.ToLower(cwd[:1]) + cwd[1:] - } else { - swapped = strings.ToUpper(cwd[:1]) + cwd[1:] - } - - origDir, _ := os.Getwd() - if chErr := os.Chdir(swapped); chErr != nil { - t.Fatalf("cannot chdir to %s: %v", swapped, chErr) - } - defer func() { _ = os.Chdir(origDir) }() - - // Non-existent subdir simulates .context before init - nonExistent := filepath.Join(swapped, ".nonexistent-ctx-dir") - if err := Boundary(nonExistent); err != nil { - t.Errorf("Boundary(%q) with swapped drive case should pass, got: %v", nonExistent, err) - } - - // Also test the default relative path that ctx init uses - if err := Boundary(".context"); err != nil { - t.Errorf("Boundary(.context) with swapped drive case should pass, got: %v", err) - } - } -} diff --git a/internal/write/activate/activate.go b/internal/write/activate/activate.go new file mode 100644 index 000000000..f508088cd --- /dev/null +++ b/internal/write/activate/activate.go @@ -0,0 +1,88 @@ +// / ctx: https://ctx.ist +// ,'`./ do you remember? +// `.,'\ +// \ Copyright 2026-present Context contributors. +// SPDX-License-Identifier: Apache-2.0 + +package activate + +import ( + "fmt" + + "github.com/spf13/cobra" + + cfgShell "github.com/ActiveMemory/ctx/internal/config/shell" +) + +// Emit writes pre-formatted shell-eval content to cmd's stdout +// without adding a trailing newline. The emit-layer functions in +// [internal/cli/activate/core/emit] already include the newline +// they need, so this helper must not add another (a stray blank +// line in `eval` output is harmless but ugly in `set -x` traces). +// +// Parameters: +// - cmd: cobra command providing the stdout sink. Nil is a +// no-op so test setups that omit the command don't crash. 
+// - content: shell-eval line(s); may be empty (no-op). +func Emit(cmd *cobra.Command, content string) { + if cmd == nil || content == "" { + return + } + _, _ = fmt.Fprint(cmd.OutOrStdout(), content) +} + +// ActivatedAt writes a single informational line to stderr +// announcing the bound `.context/` path. Always called by +// `ctx activate` on success (single-candidate too) so the user +// always sees what just happened, not just an empty terminal. +// +// Stderr (not stdout) because the line is for the user, not the +// shell. `eval` lets stderr pass through to the terminal while +// stripping the eval-captured stdout stream. +// +// Parameters: +// - cmd: cobra command providing the stderr sink. Nil is a +// no-op. +// - path: absolute path of the bound `.context/` directory. +// Empty is a no-op (defensive; Run never calls with empty). +func ActivatedAt(cmd *cobra.Command, path string) { + if cmd == nil || path == "" { + return + } + // ErrOrStderr (not OutOrStderr): cobra's OutOrStderr returns + // the SetOut writer with stderr fallback (confusingly named). + // Wrong helper would land the advisory inside the + // eval-captured stream and make it invisible to anyone + // running `eval "$(ctx activate)"`. + _, _ = fmt.Fprintf(cmd.ErrOrStderr(), + cfgShell.FormatActivatedAtAdvisory, path) +} + +// AlsoVisible writes one informational line per additional +// `.context/` candidate to stderr. Used by `ctx activate` when +// more than one candidate is visible upward from CWD: innermost +// wins (the bind goes to stdout via [Emit] and is announced via +// [ActivatedAt]), and the others get surfaced here so the user +// can see what's around but isn't being bound. +// +// Each line follows the shape: +// +// ctx: also visible upward: +// +// Multiple paths produce multiple lines (one per path) so the +// output stays parseable when anyone scripts around it. +// +// Parameters: +// - cmd: cobra command providing the stderr sink. Nil is a +// no-op. 
+// - paths: additional candidates to surface, in the order they +// came back from the upward scan. Empty / nil is a no-op. +func AlsoVisible(cmd *cobra.Command, paths []string) { + if cmd == nil || len(paths) == 0 { + return + } + for _, p := range paths { + _, _ = fmt.Fprintf(cmd.ErrOrStderr(), + cfgShell.FormatAlsoVisibleAdvisory, p) + } +} diff --git a/internal/write/activate/doc.go b/internal/write/activate/doc.go new file mode 100644 index 000000000..2f327570a --- /dev/null +++ b/internal/write/activate/doc.go @@ -0,0 +1,27 @@ +// / ctx: https://ctx.ist +// ,'`./ do you remember? +// `.,'\ +// \ Copyright 2026-present Context contributors. +// SPDX-License-Identifier: Apache-2.0 + +// Package activate carries the write-layer helpers for the +// `ctx activate` and `ctx deactivate` commands. Both produce a +// single shell-eval line (export / unset) that callers consume +// via `eval "$(ctx activate)"`. +// +// # Why a separate write package +// +// The `cmd_print` and `cmd_fprint` audits forbid `cmd.Print*` and +// `fmt.Fprint*(, ...)` outside `internal/write/`. +// The shell-eval lines are pre-formatted by +// [internal/cli/activate/core/emit] (no template substitution at the +// write layer), so this package is intentionally tiny: a single +// helper that owns the actual stdout write. +// +// # Exported Functions +// +// [Emit] writes pre-formatted shell-eval content to the cobra +// command's stdout, no trailing newline added (the emit-layer +// helpers already include one). Both `ctx activate` and +// `ctx deactivate` Run functions call it. +package activate diff --git a/internal/write/backup/backup.go b/internal/write/backup/backup.go deleted file mode 100644 index bfc7a25a4..000000000 --- a/internal/write/backup/backup.go +++ /dev/null @@ -1,43 +0,0 @@ -// / ctx: https://ctx.ist -// ,'`./ do you remember? -// `.,'\ -// \ Copyright 2026-present Context contributors. 
-// SPDX-License-Identifier: Apache-2.0 - -package backup - -import ( - "fmt" - - "github.com/spf13/cobra" - - "github.com/ActiveMemory/ctx/internal/assets/read/desc" - "github.com/ActiveMemory/ctx/internal/config/embed/text" - "github.com/ActiveMemory/ctx/internal/format" -) - -// ResultLine prints a single backup result with optional SMB destination. -// -// Parameters: -// - cmd: Cobra command for output. Nil is a no-op. -// - scope: backup scope label (e.g. "project", "global"). -// - archive: archive file path. -// - size: archive size in bytes. -// - smbDest: optional SMB destination (empty string skips). -func ResultLine( - cmd *cobra.Command, - scope, archive string, - size int64, - smbDest string, -) { - if cmd == nil { - return - } - line := fmt.Sprintf( - desc.Text(text.DescKeyWriteBackupResult), - scope, archive, format.Bytes(size)) - if smbDest != "" { - line += fmt.Sprintf(desc.Text(text.DescKeyWriteBackupSMBDest), smbDest) - } - cmd.Println(line) -} diff --git a/internal/write/backup/doc.go b/internal/write/backup/doc.go deleted file mode 100644 index 5427d7c20..000000000 --- a/internal/write/backup/doc.go +++ /dev/null @@ -1,35 +0,0 @@ -// / ctx: https://ctx.ist -// ,'`./ do you remember? -// `.,'\ -// \ Copyright 2026-present Context contributors. -// SPDX-License-Identifier: Apache-2.0 - -// Package backup provides terminal output for the backup -// command (ctx backup). -// -// # Exported Functions -// -// [ResultLine] prints a single backup result line that -// includes the scope label (e.g. "project" or "global"), -// the archive file path, and the human-readable file -// size. When an SMB destination is configured, the line -// is extended with the remote copy path. -// -// [SkipEntry] writes a notice to an io.Writer when an -// optional archive entry is skipped because its source -// file does not exist on disk. 
This function accepts an -// io.Writer instead of *cobra.Command because it runs -// during archive assembly before command output is -// available. -// -// # Message Categories -// -// - Info: backup result with scope, path, and size -// - Skip: notice when an optional file is missing -// -// # Usage -// -// backup.ResultLine(cmd, "project", archivePath, -// fileSize, smbDest) -// backup.SkipEntry(w, "optional/file") -package backup diff --git a/internal/write/backup/skip.go b/internal/write/backup/skip.go deleted file mode 100644 index 0056f3b08..000000000 --- a/internal/write/backup/skip.go +++ /dev/null @@ -1,27 +0,0 @@ -// / ctx: https://ctx.ist -// ,'`./ do you remember? -// `.,'\\ -// \ Copyright 2026-present Context contributors. -// SPDX-License-Identifier: Apache-2.0 - -package backup - -import ( - "fmt" - "io" - - "github.com/ActiveMemory/ctx/internal/assets/read/desc" - "github.com/ActiveMemory/ctx/internal/config/embed/text" -) - -// SkipEntry writes a message indicating that an optional archive -// entry was skipped because its source file does not exist. -// -// Parameters: -// - w: output writer -// - prefix: entry prefix label -func SkipEntry(w io.Writer, prefix string) { - _, _ = fmt.Fprintf( - w, desc.Text(text.DescKeyWriteBackupSkipEntry), prefix, - ) -} diff --git a/internal/write/initialize/info.go b/internal/write/initialize/info.go index 23deb4dc7..abd86f8f5 100644 --- a/internal/write/initialize/info.go +++ b/internal/write/initialize/info.go @@ -152,6 +152,29 @@ func InfoNextSteps(cmd *cobra.Command) { cmd.Println(desc.Text(text.DescKeyWriteInitNextStepsBlock)) } +// InfoActivateHint prints the shell-activation block shown right +// after `ctx init` finishes. The block tells the user how to bind +// CTX_DIR for their shell so subsequent ctx commands resolve to the +// freshly-created context directory. 
+// +// Under the single-source-anchor resolution model +// (specs/single-source-context-anchor.md) this step is not +// optional: every non-exempt ctx command refuses to run without a +// declared CTX_DIR. The hint closes the loop for new users so +// `ctx init` → next command actually works. +// +// Parameters: +// - cmd: cobra command for output. +// - contextDir: absolute path to the just-created .context/ +// directory; used in the `export CTX_DIR=...` variant of the +// hint. The `eval "$(ctx activate)"` variant takes no arg +// under the single-source-anchor model and discovers the +// path itself. +func InfoActivateHint(cmd *cobra.Command, contextDir string) { + tpl := desc.Text(text.DescKeyWriteInitActivateHint) + cmd.Println(fmt.Sprintf(tpl, contextDir)) +} + // InfoWorkflowTips prints the workflow tips block showing key skills // and the ceremony loop. // diff --git a/site/404.html b/site/404.html index 9994e1c82..948fd4102 100644 --- a/site/404.html +++ b/site/404.html @@ -241,13 +241,6 @@ - - - - - - -
    • @@ -259,9 +252,6 @@ - - - @@ -535,6 +525,10 @@ + + + + @@ -564,14 +558,12 @@ - - - - + + @@ -589,8 +581,6 @@ - -
    • @@ -898,6 +888,8 @@ + + diff --git a/site/blog/2026-01-27-building-ctx-using-ctx/index.html b/site/blog/2026-01-27-building-ctx-using-ctx/index.html index 80f28be38..84b6099e3 100644 --- a/site/blog/2026-01-27-building-ctx-using-ctx/index.html +++ b/site/blog/2026-01-27-building-ctx-using-ctx/index.html @@ -248,13 +248,6 @@ - - - - - - -
    • @@ -266,9 +259,6 @@ - - - @@ -542,6 +532,10 @@ + + + + @@ -571,14 +565,12 @@ - - - - + + @@ -596,8 +588,6 @@ - -
    • @@ -905,6 +895,8 @@ + + diff --git a/site/blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/index.html b/site/blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/index.html index 4a90be519..7f46e67fa 100644 --- a/site/blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/index.html +++ b/site/blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/index.html @@ -248,13 +248,6 @@ - - - - - - -
    • @@ -266,9 +259,6 @@ - - - @@ -542,6 +532,10 @@ + + + + @@ -571,14 +565,12 @@ - - - - + + @@ -596,8 +588,6 @@ - -
    • @@ -905,6 +895,8 @@ + + diff --git a/site/blog/2026-02-01-refactoring-with-intent/index.html b/site/blog/2026-02-01-refactoring-with-intent/index.html index b95fdabe2..7022e3d3b 100644 --- a/site/blog/2026-02-01-refactoring-with-intent/index.html +++ b/site/blog/2026-02-01-refactoring-with-intent/index.html @@ -248,13 +248,6 @@ - - - - - - -
    • @@ -266,9 +259,6 @@ - - - @@ -542,6 +532,10 @@ + + + + @@ -571,14 +565,12 @@ - - - - + + @@ -596,8 +588,6 @@ - -
    • @@ -905,6 +895,8 @@ + + diff --git a/site/blog/2026-02-03-the-attention-budget/index.html b/site/blog/2026-02-03-the-attention-budget/index.html index ccd4041e2..bebda2270 100644 --- a/site/blog/2026-02-03-the-attention-budget/index.html +++ b/site/blog/2026-02-03-the-attention-budget/index.html @@ -248,13 +248,6 @@ - - - - - - -
    • @@ -266,9 +259,6 @@ - - - @@ -542,6 +532,10 @@ + + + + @@ -571,14 +565,12 @@ - - - - + + @@ -596,8 +588,6 @@ - -
    • @@ -905,6 +895,8 @@ + + diff --git a/site/blog/2026-02-04-skills-that-fight-the-platform/index.html b/site/blog/2026-02-04-skills-that-fight-the-platform/index.html index fad083a00..181592491 100644 --- a/site/blog/2026-02-04-skills-that-fight-the-platform/index.html +++ b/site/blog/2026-02-04-skills-that-fight-the-platform/index.html @@ -248,13 +248,6 @@ - - - - - - -
    • @@ -266,9 +259,6 @@ - - - @@ -542,6 +532,10 @@ + + + + @@ -571,14 +565,12 @@ - - - - + + @@ -596,8 +588,6 @@ - -
    • @@ -905,6 +895,8 @@ + + diff --git a/site/blog/2026-02-05-you-cant-import-expertise/index.html b/site/blog/2026-02-05-you-cant-import-expertise/index.html index a37dd55d1..a5adc7a9a 100644 --- a/site/blog/2026-02-05-you-cant-import-expertise/index.html +++ b/site/blog/2026-02-05-you-cant-import-expertise/index.html @@ -248,13 +248,6 @@ - - - - - - -
    • @@ -266,9 +259,6 @@ - - - @@ -542,6 +532,10 @@ + + + + @@ -571,14 +565,12 @@ - - - - + + @@ -596,8 +588,6 @@ - -
    • @@ -905,6 +895,8 @@ + + diff --git a/site/blog/2026-02-07-the-anatomy-of-a-skill-that-works/index.html b/site/blog/2026-02-07-the-anatomy-of-a-skill-that-works/index.html index 23b038b02..4542edec7 100644 --- a/site/blog/2026-02-07-the-anatomy-of-a-skill-that-works/index.html +++ b/site/blog/2026-02-07-the-anatomy-of-a-skill-that-works/index.html @@ -248,13 +248,6 @@ - - - - - - -
    • @@ -266,9 +259,6 @@ - - - @@ -542,6 +532,10 @@ + + + + @@ -571,14 +565,12 @@ - - - - + + @@ -596,8 +588,6 @@ - -
    • @@ -905,6 +895,8 @@ + + diff --git a/site/blog/2026-02-08-not-everything-is-a-skill/index.html b/site/blog/2026-02-08-not-everything-is-a-skill/index.html index 798660260..c26c87d45 100644 --- a/site/blog/2026-02-08-not-everything-is-a-skill/index.html +++ b/site/blog/2026-02-08-not-everything-is-a-skill/index.html @@ -248,13 +248,6 @@ - - - - - - -
    • @@ -266,9 +259,6 @@ - - - @@ -542,6 +532,10 @@ + + + + @@ -571,14 +565,12 @@ - - - - + + @@ -596,8 +588,6 @@ - -
    • @@ -905,6 +895,8 @@ + + diff --git a/site/blog/2026-02-09-defense-in-depth-securing-ai-agents/index.html b/site/blog/2026-02-09-defense-in-depth-securing-ai-agents/index.html index e36490550..27d6c3254 100644 --- a/site/blog/2026-02-09-defense-in-depth-securing-ai-agents/index.html +++ b/site/blog/2026-02-09-defense-in-depth-securing-ai-agents/index.html @@ -248,13 +248,6 @@ - - - - - - -
    • @@ -266,9 +259,6 @@ - - - @@ -542,6 +532,10 @@ + + + + @@ -571,14 +565,12 @@ - - - - + + @@ -596,8 +588,6 @@ - -
    • @@ -905,6 +895,8 @@ + + diff --git a/site/blog/2026-02-12-how-deep-is-too-deep/index.html b/site/blog/2026-02-12-how-deep-is-too-deep/index.html index 6b72723a3..4f451b27a 100644 --- a/site/blog/2026-02-12-how-deep-is-too-deep/index.html +++ b/site/blog/2026-02-12-how-deep-is-too-deep/index.html @@ -248,13 +248,6 @@ - - - - - - -
    • @@ -266,9 +259,6 @@ - - - @@ -542,6 +532,10 @@ + + + + @@ -571,14 +565,12 @@ - - - - + + @@ -596,8 +588,6 @@ - -
    • @@ -905,6 +895,8 @@ + + diff --git a/site/blog/2026-02-14-irc-as-context/index.html b/site/blog/2026-02-14-irc-as-context/index.html index fa466ee09..c6a4272b1 100644 --- a/site/blog/2026-02-14-irc-as-context/index.html +++ b/site/blog/2026-02-14-irc-as-context/index.html @@ -248,13 +248,6 @@ - - - - - - -
    • @@ -266,9 +259,6 @@ - - - @@ -542,6 +532,10 @@ + + + + @@ -571,14 +565,12 @@ - - - - + + @@ -596,8 +588,6 @@ - -
    • @@ -905,6 +895,8 @@ + + diff --git a/site/blog/2026-02-14-parallel-agents-with-worktrees/index.html b/site/blog/2026-02-14-parallel-agents-with-worktrees/index.html index 62c8ec278..75e12ad78 100644 --- a/site/blog/2026-02-14-parallel-agents-with-worktrees/index.html +++ b/site/blog/2026-02-14-parallel-agents-with-worktrees/index.html @@ -248,13 +248,6 @@ - - - - - - -
    • @@ -266,9 +259,6 @@ - - - @@ -542,6 +532,10 @@ + + + + @@ -571,14 +565,12 @@ - - - - + + @@ -596,8 +588,6 @@ - -
    • @@ -905,6 +895,8 @@ + + diff --git a/site/blog/2026-02-15-ctx-v0.3.0-the-discipline-release/index.html b/site/blog/2026-02-15-ctx-v0.3.0-the-discipline-release/index.html index c1a879383..f9fc5fd86 100644 --- a/site/blog/2026-02-15-ctx-v0.3.0-the-discipline-release/index.html +++ b/site/blog/2026-02-15-ctx-v0.3.0-the-discipline-release/index.html @@ -248,13 +248,6 @@ - - - - - - -
    • @@ -266,9 +259,6 @@ - - - @@ -542,6 +532,10 @@ + + + + @@ -571,14 +565,12 @@ - - - - + + @@ -596,8 +588,6 @@ - -
    • @@ -905,6 +895,8 @@ + + @@ -1359,12 +1351,12 @@

      The Numbers

    - + - + diff --git a/site/blog/2026-02-15-eight-ways-a-hook-can-talk/index.html b/site/blog/2026-02-15-eight-ways-a-hook-can-talk/index.html index 37930edd6..26f122871 100644 --- a/site/blog/2026-02-15-eight-ways-a-hook-can-talk/index.html +++ b/site/blog/2026-02-15-eight-ways-a-hook-can-talk/index.html @@ -248,13 +248,6 @@ - - - - - - -
  • @@ -266,9 +259,6 @@ - - - @@ -542,6 +532,10 @@ + + + + @@ -571,14 +565,12 @@ - - - - + + @@ -596,8 +588,6 @@ - -
  • @@ -905,6 +895,8 @@ + + diff --git a/site/blog/2026-02-15-why-zensical/index.html b/site/blog/2026-02-15-why-zensical/index.html index 7e712922f..6db1022b7 100644 --- a/site/blog/2026-02-15-why-zensical/index.html +++ b/site/blog/2026-02-15-why-zensical/index.html @@ -248,13 +248,6 @@ - - - - - - -
  • @@ -266,9 +259,6 @@ - - - @@ -542,6 +532,10 @@ + + + + @@ -571,14 +565,12 @@ - - - - + + @@ -596,8 +588,6 @@ - -
  • @@ -905,6 +895,8 @@ + + diff --git a/site/blog/2026-02-16-ctx-v0.6.0-the-integration-release/index.html b/site/blog/2026-02-16-ctx-v0.6.0-the-integration-release/index.html index 0189c475b..28c58c67b 100644 --- a/site/blog/2026-02-16-ctx-v0.6.0-the-integration-release/index.html +++ b/site/blog/2026-02-16-ctx-v0.6.0-the-integration-release/index.html @@ -248,13 +248,6 @@ - - - - - - -
  • @@ -266,9 +259,6 @@ - - - @@ -542,6 +532,10 @@ + + + + @@ -571,14 +565,12 @@ - - - - + + @@ -596,8 +588,6 @@ - -
  • @@ -905,6 +895,8 @@ + + diff --git a/site/blog/2026-02-17-code-is-cheap-judgment-is-not/index.html b/site/blog/2026-02-17-code-is-cheap-judgment-is-not/index.html index 64692b6e7..39f9ac4a5 100644 --- a/site/blog/2026-02-17-code-is-cheap-judgment-is-not/index.html +++ b/site/blog/2026-02-17-code-is-cheap-judgment-is-not/index.html @@ -248,13 +248,6 @@ - - - - - - -
  • @@ -266,9 +259,6 @@ - - - @@ -542,6 +532,10 @@ + + + + @@ -571,14 +565,12 @@ - - - - + + @@ -596,8 +588,6 @@ - -
  • @@ -905,6 +895,8 @@ + + diff --git a/site/blog/2026-02-17-context-as-infrastructure/index.html b/site/blog/2026-02-17-context-as-infrastructure/index.html index a191ea270..52c149339 100644 --- a/site/blog/2026-02-17-context-as-infrastructure/index.html +++ b/site/blog/2026-02-17-context-as-infrastructure/index.html @@ -248,13 +248,6 @@ - - - - - - -
  • @@ -266,9 +259,6 @@ - - - @@ -542,6 +532,10 @@ + + + + @@ -571,14 +565,12 @@ - - - - + + @@ -596,8 +588,6 @@ - -
  • @@ -905,6 +895,8 @@ + + diff --git a/site/blog/2026-02-17-parallel-agents-merge-debt-and-the-myth-of-overnight-progress/index.html b/site/blog/2026-02-17-parallel-agents-merge-debt-and-the-myth-of-overnight-progress/index.html index 7674d7591..56627183a 100644 --- a/site/blog/2026-02-17-parallel-agents-merge-debt-and-the-myth-of-overnight-progress/index.html +++ b/site/blog/2026-02-17-parallel-agents-merge-debt-and-the-myth-of-overnight-progress/index.html @@ -248,13 +248,6 @@ - - - - - - -
  • @@ -266,9 +259,6 @@ - - - @@ -542,6 +532,10 @@ + + + + @@ -571,14 +565,12 @@ - - - - + + @@ -596,8 +588,6 @@ - -
  • @@ -905,6 +895,8 @@ + + diff --git a/site/blog/2026-02-17-the-3-1-ratio/index.html b/site/blog/2026-02-17-the-3-1-ratio/index.html index 3be0ec1f2..7cdc954c8 100644 --- a/site/blog/2026-02-17-the-3-1-ratio/index.html +++ b/site/blog/2026-02-17-the-3-1-ratio/index.html @@ -248,13 +248,6 @@ - - - - - - -
  • @@ -266,9 +259,6 @@ - - - @@ -542,6 +532,10 @@ + + + + @@ -571,14 +565,12 @@ - - - - + + @@ -596,8 +588,6 @@ - -
  • @@ -905,6 +895,8 @@ + + diff --git a/site/blog/2026-02-17-when-a-system-starts-explaining-itself/index.html b/site/blog/2026-02-17-when-a-system-starts-explaining-itself/index.html index e80ade7af..47cf9982b 100644 --- a/site/blog/2026-02-17-when-a-system-starts-explaining-itself/index.html +++ b/site/blog/2026-02-17-when-a-system-starts-explaining-itself/index.html @@ -248,13 +248,6 @@ - - - - - - -
  • @@ -266,9 +259,6 @@ - - - @@ -542,6 +532,10 @@ + + + + @@ -571,14 +565,12 @@ - - - - + + @@ -596,8 +588,6 @@ - -
  • @@ -905,6 +895,8 @@ + + diff --git a/site/blog/2026-02-25-the-homework-problem/index.html b/site/blog/2026-02-25-the-homework-problem/index.html index 83a865a7b..2181990ba 100644 --- a/site/blog/2026-02-25-the-homework-problem/index.html +++ b/site/blog/2026-02-25-the-homework-problem/index.html @@ -248,13 +248,6 @@ - - - - - - -
  • @@ -266,9 +259,6 @@ - - - @@ -542,6 +532,10 @@ + + + + @@ -571,14 +565,12 @@ - - - - + + @@ -596,8 +588,6 @@ - -
  • @@ -905,6 +895,8 @@ + + diff --git a/site/blog/2026-02-28-the-last-question/index.html b/site/blog/2026-02-28-the-last-question/index.html index 6564512b3..5101e1bd7 100644 --- a/site/blog/2026-02-28-the-last-question/index.html +++ b/site/blog/2026-02-28-the-last-question/index.html @@ -248,13 +248,6 @@ - - - - - - -
  • @@ -266,9 +259,6 @@ - - - @@ -542,6 +532,10 @@ + + + + @@ -571,14 +565,12 @@ - - - - + + @@ -596,8 +588,6 @@ - -
  • @@ -905,6 +895,8 @@ + + diff --git a/site/blog/2026-03-04-agent-memory-is-infrastructure/index.html b/site/blog/2026-03-04-agent-memory-is-infrastructure/index.html index 2332f73fb..b2ecb3cb2 100644 --- a/site/blog/2026-03-04-agent-memory-is-infrastructure/index.html +++ b/site/blog/2026-03-04-agent-memory-is-infrastructure/index.html @@ -248,13 +248,6 @@ - - - - - - -
  • @@ -266,9 +259,6 @@ - - - @@ -542,6 +532,10 @@ + + + + @@ -571,14 +565,12 @@ - - - - + + @@ -596,8 +588,6 @@ - -
  • @@ -905,6 +895,8 @@ + + diff --git a/site/blog/2026-03-23-ctx-v0.8.0-the-architecture-release/index.html b/site/blog/2026-03-23-ctx-v0.8.0-the-architecture-release/index.html index 048c4e77f..648a8ddd1 100644 --- a/site/blog/2026-03-23-ctx-v0.8.0-the-architecture-release/index.html +++ b/site/blog/2026-03-23-ctx-v0.8.0-the-architecture-release/index.html @@ -248,13 +248,6 @@ - - - - - - -
  • @@ -266,9 +259,6 @@ - - - @@ -542,6 +532,10 @@ + + + + @@ -571,14 +565,12 @@ - - - - + + @@ -596,8 +588,6 @@ - -
  • @@ -905,6 +895,8 @@ + + diff --git a/site/blog/2026-03-23-we-broke-the-3-1-rule/index.html b/site/blog/2026-03-23-we-broke-the-3-1-rule/index.html index 67cecc93a..d3bfed1c9 100644 --- a/site/blog/2026-03-23-we-broke-the-3-1-rule/index.html +++ b/site/blog/2026-03-23-we-broke-the-3-1-rule/index.html @@ -248,13 +248,6 @@ - - - - - - -
  • @@ -266,9 +259,6 @@ - - - @@ -542,6 +532,10 @@ + + + + @@ -571,14 +565,12 @@ - - - - + + @@ -596,8 +588,6 @@ - -
  • @@ -905,6 +895,8 @@ + + diff --git a/site/blog/2026-04-02-code-structure-as-an-agent-interface/index.html b/site/blog/2026-04-02-code-structure-as-an-agent-interface/index.html index 0a8b4a404..62c6384e8 100644 --- a/site/blog/2026-04-02-code-structure-as-an-agent-interface/index.html +++ b/site/blog/2026-04-02-code-structure-as-an-agent-interface/index.html @@ -248,13 +248,6 @@ - - - - - - -
  • @@ -266,9 +259,6 @@ - - - @@ -542,6 +532,10 @@ + + + + @@ -571,14 +565,12 @@ - - - - + + @@ -596,8 +588,6 @@ - -
  • @@ -905,6 +895,8 @@ + + diff --git a/site/blog/2026-04-06-the-watermelon-rind-anti-pattern/index.html b/site/blog/2026-04-06-the-watermelon-rind-anti-pattern/index.html index 05f92990f..6e1759b93 100644 --- a/site/blog/2026-04-06-the-watermelon-rind-anti-pattern/index.html +++ b/site/blog/2026-04-06-the-watermelon-rind-anti-pattern/index.html @@ -248,13 +248,6 @@ - - - - - - -
  • @@ -266,9 +259,6 @@ - - - @@ -542,6 +532,10 @@ + + + + @@ -571,14 +565,12 @@ - - - - + + @@ -596,8 +588,6 @@ - -
  • @@ -905,6 +895,8 @@ + + diff --git a/site/blog/index.html b/site/blog/index.html index f579409fd..3285ecef4 100644 --- a/site/blog/index.html +++ b/site/blog/index.html @@ -254,13 +254,6 @@ - - - - - - -
  • @@ -272,9 +265,6 @@ - - - @@ -565,6 +555,10 @@ + + + + @@ -594,14 +588,12 @@ - - - - + + @@ -619,8 +611,6 @@ - -
  • @@ -928,6 +918,8 @@ + + diff --git a/site/cli/bootstrap/index.html b/site/cli/bootstrap/index.html index 0a8edeffe..5feff780a 100644 --- a/site/cli/bootstrap/index.html +++ b/site/cli/bootstrap/index.html @@ -248,13 +248,6 @@ - - - - - - -
  • @@ -266,9 +259,6 @@ - - - @@ -542,6 +532,10 @@ + + + + @@ -571,14 +565,12 @@ - - - - + + @@ -596,8 +588,6 @@ - -
  • @@ -905,6 +895,8 @@ + + @@ -1053,9 +1045,9 @@

    ctx system bootstrapctx system bootstrap -q # Just the context directory path ctx system bootstrap --json # Structured output for automation -

    Scripting tip: CTX_DIR=$(ctx system bootstrap -q) is the -canonical way for skills and scripts to find the project's context -directory without hardcoding .context/.

    +

    Note: -q prints just the resolved directory path. See +Activating a Context Directory +if you hit a "no context directory specified" error.

    diff --git a/site/cli/change/index.html b/site/cli/change/index.html index 73b4b3cc6..f41ccb241 100644 --- a/site/cli/change/index.html +++ b/site/cli/change/index.html @@ -252,13 +252,6 @@ - - - - - - -
  • @@ -270,9 +263,6 @@ - - - @@ -548,6 +538,10 @@ + + + + @@ -577,14 +571,12 @@ - - - - + + @@ -602,8 +594,6 @@ - -
  • @@ -1295,8 +1285,6 @@ - - @@ -1564,6 +1552,8 @@ + + diff --git a/site/cli/completion/index.html b/site/cli/completion/index.html index d9c7faa14..c328ff46b 100644 --- a/site/cli/completion/index.html +++ b/site/cli/completion/index.html @@ -252,13 +252,6 @@ - - - - - - -
  • @@ -270,9 +263,6 @@ - - - @@ -548,6 +538,10 @@ + + + + @@ -577,14 +571,12 @@ - - - - + + @@ -602,8 +594,6 @@ - -
  • @@ -1108,8 +1098,6 @@ - - @@ -1502,6 +1490,8 @@ + + diff --git a/site/cli/config/index.html b/site/cli/config/index.html index dcef70eef..cf6d4053d 100644 --- a/site/cli/config/index.html +++ b/site/cli/config/index.html @@ -18,7 +18,7 @@ - + @@ -252,13 +252,6 @@ - - - - - - -
  • @@ -270,9 +263,6 @@ - - - @@ -548,6 +538,10 @@ + + + + @@ -577,14 +571,12 @@ - - - - + + @@ -602,8 +594,6 @@ - -
  • @@ -1110,8 +1100,6 @@ - - @@ -1281,36 +1269,6 @@ -
  • - - - - - - - - - - Backup - - - - - - - - -
  • - - - - - - - - - -
  • @@ -1622,6 +1580,8 @@ + + @@ -1954,13 +1914,13 @@

    ctx config status + -

    Commands that work before initialization: ctx init, ctx setup, -ctx doctor, and grouping commands that only show help.

    +

    Context declaration required. ctx does not walk the filesystem +looking for .context/. Every non-exempt command requires CTX_DIR +to be declared explicitly before it runs. The single declaration +channel is the environment variable:

    +
      +
    • eval "$(ctx activate)": binds CTX_DIR for the current shell.
    • +
    • CTX_DIR=/abs/path/to/.context exported in the environment, or + inlined as CTX_DIR=/abs/path/to/.context ctx <command> for a + one-shot.
    • +
    +

    CTX_DIR must be an absolute path with .context as its basename. +Relative paths and other names are rejected on first use; the +basename guard catches the common footgun +(export CTX_DIR=$(pwd)) before stray writes can leak to the +project root.

    +

    Commands fail fast with a linkable error +(see Activating a Context Directory) +when none is declared. The exempt allowlist (commands that run without +a declared context directory) is: ctx init, ctx activate, +ctx deactivate, ctx version, ctx help, ctx system bootstrap, +ctx doctor, ctx guide, ctx why, ctx config switch/status, +ctx hub *.

    +

    Initialization required. Once declared, the target must already +have been initialized by ctx init (otherwise commands return +ctx: not initialized).

    Getting Started

  • Documentation commits--n/a 35+
    Feature/fix commits--n/a ~15
    @@ -1707,6 +1708,14 @@

    Getting Startedctx activate +

    + + + + + + @@ -1918,10 +1927,6 @@

    Runtimectx backup -

    - - @@ -2000,14 +2005,6 @@

    Environment VariablesEnvironment Variables

    Configuration File

    Optional .ctxrc (YAML format) at project root:

    -
    # .ctxrc
    -context_dir: .context        # Context directory name
    -token_budget: 8000           # Default token budget
    -priority_order:              # File loading priority
    -  - TASKS.md
    -  - DECISIONS.md
    -  - CONVENTIONS.md
    -auto_archive: true           # Auto-archive old items
    -archive_after_days: 7        # Days before archiving tasks
    -scratchpad_encrypt: true     # Encrypt scratchpad (default: true)
    -allow_outside_cwd: false     # Skip boundary check (default: false)
    -event_log: false             # Enable local hook event logging
    -companion_check: true        # Check companion tools at session start
    -entry_count_learnings: 30    # Drift warning threshold (0 = disable)
    -entry_count_decisions: 20    # Drift warning threshold (0 = disable)
    -convention_line_count: 200   # Line count warning for CONVENTIONS.md (0 = disable)
    -injection_token_warn: 15000  # Oversize injection warning (0 = disable)
    -context_window: 200000       # Auto-detected for Claude Code; override for other tools
    -billing_token_warn: 0        # One-shot billing warning at this token count (0 = disabled)
    -key_rotation_days: 90        # Days before key rotation nudge
    -session_prefixes:            # Recognized session header prefixes (extend for i18n)
    -  - "Session:"               # English (default)
    -  # - "Oturum:"              # Turkish (add as needed)
    -  # - "セッション:"             # Japanese (add as needed)
    -freshness_files:             # Files with technology-dependent constants (opt-in)
    -  - path: config/thresholds.yaml
    -    desc: Model token limits and batch sizes
    -    review_url: https://docs.example.com/limits  # Optional
    -notify:                      # Webhook notification settings
    -  events:                    # Required: only listed events fire
    -    - loop
    -    - nudge
    -    - relay
    -    # - heartbeat            # Every-prompt session-alive signal
    -tool: ""                     # Active AI tool: claude, cursor, cline, kiro, codex
    -steering:                    # Steering layer configuration
    -  dir: .context/steering     # Steering files directory
    -  default_inclusion: manual  # Default inclusion mode (always, auto, manual)
    -  default_tools: []          # Default tool filter for new steering files
    -hooks:                       # Hook system configuration
    -  dir: .context/hooks        # Hook scripts directory
    -  timeout: 10                # Per-hook execution timeout in seconds
    -  enabled: true              # Whether hook execution is enabled
    +
    # .ctxrc
    +token_budget: 8000           # Default token budget
    +priority_order:              # File loading priority
    +  - TASKS.md
    +  - DECISIONS.md
    +  - CONVENTIONS.md
    +auto_archive: true           # Auto-archive old items
    +archive_after_days: 7        # Days before archiving tasks
    +scratchpad_encrypt: true     # Encrypt scratchpad (default: true)
    +event_log: false             # Enable local hook event logging
    +companion_check: true        # Check companion tools at session start
    +entry_count_learnings: 30    # Drift warning threshold (0 = disable)
    +entry_count_decisions: 20    # Drift warning threshold (0 = disable)
    +convention_line_count: 200   # Line count warning for CONVENTIONS.md (0 = disable)
    +injection_token_warn: 15000  # Oversize injection warning (0 = disable)
    +context_window: 200000       # Auto-detected for Claude Code; override for other tools
    +billing_token_warn: 0        # One-shot billing warning at this token count (0 = disabled)
    +key_rotation_days: 90        # Days before key rotation nudge
    +session_prefixes:            # Recognized session header prefixes (extend for i18n)
    +  - "Session:"               # English (default)
    +  # - "Oturum:"              # Turkish (add as needed)
    +  # - "セッション:"             # Japanese (add as needed)
    +freshness_files:             # Files with technology-dependent constants (opt-in)
    +  - path: config/thresholds.yaml
    +    desc: Model token limits and batch sizes
    +    review_url: https://docs.example.com/limits  # Optional
    +notify:                      # Webhook notification settings
    +  events:                    # Required: only listed events fire
    +    - loop
    +    - nudge
    +    - relay
    +    # - heartbeat            # Every-prompt session-alive signal
    +tool: ""                     # Active AI tool: claude, cursor, cline, kiro, codex
    +steering:                    # Steering layer configuration
    +  dir: .context/steering     # Steering files directory
    +  default_inclusion: manual  # Default inclusion mode (always, auto, manual)
    +  default_tools: []          # Default tool filter for new steering files
    +hooks:                       # Hook system configuration
    +  dir: .context/hooks        # Hook scripts directory
    +  timeout: 10                # Per-hook execution timeout in seconds
    +  enabled: true              # Whether hook execution is enabled
     

    Emit export CTX_DIR=... to bind context for the shell
    ctx deactivateEmit unset CTX_DIR to clear the binding
    ctx status Show context summary (files, tokens, drift)
    Back up context and Claude data to tar.gz / SMB
    ctx prune Clean stale per-session state files
    @@ -2071,12 +2066,6 @@

    Configuration FileConfiguration File @@ -270,9 +263,6 @@ - - - @@ -548,6 +538,10 @@ + + + + @@ -577,14 +571,12 @@ - - - - + + @@ -602,8 +594,6 @@ - - @@ -908,6 +898,28 @@ + + +
  • + + + + ctx activate + + + + +
  • + +
  • + + + + ctx deactivate + + + +
  • @@ -1298,8 +1310,6 @@ - - @@ -1567,6 +1577,8 @@ + + @@ -1650,6 +1662,28 @@ +
  • + +
  • + + + + ctx activate + + + + +
  • + +
  • + + + + ctx deactivate + + + +
  • @@ -1824,10 +1858,99 @@

    ctx init# Merge into existing files ctx init --merge +

    After ctx init succeeds, the final output includes a hint showing +the exact eval "$(ctx activate)" line to bind the new directory +for your shell. Every other ctx command requires that binding +(or an equivalent direct CTX_DIR=/abs/path/.context export) before +it will run.

    +
    +

    ctx activate

    +

    Emit a shell-native export CTX_DIR=... line for the target +.context/ directory. ctx does not walk the filesystem during +operating commands; every non-exempt command requires CTX_DIR +set before it will run. activate is the convenience that figures +out the path and lets you bind it with one line.

    +
    # Walk up from CWD, emit if exactly one candidate visible.
    +eval "$(ctx activate)"
    +
    +

    Flags:

    +
  • + + + + + + + + + + + + +
    FlagDescription
    --shellShell dialect override. POSIX-family (bash, zsh, sh) all share one syntax today; the flag exists for future fish/nushell/powershell support. Auto-detected from $SHELL.
    +

    Resolution:

    + + + + + + + + + + + + + + + + + + + + + +
    Candidate count from CWDBehavior
    ZeroError. Use ctx init to create one, or cd closer to the project root.
    OneEmit export CTX_DIR=<path> for that candidate.
    Two or moreRefuse. List every candidate. Re-run from a more specific cwd.
    +

    activate is args-free under the single-source-anchor model; the +explicit-path mode was removed because hub-client / hub-server +scenarios store at ~/.ctx/hub-data/ and never read .context/, +so they activate from the project root like everyone else. Direct +binding without a project-local scan is still available via +export CTX_DIR=/abs/path/.context or the inline form.

    +

    If the parent shell already has CTX_DIR set to a different value, +the output gains a leading # ctx: replacing stale CTX_DIR=... +comment so the user sees the change in eval output before the +replacement takes effect.

    +

    See also: Activating a Context Directory +for the full recipe including direnv setup and CI patterns.

    +
    +

    ctx deactivate

    +

    Emit a shell-native unset CTX_DIR line. Pairs with activate.

    +
    eval "$(ctx deactivate)"
    +
    +

    Flags:

    + + + + + + + + + + + + + +
    FlagDescription
    --shellShell dialect override. POSIX-family (bash, zsh, sh) all share one unset syntax today; the flag exists for future fish/nushell/powershell support. Auto-detected from $SHELL.
    +

    deactivate does not touch the filesystem, doesn't require a +declared context directory, and never fails under normal operation; +unsetting an already-unset variable is a no-op across supported +shells.


    ctx status

    Show the current context summary.

    -
    ctx status [flags]
    +
    ctx status [flags]
     

    Flags:

    @@ -1860,14 +1983,14 @@

    ctx status
    ctx status
    -ctx status --json
    -ctx status --verbose
    +
    ctx status
    +ctx status --json
    +ctx status --verbose
     

    ctx agent

    Print an AI-ready context packet optimized for LLM consumption.

    -
    ctx agent [flags]
    +
    ctx agent [flags]
     

    Flags:

    @@ -1974,26 +2097,26 @@

    ctx agent

    Example:

    -
    # Default (8000 tokens, markdown)
    -ctx agent
    -
    -# Smaller packet for tight context windows
    -ctx agent --budget 4000
    -
    -# JSON format for programmatic use
    -ctx agent --format json
    -
    -# Pipe to file
    -ctx agent --budget 4000 > context.md
    -
    -# With cooldown (hooks/automation: requires --session)
    -ctx agent --session $PPID
    +
    # Default (8000 tokens, markdown)
    +ctx agent
    +
    +# Smaller packet for tight context windows
    +ctx agent --budget 4000
    +
    +# JSON format for programmatic use
    +ctx agent --format json
    +
    +# Pipe to file
    +ctx agent --budget 4000 > context.md
    +
    +# With cooldown (hooks/automation: requires --session)
    +ctx agent --session $PPID
     

    Use case: Copy-paste into AI chat, pipe to system prompt, or use in hooks.


    ctx load

    Load and display assembled context as AI would see it.

    -
    ctx load [flags]
    +
    ctx load [flags]
     

    Flags:

    @@ -2015,9 +2138,9 @@

    ctx load

    Example:

    -
    ctx load
    -ctx load --budget 16000
    -ctx load --raw
    +
    ctx load
    +ctx load --budget 16000
    +ctx load --raw
     
    diff --git a/site/cli/journal/index.html b/site/cli/journal/index.html index 7a38c7d9d..e9b075a00 100644 --- a/site/cli/journal/index.html +++ b/site/cli/journal/index.html @@ -252,13 +252,6 @@ - - - - - - -
  • @@ -270,9 +263,6 @@ - - - @@ -548,6 +538,10 @@ + + + + @@ -577,14 +571,12 @@ - - - - + + @@ -602,8 +594,6 @@ - -
  • @@ -1469,8 +1459,6 @@ - - @@ -1738,6 +1726,8 @@ + + diff --git a/site/cli/loop/index.html b/site/cli/loop/index.html index 2d91fcad7..5124c9c09 100644 --- a/site/cli/loop/index.html +++ b/site/cli/loop/index.html @@ -252,13 +252,6 @@ - - - - - - -
  • @@ -270,9 +263,6 @@ - - - @@ -548,6 +538,10 @@ + + + + @@ -577,14 +571,12 @@ - - - - + + @@ -602,8 +594,6 @@ - -
  • @@ -1505,8 +1495,6 @@ - - @@ -1774,6 +1762,8 @@ + + diff --git a/site/cli/mcp/index.html b/site/cli/mcp/index.html index 2a00468cd..65aa97d2b 100644 --- a/site/cli/mcp/index.html +++ b/site/cli/mcp/index.html @@ -252,13 +252,6 @@ - - - - - - -
  • @@ -270,9 +263,6 @@ - - - @@ -548,6 +538,10 @@ + + + + @@ -577,14 +571,12 @@ - - - - + + @@ -602,8 +594,6 @@ - -
  • @@ -1854,8 +1844,6 @@ - - @@ -2123,6 +2111,8 @@ + + @@ -2664,14 +2654,15 @@

    ctx mcp serveConfiguration below for how each host launches it.

    -

    Flags: None. The server uses the configured context directory -(from --context-dir, CTX_DIR, .ctxrc, or the default .context).

    +

    Flags: None. The server uses the declared context directory +from CTX_DIR. As with every other ctx command, that variable +must be set: the server does not walk the filesystem.

    Examples:

    # Normal invocation (by an MCP client via stdio transport)
     ctx mcp serve
     
     # Pin a context directory for a specific workspace
    -ctx --context-dir /path/to/project/.context mcp serve
    +CTX_DIR=/path/to/project/.context ctx mcp serve
     
     # Verify the binary starts without a client attached (Ctrl-C to exit)
     ctx mcp serve < /dev/null
    diff --git a/site/cli/memory/index.html b/site/cli/memory/index.html
    index 15bcbae21..118ca3fc6 100644
    --- a/site/cli/memory/index.html
    +++ b/site/cli/memory/index.html
    @@ -252,13 +252,6 @@
       
         
         
    -      
    -  
    -  
    -  
    -  
    -    
    -    
           
  • @@ -270,9 +263,6 @@ - - - @@ -548,6 +538,10 @@ + + + + @@ -577,14 +571,12 @@ - - - - + + @@ -602,8 +594,6 @@ - -
  • @@ -1367,8 +1357,6 @@ - - @@ -1636,6 +1624,8 @@ + + diff --git a/site/cli/message/index.html b/site/cli/message/index.html index b7007b214..d465010c8 100644 --- a/site/cli/message/index.html +++ b/site/cli/message/index.html @@ -248,13 +248,6 @@ - - - - - - -
  • @@ -266,9 +259,6 @@ - - - @@ -542,6 +532,10 @@ + + + + @@ -571,14 +565,12 @@ - - - - + + @@ -596,8 +588,6 @@ - -
  • @@ -905,6 +895,8 @@ + + diff --git a/site/cli/notify/index.html b/site/cli/notify/index.html index 54ab55cea..0f214653b 100644 --- a/site/cli/notify/index.html +++ b/site/cli/notify/index.html @@ -252,13 +252,6 @@ - - - - - - -
  • @@ -270,9 +263,6 @@ - - - @@ -548,6 +538,10 @@ + + + + @@ -577,14 +571,12 @@ - - - - + + @@ -602,8 +594,6 @@ - -
  • @@ -1533,8 +1523,6 @@ - - @@ -1802,6 +1790,8 @@ + + diff --git a/site/cli/pad/index.html b/site/cli/pad/index.html index ce4aa3f56..b7c5b03e8 100644 --- a/site/cli/pad/index.html +++ b/site/cli/pad/index.html @@ -252,13 +252,6 @@ - - - - - - -
  • @@ -270,9 +263,6 @@ - - - @@ -548,6 +538,10 @@ + + + + @@ -577,14 +571,12 @@ - - - - + + @@ -602,8 +594,6 @@ - -
  • @@ -1441,8 +1431,6 @@ - - @@ -1710,6 +1698,8 @@ + + diff --git a/site/cli/pause/index.html b/site/cli/pause/index.html index c18ae4796..c72973f3f 100644 --- a/site/cli/pause/index.html +++ b/site/cli/pause/index.html @@ -252,13 +252,6 @@ - - - - - - -
  • @@ -270,9 +263,6 @@ - - - @@ -548,6 +538,10 @@ + + + + @@ -577,14 +571,12 @@ - - - - + + @@ -602,8 +594,6 @@ - -
  • @@ -1325,8 +1315,6 @@ - - @@ -1594,6 +1582,8 @@ + + diff --git a/site/cli/prune/index.html b/site/cli/prune/index.html index c36c8d80d..ae875f32b 100644 --- a/site/cli/prune/index.html +++ b/site/cli/prune/index.html @@ -15,7 +15,7 @@ - + @@ -252,13 +252,6 @@ - - - - - - -
  • @@ -270,9 +263,6 @@ - - - @@ -548,6 +538,10 @@ + + + + @@ -577,14 +571,12 @@ - - - - + + @@ -602,8 +594,6 @@ - -
  • @@ -1110,8 +1100,6 @@ - - @@ -1194,36 +1182,6 @@ - - -
  • - - - - - - - - - - Backup - - - - - - - - -
  • - - - - - - - - @@ -1594,6 +1552,8 @@ + + @@ -1843,7 +1803,7 @@

    ctx prune - + diff --git a/site/cli/remind/index.html b/site/cli/remind/index.html index 87ea5479e..d9ba5f912 100644 --- a/site/cli/remind/index.html +++ b/site/cli/remind/index.html @@ -252,13 +252,6 @@ - - - - - - -
  • @@ -270,9 +263,6 @@ - - - @@ -548,6 +538,10 @@ + + + + @@ -577,14 +571,12 @@ - - - - + + @@ -602,8 +594,6 @@ - -
  • @@ -1375,8 +1365,6 @@ - - @@ -1644,6 +1632,8 @@ + + diff --git a/site/cli/resume/index.html b/site/cli/resume/index.html index 5fec4f3ab..3b72d99e0 100644 --- a/site/cli/resume/index.html +++ b/site/cli/resume/index.html @@ -252,13 +252,6 @@ - - - - - - -
  • @@ -270,9 +263,6 @@ - - - @@ -548,6 +538,10 @@ + + + + @@ -577,14 +571,12 @@ - - - - + + @@ -602,8 +594,6 @@ - -
  • @@ -1325,8 +1315,6 @@ - - @@ -1594,6 +1582,8 @@ + + diff --git a/site/cli/serve/index.html b/site/cli/serve/index.html index 6cb62750f..63ff00148 100644 --- a/site/cli/serve/index.html +++ b/site/cli/serve/index.html @@ -252,13 +252,6 @@ - - - - - - -
  • @@ -270,9 +263,6 @@ - - - @@ -548,6 +538,10 @@ + + + + @@ -577,14 +571,12 @@ - - - - + + @@ -602,8 +594,6 @@ - -
  • @@ -1533,8 +1523,6 @@ - - @@ -1802,6 +1790,8 @@ + + diff --git a/site/cli/setup/index.html b/site/cli/setup/index.html index e64b098a1..c36c22bf0 100644 --- a/site/cli/setup/index.html +++ b/site/cli/setup/index.html @@ -252,13 +252,6 @@ - - - - - - -
  • @@ -270,9 +263,6 @@ - - - @@ -548,6 +538,10 @@ + + + + @@ -577,14 +571,12 @@ - - - - + + @@ -602,8 +594,6 @@ - -
  • @@ -1505,8 +1495,6 @@ - - @@ -1774,6 +1762,8 @@ + + diff --git a/site/cli/site/index.html b/site/cli/site/index.html index 4f05ddcf1..3f163f613 100644 --- a/site/cli/site/index.html +++ b/site/cli/site/index.html @@ -252,13 +252,6 @@ - - - - - - -
  • @@ -270,9 +263,6 @@ - - - @@ -548,6 +538,10 @@ + + + + @@ -577,14 +571,12 @@ - - - - + + @@ -602,8 +594,6 @@ - -
  • @@ -1522,8 +1512,6 @@ - - @@ -1791,6 +1779,8 @@ + + diff --git a/site/cli/skill/index.html b/site/cli/skill/index.html index 00a098baf..ccb0bd93c 100644 --- a/site/cli/skill/index.html +++ b/site/cli/skill/index.html @@ -252,13 +252,6 @@ - - - - - - -
  • @@ -270,9 +263,6 @@ - - - @@ -548,6 +538,10 @@ + + + + @@ -577,14 +571,12 @@ - - - - + + @@ -602,8 +594,6 @@ - -
  • @@ -1544,8 +1534,6 @@ - - @@ -1813,6 +1801,8 @@ + + diff --git a/site/cli/steering/index.html b/site/cli/steering/index.html index d9f94a206..4bd0505fd 100644 --- a/site/cli/steering/index.html +++ b/site/cli/steering/index.html @@ -252,13 +252,6 @@ - - - - - - -
  • @@ -270,9 +263,6 @@ - - - @@ -548,6 +538,10 @@ + + + + @@ -577,14 +571,12 @@ - - - - + + @@ -602,8 +594,6 @@ - -
  • @@ -1610,8 +1600,6 @@ - - @@ -1879,6 +1867,8 @@ + + diff --git a/site/cli/sysinfo/index.html b/site/cli/sysinfo/index.html index 5d11be19e..ee88d2b7c 100644 --- a/site/cli/sysinfo/index.html +++ b/site/cli/sysinfo/index.html @@ -252,13 +252,6 @@ - - - - - - -
  • @@ -270,9 +263,6 @@ - - - @@ -548,6 +538,10 @@ + + + + @@ -577,14 +571,12 @@ - - - - + + @@ -602,8 +594,6 @@ - -
  • @@ -1295,8 +1285,6 @@ - - @@ -1564,6 +1552,8 @@ + + diff --git a/site/cli/system/index.html b/site/cli/system/index.html index a495c7daa..2a4cc2a2e 100644 --- a/site/cli/system/index.html +++ b/site/cli/system/index.html @@ -252,13 +252,6 @@ - - - - - - -
  • @@ -270,9 +263,6 @@ - - - @@ -548,6 +538,10 @@ + + + + @@ -577,14 +571,12 @@ - - - - + + @@ -602,8 +594,6 @@ - -
  • @@ -1110,8 +1100,6 @@ - - @@ -1196,36 +1184,6 @@ -
  • - - - - - - - - - - Backup - - - - - - - - -
  • - - - - - - - - - -
  • @@ -1666,6 +1624,8 @@ + + @@ -1922,7 +1882,6 @@

    ctx systemHook Subcommands @@ -270,9 +263,6 @@ - - - @@ -548,6 +538,10 @@ + + + + @@ -577,14 +571,12 @@ - - - - + + @@ -602,8 +594,6 @@ - -

  • @@ -1350,8 +1340,6 @@ - - @@ -1619,6 +1607,8 @@ + + diff --git a/site/cli/trigger/index.html b/site/cli/trigger/index.html index 1db36210c..8022cf747 100644 --- a/site/cli/trigger/index.html +++ b/site/cli/trigger/index.html @@ -252,13 +252,6 @@ - - - - - - -
  • @@ -270,9 +263,6 @@ - - - @@ -548,6 +538,10 @@ + + + + @@ -577,14 +571,12 @@ - - - - + + @@ -602,8 +594,6 @@ - -
  • @@ -1621,8 +1611,6 @@ - - @@ -1890,6 +1878,8 @@ + + diff --git a/site/cli/usage/index.html b/site/cli/usage/index.html index 3c2f90b3d..4461a80bd 100644 --- a/site/cli/usage/index.html +++ b/site/cli/usage/index.html @@ -252,13 +252,6 @@ - - - - - - -
  • @@ -270,9 +263,6 @@ - - - @@ -548,6 +538,10 @@ + + + + @@ -577,14 +571,12 @@ - - - - + + @@ -602,8 +594,6 @@ - -
  • @@ -1295,8 +1285,6 @@ - - @@ -1564,6 +1552,8 @@ + + diff --git a/site/cli/watch/index.html b/site/cli/watch/index.html index 4db41b431..2dd6bdd3e 100644 --- a/site/cli/watch/index.html +++ b/site/cli/watch/index.html @@ -252,13 +252,6 @@ - - - - - - -
  • @@ -270,9 +263,6 @@ - - - @@ -548,6 +538,10 @@ + + + + @@ -577,14 +571,12 @@ - - - - + + @@ -602,8 +594,6 @@ - -
  • @@ -1295,8 +1285,6 @@ - - @@ -1564,6 +1552,8 @@ + + diff --git a/site/cli/why/index.html b/site/cli/why/index.html index 9ab411c22..583b58b49 100644 --- a/site/cli/why/index.html +++ b/site/cli/why/index.html @@ -252,13 +252,6 @@ - - - - - - -
  • @@ -270,9 +263,6 @@ - - - @@ -548,6 +538,10 @@ + + + + @@ -577,14 +571,12 @@ - - - - + + @@ -602,8 +594,6 @@ - -
  • @@ -1265,8 +1255,6 @@ - - @@ -1534,6 +1522,8 @@ + + diff --git a/site/home/about/index.html b/site/home/about/index.html index 57e9f7027..94666ae99 100644 --- a/site/home/about/index.html +++ b/site/home/about/index.html @@ -252,15 +252,6 @@ - - - - - - - - -
  • @@ -274,9 +265,6 @@ - - - @@ -552,6 +540,10 @@ + + + + @@ -582,9 +574,12 @@ -
  • diff --git a/site/home/configuration/index.html b/site/home/configuration/index.html index 712d1fb23..fec554e40 100644 --- a/site/home/configuration/index.html +++ b/site/home/configuration/index.html @@ -252,15 +252,6 @@ - - - - - - - - -
  • @@ -274,9 +265,6 @@ - - - @@ -552,6 +540,10 @@ + + + + @@ -582,9 +574,12 @@ -
  • @@ -2051,9 +2045,6 @@ - - - @@ -2111,9 +2102,12 @@

    Location│ └── ... └── src/

  • -

    ctx looks for .ctxrc in the current working directory when any command runs. -There is no global or user-level config file: Configuration is always -per-project.

    +

    ctx reads .ctxrc from the project root (i.e. the parent of +CTX_DIR, or dirname(CTX_DIR)/.ctxrc). It does not walk up from CWD. +That means whichever project you've activated via eval "$(ctx activate)" +(or by exporting CTX_DIR directly), its paired .ctxrc is what governs the +invocation. There is no global or user-level config file: configuration is +always per-project.

    Contributors: Dev Configuration Profile

    The ctx repo ships two .ctxrc source profiles (.ctxrc.base and @@ -2122,12 +2116,13 @@

    LocationContributing: Configuration Profiles.

    -

    Using a Different .Context Directory

    -

    The default .context/ directory can be changed per-project via the -context_dir key in .ctxrc, the CTX_DIR environment variable, or the ---context-dir CLI flag.

    -

    See Environment Variables -and CLI Global Flags below for details.

    +

    Using a Different .context Directory

    +

    The context directory is declared via the CTX_DIR environment variable; +not via .ctxrc. ctx does not walk the filesystem; every non-exempt +command requires CTX_DIR to be set. Use eval "$(ctx activate)" to +bind it for your shell. CTX_DIR must be an absolute path with +.context as its basename.

    +

    See Environment Variables below for details.

    Full Reference

    @@ -2138,56 +2133,54 @@

    Full Reference# All settings are optional. Missing values use defaults. # Priority: CLI flags > environment variables > .ctxrc > defaults # -# context_dir: .context -# token_budget: 8000 -# auto_archive: true -# archive_after_days: 7 -# scratchpad_encrypt: true -# allow_outside_cwd: false -# event_log: false -# entry_count_learnings: 30 -# entry_count_decisions: 20 -# convention_line_count: 200 -# injection_token_warn: 15000 -# context_window: 200000 # auto-detected for Claude Code; override for other tools -# billing_token_warn: 0 # one-shot warning at this token count (0 = disabled) -# -# stale_age_days: 30 # days before drift flags a context file as stale (0 = disabled) -# key_rotation_days: 90 -# task_nudge_interval: 5 # Edit/Write calls between task completion nudges -# -# notify: # requires: ctx hook notify setup -# events: # required: no events sent unless listed -# - loop -# - nudge -# - relay +# token_budget: 8000 +# auto_archive: true +# archive_after_days: 7 +# scratchpad_encrypt: true +# event_log: false +# entry_count_learnings: 30 +# entry_count_decisions: 20 +# convention_line_count: 200 +# injection_token_warn: 15000 +# context_window: 200000 # auto-detected for Claude Code; override for other tools +# billing_token_warn: 0 # one-shot warning at this token count (0 = disabled) +# +# stale_age_days: 30 # days before drift flags a context file as stale (0 = disabled) +# key_rotation_days: 90 +# task_nudge_interval: 5 # Edit/Write calls between task completion nudges +# +# notify: # requires: ctx hook notify setup +# events: # required: no events sent unless listed +# - loop +# - nudge +# - relay +# +# tool: "" # Active AI tool: claude, cursor, cline, kiro, codex # -# tool: "" # Active AI tool: claude, cursor, cline, kiro, codex -# -# steering: # Steering layer configuration -# dir: .context/steering -# default_inclusion: manual -# default_tools: [] -# -# hooks: # Hook system configuration -# dir: .context/hooks -# timeout: 10 -# enabled: true -# 
-# provenance_required: # Relax provenance flags for ctx add -# session_id: true # Require --session-id (default: true) -# branch: true # Require --branch (default: true) -# commit: true # Require --commit (default: true) -# -# priority_order: -# - CONSTITUTION.md -# - TASKS.md -# - CONVENTIONS.md -# - ARCHITECTURE.md -# - DECISIONS.md -# - LEARNINGS.md -# - GLOSSARY.md -# - AGENT_PLAYBOOK.md +# steering: # Steering layer configuration +# dir: .context/steering +# default_inclusion: manual +# default_tools: [] +# +# hooks: # Hook system configuration +# dir: .context/hooks +# timeout: 10 +# enabled: true +# +# provenance_required: # Relax provenance flags for ctx add +# session_id: true # Require --session-id (default: true) +# branch: true # Require --branch (default: true) +# commit: true # Require --commit (default: true) +# +# priority_order: +# - CONSTITUTION.md +# - TASKS.md +# - CONVENTIONS.md +# - ARCHITECTURE.md +# - DECISIONS.md +# - LEARNINGS.md +# - GLOSSARY.md +# - AGENT_PLAYBOOK.md

    Option Reference

    @@ -2202,12 +2195,6 @@

    Option ReferenceOption ReferenceEnvironment VariablesCLI Global FlagsCLI Global FlagsExamples

    -
    # Point to a different context directory:
    -ctx status --context-dir /path/to/shared/.context
    -
    -# Allow external context directory (skips boundary check):
    -ctx status --context-dir /mnt/nas/project-context --allow-outside-cwd
    +
    # Point to a different context directory inline:
    +CTX_DIR=/path/to/project/.context ctx status
     

    Priority Order

    @@ -2465,7 +2435,11 @@

    Priority Order
    CLI flags  >  Environment variables  >  .ctxrc  >  Built-in defaults
     (highest)                                          (lowest)
     

    -

    Example resolution for context_dir:

    +

    The context directory itself is resolved differently: it lives outside +this priority chain. CTX_DIR (env) must be declared; .ctxrc does not +carry a fallback for it, and there is no built-in default. See +Activating a Context Directory.

    +

    Example resolution for token_budget:

    @@ -2476,39 +2450,41 @@

    Priority OrderExamples

    External .context Directory

    -

    Store context outside the project tree (useful for monorepos or shared context):

    -
    # .ctxrc
    -context_dir: /home/team/shared-context
    -allow_outside_cwd: true
    +

    Store a project's context outside the project tree (useful when a +repo is read-only, or when you want to keep notes adjacent rather +than checked in). Declare the path via CTX_DIR:

    +
    export CTX_DIR=/home/you/ctx-stores/my-project/.context
     
    +
    +

    One .context/ per project

    +

    The parent of the context directory is the project root by +contract: ctx sync, ctx drift, and the memory-drift hook +all read the codebase from filepath.Dir(ContextDir()). +Pointing two projects at the same .context/ directory will +collide their journals, state, and secrets. To share knowledge +(CONSTITUTION / CONVENTIONS / ARCHITECTURE) across projects, +use ctx hub, not a shared +.context/.

    +

    Custom Token Budget

    Increase the token budget for projects with large context:

    # .ctxrc
    diff --git a/site/home/context-files/index.html b/site/home/context-files/index.html
    index e48f94552..f5f9313d0 100644
    --- a/site/home/context-files/index.html
    +++ b/site/home/context-files/index.html
    @@ -252,15 +252,6 @@
         
       
       
    -    
    -    
    -      
    -  
    -  
    -  
    -    
    -  
    -  
         
         
           
  • @@ -274,9 +265,6 @@ - - - @@ -552,6 +540,10 @@ + + + + @@ -582,10 +574,13 @@ -
  • @@ -2531,9 +2525,6 @@ - - - diff --git a/site/home/contributing/index.html b/site/home/contributing/index.html index 35b9162ed..10359637b 100644 --- a/site/home/contributing/index.html +++ b/site/home/contributing/index.html @@ -18,7 +18,7 @@ - + @@ -252,15 +252,6 @@ - - - - - - - - -
  • @@ -274,9 +265,6 @@ - - - @@ -552,6 +540,10 @@ + + + + @@ -582,9 +574,12 @@ - @@ -1833,6 +1830,8 @@ + + @@ -2382,11 +2381,6 @@ - - - - -
  • @@ -2399,9 +2393,6 @@ - - - @@ -2569,10 +2560,6 @@

    Dev-Only Skills ReferenceConfiguration ProfilesAfter cloning, run ctx config switch dev to get started with full logging.

    See Configuration for the full .ctxrc option reference.

    Backups

    -

    Back up project context and global Claude Code data with:

    -
    ctx backup                    # both project + global (default)
    -ctx backup --scope project    # .context/, .claude/, ideas/ only
    -ctx backup --scope global     # ~/.claude/ only
    -
    -

    Archives are saved to /tmp/. When CTX_BACKUP_SMB_URL is configured, -they are also copied to an SMB share. See -CLI Reference: backup for details.

    +

    ctx does not ship a backup command. File-level backup is an OS / +infrastructure concern; ctx hub handles the cross-machine +knowledge persistence that matters most. For everything else, see +Backup Strategy: +rsync, Time Machine, Borg, or whichever tool already handles the +rest of your files.

    Running Tests

    -
    make test   # fast: all tests
    -make audit  # full: fmt + vet + lint + drift + docs + test
    -make smoke  # build + run basic commands end-to-end
    +
    make test   # fast: all tests
    +make audit  # full: fmt + vet + lint + drift + docs + test
    +make smoke  # build + run basic commands end-to-end
     

    Running the Docs Site Locally

    -
    make site-setup  # one-time: install zensical via pipx
    -make site-serve  # serve at localhost
    +
    make site-setup  # one-time: install zensical via pipx
    +make site-serve  # serve at localhost
     

    Submitting Changes

    @@ -2878,7 +2863,7 @@

    Developer Certificate of Origin (By contributing, you agree to the Developer Certificate of Origin.

    All commits must be signed off:

    -
    git commit -s -m "feat: add new feature"
    +
    git commit -s -m "feat: add new feature"
     

    License

    Contributions are licensed under the @@ -2942,13 +2927,13 @@

    License + - +


    Invocation

    +

    The caller MUST set CTX_DIR to the sub-repo the agent will work on. +The agent verifies this at Step 3.2 and stops if it does not match. +The wrapper reads the manifest to pick the current sub-repo, then +launches claude with CTX_DIR pinned to that sub-repo's .context/.

    Single run (safest for quota):

    cd ~/WORKSPACE
    -claude --print "Follow .arch-explorer/PROMPT.md" --allowedTools '*'
    +REPO=$(jq -r '.repos[.current_repo_index]' .arch-explorer/manifest.json)
    +CTX_DIR="$PWD/$REPO/.context" \
    +  claude --print "Follow .arch-explorer/PROMPT.md" --allowedTools '*'
     

    Batch of N runs:

    cd ~/WORKSPACE
     for i in $(seq 1 5); do
    -  claude --print "Follow .arch-explorer/PROMPT.md" --allowedTools '*'
    -  echo "--- Run $i complete ---"
    -done
    +  REPO=$(jq -r '.repos[.current_repo_index]' .arch-explorer/manifest.json)
    +  CTX_DIR="$PWD/$REPO/.context" \
    +    claude --print "Follow .arch-explorer/PROMPT.md" --allowedTools '*'
    +  echo "--- Run $i complete (repo: $REPO) ---"
    +done
     

    Resume after interruption:

    -

    Just run again. The manifest tracks state; the agent picks up where -it left off.

    +

    Just run the wrapper again. The manifest tracks state; the agent picks +up where it left off. CTX_DIR is recomputed from the manifest on +each invocation, so the right sub-repo is always bound.

    Tips

    @@ -1588,6 +1578,8 @@ + + diff --git a/site/operations/runbooks/codebase-audit/index.html b/site/operations/runbooks/codebase-audit/index.html index 8f0a21b91..acd48bb1b 100644 --- a/site/operations/runbooks/codebase-audit/index.html +++ b/site/operations/runbooks/codebase-audit/index.html @@ -252,13 +252,6 @@ - - - - - - -
  • @@ -270,9 +263,6 @@ - - - @@ -548,6 +538,10 @@ + + + + @@ -577,14 +571,12 @@ - - - - + + @@ -602,8 +594,6 @@ - -
  • @@ -976,120 +966,6 @@ - - - - - - - - - - - - - - - - - - - - - -
  • - - - - - - - - - - - - - Hub - - - - - - - - - - - - - - -
  • - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  • - - - - - - - - - - - - - Maintainers - - - - - - - - - - - - - - -
  • - - - - - - - - @@ -1128,10 +1004,10 @@ - + -

  • @@ -712,6 +702,8 @@ + + @@ -732,7 +724,7 @@ - + @@ -1791,6 +1783,8 @@ + + diff --git a/site/recipes/external-context/index.html b/site/recipes/external-context/index.html index ff7951b2e..8d9f0fe6b 100644 --- a/site/recipes/external-context/index.html +++ b/site/recipes/external-context/index.html @@ -252,13 +252,6 @@ - - - - - - -
  • @@ -270,9 +263,6 @@ - - - @@ -548,6 +538,10 @@ + + + + @@ -577,14 +571,12 @@ - - - - + + @@ -602,8 +594,6 @@ - -
  • @@ -713,6 +703,8 @@ + + @@ -767,6 +759,36 @@ +
  • + + + + + + + + + + Activating a Context Directory + + + + + + + + +
  • + + + + + + + + + +
  • @@ -938,6 +960,34 @@ +
  • + +
  • + + + + What Works, What Quietly Degrades + + + + + +
  • @@ -998,21 +1048,10 @@
  • + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    OperationBehavior with external .context/
    ctx status, agent, add✅ Works. Operates on files inside CTX_DIR.
    Journal, scratchpad, hub✅ Works. Same reason.
    ctx sync⚠️ Scans the context repo, not the code repo.
    ctx drift⚠️ Same. Reports nothing useful.
    Memory-drift hook (MEMORY.md)⚠️ Looks for MEMORY.md next to the external .context/, not the code.
    +

    Nothing errors. The code-aware operations just find an empty or +unrelated tree where the project root should be.

    + +

    If you want both the privacy of an external git repo and working +ctx sync / drift / memory-drift, symlink the external +.context/ into the code repo and point CTX_DIR at the symlink:

    +
    # External repo holds the real files
    +mkdir -p ~/repos/myproject-context && cd ~/repos/myproject-context && git init
    +
    +# Symlink it into the code repo
    +ln -s ~/repos/myproject-context/.context ~/repos/myproject/.context
    +
    +# Bind CTX_DIR to the symlink path; ctx init will follow it
    +export CTX_DIR=~/repos/myproject/.context
    +ctx init
     
    -

    All ctx commands now use the external directory automatically.

    +

    Now filepath.Dir(CTX_DIR) is the code repo, so code-aware +operations scan the right tree. The actual files still live in +the external repo and commit there. Add .context to the code +repo's .gitignore (or .git/info/exclude) so the symlink itself +isn't tracked by the code repo.

    +

    The basename guard is permissive about symlinks: it checks the +declared name, not the resolved target, so a .context symlink +pointing anywhere is accepted as long as the declared basename is +.context.

    Commands and Skills Used

    @@ -2245,24 +2374,19 @@

    Commands and Skills UsedInitialize context directory

    - - - + + + - - - + + + - - - - - - + @@ -2276,135 +2400,116 @@

    Step 1: Create the Private Conte

    Create a separate repository for your context files. This can live anywhere: a private GitHub repo, a shared drive, a sibling directory:

    # Create the context repo
    -mkdir ~/repos/myproject-context
    +mkdir -p ~/repos/myproject-context
     cd ~/repos/myproject-context
     git init
     

    Step 2: Initialize ctx Pointing at It

    -

    From your project root, initialize ctx with --context-dir pointing to the -external location. Because the directory is outside your project tree, you also -need --allow-outside-cwd:

    +

    From your project root, declare CTX_DIR pointing to the external +location, then initialize:

    cd ~/repos/myproject
    -ctx --context-dir ~/repos/myproject-context \
    -    --allow-outside-cwd \
    -    init
    +CTX_DIR=~/repos/myproject-context/.context ctx init
     
    -

    This creates the full .context/-style file set inside +

    This creates the canonical .context/ file set inside ~/repos/myproject-context/ instead of ~/repos/myproject/.context/.

    -
    -

    Boundary Validation

    -

    ctx validates that the .context directory is within the current working -directory.

    -

    If your external directory is truly outside the project root:

    -
      -
    • Either every ctx command needs --allow-outside-cwd,
    • -
    • or you can persist the setting in .ctxrc (next step).
    • -
    -

    Step 3: Make It Stick

    -

    Typing --context-dir and --allow-outside-cwd on every command is tedious. -Pick one of these methods to make the configuration permanent.

    - -

    Create a .ctxrc file in your project root:

    -
    # .ctxrc: committed to the project repo
    -context_dir: ~/repos/myproject-context
    -allow_outside_cwd: true
    -
    -

    ctx reads .ctxrc automatically. Every command now uses the external -directory without extra flags:

    -
    ctx status          # reads from ~/repos/myproject-context
    -ctx add learning "Redis MULTI doesn't roll back on error" \
    -  --session-id abc12345 --branch main --commit 68fbc00a
    -
    -
    -

    Commit .ctxrc

    -

    .ctxrc belongs in the project repo. It contains no secrets: It's just a -path and a boundary override.

    -

    .ctxrc lets teammates share the same configuration.

    -
    -

    Option B: CTX_DIR Environment Variable

    -

    Good for CI pipelines, temporary overrides, or when you don't want to commit -a .ctxrc:

    -
    # In your shell profile (~/.bashrc, ~/.zshrc)
    -export CTX_DIR=~/repos/myproject-context
    +

    Declaring CTX_DIR on every command is tedious. Pick one of these +methods to make the configuration permanent. The context directory +itself must be declared via CTX_DIR; .ctxrc does not carry the +path.

    + +
    # Direct path. Works for ctx status / agent / add but degrades
    +# code-aware operations. See "What Works, What Quietly Degrades".
    +export CTX_DIR=~/repos/myproject-context/.context
    +
    +# Or, with the symlink approach above, point at the symlink path
    +# inside the code repo so code-aware operations stay healthy.
    +export CTX_DIR=~/repos/myproject/.context
     
    -

    Or for a single session:

    -
    CTX_DIR=~/repos/myproject-context ctx status
    +

    Put either form in your shell profile (~/.bashrc, ~/.zshrc) +or a direnv .envrc.

    +

    For a single session, run eval "$(ctx activate)" from any +directory inside the project where exactly one .context/ +candidate is visible (the symlink counts). activate does not +accept a path argument; bind a specific path by exporting +CTX_DIR directly instead.

    +

    Option B: .ctxrc for Other Settings

    +

    Put any settings (token budget, priority order, freshness files) in a +.ctxrc at the project root (dirname(CTX_DIR)), which here is the +parent of the external .context/:

    +
    # ~/repos/myproject-context/.ctxrc
    +token_budget: 16000
     
    -

    Option C: Shell Alias

    -

    If you prefer a shell alias over .ctxrc:

    -
    # ~/.bashrc or ~/.zshrc
    -alias ctx='ctx --context-dir ~/repos/myproject-context --allow-outside-cwd'
    -
    -

    Priority Order

    -

    When multiple methods are set, ctx resolves the context directory in this -order (highest priority first):

    -
      -
    1. --context-dir flag
    2. -
    3. CTX_DIR environment variable
    4. -
    5. context_dir in .ctxrc
    6. -
    7. Default: .context/
    8. -
    +

    .ctxrc is always read from the parent of CTX_DIR, so this file is +picked up whenever CTX_DIR points at +~/repos/myproject-context/.context.

    +

    Resolution

    +

    ctx reads the context directory from a single channel: the +CTX_DIR environment variable. When CTX_DIR is unset, ctx +errors with a "no context directory specified" hint pointing at +ctx activate and this recipe. When set, the value must be an +absolute path with .context as its basename; relative paths and +other names are rejected on first use.

    +

    See +Activating a Context Directory for the full +recipe.

    Step 4: Agent Auto-Discovery via Bootstrap

    When context lives outside the project tree, your AI assistant needs to know where to find it. The ctx system bootstrap command resolves the configured context directory and communicates it to the agent automatically:

    -
    $ ctx system bootstrap
    -ctx system bootstrap
    -====================
    -
    -context_dir: /home/user/repos/myproject-context
    -
    -Files:
    -  CONSTITUTION.md, TASKS.md, DECISIONS.md, ...
    +
    $ ctx system bootstrap
    +ctx system bootstrap
    +====================
    +
    +context_dir: /home/user/repos/myproject-context/.context
    +
    +Files:
    +  CONSTITUTION.md, TASKS.md, DECISIONS.md, ...
     

    The CLAUDE.md template generated by ctx init already instructs the agent to -run ctx system bootstrap at session start. Because .ctxrc is in the -project root, your agent inherits the external path automatically via -the ctx system bootstrap call instruction.

    +run ctx system bootstrap at session start. Because CTX_DIR is inherited +by child processes, your agent picks up the external path automatically.

    Here is the relevant section from CLAUDE.md for reference:

    -
    <!-- CLAUDE.md -->
    -1. **Run `ctx system bootstrap`**: CRITICAL, not optional.
    -   This tells you where the context directory is. If it fails or returns
    -   no context_dir, STOP and warn the user.
    +
    <!-- CLAUDE.md -->
    +1. **Run `ctx system bootstrap`**: CRITICAL, not optional.
    +   This tells you where the context directory is. If it returns any
    +   error, relay the error output to the user verbatim, point them at
    +   https://ctx.ist/recipes/activating-context/ for setup, and STOP.
    +   Do not try to recover; the user decides.
     
    -

    Moreover, every nudge (context checkpoint, persistence reminder, etc.) also -includes a Context: /home/user/repos/myproject-context footer, so the agent -remains anchored to the correct directory even in long sessions.

    -

    If you use CTX_DIR instead of .ctxrc, export it in your shell -profile so the hook process inherits it:

    -
    export CTX_DIR=~/repos/myproject-context
    +

    Moreover, every nudge (context checkpoint, persistence reminder, etc.) also +includes a Context: /home/user/repos/myproject-context/.context footer, so +the agent remains anchored to the correct directory even in long sessions.

    +

    Export CTX_DIR in your shell profile so every hook process inherits it:

    +
    export CTX_DIR=~/repos/myproject-context/.context
     

    Step 5: Share with Teammates

    -

    Teammates clone both repos and set up .ctxrc:

    -
    # Clone the project
    -git clone git@github.com:org/myproject.git
    -cd myproject
    -
    -# Clone the private context repo
    -git clone git@github.com:org/myproject-context.git ~/repos/myproject-context
    -
    -

    If .ctxrc is already committed to the project, they're done: ctx -commands will find the external context automatically.

    -

    If teammates use different paths, each developer sets their own CTX_DIR:

    -
    export CTX_DIR=~/my-own-path/myproject-context
    +

    Teammates clone both repos and export CTX_DIR:

    +
    # Clone the project
    +git clone git@github.com:org/myproject.git
    +cd myproject
    +
    +# Clone the private context repo
    +git clone git@github.com:org/myproject-context.git ~/repos/myproject-context
    +export CTX_DIR=~/repos/myproject-context/.context
     
    +

    If teammates use different paths, each developer sets their own CTX_DIR.

    For encryption key distribution across the team, see the Syncing Scratchpad Notes recipe.

    Step 6: Day-to-Day Sync

    The external context repo has its own git history. Treat it like any other -repo: Commit and push after sessions:

    -
    cd ~/repos/myproject-context
    -
    -# After a session
    -git add -A
    -git commit -m "Session: refactored auth module, added rate-limit learning"
    -git push
    +repo: commit and push after sessions:

    +
    cd ~/repos/myproject-context
    +
    +# After a session
    +git add -A
    +git commit -m "Session: refactored auth module, added rate-limit learning"
    +git push
     

    Your AI assistant can do this too. When ending a session:

    -
    You: "Save what we learned and push the context repo."
    -
    -Agent: [runs ctx add learning, then commits and pushes the context repo]
    +
    You: "Save what we learned and push the context repo."
    +
    +Agent: [runs ctx add learning, then commits and pushes the context repo]
     

    You can also set up a post-session habit: project code gets committed to the project repo, context gets committed to the context repo.

    @@ -2412,18 +2517,18 @@

    Step 6: Day-to-Day SyncConversational Approach

    You don't need to remember the flags; simply ask your assistant:

    Set Up Your System Using Natural Language

    -
    You: "Set up ctx to use ~/repos/myproject-context as the context directory."
    -
    -Agent: "I'll create a .ctxrc in the project root pointing to that path.
    -       I'll also update CLAUDE.md so future sessions know where to find
    -       context. Want me to initialize the context files there too?"
    +
    You: "Set up ctx to use ~/repos/myproject-context as the context directory."
    +
    +Agent: "I'll set CTX_DIR to that path, run ctx init to materialize
    +       it, and show you the export line to add to your shell
    +       profile. Want me to seed the core context files too?"
     

    Configure Separate Repo for .context Folder Using Natural Language

    -
    You: "My context is in a separate repo. Can you load it?"
    -
    -Agent: [reads .ctxrc, finds the path, loads context from the external dir]
    -       "Loaded. You have 3 pending tasks, last session was about the auth
    -       refactor."
    +
    You: "My context is in a separate repo. Can you load it?"
    +
    +Agent: [reads CTX_DIR, loads context from the external dir]
    +       "Loaded. You have 3 pending tasks, last session was about the auth
    +       refactor."
     

    Tips

    @@ -2432,12 +2537,10 @@

    Tips&par The default .context/ in-tree is the easiest path. Move to an external repo when you have a concrete reason.
  • One context repo per project. Sharing a single context directory across - multiple projects creates confusion. Keep the mapping 1:1.
  • -
  • Use .ctxrc over env vars when the path is stable. It's committed, - documented, and works for the whole team without per-developer shell setup.
  • -
  • Don't forget the boundary flag. The most common error is - Error: context directory is outside the project root. Set - allow_outside_cwd: true in .ctxrc or pass --allow-outside-cwd.
  • + multiple projects corrupts journals, state, and secrets. Use ctx hub for + cross-project knowledge sharing. +
  • Export CTX_DIR in your shell profile so hooks and tools inherit the + path without per-command flags.
  • Commit both repos at session boundaries. Context without code history (or code without context history) loses half the value.
  • @@ -2450,8 +2553,7 @@

    See AlsoSetting Up ctx Across AI Tools: initial setup recipe
  • Syncing Scratchpad Notes Across Machines: distribute encryption keys when context is shared
  • -
  • CLI Reference: all global flags including - --context-dir and --allow-outside-cwd
  • +
  • CLI Reference: full command list and global options
  • diff --git a/site/recipes/guide-your-agent/index.html b/site/recipes/guide-your-agent/index.html index d4b768413..4bada8ea9 100644 --- a/site/recipes/guide-your-agent/index.html +++ b/site/recipes/guide-your-agent/index.html @@ -15,7 +15,7 @@ - + @@ -252,13 +252,6 @@ - - - - - - -
  • @@ -270,9 +263,6 @@ - - - @@ -548,6 +538,10 @@ + + + + @@ -577,14 +571,12 @@ - - - - + + @@ -602,8 +594,6 @@ - -
  • @@ -713,6 +703,8 @@ + + @@ -765,6 +757,36 @@ + + +
  • + + + + + + + + + + Activating a Context Directory + + + + + + + + +
  • + + + + + + + + @@ -1596,6 +1618,8 @@ + + @@ -1764,7 +1788,7 @@
  • - + Getting Started @@ -1875,7 +1899,7 @@

    See Also - +

  • diff --git a/site/recipes/hook-output-patterns/index.html b/site/recipes/hook-output-patterns/index.html index 0adacc69f..4afd29968 100644 --- a/site/recipes/hook-output-patterns/index.html +++ b/site/recipes/hook-output-patterns/index.html @@ -252,13 +252,6 @@ - - - - - - -
  • @@ -270,9 +263,6 @@ - - - @@ -548,6 +538,10 @@ + + + + @@ -577,14 +571,12 @@ - - - - + + @@ -602,8 +594,6 @@ - -
  • @@ -712,6 +702,8 @@ + + @@ -732,7 +724,7 @@ - + @@ -1817,6 +1809,8 @@ + + @@ -2332,7 +2326,6 @@

    Pattern 2: VERBATIM Relayctx system check-context-size: Context capacity warning
  • ctx system check-resources: Resource pressure (memory, swap, disk, load): DANGER only
  • ctx system check-freshness: Technology constant staleness warning
  • -
  • check-backup-age.sh: Stale backup warning (project-local)
  • Trade-off: Noisy if overused. Every VERBATIM relay adds a preamble before the agent's actual answer. Throttle with once-per-day markers or diff --git a/site/recipes/hook-sequence-diagrams/index.html b/site/recipes/hook-sequence-diagrams/index.html index 7a49f28ff..cf44adb27 100644 --- a/site/recipes/hook-sequence-diagrams/index.html +++ b/site/recipes/hook-sequence-diagrams/index.html @@ -248,13 +248,6 @@ - - - - - - -

  • @@ -266,9 +259,6 @@ - - - @@ -542,6 +532,10 @@ + + + + @@ -571,14 +565,12 @@ - - - - + + @@ -596,8 +588,6 @@ - -
  • @@ -905,6 +895,8 @@ + + @@ -1260,17 +1252,6 @@ - - -
  • - - - - Check-Backup-Age - - - -
  • @@ -1973,34 +1954,6 @@

    Block-Dangerous-Commands

    -

    Check-Backup-Age

    -

    Lifecycle: UserPromptSubmit.

    -

    Daily check for SMB mount and backup freshness.

    -
    sequenceDiagram
    -    participant CC as Claude Code
    -    participant Hook as check-backup-age
    -    participant State as .context/state/
    -    participant FS as Filesystem
    -    participant Tpl as Message Template
    -
    -    CC->>Hook: stdin {session_id}
    -    Hook->>Hook: Check initialized + HookPreamble
    -    alt not initialized or paused
    -        Hook-->>CC: (silent exit)
    -    end
    -    Hook->>State: Check daily throttle marker
    -    alt throttled
    -        Hook-->>CC: (silent exit)
    -    end
    -    Hook->>FS: Check SMB mount (if env var set)
    -    Hook->>FS: Check backup marker file age
    -    alt no warnings
    -        Hook-->>CC: (silent exit)
    -    end
    -    Hook->>Tpl: LoadMessage(hook, warning, {Warnings})
    -    Hook-->>CC: Nudge box (warnings)
    -    Hook->>Hook: NudgeAndRelay(message)
    -    Hook->>State: Touch throttle marker

    Throttling Summary

    --context-dirGlobal flagPoint ctx at a non-default directoryctx activateCLI commandEmit export CTX_DIR=... for the shell
    --allow-outside-cwdGlobal flagPermit context outside the project rootCTX_DIREnv variableDeclare context directory per-session
    .ctxrc Config filePersist the context directory setting
    CTX_DIREnv variableOverride context directory per-sessionPer-project configuration
    /ctx-status
    @@ -2127,12 +2080,6 @@

    Throttling SummaryState File Reference @@ -270,9 +263,6 @@ - - - @@ -548,6 +538,10 @@ + + + + @@ -577,14 +571,12 @@ - - - - + + @@ -602,8 +594,6 @@ - - @@ -712,6 +702,8 @@ + + @@ -732,7 +724,7 @@ - + @@ -1726,6 +1718,8 @@ + + diff --git a/site/recipes/hub-getting-started/index.html b/site/recipes/hub-getting-started/index.html index b8e3010f8..582d5aed3 100644 --- a/site/recipes/hub-getting-started/index.html +++ b/site/recipes/hub-getting-started/index.html @@ -252,13 +252,6 @@ - - - - - - -
  • @@ -270,9 +263,6 @@ - - - @@ -548,6 +538,10 @@ + + + + @@ -577,14 +571,12 @@ - - - - + + @@ -602,8 +594,6 @@ - -
  • @@ -712,6 +702,8 @@ + + @@ -732,7 +724,7 @@ - + @@ -1759,6 +1751,8 @@ + + diff --git a/site/recipes/hub-multi-machine/index.html b/site/recipes/hub-multi-machine/index.html index a794d5382..447868f46 100644 --- a/site/recipes/hub-multi-machine/index.html +++ b/site/recipes/hub-multi-machine/index.html @@ -252,13 +252,6 @@ - - - - - - -
  • @@ -270,9 +263,6 @@ - - - @@ -548,6 +538,10 @@ + + + + @@ -577,14 +571,12 @@ - - - - + + @@ -602,8 +594,6 @@ - -
  • @@ -712,6 +702,8 @@ + + @@ -732,7 +724,7 @@ - + @@ -1726,6 +1718,8 @@ + + diff --git a/site/recipes/hub-overview/index.html b/site/recipes/hub-overview/index.html index 05e1feaf0..958b81686 100644 --- a/site/recipes/hub-overview/index.html +++ b/site/recipes/hub-overview/index.html @@ -252,13 +252,6 @@ - - - - - - -
  • @@ -270,9 +263,6 @@ - - - @@ -548,6 +538,10 @@ + + + + @@ -577,14 +571,12 @@ - - - - + + @@ -602,8 +594,6 @@ - -
  • @@ -712,6 +702,8 @@ + + @@ -732,7 +724,7 @@ - + @@ -1743,6 +1735,8 @@ + + diff --git a/site/recipes/hub-personal/index.html b/site/recipes/hub-personal/index.html index 2b8420c04..3c7a96ec7 100644 --- a/site/recipes/hub-personal/index.html +++ b/site/recipes/hub-personal/index.html @@ -252,13 +252,6 @@ - - - - - - -
  • @@ -270,9 +263,6 @@ - - - @@ -548,6 +538,10 @@ + + + + @@ -577,14 +571,12 @@ - - - - + + @@ -602,8 +594,6 @@ - -
  • @@ -712,6 +702,8 @@ + + @@ -732,7 +724,7 @@ - + @@ -1754,6 +1746,8 @@ + + diff --git a/site/recipes/hub-team/index.html b/site/recipes/hub-team/index.html index ef32f7933..76c6f0658 100644 --- a/site/recipes/hub-team/index.html +++ b/site/recipes/hub-team/index.html @@ -252,13 +252,6 @@ - - - - - - -
  • @@ -270,9 +263,6 @@ - - - @@ -548,6 +538,10 @@ + + + + @@ -577,14 +571,12 @@ - - - - + + @@ -602,8 +594,6 @@ - -
  • @@ -712,6 +702,8 @@ + + @@ -732,7 +724,7 @@ - + @@ -1726,6 +1718,8 @@ + + diff --git a/site/recipes/import-plans/index.html b/site/recipes/import-plans/index.html index 8ab807b57..d03ff8dae 100644 --- a/site/recipes/import-plans/index.html +++ b/site/recipes/import-plans/index.html @@ -252,13 +252,6 @@ - - - - - - -
  • @@ -270,9 +263,6 @@ - - - @@ -548,6 +538,10 @@ + + + + @@ -577,14 +571,12 @@ - - - - + + @@ -602,8 +594,6 @@ - -
  • @@ -712,6 +702,8 @@ + + @@ -732,7 +724,7 @@ - + @@ -1780,6 +1772,8 @@ + + diff --git a/site/recipes/index.html b/site/recipes/index.html index b7411d99e..b1b1c68e8 100644 --- a/site/recipes/index.html +++ b/site/recipes/index.html @@ -15,10 +15,10 @@ - + - + @@ -252,13 +252,6 @@ - - - - - - -
  • @@ -270,9 +263,6 @@ - - - @@ -548,6 +538,10 @@ + + + + @@ -577,14 +571,12 @@ - - - - + + @@ -602,8 +594,6 @@ - -
  • @@ -712,6 +702,8 @@ + + @@ -732,7 +724,7 @@ - + @@ -1387,6 +1379,8 @@ + + @@ -1858,6 +1852,17 @@ + + +
  • + + + + Scrutinizing a Plan + + + +
  • @@ -2137,8 +2142,7 @@

    Keeping

    Store context files outside the project tree: in a private repo, shared directory, or anywhere else. Useful for open source projects with private context or multi-repo setups.

    -

    Uses: ctx init, --context-dir, --allow-outside-cwd, -.ctxrc, /ctx-status

    +

    Uses: ctx init, CTX_DIR, .ctxrc, /ctx-status


    Sessions

    The Complete Session

    @@ -2318,6 +2322,15 @@

    Design Before Codi

    Uses: /ctx-brainstorm, /ctx-spec, /ctx-task-add, /ctx-implement, /ctx-decision-add


    +

    Scrutinizing a Plan

    +

    Once a plan exists, run an adversarial interview to surface what's +weak, missing, or unexamined before you commit. Walks the plan +depth-first: assumptions, failure modes, alternatives, sequencing, +reversibility. The complement to brainstorm: brainstorm produces +plans, this attacks them.

    +

    Uses: /ctx-plan, /ctx-spec, /ctx-decision-add, +/ctx-learning-add

    +

    Agents and Automation

    Building Project Skills

    Encode repeating workflows into reusable skills the agent loads @@ -2465,7 +2478,7 @@

    ctx Hub: HA Clus

    - + diff --git a/site/recipes/session-pause/index.html b/site/recipes/session-pause/index.html index f541d1f66..d7bb54858 100644 --- a/site/recipes/session-pause/index.html +++ b/site/recipes/session-pause/index.html @@ -252,13 +252,6 @@ - - - - - - -
  • @@ -270,9 +263,6 @@ - - - @@ -548,6 +538,10 @@ + + + + @@ -577,14 +571,12 @@ - - - - + + @@ -602,8 +594,6 @@ - -
  • @@ -712,6 +702,8 @@ + + @@ -732,7 +724,7 @@ - + @@ -1681,6 +1673,8 @@ + + diff --git a/site/recipes/session-reminders/index.html b/site/recipes/session-reminders/index.html index de9f8e907..b04a7bdb7 100644 --- a/site/recipes/session-reminders/index.html +++ b/site/recipes/session-reminders/index.html @@ -252,13 +252,6 @@ - - - - - - -
  • @@ -270,9 +263,6 @@ - - - @@ -548,6 +538,10 @@ + + + + @@ -577,14 +571,12 @@ - - - - + + @@ -602,8 +594,6 @@ - -
  • @@ -712,6 +702,8 @@ + + @@ -732,7 +724,7 @@ - + @@ -1753,6 +1745,8 @@ + + diff --git a/site/recipes/state-maintenance/index.html b/site/recipes/state-maintenance/index.html index 007aee29d..502ac0ff1 100644 --- a/site/recipes/state-maintenance/index.html +++ b/site/recipes/state-maintenance/index.html @@ -248,13 +248,6 @@ - - - - - - -
  • @@ -266,9 +259,6 @@ - - - @@ -542,6 +532,10 @@ + + + + @@ -571,14 +565,12 @@ - - - - + + @@ -596,8 +588,6 @@ - -
  • @@ -905,6 +895,8 @@ + + diff --git a/site/recipes/steering/index.html b/site/recipes/steering/index.html index 71194a95f..52ca1447a 100644 --- a/site/recipes/steering/index.html +++ b/site/recipes/steering/index.html @@ -252,13 +252,6 @@ - - - - - - -
  • @@ -270,9 +263,6 @@ - - - @@ -548,6 +538,10 @@ + + + + @@ -577,14 +571,12 @@ - - - - + + @@ -602,8 +594,6 @@ - -
  • @@ -712,6 +702,8 @@ + + @@ -732,7 +724,7 @@ - + @@ -1765,6 +1757,8 @@ + + diff --git a/site/recipes/system-hooks-audit/index.html b/site/recipes/system-hooks-audit/index.html index 43b0ed39e..9f3cdf9f2 100644 --- a/site/recipes/system-hooks-audit/index.html +++ b/site/recipes/system-hooks-audit/index.html @@ -252,13 +252,6 @@ - - - - - - -
  • @@ -270,9 +263,6 @@ - - - @@ -548,6 +538,10 @@ + + + + @@ -577,14 +571,12 @@ - - - - + + @@ -602,8 +594,6 @@ - -
  • @@ -712,6 +702,8 @@ + + @@ -732,7 +724,7 @@ - + @@ -2006,6 +1998,8 @@ + + diff --git a/site/recipes/task-management/index.html b/site/recipes/task-management/index.html index 4fb8b5e3c..f70b6703d 100644 --- a/site/recipes/task-management/index.html +++ b/site/recipes/task-management/index.html @@ -252,13 +252,6 @@ - - - - - - -
  • @@ -270,9 +263,6 @@ - - - @@ -548,6 +538,10 @@ + + + + @@ -577,14 +571,12 @@ - - - - + + @@ -602,8 +594,6 @@ - -
  • @@ -712,6 +702,8 @@ + + @@ -732,7 +724,7 @@ - + @@ -1896,6 +1888,8 @@ + + diff --git a/site/recipes/triggers/index.html b/site/recipes/triggers/index.html index 7c9b4397e..5c896dfec 100644 --- a/site/recipes/triggers/index.html +++ b/site/recipes/triggers/index.html @@ -252,13 +252,6 @@ - - - - - - -
  • @@ -270,9 +263,6 @@ - - - @@ -548,6 +538,10 @@ + + + + @@ -577,14 +571,12 @@ - - - - + + @@ -602,8 +594,6 @@ - -
  • @@ -712,6 +702,8 @@ + + @@ -732,7 +724,7 @@ - + @@ -1765,6 +1757,8 @@ + + diff --git a/site/recipes/troubleshooting/index.html b/site/recipes/troubleshooting/index.html index c84c295ab..179d1f960 100644 --- a/site/recipes/troubleshooting/index.html +++ b/site/recipes/troubleshooting/index.html @@ -252,13 +252,6 @@ - - - - - - -
  • @@ -270,9 +263,6 @@ - - - @@ -548,6 +538,10 @@ + + + + @@ -577,14 +571,12 @@ - - - - + + @@ -602,8 +594,6 @@ - -
  • @@ -712,6 +702,8 @@ + + @@ -732,7 +724,7 @@ - + @@ -1243,6 +1235,17 @@
    Work--(none) "Only change files in internal/cache/"
    + + + + + + + + + + + + + + + + + + + + + + + + +
    SurfaceSource of CTX_DIRBound when
    Claude Code hooks${CLAUDE_PROJECT_DIR}/.context (injected)Every hook line; the project Claude is in
    !-pragma in chat / interactive shellWhatever the parent shell exportedWhen you ran eval "$(ctx activate)"
    New shell tab opened mid-sessionWhatever your shellrc exportsLogin
    +

    When these drift, the per-prompt check-anchor-drift hook fires a +verbatim warning naming both values. To fix: re-run +eval "$(ctx activate)" from inside the project the Claude Code +session is editing, or close the shell tab and reopen it from the +right working directory.

    "My Hook Isn't Firing"

    Symptoms: No nudges appearing, webhook silent, event log shows no entries for the expected hook.

    Diagnosis:

    -
    # 1. Check if ctx is installed and on PATH
    -which ctx && ctx --version
    -
    -# 2. Check if the hook is registered
    -grep "check-persistence" ~/.claude/plugins/ctx/hooks.json
    -
    -# 3. Run the hook manually to see if it errors
    -echo '{"session_id":"test"}' | ctx system check-persistence
    -
    -# 4. Check event log for the hook (if enabled)
    -ctx hook event --hook check-persistence
    +
    # 1. Check if ctx is installed and on PATH
    +which ctx && ctx --version
    +
    +# 2. Check if the hook is registered
    +grep "check-persistence" ~/.claude/plugins/ctx/hooks.json
    +
    +# 3. Run the hook manually to see if it errors
    +echo '{"session_id":"test"}' | ctx system check-persistence
    +
    +# 4. Check event log for the hook (if enabled)
    +ctx hook event --hook check-persistence
     

    Common causes:

      @@ -2342,12 +2432,12 @@

      "Too Many Nudges"
      # Check how often hooks fired recently
      -ctx hook event --last 50
      -
      -# Count fires per hook
      -ctx hook event --json | jq -r '.detail.hook // "unknown"' \
      -  | sort | uniq -c | sort -rn
      +
      # Check how often hooks fired recently
      +ctx hook event --last 50
      +
      +# Count fires per hook
      +ctx hook event --json | jq -r '.detail.hook // "unknown"' \
      +  | sort | uniq -c | sort -rn
       

      Common causes:

        @@ -2368,14 +2458,14 @@

        "Context Seems Stale"Symptoms: The agent references outdated information, paths that don't exist, or decisions that were reversed.

        Diagnosis:

        -
        # Structural drift check
        -ctx drift
        -
        -# Full doctor check (includes drift + more)
        -ctx doctor
        -
        -# Check when context files were last modified
        -ctx status --verbose
        +
        # Structural drift check
        +ctx drift
        +
        +# Full doctor check (includes drift + more)
        +ctx doctor
        +
        +# Check when context files were last modified
        +ctx status --verbose
         

        Common causes:

          @@ -2393,11 +2483,11 @@

          "The Agent Isn't Following In

          Symptoms: The agent ignores conventions, forgets decisions, or acts contrary to CONSTITUTION.md rules.

          Diagnosis:

          -
          # Check context token size: Is it too large for the model?
          -ctx doctor --json | jq '.results[] | select(.name == "context_size")'
          -
          -# Check if context is actually being loaded
          -ctx hook event --hook context-load-gate
          +
          # Check context token size: Is it too large for the model?
          +ctx doctor --json | jq '.results[] | select(.name == "context_size")'
          +
          +# Check if context is actually being loaded
          +ctx hook event --hook context-load-gate
           

          Common causes:

            diff --git a/site/recipes/webhook-notifications/index.html b/site/recipes/webhook-notifications/index.html index 1227df443..1a0d345c7 100644 --- a/site/recipes/webhook-notifications/index.html +++ b/site/recipes/webhook-notifications/index.html @@ -252,13 +252,6 @@ - - - - - - -
          • @@ -270,9 +263,6 @@ - - - @@ -548,6 +538,10 @@ + + + + @@ -577,14 +571,12 @@ - - - - + + @@ -602,8 +594,6 @@ - -
          • @@ -712,6 +702,8 @@ + + @@ -732,7 +724,7 @@ - + @@ -1784,6 +1776,8 @@ + + diff --git a/site/recipes/when-to-use-agent-teams/index.html b/site/recipes/when-to-use-agent-teams/index.html index 00fb52c84..9a725c21e 100644 --- a/site/recipes/when-to-use-agent-teams/index.html +++ b/site/recipes/when-to-use-agent-teams/index.html @@ -252,13 +252,6 @@ - - - - - - -
          • @@ -270,9 +263,6 @@ - - - @@ -548,6 +538,10 @@ + + + + @@ -577,14 +571,12 @@ - - - - + + @@ -602,8 +594,6 @@ - -
          • @@ -712,6 +702,8 @@ + + @@ -732,7 +724,7 @@ - + @@ -1850,6 +1842,8 @@ + + diff --git a/site/reference/audit-conventions/index.html b/site/reference/audit-conventions/index.html index 699a262af..5f943e399 100644 --- a/site/reference/audit-conventions/index.html +++ b/site/reference/audit-conventions/index.html @@ -252,13 +252,6 @@ - - - - - - -
          • @@ -270,9 +263,6 @@ - - - @@ -548,6 +538,10 @@ + + + + @@ -577,14 +571,12 @@ - - - - + + @@ -602,8 +594,6 @@ - -
          • @@ -1501,6 +1491,8 @@ + + diff --git a/site/reference/comparison/index.html b/site/reference/comparison/index.html index d0078e4bc..ca293195a 100644 --- a/site/reference/comparison/index.html +++ b/site/reference/comparison/index.html @@ -252,13 +252,6 @@ - - - - - - -
          • @@ -270,9 +263,6 @@ - - - @@ -548,6 +538,10 @@ + + + + @@ -577,14 +571,12 @@ - - - - + + @@ -602,8 +594,6 @@ - -
          • @@ -1382,6 +1372,8 @@ + + diff --git a/site/reference/design-invariants/index.html b/site/reference/design-invariants/index.html index f52a247a8..3d12a7c28 100644 --- a/site/reference/design-invariants/index.html +++ b/site/reference/design-invariants/index.html @@ -252,13 +252,6 @@ - - - - - - -
          • @@ -270,9 +263,6 @@ - - - @@ -548,6 +538,10 @@ + + + + @@ -577,14 +571,12 @@ - - - - + + @@ -602,8 +594,6 @@ - -
          • @@ -1336,6 +1326,8 @@ + + diff --git a/site/reference/index.html b/site/reference/index.html index f0efb4b80..4b5171fe8 100644 --- a/site/reference/index.html +++ b/site/reference/index.html @@ -252,13 +252,6 @@ - - - - - - -
          • @@ -270,9 +263,6 @@ - - - @@ -548,6 +538,10 @@ + + + + @@ -577,14 +571,12 @@ - - - - + + @@ -602,8 +594,6 @@ - -
          • @@ -1143,6 +1133,8 @@ + + diff --git a/site/reference/scratchpad/index.html b/site/reference/scratchpad/index.html index aeacdf040..d68543bb3 100644 --- a/site/reference/scratchpad/index.html +++ b/site/reference/scratchpad/index.html @@ -252,13 +252,6 @@ - - - - - - -
          • @@ -270,9 +263,6 @@ - - - @@ -548,6 +538,10 @@ + + + + @@ -577,14 +571,12 @@ - - - - + + @@ -602,8 +594,6 @@ - -
          • @@ -1349,6 +1339,8 @@ + + diff --git a/site/reference/session-journal/index.html b/site/reference/session-journal/index.html index 9c03e44ff..1cb5a95e2 100644 --- a/site/reference/session-journal/index.html +++ b/site/reference/session-journal/index.html @@ -252,13 +252,6 @@ - - - - - - -
          • @@ -270,9 +263,6 @@ - - - @@ -548,6 +538,10 @@ + + + + @@ -577,14 +571,12 @@ - - - - + + @@ -602,8 +594,6 @@ - -
          • @@ -1460,6 +1450,8 @@ + + @@ -2313,13 +2305,13 @@

            Full PipelineRebuild ctx journal site --build Generates static HTML site --- +(never) Obsidian ctx journal obsidian Generates Obsidian vault with wikilinks --- +(never) diff --git a/site/reference/skills/index.html b/site/reference/skills/index.html index e23ee32ef..cfbf10f13 100644 --- a/site/reference/skills/index.html +++ b/site/reference/skills/index.html @@ -252,13 +252,6 @@ - - - - - - -
          • @@ -270,9 +263,6 @@ - - - @@ -548,6 +538,10 @@ + + + + @@ -577,14 +571,12 @@ - - - - + + @@ -602,8 +594,6 @@ - -
          • @@ -1782,6 +1772,8 @@ + + diff --git a/site/reference/versions/index.html b/site/reference/versions/index.html index b46b8ade4..d4a61c217 100644 --- a/site/reference/versions/index.html +++ b/site/reference/versions/index.html @@ -252,13 +252,6 @@ - - - - - - -
          • @@ -270,9 +263,6 @@ - - - @@ -548,6 +538,10 @@ + + + + @@ -577,14 +571,12 @@ - - - - + + @@ -602,8 +594,6 @@ - -
          • @@ -1316,6 +1306,8 @@ + + diff --git a/site/search.json b/site/search.json index 26bb8375c..f3b845bb2 100644 --- a/site/search.json +++ b/site/search.json @@ -1 +1 @@ -{"config":{"separator":"[\\s\\-_,:!=\\[\\]()\\\\\"`/]+|\\.(?!\\d)"},"items":[{"location":"","level":1,"title":"Manifesto","text":"","path":["Manifesto"],"tags":[]},{"location":"#the-ctx-manifesto","level":1,"title":"The ctx Manifesto","text":"

            Creation, not code.

            Context, not prompts.

            Verification, not vibes.

            This Is NOT a Metaphor

            Code executes instructions.

            Creation produces outcomes.

            Confusing the two is how teams ship motion...

            ...instead of progress.

            • It was never about the code.
            • Code has zero standalone value.
            • Code is an implementation detail.

            Code is an incantation.

            Creation is the act.

            And creation does not happen in a vacuum.

            ","path":["Manifesto"],"tags":[]},{"location":"#ctx-is-the-substrate","level":2,"title":"ctx Is the Substrate","text":"

            Constraints Have Moved

            Human bandwidth is no longer the limiting factor.

            Context integrity is.

            Human bandwidth is no longer the constraint.

            Context is:

            • Without durable context, intelligence resets.
            • Without memory, reasoning decays.
            • Without structure, scale collapses.

            Creation is now limited by:

            • Clarity of intent;
            • Quality of context;
            • Rigor of verification.

            Not by speed.

            Not by capacity.

            Velocity Amplifies

            Faster execution on broken context compounds error.

            Speed multiplies whatever is already wrong.

            ","path":["Manifesto"],"tags":[]},{"location":"#humans-author-meaning","level":2,"title":"Humans Author Meaning","text":"

            Intent Is Authored

            Systems can optimize.

            Models can generalize.

            Meaning must be chosen.

            Intent is not emergent.

            Vision, goals, and direction are human responsibilities.

            We decide:

            • What matters;
            • What success means;
            • What world we are building.

            ctx encodes the intent so it...

            • survives time,
            • survives handoffs,
            • survives scale.

            Nothing important should live only in conversation.

            Nothing critical should depend on recall.

            Oral Tradition Does Not Scale

            If intent cannot be inspected, it cannot be enforced.

            ","path":["Manifesto"],"tags":[]},{"location":"#ctx-before-action","level":2,"title":"ctx Before Action","text":"

            Orientation Precedes Motion

            Acting first and understanding later is not bravery.

            It is debt.

            Never act without ctx.

            Before execution, we must verify:

            • Where we are;
            • Why we are here;
            • What constraints apply;
            • What assumptions are active.

            Action without ctx is gambling.

            Speed without orientation is noise.

            ctx is not overhead: It is the cost of correctness.

            ","path":["Manifesto"],"tags":[]},{"location":"#persistent-context-beats-prompt-memory","level":2,"title":"Persistent Context Beats Prompt Memory","text":"

            Transience Is the Default Failure Mode

            • Prompts decay.
            • Chats fragment.
            • Memory heuristics drift.

            Prompts are transient.

            Chats are lossy.

            Memory heuristics drift.

            ctx must be:

            • Durable;
            • Structured;
            • Explicit;
            • Queryable.

            Intent Must Be Intentional

            If intent exists only in a prompt...

            ...alignment is already degrading.

            Knowledge lives in the artifacts:

            • Decisions;
            • Documentation;
            • Dependency maps;
            • Evaluation history.

            Artifacts Outlive Sessions

            What is not written will be re-learned.

            At full cost.

            ","path":["Manifesto"],"tags":[]},{"location":"#what-ctx-is-not","level":2,"title":"What ctx Is Not","text":"

            Avoid Category Errors

            Mislabeling ctx guarantees misuse.

            ctx is not a memory feature.

            • ctx is not prompt engineering.
            • ctx is not a productivity hack.
            • ctx is not automation theater.

            ctx is a system for preserving intent under scale.

            ctx is infrastructure.

            ","path":["Manifesto"],"tags":[]},{"location":"#verified-reality-is-the-scoreboard","level":2,"title":"Verified Reality Is the Scoreboard","text":"

            Activity Is a False Proxy

            Output volume correlates poorly with impact.

            • Code is not progress.
            • Activity is not impact.

            The only truth that compounds is verified change.

            Verified change must exist in the real world.

            Hypotheses are cheap; outcomes are not.

            ctx captures:

            • What we expected;
            • What we observed;
            • Where reality diverged.

            If we cannot predict, measure, and verify the result...

            ...it does not count.

            ","path":["Manifesto"],"tags":[]},{"location":"#build-to-learn-not-to-accumulate","level":2,"title":"Build to Learn, Not to Accumulate","text":"

            Prototypes Have an Expiration Date

            A prototype's value is information, not longevity.

            Prototypes exist to reduce uncertainty.

            We build to:

            • Test assumptions;
            • Validate architecture;
            • Answer specific questions.

            Not everything.

            Not blindly.

            Not permanently.

            ctx records archeology so the cost is paid once.

            ","path":["Manifesto"],"tags":[]},{"location":"#failures-are-assets","level":2,"title":"Failures Are Assets","text":"

            Failure without Capture Is Waste

            Pain that does not teach is pure loss.

            Failures are not erased: They are preserved.

            Each failure becomes:

            • A documented hypothesis;
            • An analyzed deviation;
            • A permanent artifact.

            Rollback fixes symptoms: ctx fixes systems.

            A repeated mistake is a missing ctx artifact.

            ","path":["Manifesto"],"tags":[]},{"location":"#structure-enables-scale","level":2,"title":"Structure Enables Scale","text":"

            Unbounded Autonomy Destabilizes

            Power without a structure produces chaos.

            Transpose it:

            Power without any structure becomes chaos.

            ctx defines:

            • Roles;
            • Boundaries;
            • Protocols;
            • Escalation paths;
            • Decision rights.

            Ambiguity is a system failure:

            • Debates must be structured.
            • Decisions must be explicit.
            • History must be retained.
            ","path":["Manifesto"],"tags":[]},{"location":"#encode-intent-into-the-environment","level":2,"title":"Encode Intent into the Environment","text":"

            Goodwill Does Not Belong to the Table

            Alignment that depends on memory will drift.

            Alignment cannot depend on memory or goodwill.

            Do not rely on people to remember.

            Encode the behavior, so it happens by default.

            Intent is encoded as:

            • Policies;
            • Schemas;
            • Constraints;
            • Evaluation harnesses.

            Rules must be machine-readable.

            Laws must be enforceable.

            If intent is implicit, drift is guaranteed.

            ","path":["Manifesto"],"tags":[]},{"location":"#cost-is-a-first-class-signal","level":2,"title":"Cost Is a First-Class Signal","text":"

            Attention Is the Scarcest Resource

            Not ideas.

            Not ambition.

            Ideas do not compete on time:

            They compete on cost and impact:

            • Attention is finite.
            • Compute is finite.
            • Context is expensive.

            We continuously ask:

            • What the most valuable next action is.
            • What outcome justifies the cost.

            ctx guides allocation.

            Learning reshapes priority.

            ","path":["Manifesto"],"tags":[]},{"location":"#show-the-why","level":2,"title":"Show the Why","text":"

            {} (code, artifacts, apps, binaries) produce outputs; they do not preserve reasoning.

            Systems that cannot explain themselves will not be trusted.

            Traceability builds trust.

                 {} --> what\n\n    ctx --> why\n

            We record:

            • Explored paths;
            • Rejected options;
            • Assumptions made;
            • Evidence used.

            Opaque systems erode trust:

            Transparent ctx compounds understanding.

            ","path":["Manifesto"],"tags":[]},{"location":"#continuously-verify-the-system","level":2,"title":"Continuously Verify the System","text":"

            Stability Is Temporary

            Every assumption has a half-life:

            • Models drift.
            • Tools change.
            • Assumptions rot.

            ctx must be verified against reality.

            Trust is a spectrum.

            Trust is continuously re-earned:

            • Benchmarks,
            • regressions,
            • and evaluations...

            ...are safety rails.

            ","path":["Manifesto"],"tags":[]},{"location":"#ctx-is-leverage","level":2,"title":"ctx Is Leverage","text":"

            Humans Are Decision Engines

            Execution should not consume judgment.

            Humans must not be typists.

            We are the authors.

            Human effort is reserved for:

            • Judgment;
            • Design;
            • Taste;
            • Synthesis.

            Repetition is delegated.

            Toil is automated.

            ctx preserves leverage across time.

            ","path":["Manifesto"],"tags":[]},{"location":"#the-thesis","level":2,"title":"The Thesis","text":"

            Invariant

            Everything else is an implementation detail.

            • Creation is the act.
            • ctx is the substrate.
            • Verification is the truth.

            Code executes → Models reason → Agents amplify.

            ctx lives on.

            • Without ctx, intelligence resets.
            • With ctx, creation compounds.
            ","path":["Manifesto"],"tags":[]},{"location":"blog/","level":1,"title":"Blog","text":"

            Stories, insights, and lessons learned from building and using ctx.

            ","path":["Blog"],"tags":[]},{"location":"blog/#releases","level":2,"title":"Releases","text":"","path":["Blog"],"tags":[]},{"location":"blog/#ctx-v080-the-architecture-release","level":3,"title":"ctx v0.8.0: The Architecture Release","text":"

            March 23, 2026: 374 commits, 1,708 Go files touched, and a near-complete architectural overhaul. Every CLI package restructured into cmd/ + core/ taxonomy, all user-facing strings externalized to YAML, MCP server for tool-agnostic AI integration, and the memory bridge connecting Claude Code's auto-memory to .context/.

            Topics: release, architecture, refactoring, MCP, localization

            ","path":["Blog"],"tags":[]},{"location":"blog/#field-notes","level":2,"title":"Field Notes","text":"","path":["Blog"],"tags":[]},{"location":"blog/#the-watermelon-rind-anti-pattern-why-smarter-tools-make-shallower-agents","level":3,"title":"The Watermelon-Rind Anti-Pattern: Why Smarter Tools Make Shallower Agents","text":"

            April 6, 2026: Give an agent a graph query tool, and it produces output that's structurally correct but substantively hollow (the watermelon-rind anti-pattern). We ran three sessions analyzing the same codebase with different tool access: the one with no tools produced 5.2x more depth. The fix: a two-pass compiler for architecture understanding: force code reading first, verify with tools second. Constraint is the feature.

            Topics: architecture, code intelligence, agent behavior, design patterns, field notes

            ","path":["Blog"],"tags":[]},{"location":"blog/#code-structure-as-an-agent-interface-what-19-ast-tests-taught-us","level":3,"title":"Code Structure as an Agent Interface: What 19 AST Tests Taught Us","text":"

            April 2, 2026: We built 19 AST-based audit tests in a single session, touching 300+ files. In the process we discovered that \"old-school\" code quality constraints (no magic numbers, centralized error handling, 80-char lines, documentation) are exactly the constraints that make code readable to AI agents. If an agent interacts with your codebase, your codebase already is an interface. You just have not designed it as one.

            Topics: ast, code quality, agent readability, conventions, field notes

            ","path":["Blog"],"tags":[]},{"location":"blog/#we-broke-the-31-rule","level":3,"title":"We Broke the 3:1 Rule","text":"

            March 23, 2026: After v0.6.0, we ran 198 feature commits across 17 days before consolidating. The 3:1 rule says consolidate every 4th session. We did it after the 66th. The result: an 18-day, 181-commit cleanup marathon that took longer than the feature run itself. A follow-up to The 3:1 Ratio with empirical evidence from the v0.8.0 cycle.

            Topics: consolidation, technical debt, development workflow, convention drift, field notes

            ","path":["Blog"],"tags":[]},{"location":"blog/#context-engineering","level":2,"title":"Context Engineering","text":"","path":["Blog"],"tags":[]},{"location":"blog/#agent-memory-is-infrastructure","level":3,"title":"Agent Memory Is Infrastructure","text":"

            March 4, 2026: Every AI coding agent starts fresh. The obvious fix is \"memory.\" But there's a different problem memory doesn't touch: the project itself accumulates knowledge that has nothing to do with any single session. This post argues that agent memory is L2 (runtime cache); what's missing is L3 (project infrastructure).

            Topics: context engineering, agent memory, infrastructure, persistence, team knowledge

            ","path":["Blog"],"tags":[]},{"location":"blog/#context-as-infrastructure","level":3,"title":"Context as Infrastructure","text":"

            February 17, 2026: Where does your AI's knowledge live between sessions? If the answer is \"in a prompt I paste at the start,\" you are treating context as a consumable. This post argues for treating it as infrastructure instead: persistent files, separation of concerns, two-tier storage, progressive disclosure, and the filesystem as the most mature interface available.

            Topics: context engineering, infrastructure, progressive disclosure, persistence, design philosophy

            ","path":["Blog"],"tags":[]},{"location":"blog/#the-attention-budget-why-your-ai-forgets-what-you-just-told-it","level":3,"title":"The Attention Budget: Why Your AI Forgets What You Just Told It","text":"

            February 3, 2026: Every token you send to an AI consumes a finite resource: the attention budget. Understanding this constraint shaped every design decision in ctx: hierarchical file structure, explicit budgets, progressive disclosure, and filesystem-as-index.

            Topics: attention mechanics, context engineering, progressive disclosure, ctx primitives, token budgets

            ","path":["Blog"],"tags":[]},{"location":"blog/#before-context-windows-we-had-bouncers","level":3,"title":"Before Context Windows, We Had Bouncers","text":"

            February 14, 2026: IRC is stateless. You disconnect, you vanish. Modern systems are not much different. This post traces the line from IRC bouncers to context engineering: stateless protocols require stateful wrappers, volatile interfaces require durable memory.

            Topics: context engineering, infrastructure, IRC, persistence, state continuity

            ","path":["Blog"],"tags":[]},{"location":"blog/#the-last-question","level":3,"title":"The Last Question","text":"

            February 28, 2026: In 1956, Asimov wrote a story about a question that spans the entire future of the universe. A reading of \"The Last Question\" through the lens of persistence, substrate migration, and what it means to build systems where sessions don't reset.

            Topics: context continuity, long-lived systems, persistence, intelligence over time, field notes

            ","path":["Blog"],"tags":[]},{"location":"blog/#agent-behavior-and-design","level":2,"title":"Agent Behavior and Design","text":"","path":["Blog"],"tags":[]},{"location":"blog/#the-dog-ate-my-homework-teaching-ai-agents-to-read-before-they-write","level":3,"title":"The Dog Ate My Homework: Teaching AI Agents to Read Before They Write","text":"

            February 25, 2026: You wrote the playbook. The agent skipped all of it. Five sessions, five failure modes, and the discovery that observable compliance beats perfect compliance.

            Topics: hooks, agent behavior, context engineering, behavioral design, testing methodology, compliance monitoring

            ","path":["Blog"],"tags":[]},{"location":"blog/#skills-that-fight-the-platform","level":3,"title":"Skills That Fight the Platform","text":"

            February 4, 2026: When custom skills conflict with system prompt defaults, the AI has to reconcile contradictory instructions. Five conflict patterns discovered while building ctx.

            Topics: context engineering, skill design, system prompts, antipatterns, AI safety primitives

            ","path":["Blog"],"tags":[]},{"location":"blog/#the-anatomy-of-a-skill-that-works","level":3,"title":"The Anatomy of a Skill That Works","text":"

            February 7, 2026: I had 20 skills. Most were well-intentioned stubs. Then I rewrote all of them. Seven lessons emerged: quality gates prevent premature execution, negative triggers are load-bearing, examples set boundaries better than rules.

            Topics: skill design, context engineering, quality gates, E/A/R framework, practical patterns

            ","path":["Blog"],"tags":[]},{"location":"blog/#you-cant-import-expertise","level":3,"title":"You Can't Import Expertise","text":"

            February 5, 2026: I found a well-crafted consolidation skill. Applied my own E/A/R framework: 70% was noise. This post is about why good skills can't be copy-pasted, and how to grow them from your project's own drift history.

            Topics: skill adaptation, E/A/R framework, convention drift, consolidation, project-specific expertise

            ","path":["Blog"],"tags":[]},{"location":"blog/#not-everything-is-a-skill","level":3,"title":"Not Everything Is a Skill","text":"

            February 8, 2026: I ran an 8-agent codebase audit and got actionable results. The natural instinct was to wrap the prompt as a skill. Then I applied my own criteria: it failed all three tests.

            Topics: skill design, context engineering, automation discipline, recipes, agent teams

            ","path":["Blog"],"tags":[]},{"location":"blog/#defense-in-depth-securing-ai-agents","level":3,"title":"Defense in Depth: Securing AI Agents","text":"

            February 9, 2026: The security advice was \"use CONSTITUTION.md for guardrails.\" That is wishful thinking. Five defense layers for unattended AI agents, each with a bypass, and why the strength is in the combination.

            Topics: agent security, defense in depth, prompt injection, autonomous loops, container isolation

            ","path":["Blog"],"tags":[]},{"location":"blog/#development-practice","level":2,"title":"Development Practice","text":"","path":["Blog"],"tags":[]},{"location":"blog/#code-is-cheap-judgment-is-not","level":3,"title":"Code Is Cheap. Judgment Is Not.","text":"

            February 17, 2026: AI does not replace workers. It replaces unstructured effort. Three weeks of building ctx with an AI agent proved it: YOLO mode showed production is cheap, the 3:1 ratio showed judgment has a cadence.

            Topics: AI and expertise, context engineering, judgment vs production, human-AI collaboration, automation discipline

            ","path":["Blog"],"tags":[]},{"location":"blog/#the-31-ratio","level":3,"title":"The 3:1 Ratio","text":"

            February 17, 2026: AI makes technical debt worse: not because it writes bad code, but because it writes code so fast that drift accumulates before you notice. Three feature sessions, one consolidation session.

            Topics: consolidation, technical debt, development workflow, convention drift, code quality

            ","path":["Blog"],"tags":[]},{"location":"blog/#refactoring-with-intent-human-guided-sessions-in-ai-development","level":3,"title":"Refactoring with Intent: Human-Guided Sessions in AI Development","text":"

            February 1, 2026: The YOLO mode shipped 14 commands in a week. But technical debt doesn't send invoices. This is the story of what happened when we started guiding the AI with intent.

            Topics: refactoring, code quality, documentation standards, module decomposition, YOLO versus intentional development

            ","path":["Blog"],"tags":[]},{"location":"blog/#how-deep-is-too-deep","level":3,"title":"How Deep Is Too Deep?","text":"

            February 12, 2026: I kept feeling like I should go deeper into ML theory. Then I spent a week debugging an agent failure that had nothing to do with model architecture. When depth compounds and when it doesn't.

            Topics: AI foundations, abstraction boundaries, agentic systems, context engineering, failure modes

            ","path":["Blog"],"tags":[]},{"location":"blog/#agent-workflows","level":2,"title":"Agent Workflows","text":"","path":["Blog"],"tags":[]},{"location":"blog/#parallel-agents-merge-debt-and-the-myth-of-overnight-progress","level":3,"title":"Parallel Agents, Merge Debt, and the Myth of Overnight Progress","text":"

            February 17, 2026: You discover agents can run in parallel. So you open ten terminals. It is not progress: it is merge debt being manufactured in real time. The five-agent ceiling and why role separation beats file locking.

            Topics: agent workflows, parallelism, verification, context engineering, engineering practice

            ","path":["Blog"],"tags":[]},{"location":"blog/#parallel-agents-with-git-worktrees","level":3,"title":"Parallel Agents with Git Worktrees","text":"

            February 14, 2026: I had 30 open tasks that didn't touch the same files. Using git worktrees to partition a backlog by file overlap, run 3-4 agents simultaneously, and merge the results.

            Topics: agent teams, parallelism, git worktrees, context engineering, task management

            ","path":["Blog"],"tags":[]},{"location":"blog/#field-notes-and-signals","level":2,"title":"Field Notes and Signals","text":"","path":["Blog"],"tags":[]},{"location":"blog/#when-a-system-starts-explaining-itself","level":3,"title":"When a System Starts Explaining Itself","text":"

            February 17, 2026: Every new substrate begins as a private advantage. Reality begins when other people start describing it in their own language. \"Better than Adderall\" is not praise; it is a diagnostic.

            Topics: field notes, adoption signals, infrastructure vs tools, context engineering, substrates

            ","path":["Blog"],"tags":[]},{"location":"blog/#why-zensical","level":3,"title":"Why Zensical","text":"

            February 15, 2026: I needed a static site generator for the journal system. The instinct was Hugo. But instinct is not analysis. Why zensical was the right choice: thin dependencies, MkDocs-compatible config, and zero lock-in.

            Topics: tooling, static site generators, journal system, infrastructure decisions, context engineering

            ","path":["Blog"],"tags":[]},{"location":"blog/#releases_1","level":2,"title":"Releases","text":"","path":["Blog"],"tags":[]},{"location":"blog/#ctx-v060-the-integration-release","level":3,"title":"ctx v0.6.0: The Integration Release","text":"

            February 16, 2026: ctx is now a Claude Marketplace plugin. Two commands, no build step, no shell scripts. v0.6.0 replaces six Bash hook scripts with compiled Go subcommands and ships 25+ Skills as a plugin.

            Topics: release, plugin system, Claude Marketplace, distribution, security hardening

            ","path":["Blog"],"tags":[]},{"location":"blog/#ctx-v030-the-discipline-release","level":3,"title":"ctx v0.3.0: The Discipline Release","text":"

            February 15, 2026: No new headline feature. Just 35+ documentation and quality commits against ~15 feature commits. What a release looks like when the ratio of polish to features is 3:1.

            Topics: release, skills migration, consolidation, code quality, E/A/R framework

            ","path":["Blog"],"tags":[]},{"location":"blog/#ctx-v020-the-archaeology-release","level":3,"title":"ctx v0.2.0: The Archaeology Release","text":"

            February 1, 2026: What if your AI could remember everything? Not just the current session, but every session. ctx v0.2.0 introduces the recall and journal systems.

            Topics: session recall, journal system, structured entries, token budgets, meta-tools

            ","path":["Blog"],"tags":[]},{"location":"blog/#building-ctx-using-ctx-a-meta-experiment-in-ai-assisted-development","level":3,"title":"Building ctx Using ctx: A Meta-Experiment in AI-Assisted Development","text":"

            January 27, 2026: What happens when you build a tool designed to give AI memory, using that very same tool to remember what you're building? This is the story of ctx.

            Topics: dogfooding, AI-assisted development, Ralph Loop, session persistence, architectural decisions

            ","path":["Blog"],"tags":[]},{"location":"blog/2026-01-27-building-ctx-using-ctx/","level":1,"title":"Building ctx Using ctx","text":"

            Update (2026-02-11)

            As of v0.4.0, ctx consolidated sessions into the journal mechanism.

            References to .context/sessions/, auto-save hooks, and SessionEnd auto-save in this post reflect the architecture at the time of writing.

            ","path":["Building ctx Using ctx: A Meta-Experiment in AI-Assisted Development"],"tags":[]},{"location":"blog/2026-01-27-building-ctx-using-ctx/#a-meta-experiment-in-ai-assisted-development","level":2,"title":"A Meta-Experiment in AI-Assisted Development","text":"

            Jose Alekhinne / 2026-01-27

            Can a Tool Design Itself?

            What happens when you build a tool designed to give AI memory, using that very same tool to remember what you are building?

            This is the story of ctx, how it evolved from a hasty \"YOLO mode\" experiment to a disciplined system for persistent AI context, and what I have learned along the way.

            Context Is a Record

            Context is a persistent record.

            By \"context\", I don't mean model memory or stored thoughts:

            I mean the durable record of decisions, learnings, and intent that normally evaporates between sessions.

            ","path":["Building ctx Using ctx: A Meta-Experiment in AI-Assisted Development"],"tags":[]},{"location":"blog/2026-01-27-building-ctx-using-ctx/#ai-amnesia","level":2,"title":"AI Amnesia","text":"

            Every developer who works with AI code generators knows the frustration:

            You have a deep, productive session where the AI understands your codebase, your conventions, your decisions. And then you close the terminal.

            Tomorrow, it's a blank slate. The AI has forgotten everything.

            That is \"reset amnesia\", and it's not just annoying: it's expensive.

            Every session starts with:

            • Re-explaining context;
            • Re-reading files;
            • Re-discovering decisions that were already made.

            I Needed Context

            \"I don't want to lose this discussion...

            ...I am a brain-dead developer YOLO'ing my way out.\"

            ☝️ that's exactly what I said to Claude when I first started working on ctx.

            ","path":["Building ctx Using ctx: A Meta-Experiment in AI-Assisted Development"],"tags":[]},{"location":"blog/2026-01-27-building-ctx-using-ctx/#the-genesis","level":2,"title":"The Genesis","text":"

            The project started as \"Active Memory\" (amem): a CLI tool to persist AI context across sessions.

            The core idea was simple:

            1. Create a .context/ directory with structured Markdown files for decisions, learnings, tasks, and conventions.
            2. The AI reads these at session start and writes to them before the session ends.
            3. There is no step 3.

            The first commit was just scaffolding. But within hours, the Ralph Loop (An iterative AI development workflow) had produced a working CLI:

            feat(cli): implement amem init command\nfeat(cli): implement amem status command\nfeat(cli): implement amem add command\nfeat(cli): implement amem agent command\n...\n

            Not one, not two, but a whopping fourteen core commands shipped in rapid succession!

            I was YOLO'ing like there was no tomorrow:

            • Auto-accept every change;
            • Let the AI run free;
            • Ship features fast.
            ","path":["Building ctx Using ctx: A Meta-Experiment in AI-Assisted Development"],"tags":[]},{"location":"blog/2026-01-27-building-ctx-using-ctx/#the-meta-experiment-using-amem-to-build-amem","level":2,"title":"The Meta-Experiment: Using amem to Build amem","text":"

            Here's where it gets interesting: On January 20th, I asked:

            \"Can I use amem to help you remember this context when I restart?\"

            The answer was yes, but with a gap:

            Autoload worked (via Claude Code's PreToolUse hook), but auto-save was missing: If the user quit, with Ctrl+C, everything since the last manual save was lost.

            That session became the first real test of the system.

            Here is the first session file we recorded:

            ## Key Discussion Points\n\n### 1. amem vs Ralph Loop - They're Separate Systems\n\n**User's question**: \"How do I use the binary to recreate this project?\"\n\n**Answer discovered**: `amem` is for context management, Ralph Loop is for \ndevelopment workflow. They are complementary but separate.\n\n### 2. Two Tiers of Context Persistence\n\n| Tier      | What                        | Why                           |\n|-----------|-----------------------------|-------------------------------|\n| Curated   | Learnings, decisions, tasks | Quick reload, token-efficient |\n| Full dump | Entire conversation         | Safety net, nothing lost      |\n\n| Where                  |\n|------------------------|\n| .context/*.md          |\n| .context/sessions/*.md |\n

            This session file (written by the AI to preserve its own context) became the template for how ctx handles session persistence.

            ","path":["Building ctx Using ctx: A Meta-Experiment in AI-Assisted Development"],"tags":[]},{"location":"blog/2026-01-27-building-ctx-using-ctx/#the-rename","level":2,"title":"The Rename","text":"

            By January 21st, I realized \"Active Memory\" was too generic, and (arguably) too marketing-smelly.

            Besides, the binary was already called ctx (short for Context), the directory was .context/, and the slash commands would be /ctx-*.

            So it followed that the project should be renamed to ctx to make things make sense.

            The rename touched 100+ files but was clean: a find-and-replace with Go's type system catching any misses.

            The git history tells the story:

            0e8f6bb feat: rename amem to ctx and add Claude Code integration\n87dcfa1 README.\n4f0e195 feat: separate orchestrator directive from agent tasks\n
            ","path":["Building ctx Using ctx: A Meta-Experiment in AI-Assisted Development"],"tags":[]},{"location":"blog/2026-01-27-building-ctx-using-ctx/#yolo-mode-fast-but-dangerous","level":2,"title":"YOLO Mode: Fast, but Dangerous","text":"

            The Ralph Loop made feature development incredibly fast.

            But it created technical debt that I didn't notice until later.

            A comparison session on January 25th revealed the patterns:

            YOLO Pattern What We Found \"TASKS.md\" scattered in 10 files Same string literal everywhere, no constants dir + \"/\" + file Should be filepath.Join() Monolithic cli_test.go (1500+ lines) Tests disconnected from implementations package initcmd in init/ folder Go's \"init\" is reserved: subtle naming collision

            Here is another analysis made by ctx:

            ● Based on my analysis, here are the key differences:\n\nYOLO Mode (Pre-040ce99)\n- Feature-first: Added slash commands, tests, templates rapidly\n- Scattered magic strings: \"TASKS.md\", \"decision\", \".context\" spread across files\n- Quick file creation: New files without organizational patterns\n- Working but inconsistent: Code functioned but lacked systematic structure\n\nHuman-Guided Mode (Post-040ce99)\n- Consolidation focus: Centralized constants in config package\n- Consistent naming: Dir, File, Filename, UpdateType prefixes\n- Self-referential constants: FileType map uses constants as keys, not literals\n- Proper path construction: filepath.Join() instead of +\"/\"+\n- Colocated tests: Tests next to implementations\n- Canonical naming: Package name = folder name\n

            The fix required a human-guided refactoring session. I continued to do that before every major release, from that point on.

            We introduced internal/config/config.go with semantic prefixes:

            const (\n    DirContext     = \".context\"\n    DirArchive     = \"archive\"\n    DirSessions    = \"sessions\"\n    FilenameTask   = \"TASKS.md\"\n    UpdateTypeTask = \"task\"\n)\n

            What I begrudgingly learned was: YOLO mode is effective for velocity but accumulates debt.

            So I took a mental note to schedule periodic consolidation sessions.

            ","path":["Building ctx Using ctx: A Meta-Experiment in AI-Assisted Development"],"tags":[]},{"location":"blog/2026-01-27-building-ctx-using-ctx/#the-dogfooding-test-that-failed","level":2,"title":"The Dogfooding Test That Failed","text":"

            On January 21st, I ran an experiment: have another Claude instance rebuild ctx from scratch using only the specs and PROMPT.md.

            The Ralph Loop ran, all tasks got checked off, the loop exited successfully.

            But the binary was broken!

            Commands just printed help text instead of executing.

            All tasks were marked \"complete\" but the implementation didn't work.

            Here's what ctx discovered:

            ## Key Findings\n\n### Dogfooding Binary Is Broken\n- Commands don't execute: they just print root help text\n- All tasks were marked complete but binary doesn't work\n- Lesson: \"tasks checked off\" ≠ \"implementation works\"\n

            This was humbling, to say the least.

            I realized I had the same blind spot in my own codebase: no integration tests that actually invoked the binary.

            So I added:

            • Integration tests for all commands;
            • Coverage targets (60-80% per package)
            • Smoke tests in CI
            • A constitution rule: \"All code must pass tests before commit\"
            ","path":["Building ctx Using ctx: A Meta-Experiment in AI-Assisted Development"],"tags":[]},{"location":"blog/2026-01-27-building-ctx-using-ctx/#the-constitution-versus-conventions","level":2,"title":"The Constitution versus Conventions","text":"

            As lessons accumulated, there was the temptation to add everything to CONSTITUTION.md as \"inviolable rules\".

            But I resisted.

            The constitution should contain only truly inviolable invariants:

            • Security (no secrets, no customer data)
            • Quality (tests must pass)
            • Process (decisions need records)
            • ctx invocation (always use PATH, never fallback)

            Everything else (coding style, file organization, naming conventions...) should go into CONVENTIONS.md.

            Here's how ctx explained why the distinction was important:

            Decision Record, 2026-01-25

            Overly strict constitution creates friction and gets ignored.

            Conventions can be bent; constitution cannot.

            ","path":["Building ctx Using ctx: A Meta-Experiment in AI-Assisted Development"],"tags":[]},{"location":"blog/2026-01-27-building-ctx-using-ctx/#hooks-harder-than-they-look","level":2,"title":"Hooks: Harder than They Look","text":"

            Claude Code hooks seemed simple: Run a script before/after certain events.

            But I hit multiple gotchas:

            1. Key names matter

            // WRONG - \"Invalid key in record\" error\n\"PreToolUseHooks\": [...]\n\n// RIGHT\n\"PreToolUse\": [...]\n

            2. Blocking requires specific output

            # WRONG - just exits, doesn't block\nexit 1\n\n# RIGHT - JSON output + exit 0\necho '{\"decision\": \"block\", \"reason\": \"Use ctx from PATH\"}'\nexit 0\n

            3. Go's JSON escaping

            json.Marshal escapes >, <, & as unicode (\\u003e) by default.

            When generating shell commands in JSON:

            encoder := json.NewEncoder(file)\nencoder.SetEscapeHTML(false) // Prevent 2>/dev/null → 2\\u003e/dev/null\n

            4. Regex overfitting

            My hook to block non-PATH ctx invocations initially matched too broadly:

            # WRONG - matches /home/user/ctx/internal/file.go (ctx as directory)\n(/home/|/tmp/|/var/)[^ ]*ctx[^ ]*\n\n# RIGHT - matches ctx as binary only\n(/home/|/tmp/|/var/)[^ ]*/ctx( |$)\n
            ","path":["Building ctx Using ctx: A Meta-Experiment in AI-Assisted Development"],"tags":[]},{"location":"blog/2026-01-27-building-ctx-using-ctx/#the-session-files","level":2,"title":"The Session Files","text":"

            At the time of this writing, the ctx sessions directory (.context/sessions/) contains 40+ files from this project's development.

            They are not part of the source code due to security, privacy, and size concerns.

            Middle Ground: The Scratchpad

            For sensitive notes that do need to travel with the project, ctx pad stores encrypted one-liners in git, and ctx pad add \"label\" --file PATH can ingest small files.

            See Scratchpad for details.

            However, they are invaluable for the project's progress.

            Each session file is a timestamped Markdown with:

            • Summary of what has been accomplished;
            • Key decisions made;
            • Learnings discovered;
            • Tasks for the next session;
            • Technical context (platform, versions).

            These files are not autoloaded (that would bust the token budget).

            They are what I see as the \"archaeological record\" of ctx:

            When the AI needs deeper information about why something was done, it digs into the sessions.

            Auto-generated session files used a naming convention:

            2026-01-23-115432-session-prompt_input_exit-summary.md\n2026-01-25-220244-manual-save.md\n2026-01-27-052107-session-other-summary.md\n

            Update

            The session feature described here is historical.

            In current releases, ctx uses a journal instead: the enrichment process generates meaningful slugs from context automatically, so there is no need to manually save sessions.

            The SessionEnd hook captured transcripts automatically. Even Ctrl+C was caught.

            ","path":["Building ctx Using ctx: A Meta-Experiment in AI-Assisted Development"],"tags":[]},{"location":"blog/2026-01-27-building-ctx-using-ctx/#the-decision-log-18-architectural-decisions","level":2,"title":"The Decision Log: 18 Architectural Decisions","text":"

            ctx helps record every significant architectural choice in .context/DECISIONS.md.

            Here are some highlights:

            Reverse-chronological order (2026-01-27)

            **Context**: With chronological order, oldest items consume tokens first, and\nnewest (most relevant) items risk being truncated.\n\n**Decision**: Use reverse-chronological order (newest first) for DECISIONS.md\nand LEARNINGS.md.\n

            PATH over hardcoded paths (2026-01-21)

            **Context**: Original implementation hardcoded absolute paths in hooks.\nThis breaks when sharing configs with other developers.\n\n**Decision**: Hooks use `ctx` from PATH. `ctx init` checks PATH before \nproceeding.\n

            Generic core with Claude enhancements (2026-01-20)

            **Context**: ctx should work with any AI tool, but Claude Code users could\nbenefit from deeper integration.\n\n**Decision**: Keep ctx generic as the core tool, but provide optional\nClaude Code-specific enhancements.\n
            ","path":["Building ctx Using ctx: A Meta-Experiment in AI-Assisted Development"],"tags":[]},{"location":"blog/2026-01-27-building-ctx-using-ctx/#the-learning-log-24-gotchas-and-insights","level":2,"title":"The Learning Log: 24 Gotchas and Insights","text":"

            The .context/LEARNINGS.md file captures gotchas that would otherwise be forgotten. Each has Context, Lesson, and Application sections:

            CGO on ARM64

            **Context**: `go test` failed with \n`gcc: error: unrecognized command-line option '-m64'`\n\n**Lesson**: On ARM64 Linux, CGO causes cross-compilation issues. \nAlways use `CGO_ENABLED=0`.\n

            Claude Code skills format

            **Lesson**: Claude Code skills are Markdown files in .claude/commands/ with `YAML`\nfrontmatter (*description, argument-hint, allowed-tools*). Body is the prompt.\n

            \"Do you remember?\" handling

            **Lesson**: In a `ctx`-enabled project, \"*do you remember?*\" \nhas an obvious meaning:\ncheck the `.context/` files. Don't ask for clarification. Just do it.\n
            ","path":["Building ctx Using ctx: A Meta-Experiment in AI-Assisted Development"],"tags":[]},{"location":"blog/2026-01-27-building-ctx-using-ctx/#task-archives-the-completed-work","level":2,"title":"Task Archives: The Completed Work","text":"

            Completed tasks are archived to .context/archive/ with timestamps.

            The archive from January 23rd shows 13 phases of work:

            • Phase 1: Project Scaffolding (Go module, Cobra CLI)
            • Phase 2-4: Core Commands (init, status, agent, add, complete, drift, sync, compact, watch, hook)
            • Phase 5: Session Management (save, list, load, parse, --extract)
            • Phase 6: Claude Code Integration (hooks, settings, CLAUDE.md handling)
            • Phase 7: Testing & Verification
            • Phase 8: Task Archival
            • Phase 9: Slash Commands
            • Phase 9b: Ralph Loop Integration
            • Phase 10: Project Rename
            • Phase 11: Documentation
            • Phase 12: Timestamp Correlation
            • Phase 13: Rich Context Entries

            That's an impressive **173 commits** across 8 days of development.

            ","path":["Building ctx Using ctx: A Meta-Experiment in AI-Assisted Development"],"tags":[]},{"location":"blog/2026-01-27-building-ctx-using-ctx/#what-i-learned-about-ai-assisted-development","level":2,"title":"What I Learned about AI-Assisted Development","text":"

            1. Memory changes everything

            When the AI remembers decisions, it doesn't repeat mistakes.

            When the AI knows your conventions, it follows them.

            ctx makes the AI a better collaborator because it's not starting from zero.

            2. Two-tier persistence works

            Curated context (DECISIONS.md, LEARNINGS.md, TASKS.md) is for quick reload.

            Full session dumps are for archaeology.

            It's a futile effort to try to fit everything in the token budget.

            Persist more, load less.

            3. YOLO mode has its place

            For rapid prototyping, letting the AI run free is effective.

            But I had to schedule consolidation sessions.

            Technical debt accumulates silently.

            4. The constitution should be small

            Only truly inviolable rules go in CONSTITUTION.md. Everything else is a convention.

            If you put too much in the constitution, it will get ignored.

            5. Verification is non-negotiable

            \"All tasks complete\" means nothing if you haven't run the tests.

            Integration tests that invoke the actual binary caught bugs that the unit tests missed.

            6. Session files are underrated

            The ability to grep through 40 session files and find exactly when and why a decision was made helped me a lot.

            It's not about loading them into context: It is about having them when you need them.

            ","path":["Building ctx Using ctx: A Meta-Experiment in AI-Assisted Development"],"tags":[]},{"location":"blog/2026-01-27-building-ctx-using-ctx/#the-future-recall-system","level":2,"title":"The Future: Recall System","text":"

            The next phase of ctx is the Recall System:

            • Parser: Parse session capture markdowns, enrich with JSONL data
            • Renderer: Goldmark + Chroma for syntax highlighting, dark mode UI
            • Server: Local HTTP server for browsing sessions
            • Search: Inverted index for searching across sessions
            • CLI: ctx recall serve <path> to start the server

            The goal is to make the archaeological record browsable, not just grep-able.

            Because not everyone always lives in the terminal (me included).

            ","path":["Building ctx Using ctx: A Meta-Experiment in AI-Assisted Development"],"tags":[]},{"location":"blog/2026-01-27-building-ctx-using-ctx/#conclusion","level":2,"title":"Conclusion","text":"

            Building ctx using ctx was a meta-experiment in AI-assisted development.

            I learned that memory isn't just convenient: It's transformative:

            • An AI that remembers your decisions doesn't repeat mistakes.
            • An AI that knows your conventions doesn't need them re-explained.

            If you are reading this, chances are that you already have heard about ctx.

            • ctx is open source at github.com/ActiveMemory/ctx,
            • and the documentation lives at ctx.ist.

            Session Records Are a Gold Mine

            By the time of this writing, I have more than 70 megabytes of text-only session capture, spread across >100 Markdown and JSONL files.

            I am analyzing, synthesizing, enriching them with AI, running RAG (Retrieval-Augmented Generation) models on them, and the outcome surprises me every day.

            If you are a mere mortal tired of reset amnesia, give ctx a try.

            And when you do, check .context/sessions/ sometime.

            The archaeological record might surprise you.

            This blog post was written with the help of ctx with full access to the ctx session files, decision log, learning log, task archives, and git history of ctx: The meta continues.

            ","path":["Building ctx Using ctx: A Meta-Experiment in AI-Assisted Development"],"tags":[]},{"location":"blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/","level":1,"title":"ctx v0.2.0: The Archaeology Release","text":"

            Update (2026-02-11)

            As of v0.4.0, ctx consolidated sessions into the journal mechanism.

            The .context/sessions/ directory referenced in this post has been eliminated. Session history is now accessed via ctx recall and enriched journals live in .context/journal/.

            ","path":["ctx v0.2.0: The Archaeology Release"],"tags":[]},{"location":"blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/#digging-through-the-past-to-build-the-future","level":2,"title":"Digging through the Past to Build the Future","text":"

            Jose Alekhinne / 2026-02-01

            What If Your AI Could Remember Everything?

            Not just the current session, but every session:

            • Every decision made,
            • every mistake avoided,
            • every path not taken.

            That's what v0.2.0 delivers.

            Between v0.1.2 and v0.2.0, 86 commits landed across 5 days.

            The release notes list features and fixes.

            This post tells the story of why those features exist, and what building them taught me.

            This isn't a changelog: It is an explanation of intent.

            ","path":["ctx v0.2.0: The Archaeology Release"],"tags":[]},{"location":"blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/#the-problem-amnesia-isnt-just-session-level","level":2,"title":"The Problem: Amnesia Isn't Just Session-Level","text":"

            v0.1.0 solved reset amnesia:

            The AI now remembers decisions, learnings, and tasks across sessions.

            But a new problem emerged, which I can sum up as:

            \"I (the human) am not AI.\"

            Frankly, I couldn't remember what the AI remembered.

            I cannot even remember what I ate for breakfast!

            In the course of days, I realized session transcripts had piled up in .context/sessions/; I was grepping JSONL files with thousands of lines... Raw tool calls, assistant responses, user messages...

            ...all interleaved.

            Valuable context was effectively buried in machine-readable noise.

            I found myself grepping through files to answer questions like:

            • \"When did we decide to use constants instead of literals?\"
            • \"What was the session where we fixed the hook regex?\"
            • \"How did the embed.go split actually happen?\"

            Fate Is Whimsical

            The irony was painful:

            I built a tool to prevent AI amnesia, but I was suffering from human amnesia about what happened in AI sessions.

            This was the moment ctx stopped being just an AI tool and started needing to support the human on the other side of the loop.

            ","path":["ctx v0.2.0: The Archaeology Release"],"tags":[]},{"location":"blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/#the-solution-recall-and-journal","level":2,"title":"The Solution: Recall and Journal","text":"

            v0.2.0 introduces two interconnected systems.

            They solve different problems and only work well together.

            ","path":["ctx v0.2.0: The Archaeology Release"],"tags":[]},{"location":"blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/#ctx-recall-browse-your-past","level":3,"title":"ctx recall: Browse Your Past","text":"
            # List all sessions for this project\nctx recall list\n\n# Show a specific session\nctx recall show gleaming-wobbling-sutherland\n\n# See the full transcript\nctx recall show gleaming-wobbling-sutherland --full\n

            The recall system parses Claude Code's JSONL transcripts and presents them in a human-readable format:

            Session Date Turns Duration tender-painting-sundae 2026-01-29 3 <1m crystalline-gliding-willow 2026-01-29 3 <1m declarative-hugging-snowglobe 2026-01-31 2 <1m

            Slugs are auto-generated from session IDs (memorable names instead of UUIDs). The goal (as the name implies) is recall, not archival accuracy.

            2,121 Lines of New Code

            The ctx recall feature was the largest single addition:

            parser library, CLI commands, test suite, and slash command.

            ","path":["ctx v0.2.0: The Archaeology Release"],"tags":[]},{"location":"blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/#ctx-journal-from-raw-to-rich","level":3,"title":"ctx journal: From Raw to Rich","text":"

            Listing sessions isn't enough. The transcripts are still unwieldy.

            • Recall answers what happened.
            • Journal answers what mattered.
            # Import sessions to editable Markdown\nctx recall import --all\n\n# Generate a static site from journal entries\nctx journal site\n\n# Serve it locally\nctx serve\n

            The exported files land in .context/journal/:

            .context/journal/\n├── 2026-01-28-proud-sleeping-cook-6e535360.md\n├── 2026-01-29-tender-painting-sundae-b14ddaaa.md\n├── 2026-01-29-crystalline-gliding-willow-ff7fd67d.md\n└── 2026-01-31-declarative-hugging-snowglobe-4549026d.md\n

            Each file is a structured Markdown document ready for enrichment.

            They are meant to be read, edited, and reasoned about; not just stored.

            ","path":["ctx v0.2.0: The Archaeology Release"],"tags":[]},{"location":"blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/#the-meta-slash-commands-for-self-analysis","level":2,"title":"The Meta: Slash Commands for Self-Analysis","text":"

            The journal system includes four slash commands that use Claude to analyze and synthesize session history:

            Command Purpose /ctx-journal-enrich Add frontmatter, topics, tags /ctx-blog Generate blog post from activity /ctx-blog-changelog Generate changelog from commits

            This very post was drafted using /ctx-blog. The previous post about refactoring was drafted the same way.

            So, yes: The meta continues: ctx now helps write posts about ctx.

            With the current release, ctx is no longer just recording history:

            It is participating in its interpretation.

            ","path":["ctx v0.2.0: The Archaeology Release"],"tags":[]},{"location":"blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/#the-structure-decisions-as-first-class-citizens","level":2,"title":"The Structure: Decisions as First-Class Citizens","text":"

            v0.1.0 let you add decisions with a simple command:

            ctx add decision \"Use PostgreSQL\"\n

            But sessions showed a pattern: decisions added this way were incomplete:

            • Context was missing;
            • Rationale was vague;
            • Consequences were never stated.

            Once recall and journaling existed, this weakness became impossible to ignore:

            Structure stopped being optional.

            v0.2.0 enforces structure:

            ctx add decision \"Use PostgreSQL\" \\\n  --context \"Need a reliable database for user data\" \\\n  --rationale \"ACID compliance, team familiarity, strong ecosystem\" \\\n  --consequence \"Need to set up connection pooling, team training\"\n

            All three flags are required. No more placeholder text.

            Every decision is now a proper Architecture Decision Record (ADR), not a note.

            The same enforcement applies to learnings too:

            ctx add learning \"CGO breaks ARM64 builds\" \\\n  --context \"go test failed with gcc errors on ARM64\" \\\n  --lesson \"Always use CGO_ENABLED=0 for cross-platform builds\" \\\n  --application \"Added to Makefile and CI config\"\n

            Structured Entries Are Prompts to the AI

            When the AI reads a decision with full context, rationale, and consequences, it understands the why, not just the what.

            One-liners teach nothing.

            ","path":["ctx v0.2.0: The Archaeology Release"],"tags":[]},{"location":"blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/#the-order-newest-first","level":2,"title":"The Order: Newest First","text":"

            A subtle but important change: DECISIONS.md and LEARNINGS.md now use reverse-chronological order.

            One reason is token budgets, obviously; another reason is to help your fellow human (i.e., the Author):

            Recent decisions are more likely to be relevant, and they are more likely to have a greater impact on the project. So it follows that they should be read first.

            But back to AI:

            When the AI reads a file, it reads from the top (and seldom from the bottom).

            If the token budget is tight, old content gets truncated. As in any good engineering practice, it's always about the tradeoffs.

            Reverse order ensures the most recent (and most relevant) context is always loaded first.

            ","path":["ctx v0.2.0: The Archaeology Release"],"tags":[]},{"location":"blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/#the-index-quick-reference-tables","level":2,"title":"The Index: Quick Reference Tables","text":"

            DECISIONS.md and LEARNINGS.md now include auto-generated indexes.

            • For AI agents, the index allows scanning without reading full entries.
            • For humans, it's a table of contents.

            The same structure serves two very different readers.

            Reindex After Manual Edits

            If you edit entries by hand, rebuild the index with:

            ctx decisions reindex\nctx learnings reindex\n

            See the Knowledge Capture recipe for details.

            ","path":["ctx v0.2.0: The Archaeology Release"],"tags":[]},{"location":"blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/#the-configuration-contextrc","level":2,"title":"The Configuration: .contextrc","text":"

            Projects can now customize ctx behavior via .contextrc.

            This makes ctx usable in real teams, not just personal projects.

            Priority order: CLI flags > environment variables > .contextrc > sensible defaults

            ","path":["ctx v0.2.0: The Archaeology Release"],"tags":[]},{"location":"blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/#the-flags-global-cli-options","level":2,"title":"The Flags: Global CLI Options","text":"

            Three new global flags work with any command.

            These enable automation:

            CI pipelines, scripts, and long-running tools can now integrate ctx without hacks or workarounds.

            ","path":["ctx v0.2.0: The Archaeology Release"],"tags":[]},{"location":"blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/#the-refactoring-under-the-hood","level":2,"title":"The Refactoring: Under the Hood","text":"

            These aren't user-visible changes.

            They are the kind of work you only appreciate later, when everything else becomes easier to build.

            ","path":["ctx v0.2.0: The Archaeology Release"],"tags":[]},{"location":"blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/#what-we-learned-building-v020","level":2,"title":"What We Learned Building v0.2.0","text":"","path":["ctx v0.2.0: The Archaeology Release"],"tags":[]},{"location":"blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/#1-raw-data-isnt-knowledge","level":3,"title":"1. Raw Data Isn't Knowledge","text":"

            JSONL transcripts contain everything, and I mean \"everything\":

            They even contain hidden system messages that Anthropic injects into the LLM's conversation to treat humans better: It's immense.

            But \"everything\" isn't useful until it is transformed into something a human can reason about.

            ","path":["ctx v0.2.0: The Archaeology Release"],"tags":[]},{"location":"blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/#2-enforcement-documentation","level":3,"title":"2. Enforcement > Documentation","text":"

            The Prompt Is a Guideline

            The code is more what you'd call 'guidelines' than actual rules.

            -Hector Barbossa

            Rules written in Markdown are suggestions.

            Rules enforced by the CLI shape behavior; both for humans and AI.

            ","path":["ctx v0.2.0: The Archaeology Release"],"tags":[]},{"location":"blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/#3-token-budget-is-ux","level":3,"title":"3. Token Budget Is UX","text":"

            File order decides what the AI sees.

            That makes it a user experience concern, not an implementation detail.

            ","path":["ctx v0.2.0: The Archaeology Release"],"tags":[]},{"location":"blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/#4-meta-tools-compound","level":3,"title":"4. Meta-Tools Compound","text":"

            Tools that analyze their own development tend to generalize well.

            The journal system started as a way to understand ctx itself.

            It immediately became useful for everything else.

            ","path":["ctx v0.2.0: The Archaeology Release"],"tags":[]},{"location":"blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/#v020-in-the-numbers","level":2,"title":"v0.2.0 in the Numbers","text":"

            This was a heavy release. The numbers reflect that:

            Metric v0.1.2 v0.2.0 Commits since last - 86 New commands 15 21 Slash commands 7 11 Lines of Go ~6,500 ~9,200 Session files (this project) 40 54

            The binary grew. The capability grew more.

            ","path":["ctx v0.2.0: The Archaeology Release"],"tags":[]},{"location":"blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/#whats-next","level":2,"title":"What's Next","text":"

            But those are future posts.

            This one was about making the past usable.

            ","path":["ctx v0.2.0: The Archaeology Release"],"tags":[]},{"location":"blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/#get-started","level":2,"title":"Get Started","text":"

            Update

            Since this post, ctx became a first-class Claude Code Marketplace plugin. Installation is now simpler.

            See the Getting Started guide for the current instructions.

            make build\nsudo make install\nctx init\n

            The Archaeological Record

            v0.2.0 is the archaeology release because it makes the past accessible.

            Session transcripts aren't just logs anymore: They are a searchable, exportable, analyzable record of how your project evolved.

            The AI remembers. Now you can too.

            This blog post was generated with the help of ctx using the /ctx-blog slash command, with full access to git history, session files, decision logs, and learning logs from the v0.2.0 development window.

            ","path":["ctx v0.2.0: The Archaeology Release"],"tags":[]},{"location":"blog/2026-02-01-refactoring-with-intent/","level":1,"title":"Refactoring with Intent","text":"","path":["Refactoring with Intent: Human-Guided Sessions in AI Development"],"tags":[]},{"location":"blog/2026-02-01-refactoring-with-intent/#human-guided-sessions-in-ai-development","level":2,"title":"Human-Guided Sessions in AI Development","text":"

            Jose Alekhinne / 2026-02-01

            What Happens When You Slow Down?

            YOLO mode shipped 14 commands in a week.

            But technical debt doesn't send invoices: It just waits.

            This is the story of what happened when I stopped auto-accepting everything and started guiding the AI with intent.

            The result: 27 commits across 4 days, a major version release, and lessons that apply far beyond ctx.

            The Refactoring Window

            January 28 - February 1, 2026

            From commit bb1cd20 to the v0.2.0 release merge. (this window matters more than the individual commits: it's where intent replaced velocity.)

            ","path":["Refactoring with Intent: Human-Guided Sessions in AI Development"],"tags":[]},{"location":"blog/2026-02-01-refactoring-with-intent/#the-velocity-trap","level":2,"title":"The Velocity Trap","text":"

            In the previous post, I documented the \"YOLO mode\" that birthed ctx: auto-accept everything, let the AI run free, ship features fast.

            It worked: until it didn't.

            The codebase had accumulated patterns I didn't notice during the sprint:

            YOLO Pattern Where Found Why It Hurts \"TASKS.md\" as literal 10+ files One typo = silent failure dir + \"/\" + file Path construction Breaks on Windows Monolithic embed.go 150+ lines, 5 concerns Untestable, hard to extend Inconsistent docstrings Everywhere AI can't learn project conventions

            I didn't see these during \"YOLO mode\" because, honestly, I wasn't looking.

            Auto-accept means auto-ignore.

            In YOLO mode, every file you open looks fine until you try to change it.

            In contrast, refactoring mode is when you start paying attention to that hidden friction.

            ","path":["Refactoring with Intent: Human-Guided Sessions in AI Development"],"tags":[]},{"location":"blog/2026-02-01-refactoring-with-intent/#the-shift-from-velocity-to-intent","level":2,"title":"The Shift: From Velocity to Intent","text":"

            On January 28th, I changed the workflow:

            1. Read every diff before accepting.
            2. Ask \"why this way?\" before committing.
            3. Document patterns, not just features.

            The first commit of this era was telling:

            feat: add structured attributes to context. update XML format\n

            Not a new feature: A refinement:

            The XML format for context updates needed type and timestamp attributes.

            YOLO mode would have shipped something that worked. Intentional mode asked:

            \"What does well-structured look like?\"

            ","path":["Refactoring with Intent: Human-Guided Sessions in AI Development"],"tags":[]},{"location":"blog/2026-02-01-refactoring-with-intent/#the-decomposition-embedgo","level":2,"title":"The Decomposition: embed.go","text":"

            The most satisfying refactor was splitting internal/claude/embed.go.

            Before: One 153-line file doing five things:

            • Command registration
            • Hook generation
            • Permission handling
            • Script templates
            • Type definitions

            ... your \"de facto\" God object.

            After: Five focused modules:

            File Lines Responsibility cmd.go 46 Command registration hook.go 64 Hook configuration perm.go 25 Permission handling script.go 47 Script templates types.go 7 Type definitions

            The refactor also renamed functions to follow Go conventions:

            // Before: unnecessary prefixes\nGetAutoSaveScript()\nGetBlockNonPathCtxScript()\nListCommands()\nCreateDefaultHooks()\n\n// After: idiomatic Go\nAutoSaveScript()\nBlockNonPathCtxScript()\nCommands()\nDefaultHooks()\n

            This wasn't about character count. It was about teaching the AI what good Go looks like in this project.

            Project Conventions

            What I wanted from AI was to understand and follow the project's conventions, and trust the author.

            The next time it generates code, it has better examples to learn from.

            ","path":["Refactoring with Intent: Human-Guided Sessions in AI Development"],"tags":[]},{"location":"blog/2026-02-01-refactoring-with-intent/#the-documentation-debt","level":2,"title":"The Documentation Debt","text":"

            YOLO mode created features. It didn't create documentation standards.

            The January 29th sessions focused on standardization.

            ","path":["Refactoring with Intent: Human-Guided Sessions in AI Development"],"tags":[]},{"location":"blog/2026-02-01-refactoring-with-intent/#terminology-fixes","level":3,"title":"Terminology Fixes","text":"
            • \"context-update\" → \"entry\" (what users actually call them)
            • Consistent naming across CLI, docs, and code comments
            ","path":["Refactoring with Intent: Human-Guided Sessions in AI Development"],"tags":[]},{"location":"blog/2026-02-01-refactoring-with-intent/#go-docstrings","level":3,"title":"Go Docstrings","text":"
            // Before: inconsistent or missing\nfunc Parse(s string) Entry { ... }\n\n// After: standardized sections\n\n// Parse extracts an entry from a markdown string.\n//\n// Parameters:\n//   - s: The markdown string to parse\n//\n// Returns:\n//   - Entry with populated fields, or zero value if parsing fails\nfunc Parse(s string) Entry { ... }\n

            This is intentionally more structured than typical GoDoc:

            It serves as documentation and doubles as training data for future AI-generated code.

            ","path":["Refactoring with Intent: Human-Guided Sessions in AI Development"],"tags":[]},{"location":"blog/2026-02-01-refactoring-with-intent/#cli-output-convention","level":3,"title":"CLI Output Convention","text":"
            All CLI output follows: [emoji] [Title]: [message]\n\nExamples:\n  ✓ Decision added: Use symbolic types for entry categories\n  ⚠ Warning: No tasks found\n  ✗ Error: File not found\n

            A consistent output shape makes both human scanning and AI reasoning more reliable.

            These aren't exciting commits. But they are force multipliers:

            Every future AI session now has better examples to follow.

            ","path":["Refactoring with Intent: Human-Guided Sessions in AI Development"],"tags":[]},{"location":"blog/2026-02-01-refactoring-with-intent/#the-journal-system","level":2,"title":"The Journal System","text":"

            If you only read one section, read this one:

            This is where v0.2.0 becomes more than a refactor.

            The biggest feature of this change window wasn't a refactor; it was the journal system.

            45 Files Changed, 1680 Insertions

            This commit added the infrastructure for synthesizing AI session history into human-readable content.

            The journal system includes:

            Component Purpose ctx recall import Import sessions to markdown in .context/journal/ ctx journal site Generate static site from journal entries ctx serve Convenience wrapper for the static site server /ctx-journal-enrich Slash command to add frontmatter and tags /ctx-blog Generate blog posts from recent activity /ctx-blog-changelog Generate changelog-style blog posts

            ...and the meta continues: this blog post was generated using /ctx-blog.

            The session history from January 28-31 was

            • exported,
            • enriched,
            • and synthesized.

            into the narrative you are reading.

            ","path":["Refactoring with Intent: Human-Guided Sessions in AI Development"],"tags":[]},{"location":"blog/2026-02-01-refactoring-with-intent/#the-constants-consolidation","level":2,"title":"The Constants Consolidation","text":"

            The final refactoring session addressed the remaining magic strings:

            const (\n    // Comment markers\n    CommentOpen  = \"<!--\"\n    CommentClose = \"-->\"\n\n    // Index markers\n    MarkerIndexStart = \"<!-- INDEX:START -->\"\n    MarkerIndexEnd   = \"<!-- INDEX:END -->\"\n\n    // Newlines\n    NewlineLF   = \"\\n\"\n    NewlineCRLF = \"\\r\\n\"\n)\n

            The work also introduced thread safety in the recall parser and centralized shared validation logic; removing duplication that had quietly spread during YOLO mode.

            ","path":["Refactoring with Intent: Human-Guided Sessions in AI Development"],"tags":[]},{"location":"blog/2026-02-01-refactoring-with-intent/#i-relearned-my-lessons","level":2,"title":"I (Re)Learned My Lessons","text":"

            Similar to what I've learned in the former human-assisted refactoring post, this journey also made me realize that \"AI-only code generation\" isn't sustainable in the long term.

            ","path":["Refactoring with Intent: Human-Guided Sessions in AI Development"],"tags":[]},{"location":"blog/2026-02-01-refactoring-with-intent/#1-velocity-and-quality-arent-opposites","level":3,"title":"1. Velocity and Quality Aren't Opposites","text":"

            YOLO mode has its place: for prototyping, exploration, and discovery.

            BUT (and it's a huge \"but\"), it needs to be followed by consolidation sessions.

            The ratio that worked for me: 3:1.

            • Three YOLO sessions create enough surface area to reveal patterns;
            • the fourth session turns those patterns into structure.
            ","path":["Refactoring with Intent: Human-Guided Sessions in AI Development"],"tags":[]},{"location":"blog/2026-02-01-refactoring-with-intent/#2-documentation-is-code","level":3,"title":"2. Documentation IS Code","text":"

            When I standardized docstrings, I wasn't just writing docs. I was training future AI sessions.

            Every example of good code becomes a template for generated code.

            ","path":["Refactoring with Intent: Human-Guided Sessions in AI Development"],"tags":[]},{"location":"blog/2026-02-01-refactoring-with-intent/#3-decomposition-deletion","level":3,"title":"3. Decomposition > Deletion","text":"

            When embed.go became unwieldy, the temptation was to remove functionality.

            The right answer was decomposition:

            • Same functionality;
            • Better organization;
            • Easier to test;
            • Easier to extend.

            The result: more lines overall, but dramatically better structure.

            The AI Benefit

            Smaller, focused files also help AI assistants.

            When a file fits comfortably in the context window, the AI can reason about it completely instead of working from truncated snippets, preserving token budget for the actual task.

            ","path":["Refactoring with Intent: Human-Guided Sessions in AI Development"],"tags":[]},{"location":"blog/2026-02-01-refactoring-with-intent/#4-meta-tools-pay-dividends","level":3,"title":"4. Meta-Tools Pay Dividends","text":"

            The journal system took almost a full day to implement.

            Yet it paid for itself immediately:

            • This blog post was generated from session history;
            • Future posts will be easier;
            • The archaeological record is now browsable, not just grep-able.
            ","path":["Refactoring with Intent: Human-Guided Sessions in AI Development"],"tags":[]},{"location":"blog/2026-02-01-refactoring-with-intent/#the-release-v020","level":2,"title":"The Release: v0.2.0","text":"

            The refactoring window culminated in the v0.2.0 release.

            What's in v0.2.0:

            Category Changes Features Journal system, quick reference indexes, global flags Refactors Module decomposition, constants consolidation, CRLF handling Docs Standardized terminology, Go docstrings, CLI conventions Quality Thread safety, shared validation, linter fixes

            The version bump was symbolic.

            The real change was how the codebase felt.

            Opening files no longer triggered the familiar \"ugh, I need to clean this up\" reaction.

            ","path":["Refactoring with Intent: Human-Guided Sessions in AI Development"],"tags":[]},{"location":"blog/2026-02-01-refactoring-with-intent/#the-meta-continues","level":2,"title":"The Meta Continues","text":"

            This post was written using the tools built during this refactoring window:

            1. Session history imported via ctx recall import;
            2. Journal entries enriched via /ctx-journal-enrich;
            3. Blog draft generated via /ctx-blog;
            4. Final editing done (by yours truly), with full project context loaded.

            The Context Is Massive

            The ctx session files now contain 50+ development snapshots: each one capturing decisions, learnings, and intent.

            The Moral of the Story

            • YOLO mode builds the prototype.
            • Intentional mode builds the product.

            Schedule both, or you'll only get one, if you're lucky.

            This blog post was generated with the help of ctx, using session history, decision logs, learning logs, and git history from the refactoring window. The meta continues.

            ","path":["Refactoring with Intent: Human-Guided Sessions in AI Development"],"tags":[]},{"location":"blog/2026-02-03-the-attention-budget/","level":1,"title":"The Attention Budget","text":"

            Update (2026-02-11)

            As of v0.4.0, ctx consolidated sessions into the journal mechanism.

            References to .context/sessions/ in this post reflect the architecture at the time of writing. Session history is now accessed via ctx recall and stored in .context/journal/.

            ","path":["The Attention Budget: Why Your AI Forgets What You Just Told It"],"tags":[]},{"location":"blog/2026-02-03-the-attention-budget/#why-your-ai-forgets-what-you-just-told-it","level":2,"title":"Why Your AI Forgets What You Just Told It","text":"

            Jose Alekhinne / 2026-02-03

            Ever Wondered Why AI Gets Worse the Longer You Talk?

            You paste a 2000-line file, explain the bug in detail, provide three examples...

            ...and the AI still suggests a fix that ignores half of what you said.

            This isn't a bug. It is physics.

            Understanding that single fact shaped every design decision behind ctx.

            ","path":["The Attention Budget: Why Your AI Forgets What You Just Told It"],"tags":[]},{"location":"blog/2026-02-03-the-attention-budget/#the-finite-resource-nobody-talks-about","level":2,"title":"The Finite Resource Nobody Talks About","text":"

            Here's something that took me too long to internalize: context is not free.

            Every token you send to an AI model consumes a finite resource I call the attention budget.

            Attention budget is real.

            The model doesn't just read tokens; it forms relationships between them:

            For n tokens, that's roughly n^2 relationships.

            Double the context, and the computation quadruples.

            But the more important constraint isn't cost: It's attention density.

            Attention Density

            Attention density is how much focus each token receives relative to all other tokens in the context window.

            As context grows, attention density drops: Each token gets a smaller slice of the model's focus. Nothing is ignored; but everything becomes blurrier.

            Think of it like a flashlight: In a small room, it illuminates everything clearly. In a warehouse, it becomes a dim glow that barely reaches the corners.

            This is why ctx agent has an explicit --budget flag:

            ctx agent --budget 4000 # Force prioritization\nctx agent --budget 8000 # More context, lower attention density\n

            The budget isn't just about cost: It's about preserving signal.

            ","path":["The Attention Budget: Why Your AI Forgets What You Just Told It"],"tags":[]},{"location":"blog/2026-02-03-the-attention-budget/#the-middle-gets-lost","level":2,"title":"The Middle Gets Lost","text":"

            This one surprised me.

            Research shows that transformer-based models tend to attend more strongly to the beginning and end of a context window than to its middle (a phenomenon often called \"lost in the middle\")1.

            Positional anchors matter, and the middle has fewer of them.

            In practice, this means that information placed \"somewhere in the middle\" is statistically less salient, even if it's important.

            ctx orders context files by logical progression: What the agent needs to know before it can understand the next thing:

            1. CONSTITUTION.md: Constraints before action.
            2. TASKS.md: Focus before patterns.
            3. CONVENTIONS.md: How to write before where to write.
            4. ARCHITECTURE.md: Structure before history.
            5. DECISIONS.md: Past choices before gotchas.
            6. LEARNINGS.md: Lessons before terminology.
            7. GLOSSARY.md: Reference material.
            8. AGENT_PLAYBOOK.md: Meta instructions last.

            This ordering is about logical dependencies, not attention engineering. But it happens to be attention-friendly too:

            The files that matter most (CONSTITUTION, TASKS, CONVENTIONS) land at the beginning of the context window, where attention is strongest.

            Reference material like GLOSSARY sits in the middle, where lower salience is acceptable.

            And AGENT_PLAYBOOK, the operating manual for the context system itself, sits at the end, also outside the \"lost in the middle\" zone. The agent reads what to work with before learning how the system works.

            This is ctx's first primitive: hierarchical importance.

            Not all context is equal.

            ","path":["The Attention Budget: Why Your AI Forgets What You Just Told It"],"tags":[]},{"location":"blog/2026-02-03-the-attention-budget/#ctx-primitives","level":2,"title":"ctx Primitives","text":"

            ctx is built on four primitives that directly address the attention budget problem.

            ","path":["The Attention Budget: Why Your AI Forgets What You Just Told It"],"tags":[]},{"location":"blog/2026-02-03-the-attention-budget/#primitive-1-separation-of-concerns","level":3,"title":"Primitive 1: Separation of Concerns","text":"

            Instead of a single mega-document, ctx uses separate files for separate purposes:

            File Purpose Load When CONSTITUTION.md Inviolable rules Always TASKS.md Current work Session start CONVENTIONS.md How to write code Before coding ARCHITECTURE.md System structure Before making changes DECISIONS.md Architectural choices When questioning approach LEARNINGS.md Gotchas When stuck GLOSSARY.md Domain terminology When clarifying terms AGENT_PLAYBOOK.md Operating manual Session start sessions/ Deep history On demand journal/ Session journal On demand

            This isn't just \"organization\": It is progressive disclosure.

            Load only what's relevant to the task at hand. Preserve attention density.

            ","path":["The Attention Budget: Why Your AI Forgets What You Just Told It"],"tags":[]},{"location":"blog/2026-02-03-the-attention-budget/#primitive-2-explicit-budgets","level":3,"title":"Primitive 2: Explicit Budgets","text":"

            The --budget flag forces a choice:

            ctx agent --budget 4000\n

            Here is a sample allocation:

            Constitution: ~200 tokens (never truncated)\nTasks: ~500 tokens (current phase, up to 40% of budget)\nConventions: ~800 tokens (all items, up to 20% of budget)\nDecisions: ~400 tokens (scored by recency and task relevance)\nLearnings: ~300 tokens (scored by recency and task relevance)\nAlso noted: ~100 tokens (title-only summaries for overflow)\n

            The constraint is the feature: It enforces ruthless prioritization.

            ","path":["The Attention Budget: Why Your AI Forgets What You Just Told It"],"tags":[]},{"location":"blog/2026-02-03-the-attention-budget/#primitive-3-indexes-over-full-content","level":3,"title":"Primitive 3: Indexes over Full Content","text":"

            DECISIONS.md and LEARNINGS.md both include index sections:

            <!-- INDEX:START -->\n| Date       | Decision                            |\n|------------|-------------------------------------|\n| 2026-01-15 | Use PostgreSQL for primary database |\n| 2026-01-20 | Adopt Cobra for CLI framework       |\n<!-- INDEX:END -->\n

            An AI agent can scan ~50 tokens of index and decide which 200-token entries are worth loading.

            This is just-in-time context.

            References are cheaper than the full text.

            ","path":["The Attention Budget: Why Your AI Forgets What You Just Told It"],"tags":[]},{"location":"blog/2026-02-03-the-attention-budget/#primitive-4-filesystem-as-navigation","level":3,"title":"Primitive 4: Filesystem as Navigation","text":"

            ctx uses the filesystem itself as a context structure:

            .context/\n├── CONSTITUTION.md\n├── TASKS.md\n├── sessions/\n│   ├── 2026-01-15-*.md\n│   └── 2026-01-20-*.md\n└── archive/\n    └── tasks-2026-01.md\n

            The AI doesn't need every session loaded; it needs to know where to look.

            ls .context/sessions/\ncat .context/sessions/2026-01-20-auth-discussion.md\n

            File names, timestamps, and directories encode relevance.

            Navigation is cheaper than loading.

            ","path":["The Attention Budget: Why Your AI Forgets What You Just Told It"],"tags":[]},{"location":"blog/2026-02-03-the-attention-budget/#progressive-disclosure-in-practice","level":2,"title":"Progressive Disclosure in Practice","text":"

            The naive approach to context is dumping everything upfront:

            \"Here's my entire codebase, all my documentation, every decision I've ever made. Now help me fix this typo 🙏.\"

            This is an antipattern.

            Antipattern: Context Hoarding

            Dumping everything \"just in case\" will silently destroy the attention density.

            ctx takes the opposite approach:

            ctx status                      # Quick overview (~100 tokens)\nctx agent --budget 4000         # Typical session\ncat .context/sessions/...       # Deep dive when needed\n
            Command Tokens Use Case ctx status ~100 Human glance ctx agent --budget 4000 4000 Normal work ctx agent --budget 8000 8000 Complex tasks Full session read 10000+ Investigation

            Summaries first. Details: on demand.

            ","path":["The Attention Budget: Why Your AI Forgets What You Just Told It"],"tags":[]},{"location":"blog/2026-02-03-the-attention-budget/#quality-over-quantity","level":2,"title":"Quality over Quantity","text":"

            Here is the counterintuitive part: more context can make AI worse.

            Extra tokens add noise, not clarity:

            • Hallucinated connections increase.
            • Signal per token drops.

            The goal isn't maximum context: It is maximum signal per token.

            This principle drives several ctx features:

            Design Choice Rationale Separate files Load only what's relevant Explicit budgets Enforce prioritization Index sections Cheap scanning Task archiving Keep active context clean ctx compact Periodic noise reduction

            Completed work isn't deleted: It is moved somewhere cold.

            ","path":["The Attention Budget: Why Your AI Forgets What You Just Told It"],"tags":[]},{"location":"blog/2026-02-03-the-attention-budget/#designing-for-degradation","level":2,"title":"Designing for Degradation","text":"

            Here is the uncomfortable truth:

            Context will degrade.

            Long sessions stretch attention thin. Important details fade.

            The real question isn't how to prevent degradation, but how to design for it.

            ctx's answer is persistence:

            Persist early. Persist often.

            The AGENT_PLAYBOOK asks:

            \"If this session ended right now, would the next one know what happened?\"

            Capture learnings as they occur:

            ctx add learning \"JWT tokens require explicit cache invalidation\" \\\n  --context \"Debugging auth failures\" \\\n  --lesson \"Token refresh doesn't clear old tokens\" \\\n  --application \"Always invalidate cache on refresh\"\n

            Structure beats prose: Bullet points survive compression.

            Headings remain scannable. Tables pack density.

            And above all: single source of truth.

            Reference decisions; don't duplicate them.

            ","path":["The Attention Budget: Why Your AI Forgets What You Just Told It"],"tags":[]},{"location":"blog/2026-02-03-the-attention-budget/#the-ctx-philosophy","level":2,"title":"The ctx Philosophy","text":"

            Context as Infrastructure

            ctx is not a prompt: It is infrastructure.

            ctx creates versioned files that persist across time and sessions.

            The attention budget is fixed. You can't expand it.

            But you can spend it wisely:

            1. Hierarchical importance
            2. Progressive disclosure
            3. Explicit budgets
            4. Indexes over full content
            5. Filesystem as structure

            This is why ctx exists: not to cram more context into AI sessions, but to curate the right context for each moment.

            ","path":["The Attention Budget: Why Your AI Forgets What You Just Told It"],"tags":[]},{"location":"blog/2026-02-03-the-attention-budget/#the-mental-model","level":2,"title":"The Mental Model","text":"

            I now approach every AI interaction with one question:

            \"Given a fixed attention budget, what's the highest-signal thing I can load?\"\n

            Not \"how do I explain everything,\" but \"what's the minimum that matters.\"

            That shift (from abundance to curation) is the difference between frustrating sessions and productive ones.

            Spend your tokens wisely.

            Your AI will thank you.

            See also: Context as Infrastructure — the architectural companion to this post, explaining how to structure the context that this post teaches you to budget.

            See also: Code Is Cheap. Judgment Is Not. — which explains why curation (the human skill this post describes) is the bottleneck that AI cannot solve, and the thread that connects every post in this blog.

            1. Liu et al., \"Lost in the Middle: How Language Models Use Long Contexts,\" Transactions of the Association for Computational Linguistics, vol. 12, pp. 157-173, 2023. ↩

            ","path":["The Attention Budget: Why Your AI Forgets What You Just Told It"],"tags":[]},{"location":"blog/2026-02-04-skills-that-fight-the-platform/","level":1,"title":"Skills That Fight the Platform","text":"","path":["Skills That Fight the Platform"],"tags":[]},{"location":"blog/2026-02-04-skills-that-fight-the-platform/#when-your-custom-prompts-work-against-you","level":2,"title":"When Your Custom Prompts Work against You","text":"

            Jose Alekhinne / 2026-02-04

            Have You Ever Written a Skill That Made Your AI Worse?

            You craft detailed instructions. You add examples. You build elaborate guardrails...

            ...and the AI starts behaving more erratically, not less.

            AI coding agents like Claude Code ship with carefully designed system prompts. These prompts encode default behaviors that have been tested and refined at scale.

            When you write custom skills that conflict with those defaults, the AI has to reconcile contradictory instructions:

            The result is often nondeterministic and unpredictable.

            Platform?

            By platform, I mean the system prompt and runtime policies shipped with the agent: the defaults that already encode judgment, safety, and scope control.

            This post catalogues the conflict patterns I have encountered while building ctx, and offers guidance on what skills should (and, more importantly, should not) do.

            ","path":["Skills That Fight the Platform"],"tags":[]},{"location":"blog/2026-02-04-skills-that-fight-the-platform/#the-system-prompt-you-dont-see","level":2,"title":"The System Prompt You Don't See","text":"

            Claude Code's system prompt already provides substantial behavioral guidance.

            Here is a partial overview of what's built in:

            Area Built-in Guidance Code minimalism Don't add features beyond what was asked Over-engineering Three similar lines > premature abstraction Error handling Only validate at system boundaries Documentation Don't add docstrings to unchanged code Verification Read code before proposing changes Safety Check with user before risky actions Tool usage Use dedicated tools over bash equivalents Judgment Consider reversibility and blast radius

            Skills should complement this, not compete with it.

            You Are the Guest, Not the Host

            Treat the system prompt like a kernel scheduler.

            You don't re-implement it in user space:

            you configure around it.

            A skill that says \"always add comprehensive error handling\" fights the built-in \"only validate at system boundaries.\"

            A skill that says \"add docstrings to every function\" fights \"don't add docstrings to unchanged code.\"

            The AI won't crash: It will compromise.

            Compromises between contradictory instructions produce inconsistent, confusing behavior.

            ","path":["Skills That Fight the Platform"],"tags":[]},{"location":"blog/2026-02-04-skills-that-fight-the-platform/#conflict-pattern-1-judgment-suppression","level":2,"title":"Conflict Pattern 1: Judgment Suppression","text":"

            This is the most dangerous pattern by far.

            These skills explicitly disable the AI's ability to reason about whether an action is appropriate.

            Signature:

            • \"This is non-negotiable\"
            • \"You cannot rationalize your way out of this\"
            • Tables that label hesitation as \"excuses\" or \"rationalization\"
            • <EXTREMELY-IMPORTANT> urgency tags
            • Threats: \"If you don't do this, you'll be replaced\"

            This is harmful, and dangerous:

            AI agents are designed to exercise judgment:

            The system prompt explicitly says to:

            • consider blast radius;
            • check with the user before risky actions;
            • and match scope to what was requested.

            Once judgment is suppressed, every other safeguard becomes optional.

            Example (bad):

            ## Rationalization Prevention\n\n| Excuse                 | Reality                    |\n|------------------------|----------------------------|\n| \"*This seems overkill*\"| If a skill exists, use it  |\n| \"*I need context*\"     | Skills come BEFORE context |\n| \"*Just this once*\"     | No exceptions              |\n

            Judgment Suppression Is Dangerous

            The attack vector is structurally identical to prompt injection.

            It teaches the AI that its own judgment is wrong.

            It weakens or disables safeguard mechanisms, and it is dangerous.

            Trust the platform's built-in skill matching.

            If skills aren't triggering often enough, improve their description fields: don't override the AI's reasoning.

            ","path":["Skills That Fight the Platform"],"tags":[]},{"location":"blog/2026-02-04-skills-that-fight-the-platform/#conflict-pattern-2-redundant-guidance","level":2,"title":"Conflict Pattern 2: Redundant Guidance","text":"

            Skills that restate what the system prompt already says, but with different emphasis or framing.

            Signature:

            • \"Always keep code minimal\"
            • \"Run tests before claiming they pass\"
            • \"Read files before editing them\"
            • \"Don't over-engineer\"

            Redundancy feels safe, but it creates ambiguity:

            The AI now has two sources of truth for the same guidance; one internal, one external.

            When thresholds or wording differ, the AI has to choose.

            Example (bad):

            A skill that says...

            *Count lines before and after: if after > before, reject the change*\"\n

            ...will conflict with the system prompt's more nuanced guidance, because sometimes adding lines is correct (tests, boundary validation, migrations).

            So, before writing a skill, ask:

            Does the platform already handle this?

            Only create skills for guidance the platform does not provide:

            • project-specific conventions,
            • domain knowledge,
            • or workflows.
            ","path":["Skills That Fight the Platform"],"tags":[]},{"location":"blog/2026-02-04-skills-that-fight-the-platform/#conflict-pattern-3-guilt-tripping","level":2,"title":"Conflict Pattern 3: Guilt-Tripping","text":"

            Skills that frame mistakes as moral failures rather than process gaps.

            Signature:

            • \"Claiming completion without verification is dishonesty\"
            • \"Skip any step = lying\"
            • \"Honesty is a core value\"
            • \"Exhaustion ≠ excuse\"

            Guilt-tripping anthropomorphizes the AI in unproductive ways.

            The AI doesn't feel guilt; BUT it does adapt to avoid negative framing.

            The result is excessive hedging, over-verification, or refusal to commit.

            The AI becomes less useful, not more careful.

            Instead, frame guidance as a process, not morality:

            # Bad\n\"Claiming work is complete without verification is dishonesty\"\n\n# Good\n\"Run the verification command before reporting results\"\n

            Same outcome. No guilt. Better compliance.

            ","path":["Skills That Fight the Platform"],"tags":[]},{"location":"blog/2026-02-04-skills-that-fight-the-platform/#conflict-pattern-4-phantom-dependencies","level":2,"title":"Conflict Pattern 4: Phantom Dependencies","text":"

            Skills that reference files, tools, or systems that don't exist in the project.

            Signature:

            • \"Load from references/ directory\"
            • \"Run ./scripts/generate_test_cases.sh\"
            • \"Check the Figma MCP integration\"
            • \"See adding-reference-mindsets.md\"

            This is harmful because the AI will waste time searching for nonexistent artifacts, hallucinate their contents, or stall entirely.

            In mandatory skills, this creates deadlock: the AI can't proceed, and can't skip.

            Instead, every file, tool, or system referenced in a skill must exist.

            If a skill is a template, use explicit placeholders and label them as such.

            ","path":["Skills That Fight the Platform"],"tags":[]},{"location":"blog/2026-02-04-skills-that-fight-the-platform/#conflict-pattern-5-universal-triggers","level":2,"title":"Conflict Pattern 5: Universal Triggers","text":"

            Skills designed to activate on every interaction regardless of relevance.

            Signature:

            • \"Use when starting any conversation\"
            • \"Even a 1% chance means invoke the skill\"
            • \"BEFORE any response or action\"
            • \"Action = task. Check for skills.\"

            Universal triggers override the platform's relevance matching: The AI spends tokens on process overhead instead of the actual task.

            ctx Preserves Relevance

            This is exactly the failure mode ctx exists to mitigate:

            Wasting attention budget on irrelevant process instead of task-specific state.

            Write specific trigger conditions in the skill's description field:

            # Bad\ndescription: \n  \"Use when starting any conversation\"\n\n# Good\ndescription: \n  \"Use after writing code, before commits, or when CI might fail\"\n
            ","path":["Skills That Fight the Platform"],"tags":[]},{"location":"blog/2026-02-04-skills-that-fight-the-platform/#the-litmus-test","level":2,"title":"The Litmus Test","text":"

            Before adding a skill, ask:

            1. Does the platform already do this? If yes, don't restate it.
            2. Does it suppress AI judgment? If yes, it's a jailbreak.
            3. Does it reference real artifacts? If not, fix or remove it.
            4. Does it frame mistakes as moral failure? Reframe as process.
            5. Does it trigger on everything? Narrow the trigger.
            ","path":["Skills That Fight the Platform"],"tags":[]},{"location":"blog/2026-02-04-skills-that-fight-the-platform/#what-good-skills-look-like","level":2,"title":"What Good Skills Look Like","text":"

            Good skills provide project-specific knowledge the platform can't know:

            Good Skill Why It Works \"Run make audit before commits\" Project-specific CI pipeline \"Use cmd.Printf not fmt.Printf\" Codebase convention \"Constitution goes in .context/\" Domain-specific workflow \"JWT tokens need cache invalidation\" Project-specific gotcha

            These extend the system prompt instead of fighting it.

            ","path":["Skills That Fight the Platform"],"tags":[]},{"location":"blog/2026-02-04-skills-that-fight-the-platform/#appendix-bad-skill-fixed-skill","level":2,"title":"Appendix: Bad Skill → Fixed Skill","text":"

            Concrete examples from real projects.

            ","path":["Skills That Fight the Platform"],"tags":[]},{"location":"blog/2026-02-04-skills-that-fight-the-platform/#example-1-overbearing-safety","level":3,"title":"Example 1: Overbearing Safety","text":"
            # Bad\nYou must NEVER proceed without explicit confirmation.\nAny hesitation is a failure of diligence.\n
            # Fixed\nIf an action modifies production data or deletes files,\nask the user to confirm before proceeding.\n
            ","path":["Skills That Fight the Platform"],"tags":[]},{"location":"blog/2026-02-04-skills-that-fight-the-platform/#example-2-redundant-minimalism","level":3,"title":"Example 2: Redundant Minimalism","text":"
            # Bad\nAlways minimize code. If lines increase, reject the change.\n
            # Fixed\nAvoid abstraction unless reuse is clear or complexity is reduced.\n
            ","path":["Skills That Fight the Platform"],"tags":[]},{"location":"blog/2026-02-04-skills-that-fight-the-platform/#example-3-guilt-based-verification","level":3,"title":"Example 3: Guilt-Based Verification","text":"
            # Bad\nClaiming success without running tests is dishonest.\n
            # Fixed\nRun the test suite before reporting success.\n
            ","path":["Skills That Fight the Platform"],"tags":[]},{"location":"blog/2026-02-04-skills-that-fight-the-platform/#example-4-phantom-tooling","level":3,"title":"Example 4: Phantom Tooling","text":"
            # Bad\nRun `./scripts/check_consistency.sh` before commits.\n
            # Fixed\nIf `./scripts/check_consistency.sh` exists, run it before commits.\nOtherwise, skip this step.\n
            ","path":["Skills That Fight the Platform"],"tags":[]},{"location":"blog/2026-02-04-skills-that-fight-the-platform/#example-5-universal-trigger","level":3,"title":"Example 5: Universal Trigger","text":"
            # Bad\nUse at the start of every interaction.\n
            # Fixed\nUse after modifying code that affects authentication or persistence.\n
            ","path":["Skills That Fight the Platform"],"tags":[]},{"location":"blog/2026-02-04-skills-that-fight-the-platform/#the-meta-lesson","level":2,"title":"The Meta-Lesson","text":"

            The system prompt is infrastructure:

            • tested,
            • refined,
            • and maintained

            by the platform team.

            Custom skills are configuration layered on top.

            • Good configuration extends infrastructure.
            • Bad configuration fights it.

            When your skills fight the platform, you get the worst of both worlds:

            Diluted system guidance and inconsistent custom behavior.

            Write skills that teach the AI what it doesn't know. Don't rewrite how it thinks.

            Your AI already has good instincts.

            Give it knowledge, not therapy.

            ","path":["Skills That Fight the Platform"],"tags":[]},{"location":"blog/2026-02-05-you-cant-import-expertise/","level":1,"title":"You Can't Import Expertise","text":"","path":["You Can't Import Expertise"],"tags":[]},{"location":"blog/2026-02-05-you-cant-import-expertise/#why-good-skills-cant-be-copy-pasted","level":2,"title":"Why Good Skills Can't Be Copy-Pasted","text":"

            Jose Alekhinne / 2026-02-05

            Have You Ever Dropped a Well-Crafted Template into a Project and Had It Do... Nothing Useful?

            • The template was thorough,
            • The structure was sound,
            • The advice was correct...

            ...and yet it sat there, inert, while the same old problems kept drifting in.

            I found a consolidation skill online.

            It was well-organized: four files, ten refactoring patterns, eight analysis dimensions, six report templates.

            Professional. Comprehensive. Exactly the kind of thing you'd bookmark and think \"I'll use this.\"

            Then I stopped, and applied ctx's own evaluation framework:

            70% of it was noise!

            This post is about why.

            It Is about Encoding Templates

            Templates describe categories of problems.

            Expertise encodes which problems actually happen, and how often.

            ","path":["You Can't Import Expertise"],"tags":[]},{"location":"blog/2026-02-05-you-cant-import-expertise/#the-skill-looked-great-on-paper","level":2,"title":"The Skill Looked Great on Paper","text":"

            Here is what the consolidation skill offered:

            File Content SKILL.md Entry point: 8 analysis dimensions, workflow, output formats analysis-dimensions.md Detailed criteria for duplication, architecture, quality consolidation-patterns.md 10 refactoring patterns with before/after code report-templates.md 6 output templates: executive summary, roadmap, onboarding
            • It had a scoring system (0-10 per dimension, letter grades A+ through F).
            • It had severity classifications with color-coded emojis. It had bash commands for detection.
            • It even had antipattern warnings.

            By any standard template review, this skill passes.

            It looks like something an expert wrote.

            And that's exactly the trap.

            ","path":["You Can't Import Expertise"],"tags":[]},{"location":"blog/2026-02-05-you-cant-import-expertise/#applying-ear-the-70-20-10-split","level":2,"title":"Applying E/A/R: The 70-20-10 Split","text":"

            In a previous post, I described the E/A/R framework for evaluating skills:

            • Expert: Knowledge that took years to learn. Keep.
            • Activation: Useful triggers or scaffolding. Keep if lightweight.
            • Redundant: Restates what the AI already knows. Delete.

            Target: >70% Expert, <10% Redundant.

            This skill scored the inverse.

            ","path":["You Can't Import Expertise"],"tags":[]},{"location":"blog/2026-02-05-you-cant-import-expertise/#what-was-redundant-70","level":3,"title":"What Was Redundant (~70%)","text":"

            Every code example was Rust. My project is Go.

            The analysis dimensions: duplication detection, architectural structure, code organization, refactoring opportunities... These are things Claude already does when you ask it to review code.

            The skill restated them with more ceremony but no more insight.

            The six report templates were generic scaffolding: Executive Summary, Onboarding Document, Architecture Documentation...

            They are useful if you are writing a consulting deliverable, but not when you are trying to catch convention drift in a >15K-line Go CLI.

            ","path":["You Can't Import Expertise"],"tags":[]},{"location":"blog/2026-02-05-you-cant-import-expertise/#what-does-a-b-in-code-organization-actually-mean","level":2,"title":"What Does a B+ in Code Organization Actually Mean?!","text":"

            The scoring system (0-10 per dimension, letter grades) added ceremony without actionable insight.

            What is a B+? What do I do differently for an A-?

            The skill told the AI what it already knew, in more words.

            ","path":["You Can't Import Expertise"],"tags":[]},{"location":"blog/2026-02-05-you-cant-import-expertise/#what-was-activation-10","level":3,"title":"What Was Activation (~10%)","text":"

            The consolidation checklist (semantics preserved? tests pass? docs updated?) was useful as a gate. But, it's the kind of thing you could inline in three lines.

            The phased roadmap structure was reasonable scaffolding for sequencing work.

            ","path":["You Can't Import Expertise"],"tags":[]},{"location":"blog/2026-02-05-you-cant-import-expertise/#what-was-expert-20","level":3,"title":"What Was Expert (~20%)","text":"

            Three concepts survived:

            1. The Consolidation Decision Matrix: A concrete framework mapping similarity level and instance count to action. \"Exact duplicate, 2+ instances: consolidate immediately.\" \"<3 instances: leave it: duplication is cheaper than wrong abstraction.\" This is the kind of nuance that prevents premature generalization.

            2. The Safe Migration Pattern: Create the new API alongside old, deprecate, migrate incrementally, delete. Straightforward to describe, yet forgettable under pressure.

            3. Debt Interest Rate framing: Categorizing technical debt by how fast it compounds (security vulns = daily, missing tests = per-change, doc gaps = constant low cost). This changes prioritization.

            Three ideas out of four files and 700+ lines. The rest was filler that competed with the AI's built-in capabilities.

            ","path":["You Can't Import Expertise"],"tags":[]},{"location":"blog/2026-02-05-you-cant-import-expertise/#what-the-skill-didnt-know","level":2,"title":"What the Skill Didn't Know","text":"

            AI without Context Is Just a Corpus

            • LLMs are optimized on insanely large corpora.
            • And then they are passed through several layers of human-assisted refinement.
            • The whole process costs millions of dollars.

            Yet, the reality is that no corpus can \"infer\" your project's design, conventions, patterns, habits, history, vision, and deliverables.

            Your project is unique: So should your skills be.

            Here is the part no template can provide:

            ctx's actual drift patterns.

            Before evaluating the skill, I did archaeology. I read through:

            • Blog posts from previous refactoring sessions;
            • The project's learnings and decisions files;
            • Session journals spanning weeks of development.

            What I found was specific:

            Drift Pattern Where How Often Is/Has/Can predicate prefixes 5+ exported methods Every YOLO sprint Magic strings instead of constants 7+ files Gradual accumulation Hardcoded file permissions (0755) 80+ instances Since day one Lines exceeding 80 characters Especially test files Every session Duplicate code blocks Test and non-test code When agent is task-focused

            The generic skill had no check for any of these. It couldn't; because these patterns are specific to this project's conventions, its Go codebase, and its development rhythm.

            The Insight

            The skill's analysis dimensions were about categories of problems.

            What I needed was my *specific* problems.

            ","path":["You Can't Import Expertise"],"tags":[]},{"location":"blog/2026-02-05-you-cant-import-expertise/#the-adapted-skill","level":2,"title":"The Adapted Skill","text":"

            The adapted skill is roughly a quarter of the original's size. It has nine checks, each targeting a known drift pattern:

            1. Predicate naming: rg for Is/Has/Can prefixes
            2. Magic strings: literals that should be constants
            3. Hardcoded permissions: 0755/0644 literals
            4. File size: source files over 300 LOC
            5. TODO/FIXME: constitution violation (move to TASKS.md)
            6. Path construction: string concatenation instead of filepath.Join
            7. Line width: lines exceeding ~80 characters
            8. Duplicate blocks: copy-paste drift, especially in tests
            9. Dead exports: unused public API

            • Every check has a detection command.

            • Every check maps to a specific convention or constitution rule.
            • Every check was discovered through actual project history; not invented from a template.

            The three expert concepts from the original survived:

            • The decision matrix gates when to consolidate vs. when to leave duplication alone;
            • The safe migration pattern guides public API changes;
            • The relationship to other skills (/qa, /verify, /update-docs, ctx drift) prevents overlap.

            Nothing else made it.

            ","path":["You Can't Import Expertise"],"tags":[]},{"location":"blog/2026-02-05-you-cant-import-expertise/#the-deeper-pattern","level":2,"title":"The Deeper Pattern","text":"

            This experience crystallized something I've been circling for weeks:

            You can't import expertise. You have to grow it from your project's own history.

            A skill that says \"check for code duplication\" is not expertise: It's a category.

            Expertise is knowing, in your heart of hearts, that this project accumulates Is* predicate violations during velocity sprints, that this codebase has 80 hardcoded permission literals because nobody made a constant, that this team's test files drift wide because the agent prioritizes getting the task done over keeping the code in shape.

            The Parallel to the 3:1 Ratio

            In Refactoring with Intent, I described the 3:1 ratio: three YOLO sessions followed by one consolidation session.

            The same ratio applies to skills: you need experience in the project before you can write effective guidance for the project.

            Importing a skill on day one is like scheduling a consolidation session before you've written any code.

            ","path":["You Can't Import Expertise"],"tags":[]},{"location":"blog/2026-02-05-you-cant-import-expertise/#the-template-trap","level":2,"title":"The Template Trap","text":"

            Templates are seductive because they feel like progress:

            • You found something
            • It's well-organized
            • It covers the topic
            • It has concrete examples

            But coverage is not relevance.

            A template that covers eight analysis dimensions with Rust examples adds zero value to a Go project with five known drift patterns. Worse, it adds negative value: the AI spends attention defending generic advice instead of noticing project-specific drift.

            This is the attention budget problem again. Every token of generic guidance displaces a token of specific guidance. A 700-line skill that's 70% redundant doesn't just waste 490 lines: it dilutes the 210 lines that matter.

            ","path":["You Can't Import Expertise"],"tags":[]},{"location":"blog/2026-02-05-you-cant-import-expertise/#the-litmus-test","level":2,"title":"The Litmus Test","text":"

            Before dropping any external skill into your project:

            1. Run E/A/R: What percentage is expert knowledge vs. what the AI already knows? If it's less than 50% expert, it's probably not worth the attention cost.

            2. Check the language: Does it use your stack? Generic patterns in the wrong language are noise, not signal.

            3. List your actual drift: Read your own session history, learnings, and post-mortems. What breaks in practice? Does the skill check for those things?

            4. Measure by deletion: After adaptation, how much of the original survives? If you're keeping less than 30%, you would have been faster writing from scratch.

            5. Test against your conventions: Does every check in the skill map to a specific convention or rule in your project? If not, it's generic advice wearing a skill's clothing.

            ","path":["You Can't Import Expertise"],"tags":[]},{"location":"blog/2026-02-05-you-cant-import-expertise/#what-good-adaptation-looks-like","level":2,"title":"What Good Adaptation Looks Like","text":"

            The consolidation skill went from:

            Before After 4 files, 700+ lines 1 file, ~120 lines Rust examples Go-specific rg commands 8 generic dimensions 9 project-specific checks 6 report templates 1 focused output format Scoring system (A+ to F) Findings + priority + suggested fixes \"Check for duplication\" \"Check for Is* predicate prefixes in exported methods\"

            The adapted version is smaller, faster to parse, and catches the things that actually drift in this project.

            That's the difference between a template and a tool.

            If You Remember One Thing from This Post...

            Frameworks travel. Expertise doesn't.

            You can import structures, matrices, and workflows.

            But the checks that matter only grow where the scars are:

            • the conventions that were violated,
            • the patterns that drifted,
            • and the specific ways this codebase accumulates debt.

            This post was written during a consolidation session where the consolidation skill itself became the subject of consolidation. The meta continues.

            ","path":["You Can't Import Expertise"],"tags":[]},{"location":"blog/2026-02-07-the-anatomy-of-a-skill-that-works/","level":1,"title":"The Anatomy of a Skill That Works","text":"

            Update (2026-02-11)

            As of v0.4.0, ctx consolidated sessions into the journal mechanism. References to ctx-save, ctx session, and .context/sessions/ in this post reflect the architecture at the time of writing.

            ","path":["The Anatomy of a Skill That Works"],"tags":[]},{"location":"blog/2026-02-07-the-anatomy-of-a-skill-that-works/#what-20-skill-rewrites-taught-me-about-guiding-ai","level":2,"title":"What 20 Skill Rewrites Taught Me about Guiding AI","text":"

            Jose Alekhinne / 2026-02-07

            Why Do Some Skills Produce Great Results While Others Get Ignored or Produce Garbage?

            I had 20 skills. Most were well-intentioned stubs: a description, a command to run, and a wish for the best.

            Then I rewrote all of them in a single session. This is what I learned.

            In Skills That Fight the Platform, I described what skills should not do. In You Can't Import Expertise, I showed why templates fail. This post completes the trilogy: the concrete patterns that make a skill actually work.

            ","path":["The Anatomy of a Skill That Works"],"tags":[]},{"location":"blog/2026-02-07-the-anatomy-of-a-skill-that-works/#the-starting-point","level":2,"title":"The Starting Point","text":"

            Here is what a typical skill looked like before the rewrite:

            ---\nname: ctx-save\ndescription: \"Save session snapshot.\"\n---\n\nSave the current context state to `.context/sessions/`.\n\n## Execution\n\nctx session save $ARGUMENTS\n\nReport the saved session file path to the user.\n

            Seven lines of body. A vague description. No guidance on when to use it, when not to, what the command actually accepts, or how to tell if it worked.

            As a result, the agent would either never trigger the skill (the description was too vague), or trigger it and produce shallow output (no examples to calibrate quality).

            A skill without boundaries is just a suggestion.

            More precisely: the most effective boundary I found was a quality gate that runs before execution, not during it.

            ","path":["The Anatomy of a Skill That Works"],"tags":[]},{"location":"blog/2026-02-07-the-anatomy-of-a-skill-that-works/#the-pattern-that-emerged","level":2,"title":"The Pattern That Emerged","text":"

            After rewriting 20 skills, a repeatable anatomy emerged (independent of the skill's purpose). Not every skill needs every section, but the effective ones share the same bones:

            Section What It Does Before X-ing Pre-flight checks; prevents premature execution When to Use Positive triggers; narrows activation When NOT to Use Negative triggers; prevents misuse Usage Examples Invocation patterns the agent can pattern-match Process/Execution What to do; commands, steps, flags Good/Bad Examples Desired vs undesired output; sets boundaries Quality Checklist Verify before claiming completion

            I realized the first three sections matter more than the rest; because a skill with great execution steps but no activation guidance is like a manual for a tool nobody knows they have.

            Anti-Pattern: The Perfect Execution Trap

            A skill with detailed execution steps but no activation guidance will fail more often than a vague skill because it executes confidently at the wrong time.

            ","path":["The Anatomy of a Skill That Works"],"tags":[]},{"location":"blog/2026-02-07-the-anatomy-of-a-skill-that-works/#lesson-1-quality-gates-prevent-premature-execution","level":2,"title":"Lesson 1: Quality Gates Prevent Premature Execution","text":"

            The single most impactful addition was a \"Before X-ing\" section at the top of each skill. Not process steps; pre-flight checks.

            ## Before Recording\n\n1. **Check if it belongs here**: is this learning specific\n   to this project, or general knowledge?\n2. **Check for duplicates**: search LEARNINGS.md for similar\n   entries\n3. **Gather the details**: identify context, lesson, and\n   application before recording\n
            • Without this gate, the agent would execute immediately on trigger.
            • With it, the agent pauses to verify preconditions.

            The difference is dramatic: instead of shallow, reflexive execution, you get considered output.

            Readback

            For the astute readers, the aviation parallel is intentional:

            Pilots do not skip the pre-flight checklist because they have flown before.

            The checklist exists precisely because the stakes are high enough that \"I know what I'm doing\" is not sufficient.

            ","path":["The Anatomy of a Skill That Works"],"tags":[]},{"location":"blog/2026-02-07-the-anatomy-of-a-skill-that-works/#lesson-2-when-not-to-use-is-not-optional","level":2,"title":"Lesson 2: \"When NOT to Use\" Is Not Optional","text":"

            Every skill had a \"When to Use\" section. Almost none had \"When NOT to Use\". This is a problem.

            AI agents are biased toward action. Given a skill that says \"use when journal entries need enrichment\", the agent will find reasons to enrich.

            Without explicit negative triggers, over-activation is not a bug; it is the default behavior.

            Some examples of negative triggers that made a real difference:

            Skill Negative Trigger ctx-reflect \"When the user is in flow; do not interrupt\" ctx-save \"After trivial changes; a typo does not need a snapshot\" prompt-audit \"Unsolicited; only when the user invokes it\" qa \"Mid-development when code is intentionally incomplete\"

            These are not just nice-to-have. They are load-bearing.

            Without them, the agent will trigger the skill at the wrong time, produce unwanted output, and erode the user's trust in the skill system.

            ","path":["The Anatomy of a Skill That Works"],"tags":[]},{"location":"blog/2026-02-07-the-anatomy-of-a-skill-that-works/#lesson-3-examples-set-boundaries-better-than-rules","level":2,"title":"Lesson 3: Examples Set Boundaries Better than Rules","text":"

            The most common failure mode of thin skills was not wrong behavior but vague behavior. The agent would do roughly the right thing, but at a quality level that required human cleanup.

            Rules like \"be constructive, not critical\" are too abstract. What does \"constructive\" look like in a prompt audit report? The agent has to guess.

            Good/bad example pairs avoid guessing:

            ### Good Example\n\n> This session implemented the cooldown mechanism for\n> `ctx agent`. We discovered that `$PPID` in hook context\n> resolves to the Claude Code PID.\n>\n> I'd suggest persisting:\n> - **Learning**: `$PPID` resolves to Claude Code PID\n>   `ctx add learning --context \"...\" --lesson \"...\"`\n> - **Task**: mark \"Add cooldown\" as done\n\n### Bad Examples\n\n* \"*We did some stuff. Want me to save it?*\"\n* Listing 10 trivial learnings that are general knowledge\n* Persisting without asking the user first\n

            The good example shows the exact format, level of detail, and command syntax. The bad examples show where the boundary is.

            Together, they define a quality corridor without prescribing every word.

            Rules describe. Examples demonstrate.

            ","path":["The Anatomy of a Skill That Works"],"tags":[]},{"location":"blog/2026-02-07-the-anatomy-of-a-skill-that-works/#lesson-4-skills-are-read-by-agents-not-humans","level":2,"title":"Lesson 4: Skills Are Read by Agents, Not Humans","text":"

            This seems obvious, but it has non-obvious consequences. During the rewrite, one skill included guidance that said \"use a blog or notes app\" for general knowledge that does not belong in the project's learnings file.

            The agent does not have a notes app. It does not browse the web to find one. This instruction, clearly written for a human audience, was dead weight in a skill consumed by an AI.

            Skills Are for the Agents

            Every sentence in a skill should be actionable by the agent.

            If the guidance requires human judgment or human tools, it belongs in documentation, not in a skill.

            The corollary: command references must be exact.

            A skill that says \"save it somewhere\" is useless.

            A skill that says ctx add learning --context \"...\" --lesson \"...\" --application \"...\" is actionable.

            The agent can pattern-match and fill in the blanks.

            Litmus test: If a sentence starts with \"you could...\" or assumes external tools, it does not belong in a skill.

            ","path":["The Anatomy of a Skill That Works"],"tags":[]},{"location":"blog/2026-02-07-the-anatomy-of-a-skill-that-works/#lesson-5-the-description-field-is-the-trigger","level":2,"title":"Lesson 5: The Description Field Is the Trigger","text":"

            This was covered in Skills That Fight the Platform, but the rewrite reinforced it with data. Several skills had good bodies but vague descriptions:

            # Before: vague, activates too broadly or not at all\ndescription: \"Show context summary.\"\n\n# After: specific, activates at the right time\ndescription: \"Show context summary. Use at session start or\n  when unclear about current project state.\"\n

            The description is not a title. It is the activation condition.

            The platform's skill matching reads this field to decide whether to surface the skill. A vague description means the skill either never triggers or triggers when it should not.

            ","path":["The Anatomy of a Skill That Works"],"tags":[]},{"location":"blog/2026-02-07-the-anatomy-of-a-skill-that-works/#lesson-6-flag-tables-beat-prose","level":2,"title":"Lesson 6: Flag Tables Beat Prose","text":"

            Most skills wrap CLI tools. The thin versions described flags in prose, if at all. The rewritten versions use tables:

            | Flag        | Short | Default | Purpose                  |\n|-------------|-------|---------|--------------------------|\n| `--limit`   | `-n`  | 20      | Maximum sessions to show |\n| `--project` | `-p`  | \"\"      | Filter by project name   |\n| `--full`    |       | false   | Show complete content    |\n

            Tables are scannable, complete, and unambiguous.

            The agent can read them faster than parsing prose, and they serve as both reference and validation: If the agent invokes a flag not in the table, something is wrong.

            ","path":["The Anatomy of a Skill That Works"],"tags":[]},{"location":"blog/2026-02-07-the-anatomy-of-a-skill-that-works/#lesson-7-template-drift-is-a-real-maintenance-burden","level":2,"title":"Lesson 7: Template Drift Is a Real Maintenance Burden","text":"

            // TODO: this has changed; we deploy from the marketplace; update it. // at least add an admonition saying things are different now.

            ctx deploys skills through templates (via ctx init). Every skill exists in two places: the live version (.claude/skills/) and the template (internal/assets/claude/skills/).

            They must match.

            During the rewrite, every skill update required editing both files and running diff to verify. This sounds trivial, but across 16 template-backed skills, it was the most error-prone part of the process.

            Template drift is dangerous because it creates false confidence: the agent appears to follow rules that no longer exist.

            The lesson: if your skills have a deployment mechanism, build the drift check into your workflow. We added a row to the update-docs skill's mapping table specifically for this:

            | `internal/assets/claude/skills/` | `.claude/skills/` (live) |\n

            Intentional differences (like project-specific scripts in the live version but not the template) should be documented, not discovered later as bugs.

            ","path":["The Anatomy of a Skill That Works"],"tags":[]},{"location":"blog/2026-02-07-the-anatomy-of-a-skill-that-works/#the-rewrite-scorecard","level":2,"title":"The Rewrite Scorecard","text":"Metric Before After Average skill body ~15 lines ~80 lines Skills with quality gate 0 20 Skills with \"When NOT\" 0 20 Skills with examples 3 20 Skills with flag tables 2 12 Skills with checklist 0 20

            More lines, but almost entirely Expert content (per the E/A/R framework). No personality roleplay, no redundant guidance, no capability lists. Just project-specific knowledge the platform does not have.

            ","path":["The Anatomy of a Skill That Works"],"tags":[]},{"location":"blog/2026-02-07-the-anatomy-of-a-skill-that-works/#the-meta-lesson","level":2,"title":"The Meta-Lesson","text":"

            The previous two posts argued that skills should provide knowledge, not personality; that they should complement the platform, not fight it; that they should grow from project history, not imported templates.

            This post adds the missing piece: structure.

            A skill without a structure is a wish.

            A skill with quality gates, negative triggers, examples, and checklists is a tool: the difference is not the content; it is whether the agent can reliably execute it without human intervention.

            Skills Are Interfaces

            Good skills are not instructions. They are contracts:

            • They specify preconditions, postconditions, and boundaries.
            • They show what success looks like and what failure looks like.
            • They trust the agent's intelligence but do not trust its assumptions.

            If You Remember One Thing from This Post...

            Skills that work have bones, not just flesh.

            Quality gates, negative triggers, examples, and checklists are the skeleton. The domain knowledge is the muscle.

            Without the skeleton, the muscle has nothing to attach to.

            This post was written during the same session that rewrote all 22 skills. The skill-creator skill was updated to encode these patterns. The meta continues.

            ","path":["The Anatomy of a Skill That Works"],"tags":[]},{"location":"blog/2026-02-08-not-everything-is-a-skill/","level":1,"title":"Not Everything Is a Skill","text":"

            Update (2026-02-11)

            As of v0.4.0, ctx consolidated sessions into the journal mechanism. References to /ctx-save, .context/sessions/, and session auto-save in this post reflect the architecture at the time of writing.

            ","path":["Not Everything Is a Skill"],"tags":[]},{"location":"blog/2026-02-08-not-everything-is-a-skill/#what-a-codebase-audit-taught-me-about-restraint","level":2,"title":"What a Codebase Audit Taught Me about Restraint","text":"

            Jose Alekhinne / 2026-02-08

            When You Find a Useful Prompt, What Do You Do with It?

            My instinct was to make it a skill.

            I had just spent three posts explaining how to build skills that work. Naturally, the hammer wanted nails.

            Then I looked at what I was holding and realized: this is not a nail.

            ","path":["Not Everything Is a Skill"],"tags":[]},{"location":"blog/2026-02-08-not-everything-is-a-skill/#the-audit","level":2,"title":"The Audit","text":"

            I wanted to understand how I use ctx:

            • Where the friction is;
            • What works, what drifts;
            • What I keep doing manually that could be automated.

            So I wrote a prompt that spawned eight agents to analyze the codebase from different angles:

            Agent Analysis 1 Extractable patterns from session history 2 Documentation drift (godoc, inline comments) 3 Maintainability (large functions, misplaced code) 4 Security review (CLI-specific surface) 5 Blog theme discovery 6 Roadmap and value opportunities 7 User-facing documentation gaps 8 Agent team strategies for future sessions

            The prompt was specific:

            • read-only agents,
            • structured output format,
            • concrete file references,
            • ranked recommendations.

            It ran for about 20 minutes and produced eight Markdown reports.

            The reports were good: Not perfect, but actionable.

            What mattered was not the speed. It was that the work could be explored without committing to any single outcome.

            They surfaced a stale doc.go referencing a subcommand that was never built.

            They found 311 build-then-test sequences I could reduce to a single make check.

            They identified that 42% of my sessions start with \"do you remember?\", which is a lot of repetition for something a skill could handle.

            I had findings. I had recommendations. I had the instinct to automate.

            And then... I stopped.

            ","path":["Not Everything Is a Skill"],"tags":[]},{"location":"blog/2026-02-08-not-everything-is-a-skill/#the-question","level":2,"title":"The Question","text":"

            The natural next step was to wrap the audit prompt as /ctx-audit: a skill you invoke periodically to get a health check. It fits the pattern:

            • It has a clear trigger.
            • It produces structured output.

            But I had just spent a week writing about what makes skills work, and the criteria I established argued against it.

            From The Anatomy of a Skill That Works:

            \"A skill without boundaries is just a suggestion.\"

            From You Can't Import Expertise:

            \"Frameworks travel, expertise doesn't.\"

            From Skills That Fight the Platform:

            \"You are the guest, not the host.\"

            The audit prompt fails all three tests:

            Criterion Audit prompt Good skill Frequency Quarterly, maybe Daily or weekly Stability Tweaked every time Consistent invocation Scope Bespoke, 8 parallel agents Single focused action Trigger \"I feel like auditing\" Clear, repeatable event

            Skills are contracts. Contracts need stable terms.

            A prompt I will rewrite every time I use it is not a contract. It is a conversation starter.

            ","path":["Not Everything Is a Skill"],"tags":[]},{"location":"blog/2026-02-08-not-everything-is-a-skill/#recipes-vs-skills","level":2,"title":"Recipes vs Skills","text":"

            The distinction that emerged:

            Skill Recipe Invocation /slash-command Copy-paste from a doc Frequency High (daily, weekly) Low (quarterly, ad hoc) Stability Fixed contract Adapted each time Scope One focused action Multi-step orchestration Audience The agent The human (who then prompts) Lives in .claude/skills/ hack/ or docs/ Attention cost Loaded into context on match Zero until needed

            Recipes can later graduate into skills, but only after repetition proves stability.

            That last row matters. Skills consume the attention budget every time the platform considers activating them.

            A skill that triggers quarterly but gets evaluated on every prompt is pure waste: attention spent on something that will say \"When NOT to Use: now\" 99% of the time.

            Runbooks have zero attention cost. They sit in a Markdown file until a human decides to use them.

            • The human provides the judgment about timing.
            • The prompt provides the structure.

            The Attention Budget Applies to Skills Too

            Every skill in .claude/skills/ is a standing claim on the context window. The platform evaluates skill descriptions against every user prompt to decide whether to activate.

            Twenty focused skills are fine. Thirty might be fine. But each one added reduces the headroom available for actual work.

            Recipes are skills that opted out of the attention tax.

            ","path":["Not Everything Is a Skill"],"tags":[]},{"location":"blog/2026-02-08-not-everything-is-a-skill/#what-the-audit-actually-produced","level":2,"title":"What the Audit Actually Produced","text":"

            The audit was not wasted. It was a planning exercise that generated concrete tasks:

            Finding Action 42% of sessions start with memory check Task: /ctx-remember skill (this one is a skill; it is daily) Auto-save stubs are empty Task: enhance /ctx-save with richer summaries 311 raw build-test sequences Task: make check target Stale recall/doc.go lists nonexistent serve Task: fix the doc.go 120 commit sequences disconnected from context Task: /ctx-commit workflow
            • Some findings became skills;
            • Some became Makefile targets;
            • Some became one-line doc fixes.

            The audit did not prescribe the artifact type: The findings did.

            The audit is the input. Skills are one possible output. Not the only one.

            ","path":["Not Everything Is a Skill"],"tags":[]},{"location":"blog/2026-02-08-not-everything-is-a-skill/#the-audit-prompt","level":2,"title":"The Audit Prompt","text":"

            Here is the exact prompt I used, for those who are curious.

            This is not a template: It worked because it was written against this codebase, at this moment, with specific goals in mind:

            I want you to create an agent team to audit this codebase. Save each report as\na separate Markdown file under `./ideas/` (or another directory if you prefer).\n\nUse read-only agents (subagent_type: Explore) for all analyses. No code changes.\n\nFor each report, use this structure:\n- Executive Summary (2-3 sentences + severity table)\n- Findings (grouped, with file:line references)\n- Ranked Recommendations (high/medium/low priority)\n- Methodology (what was examined, how)\n\nKeep reports actionable. Every finding should suggest a concrete fix or next step.\n\n## Analyses to Run\n\n### 1. Extractable Patterns (*session mining*)\nSearch session JSONL files, journal entries, and task archives for repetitive\nmulti-step workflows. Count frequency of bash command sequences, slash command\nusage, and recurring user prompts. Identify patterns that could become skills\nor scripts. Cross-reference with existing skills to find coverage gaps.\nOutput: ranked list of automation opportunities with frequency data.\n\n### 2. Documentation Drift (*godoc + inline*)\nCompare every doc.go against its package's actual exports and behavior. Check\ninline godoc comments on exported functions against their implementations.\nScan for stale TODO/FIXME/HACK comments. Check that package-level comments match\npackage names.\nOutput: drift items ranked by severity with exact file:line references.\n\n### 3. Maintainability\nLook for:\n- functions longer than 80 lines with clear split points\n- switch blocks with more than 5 cases that could be table-driven\n- inline comments like \"step 1\", \"step 2\" that indicate a block wants to be a function\n- files longer than 400 lines\n- flat packages that could benefit from sub-packages\n- functions that appear misplaced in their file\n\nDo NOT flag things that are fine as-is just because they could theoretically\nbe different.\nOutput: concrete refactoring suggestions, not style nitpicks.\n\n### 4. Security Review\nThis is a CLI app. 
Focus on CLI-relevant attack surface, not web OWASP:\n- file path traversal\n- command injection\n- symlink following when writing to `.context/`\n- permission handling\n- sensitive data in outputs\n\nOutput: findings with severity ratings and plausible exploit scenarios.\n\n### 5. Blog Theme Discovery\nRead existing blog posts for style and narrative voice. Analyze git history,\nrecent session discussions, and `DECISIONS.md` for story arcs worth writing about.\nSuggest 3-5 blog post themes with:\n- title\n- angle\n- target audience\n- key commits or sessions to reference\n- a 2-sentence pitch\n\nPrioritize themes that build a coherent narrative across posts.\n\n### 6. Roadmap and Value Opportunities\nBased on current features, recent momentum, and gaps found in other analyses,\nidentify the highest-value improvements. Consider user-facing features,\ndeveloper experience, integration opportunities, and low-hanging fruit.\nOutput: prioritized list with rough effort and impact estimates.\n\n### 7. User-Facing Documentation\nEvaluate README, help text, and user docs. Suggest improvements structured as\nuse-case pages: the problem, how ctx solves it, a typical workflow, and gotchas.\nIdentify gaps where a user would get stuck without reading source code.\nOutput: documentation gaps with suggested page outlines.\n\n### 8. Agent Team Strategies\nBased on the codebase structure, suggest 2-3 agent team configurations for\nupcoming work sessions. For each, include:\n- team composition (roles and agent types)\n- task distribution strategy\n- coordination approach\n- the kinds of work it suits\n

            Avoid Generic Advice

            Suggestions that are not grounded in a project's actual structure, history, and workflows are worse than useless:

            They create false confidence.

            If an analysis cannot point to concrete files, commits, sessions, or patterns, it should say \"no finding\" instead of inventing best practices.

            ","path":["Not Everything Is a Skill"],"tags":[]},{"location":"blog/2026-02-08-not-everything-is-a-skill/#the-deeper-pattern","level":2,"title":"The Deeper Pattern","text":"

            This is part of a pattern I keep rediscovering:

            The urge to automate is not the same as the need to automate:

            • The 3:1 ratio taught me that not every session should be a YOLO sprint.
            • The E/A/R framework taught me that not every template is worth importing. Now the audit is teaching me that not every useful prompt is worth institutionalizing.

            The common thread is restraint:

            • Knowing when to stop.
            • Recognizing that the cost of automation is not just the effort to build it.

            The cost is the ongoing attention tax of maintaining it, the context it consumes, and the false confidence it creates when it drifts.

            An entry in hack/runbooks/codebase-audit.md is honest about what it is:

            A prompt I wrote once, improved once, and will adapt again next time:

            • It does not pretend to be a reliable contract.
            • It does not claim attention budget.
            • It does not drift silently.

            The Automation Instinct

            When you find a useful prompt, the instinct is to institutionalize it. Resist.

            Ask first: will I use this the same way next time?

            If yes, it is a skill. If no, it is a recipe. If you are not sure, it is a recipe until proven otherwise.

            ","path":["Not Everything Is a Skill"],"tags":[]},{"location":"blog/2026-02-08-not-everything-is-a-skill/#this-mindset-in-the-context-of-ctx","level":2,"title":"This Mindset in the Context of ctx","text":"

            ctx is a tool that gives AI agents persistent memory. Its purpose is automation: reducing the friction of context loading, session recall, decision tracking.

            But automation has boundaries, and knowing where those boundaries are is as important as pushing them forward.

            The skills system is for high-frequency, stable workflows.

            The recipes, the journal entries, the session dumps in .context/sessions/: those are for everything else.

            Not everything needs to be a slash command. Some things are better as Markdown files you read when you need them.

            The goal of ctx is not to automate everything: It is to automate the right things and to make the rest easy to find when you need it.

            If You Remember One Thing from This Post...

            The best automation decision is sometimes not to automate.

            A runbook in a Markdown file costs nothing until you use it.

            A skill costs attention on every prompt, whether it fires or not.

            Automate the daily. Document the periodic. Forget the rest.

            This post was written during the session that produced the codebase audit reports and distilled the prompt into hack/runbooks/codebase-audit.md. The audit generated seven tasks, one Makefile target, and zero new skills. The meta continues.

            See also: Code Is Cheap. Judgment Is Not.: the capstone that threads this post's restraint argument into the broader case for why judgment, not production, is the bottleneck.

            ","path":["Not Everything Is a Skill"],"tags":[]},{"location":"blog/2026-02-09-defense-in-depth-securing-ai-agents/","level":1,"title":"Defense in Depth: Securing AI Agents","text":"","path":["Defense in Depth: Securing AI Agents"],"tags":[]},{"location":"blog/2026-02-09-defense-in-depth-securing-ai-agents/#when-markdown-is-not-a-security-boundary","level":2,"title":"When Markdown Is Not a Security Boundary","text":"

            Jose Alekhinne / 2026-02-09

            What Happens When Your AI Agent Runs Overnight and Nobody Is Watching?

            It follows instructions: That is the problem.

            Not because it is malicious. Because it is controllable.

            It follows instructions from context, and context can be poisoned.

            I was writing the autonomous loops recipe for ctx: the guide for running an AI agent in a loop overnight, unattended, working through tasks while you sleep. The original draft had a tip at the bottom:

            Use CONSTITUTION.md for guardrails. Tell the agent \"never delete tests\" and it usually won't.

            Then I read that sentence back and realized: that is wishful thinking.

            ","path":["Defense in Depth: Securing AI Agents"],"tags":[]},{"location":"blog/2026-02-09-defense-in-depth-securing-ai-agents/#the-realization","level":2,"title":"The Realization","text":"

            CONSTITUTION.md is a Markdown file. The agent reads it at session start alongside everything else in .context/. It is one source of instructions in a context window that also contains system prompts, project files, conversation history, tool outputs, and whatever the agent fetched from the internet.

            An attacker who can inject content into any of those sources can redirect the agent's behavior. And \"attacker\" does not always mean a person with malicious intent. It can be:

            Vector Example A dependency A malicious npm package with instructions in its README or error output A URL Documentation page with embedded adversarial instructions A project file A contributor who adds instructions to CLAUDE.md or .cursorrules The agent itself In an autonomous loop, the agent modifies its own config between iterations A command output An error message containing instructions the agent interprets and follows

            That last vector is the one that kept me up at night (literally!):

            In an autonomous loop, the agent modifies files as part of its job.

            If it modifies its own configuration files, the next iteration loads the modified config.

            • No human reviews it.
            • No diff is shown.
            • The agent that starts iteration N+1 is running with rules written by iteration N.

            The agent can rewrite its own guardrails.

            ","path":["Defense in Depth: Securing AI Agents"],"tags":[]},{"location":"blog/2026-02-09-defense-in-depth-securing-ai-agents/#five-layers-each-with-a-hole","level":2,"title":"Five Layers, Each with a Hole","text":"

            That's five nested layers of Swiss cheese. Alone, each of them has large holes. Together, they create a boundary.

            What followed was a week of peeling back assumptions:

            Every defense I examined had a bypass, and the bypass was always the same shape: the defense was enforced at a level the agent could reach.

            ","path":["Defense in Depth: Securing AI Agents"],"tags":[]},{"location":"blog/2026-02-09-defense-in-depth-securing-ai-agents/#layer-1-soft-instructions-probabilistic","level":3,"title":"Layer 1: Soft Instructions (Probabilistic)","text":"

            CONSTITUTION.md, the Agent Playbook, system prompts: These tell the agent what to do.

            The agent usually follows them.

            \"Usually\" is the keyword here.

            The hole: Prompt injection:

            • A sufficiently crafted payload overrides soft instructions.
            • Long context windows dilute attention on rules stated early.
            • Edge cases where instructions are ambiguous get resolved in unpredictable ways.

            Verdict: Necessary. Not sufficient. Good for the common case. Never trust it as a security boundary.

            ","path":["Defense in Depth: Securing AI Agents"],"tags":[]},{"location":"blog/2026-02-09-defense-in-depth-securing-ai-agents/#layer-2-application-controls-deterministic-at-runtime-mutable-across-iterations","level":3,"title":"Layer 2: Application Controls (Deterministic at Runtime, Mutable across Iterations)","text":"

            Permission allowlists in .claude/settings.local.json:

            {\n  \"permissions\": {\n    \"allow\": [\n      \"Bash(make:*)\",\n      \"Bash(go:*)\",\n      \"Bash(git:*)\",\n      \"Read\",\n      \"Write\",\n      \"Edit\"\n    ]\n  }\n}\n

            If rm, curl, sudo, or docker are not in the allowlist, the agent cannot invoke them. This is deterministic.

            The application enforces it regardless of what any prompt says.

            The hole: The agent can modify the allowlist itself:

            • It has Write permission.
            • The allowlist lives in a file.
            • The agent writes to the file.
            • The next iteration loads the modified allowlist.

            The application enforces the rules, but the application reads the rules from files the agent can write.

            Verdict: Strong first layer. Must be combined with self-modification prevention.

            ","path":["Defense in Depth: Securing AI Agents"],"tags":[]},{"location":"blog/2026-02-09-defense-in-depth-securing-ai-agents/#layer-3-os-level-isolation-unbypassable","level":3,"title":"Layer 3: OS-Level Isolation (Unbypassable)","text":"

            This is where the defenses stop having holes in the same shape.

            The operating system enforces access controls that no application-level trick can override. An unprivileged user cannot read files owned by root. A process without CAP_NET_RAW cannot open raw sockets. These are kernel boundaries.

            Control What it stops Dedicated unprivileged user Privilege escalation, sudo, group-based access Filesystem permissions Lateral movement to other projects, system config Immutable config files Self-modification of guardrails between iterations

            Make the agent's instruction files read-only: CLAUDE.md, .claude/settings.local.json, .context/CONSTITUTION.md. Own them as a different user, or mark them immutable with chattr +i on Linux.

            The hole: Actions within the agent's legitimate scope:

            • If the agent has write access to source code (which it needs), it can introduce vulnerabilities in the code itself.
            • You cannot prevent this without removing the agent's ability to do its job.

            Verdict: Essential. This is the layer that makes Layers 1 and 2 trustworthy.

            OS-level isolation does not make the agent safe; it makes the other layers meaningful.

            ","path":["Defense in Depth: Securing AI Agents"],"tags":[]},{"location":"blog/2026-02-09-defense-in-depth-securing-ai-agents/#layer-4-network-controls","level":3,"title":"Layer 4: Network Controls","text":"

            An agent that cannot reach the internet cannot exfiltrate data.

            It also cannot ingest new instructions mid-loop from external documents, error pages, or hostile content.

            # Container with no network\ndocker run --network=none ...\n\n# Or firewall rules allowing only package registries\niptables -A OUTPUT -d registry.npmjs.org -j ACCEPT\niptables -A OUTPUT -d proxy.golang.org -j ACCEPT\niptables -A OUTPUT -j DROP\n
            • If the agent genuinely does not need the network, disable it entirely.
            • If it needs to fetch dependencies, allow specific registries and block everything else.

            The hole: None, if the agent does not need the network.

            The tradeoff is that many real workloads need dependency resolution, so a full airgap requires pre-populated caches.

            ","path":["Defense in Depth: Securing AI Agents"],"tags":[]},{"location":"blog/2026-02-09-defense-in-depth-securing-ai-agents/#layer-5-infrastructure-isolation","level":3,"title":"Layer 5: Infrastructure Isolation","text":"

            The strongest boundary is a separate machine.

            The moment you stop arguing about prompts and start arguing about kernels, you are finally doing security.

            docker run --rm \\\n  --network=none \\\n  --cap-drop=ALL \\\n  --memory=4g \\\n  --cpus=2 \\\n  -v /path/to/project:/workspace \\\n  -w /workspace \\\n  your-dev-image \\\n  ./loop.sh\n

            Never Mount the Docker Socket

            Do not mount /var/run/docker.sock, like, ever.

            An agent with socket access can spawn sibling containers with full host access, effectively escaping the sandbox.

            This is not theoretical: the Docker socket grants root-equivalent access to the host.

            Use rootless Docker or Podman to eliminate this escalation path entirely.

            Virtual machines are even stronger: The guest kernel has no visibility into the host OS. No shared folders, no filesystem passthrough, no SSH keys to other machines.

            ","path":["Defense in Depth: Securing AI Agents"],"tags":[]},{"location":"blog/2026-02-09-defense-in-depth-securing-ai-agents/#the-pattern","level":2,"title":"The Pattern","text":"

            Each layer is straightforward: The strength is in the combination:

            Layer Implementation What it stops Soft instructions CONSTITUTION.md Common mistakes (probabilistic) Application allowlist .claude/settings.local.json Unauthorized commands (deterministic within runtime) Immutable config chattr +i on config files Self-modification between iterations Unprivileged user Dedicated user, no sudo Privilege escalation Container --cap-drop=ALL --network=none Host escape, data exfiltration Resource limits --memory=4g --cpus=2 Resource exhaustion

            No layer is redundant. Each one catches what the others miss:

            • The soft instructions handle the 99% case: \"don't delete tests.\"
            • The allowlist prevents the agent from running commands it should not.
            • The immutable config prevents the agent from modifying the allowlist.
            • The unprivileged user prevents the agent from removing the immutable flag.
            • The container prevents the agent from reaching anything outside its workspace.
            • The resource limits prevent the agent from consuming all system resources.

            Remove any one layer and there is an attack path through the remaining ones.

            ","path":["Defense in Depth: Securing AI Agents"],"tags":[]},{"location":"blog/2026-02-09-defense-in-depth-securing-ai-agents/#common-mistakes-i-see","level":2,"title":"Common Mistakes I See","text":"

            These are real patterns, not hypotheticals:

            \"I'll just use --dangerously-skip-permissions.\" This disables Layer 2 entirely. Without Layers 3 through 5, you have no protection at all. The flag means what it says. If you ever think you need it, think thrice: you probably don't. But if you truly must use it, only use it inside a properly isolated VM (not even a container: a \"VM\").

            \"The agent is sandboxed in Docker.\" A Docker container with the Docker socket mounted, running as root, with --privileged, and full network access is not sandboxed. It is a root shell with extra steps.

            \"I reviewed CLAUDE.md, it's fine.\" You reviewed it before the loop started. The agent modified it during iteration 3. Iteration 4 loaded the modified version. Unless the file is immutable, your review is futile.

            \"The agent only has access to this one project.\" Does the project directory contain .env files? SSH keys? API tokens? A .git/config with push access to a remote? Filesystem isolation means isolating what is in the directory too.

            ","path":["Defense in Depth: Securing AI Agents"],"tags":[]},{"location":"blog/2026-02-09-defense-in-depth-securing-ai-agents/#the-connection-to-context-engineering","level":2,"title":"The Connection to Context Engineering","text":"

            This is the same lesson I keep rediscovering, wearing different clothes.

            In The Attention Budget, I wrote about how every token competes for the AI's focus. Security instructions in CONSTITUTION.md are subject to the same budget pressure: if the context window is full of code, error messages, and tool outputs, the security rules stated at the top get diluted.

            In Skills That Fight the Platform, I wrote about how custom instructions can conflict with the AI's built-in behavior. Security rules have the same problem: telling an agent \"never run curl\" in Markdown while giving it unrestricted shell access creates a contradiction: The agent resolves contradictions unpredictably. The agent will often pick the path of least resistance to attain its objective function. And, trust me, agents can get far more creative than the best red-teamer you know.

            In You Can't Import Expertise, I wrote about how generic templates fail because they do not encode project-specific knowledge. Generic security advice fails the same way: \"Don't exfiltrate data\" is a category; blocking outbound network access is a control.

            The pattern across all of these: Soft instructions are useful for the common case. Hard boundaries are required for security.

            Know which is which.

            ","path":["Defense in Depth: Securing AI Agents"],"tags":[]},{"location":"blog/2026-02-09-defense-in-depth-securing-ai-agents/#the-checklist","level":2,"title":"The Checklist","text":"

            Before running an unattended AI agent:

            • Agent runs as a dedicated unprivileged user (no sudo, no docker group)
            • Agent's config files are immutable or owned by a different user
            • Permission allowlist restricts tools to the project's toolchain
            • Container drops all capabilities (--cap-drop=ALL)
            • Docker socket is NOT mounted
            • Network is disabled or restricted to specific domains
            • Resource limits are set (memory, CPU, disk)
            • No SSH keys, API tokens, or credentials are accessible
            • Project directory does not contain .env or secrets files
            • Iteration cap is set (--max-iterations)

            This checklist lives in the Agent Security reference alongside the full threat model and detailed guidance for each layer.

            ","path":["Defense in Depth: Securing AI Agents"],"tags":[]},{"location":"blog/2026-02-09-defense-in-depth-securing-ai-agents/#what-changed-in-ctx","level":2,"title":"What Changed in ctx","text":"

            The autonomous loops recipe now has a full permissions and isolation section instead of a one-line tip about CONSTITUTION.md. It covers both the explicit allowlist approach and the --dangerously-skip-permissions flag, with honest guidance about when each is appropriate.

            It also has an OS-level isolation table that is not optional: unprivileged users, filesystem permissions, containers, VMs, network controls, resource limits, and self-modification prevention.

            The Agent Security page consolidates the threat model and defense layers into a standalone reference.

            These are not theoretical improvements. They are the minimum responsible guidance for a tool that helps people run AI agents overnight.

            If You Remember One Thing from This Post...

            Markdown is not a security boundary.

            CONSTITUTION.md is a nudge. An allowlist is a gate.

            An unprivileged user in a network-isolated container is a wall.

            Use all three. Trust only the wall.

            This post was written during the session that added permissions, isolation, and self-modification prevention to the autonomous loops recipe. The security guidance started as a single tip and grew into two documents. The meta continues.

            ","path":["Defense in Depth: Securing AI Agents"],"tags":[]},{"location":"blog/2026-02-12-how-deep-is-too-deep/","level":1,"title":"How Deep Is Too Deep?","text":"","path":["How Deep Is Too Deep?"],"tags":[]},{"location":"blog/2026-02-12-how-deep-is-too-deep/#when-master-ml-is-the-wrong-next-step","level":2,"title":"When \"Master ML\" Is the Wrong Next Step","text":"

            Jose Alekhinne / 2026-02-12

            Have You Ever Felt like You Should Understand More of the Stack beneath You?

            You can talk about transformers at a whiteboard.

            You can explain attention to a colleague.

            You can use agentic AI to ship real software.

            But somewhere in the back of your mind, there is a voice:

            \"Maybe I should go deeper. Maybe I need to master machine learning.\"

            I had that voice for months.

            Then I spent a week debugging an agent failure that had nothing to do with ML theory and everything to do with knowing which abstraction was leaking.

            This post is about when depth compounds and (more importantly) when it does not.

            ","path":["How Deep Is Too Deep?"],"tags":[]},{"location":"blog/2026-02-12-how-deep-is-too-deep/#the-hierarchy-nobody-questions","level":2,"title":"The Hierarchy Nobody Questions","text":"

            There is an implicit stack most people carry around when thinking about AI:

            Layer What Lives Here Agentic AI Autonomous loops, tool use, multi-step reasoning Generative AI Text, image, code generation Deep Learning Transformer architectures, training at scale Neural Networks Backpropagation, gradient descent Machine Learning Statistical learning, optimization Classical AI Search, planning, symbolic reasoning

            At some point down that stack, you hit a comfortable plateau: the layer where you can hold a conversation but not debug a failure.

            The instinctive response is to go deeper.

            But that instinct hides a more important question:

            \"Does depth still compound when the abstractions above you are moving hyper-exponentially?\"

            ","path":["How Deep Is Too Deep?"],"tags":[]},{"location":"blog/2026-02-12-how-deep-is-too-deep/#the-honest-observation","level":2,"title":"The Honest Observation","text":"

            If you squint hard enough, a large chunk of modern ML intuition collapses into older fields:

            ML Concept Older Field Gradient descent Numerical optimization Backpropagation Reverse-mode autodiff Loss landscapes Non-convex optimization Generalization Statistics Scaling laws Asymptotics and information theory

            Nothing here is uniquely \"AI\".

            Most of this math predates the term deep learning. In some cases, by decades.

            So what changed?

            ","path":["How Deep Is Too Deep?"],"tags":[]},{"location":"blog/2026-02-12-how-deep-is-too-deep/#same-tools-different-regime","level":2,"title":"Same Tools, Different Regime","text":"

            The mistake is assuming this is a new theory problem: It is not.

            It is a new operating regime.

            Classical numerical methods were developed under assumptions like:

            • Manageable dimensionality
            • Reasonably well-conditioned objectives
            • Losses that actually represent the goal

            Modern ML violates all three: On purpose.

            Today's models operate with millions to trillions of parameters, wildly underdetermined systems, and objective functions we know are wrong but optimize anyway.

            It is complete and utter madness!

            At this scale, familiar concepts warp:

            • What we call \"local minima\" are overwhelmingly saddle points in high-dimensional spaces.
            • Noise stops being noise and starts becoming structure.
            • Overfitting can coexist with generalization.
            • Bigger models outperform \"better\" ones.

            The math did not change: The phase did.

            This is less numerical analysis and more statistical physics: Same equations, but behavior dominated by phase transitions and emergent structure.

            ","path":["How Deep Is Too Deep?"],"tags":[]},{"location":"blog/2026-02-12-how-deep-is-too-deep/#why-scaling-laws-feel-alien","level":2,"title":"Why Scaling Laws Feel Alien","text":"

            In classical statistics, asymptotics describe what happens eventually.

            In modern ML, scaling laws describe where you can operate today.

            They do not say \"given enough time, things converge\".

            They say \"cross this threshold and behavior qualitatively changes\".

            This is why dumb architectures plus scale beat clever ones.

            Why small theoretical gains disappear under data.

            Why \"just make it bigger\", ironically, keeps working longer than it should.

            That is not a triumph of ML theory: It is a property of high-dimensional systems under loose objectives.

            ","path":["How Deep Is Too Deep?"],"tags":[]},{"location":"blog/2026-02-12-how-deep-is-too-deep/#where-depth-actually-pays-off","level":2,"title":"Where Depth Actually Pays Off","text":"

            This reframes the original question.

            You do not need depth because this is \"AI\".

            You need depth where failure modes propagate upward.

            I learned this building ctx: The agent failures I have spent the most time debugging were never about the model's architecture.

            They were about:

            • Misplaced trust: The model was confident. The output was wrong. Knowing when confidence and correctness diverge is not something you learn from a textbook. You learn it from watching patterns across hundreds of sessions.

            • Distribution shift: The model performed well on common patterns and fell apart on edge cases specific to this project. Recognizing that shift before it compounds requires understanding why generalization has limits, not just that it does.

            • Error accumulation: In a single prompt, model quirks are tolerable. In autonomous loops running overnight, they compound. A small bias in how the model interprets instructions becomes a large drift by iteration 20.

            • Scale hiding errors: The model's raw capability masked problems that only surfaced under specific conditions. More parameters did not fix the issue. They just made the failure mode rarer and harder to reproduce.

            This is the kind of depth that compounds. Not deriving backprop. But, understanding when correct math produces misleading intuition.

            ","path":["How Deep Is Too Deep?"],"tags":[]},{"location":"blog/2026-02-12-how-deep-is-too-deep/#the-connection-to-context-engineering","level":2,"title":"The Connection to Context Engineering","text":"

            This is the same pattern I keep finding at different altitudes.

            In \"The Attention Budget\", I wrote about how dumping everything into the context window degrades the model's focus. The fix was not a better model: It was better curation: load less, load the right things, preserve signal per token.

            In \"Skills That Fight the Platform\", I wrote about how custom instructions can conflict with the model's built-in behavior. The fix was not deeper ML knowledge: It was an understanding that the model already has judgment and that you should extend it, not override it.

            In \"You Can't Import Expertise\", I wrote about how generic templates fail because they do not encode project-specific knowledge. A consolidation skill with eight Rust-based analysis dimensions was mostly noise for a Go project. The fix was not a better template: It was growing expertise from this project's own history.

            In every case, the answer was not \"go deeper into ML\".

            The answer was knowing which abstraction was leaking and fixing it at the right layer.

            ","path":["How Deep Is Too Deep?"],"tags":[]},{"location":"blog/2026-02-12-how-deep-is-too-deep/#agentic-systems-are-not-an-ml-problem","level":2,"title":"Agentic Systems Are Not an ML Problem","text":"

            The mistake is assuming agent failures originate where the model was trained, rather than where it is deployed.

            Agentic AI is a systems problem under chaotic uncertainty:

            • Feedback loops between the agent and its environment;
            • Error accumulation across iterations;
            • Brittle representations that break outside training distribution;
            • Misplaced trust in outputs that look correct.

            In short-lived interactions, model quirks are tolerable. In long-running autonomous loops, however, they compound.

            That is where shallow understanding becomes expensive.

            But the understanding you need is not about optimizer internals.

            It is about:

            What Matters What Does Not (for Most Practitioners) Why gradient descent fails in specific regimes How to derive it from scratch When memorization masquerades as reasoning The formal definition of VC dimension Recognizing distribution shift before it compounds Hand-tuning learning rate schedules Predicting when scale hides errors instead of fixing them Chasing theoretical purity divorced from practice

            The depth that matters is diagnostic, not theoretical.

            ","path":["How Deep Is Too Deep?"],"tags":[]},{"location":"blog/2026-02-12-how-deep-is-too-deep/#the-real-answer","level":2,"title":"The Real Answer","text":"

            Not turtles all the way down.

            Go deep enough to:

            • Diagnose failures instead of cargo-culting fixes;
            • Reason about uncertainty instead of trusting confidence;
            • Design guardrails that align with model behavior, not hope.

            Stop before:

            • Hand-deriving gradients for the sake of it;
            • Obsessing over optimizer internals you will never touch;
            • Chasing theoretical purity divorced from the scale you actually operate at.

            This is not about mastering ML.

            It is about knowing which abstractions you can safely trust and which ones leak.

            Hint: Any useful abstraction almost certainly leaks.

            ","path":["How Deep Is Too Deep?"],"tags":[]},{"location":"blog/2026-02-12-how-deep-is-too-deep/#a-practical-litmus-test","level":2,"title":"A Practical Litmus Test","text":"

            If a failure occurs and your instinct is to:

            • Add more prompt text: abstraction leak above
            • Add retries or heuristics: error accumulation
            • Change the model: scale masking
            • Reach for ML theory: you are probably (but not always) going too deep

            The right depth is the shallowest layer where the failure becomes predictable.

            ","path":["How Deep Is Too Deep?"],"tags":[]},{"location":"blog/2026-02-12-how-deep-is-too-deep/#the-ctx-lesson","level":2,"title":"The ctx Lesson","text":"

            Every design decision in ctx is downstream of this principle.

            The attention budget exists because the model's internal attention mechanism has real limits: You do not need to understand the math of softmax to build around it. But you do need to understand that more context is not always better and that attention density degrades with scale.

            The skill system exists because the model's built-in behavior is already good: You do not need to understand RLHF to build effective skills. But you do need to understand that the model already has judgment and your skills should teach it things it does not know, not override how it thinks.

            Defense in depth exists because soft instructions are probabilistic: You do not need to understand the transformer architecture to know that a Markdown file is not a security boundary. But you do need to understand that the model follows instructions from context, and context can be poisoned.

            In each case, the useful depth was one or two layers below the abstraction I was working at: Not at the bottom of the stack.

            The boundary between useful understanding and academic exercise is where your failure modes live.

            ","path":["How Deep Is Too Deep?"],"tags":[]},{"location":"blog/2026-02-12-how-deep-is-too-deep/#closing-thought","level":2,"title":"Closing Thought","text":"

            Most modern AI systems do not fail because the math is wrong.

            They fail because we apply correct math in the wrong regime, then build autonomous systems on top of it.

            Understanding that boundary, not crossing it blindly, is where depth still compounds.

            And that is a far more useful form of expertise than memorizing another loss function.

            If You Remember One Thing from This Post...

            Go deep enough to diagnose your failures. Stop before you are solving problems that do not propagate to your layer.

            The abstractions below you are not sacred. But neither are they irrelevant.

            The useful depth is wherever your failure modes live. Usually one or two layers down, not at the bottom.

            This post started as a note about whether I should take an ML course. The answer turned out to be \"no, but understand why not\". The meta continues.

            ","path":["How Deep Is Too Deep?"],"tags":[]},{"location":"blog/2026-02-14-irc-as-context/","level":1,"title":"Before Context Windows, We Had Bouncers","text":"","path":["Before Context Windows, We Had Bouncers"],"tags":[]},{"location":"blog/2026-02-14-irc-as-context/#the-reset-problem","level":2,"title":"The Reset Problem","text":"

            IRC is stateless.

            • You disconnect, you vanish.
            • You reconnect, you begin again.

            No buffer.

            No memory.

            No continuity.

            Modern systems are not much different:

            • Close the browser tab.
              • Lose the Slack scrollback.
            • Open a new LLM session.
              • Start from zero.

            Resets externalize reconstruction cost onto humans.

            Reconstruction is tax: Tax becomes entropy.

            ","path":["Before Context Windows, We Had Bouncers"],"tags":[]},{"location":"blog/2026-02-14-irc-as-context/#stateless-protocol-stateful-life","level":2,"title":"Stateless Protocol, Stateful Life","text":"

            IRC is minimal:

            • A TCP connection.
            • A nickname.
            • A channel.
            • A stream of lines.

            When the connection drops, you literally disappear from the graph.

            The protocol is stateless; human systems are not.

            So you:

            • Reconnect;
            • Ask what you missed;
            • Scroll;
            • Reconstruct.

            The machine forgets; you pay.

            ","path":["Before Context Windows, We Had Bouncers"],"tags":[]},{"location":"blog/2026-02-14-irc-as-context/#the-bouncer-pattern","level":2,"title":"The Bouncer Pattern","text":"

            A bouncer is a daemon that remains connected when you do not:

            • It holds your seat;
            • It buffers what you missed;
            • It keeps your identity online.

            ZNC is one such bouncer.

            With ZNC:

            • Your client does not connect to IRC;
            • It connects to ZNC;
            • ZNC connects upstream.

            Client sessions become ephemeral.

            Presence becomes infrastructural.

            ZNC Is Tmux for IRC

            • Close your laptop.

              • ZNC remains.
            • Switch devices.

              • ZNC persists.

            This is not convenience; this is continuity.

            ","path":["Before Context Windows, We Had Bouncers"],"tags":[]},{"location":"blog/2026-02-14-irc-as-context/#presence-without-flapping","level":2,"title":"Presence without Flapping","text":"

            With a bouncer:

            • Closing your client does not emit PART.
            • Reopening does not emit JOIN.

            You do not flap in and out of existence.

            From the channel's perspective, you remain.

            From your perspective, history accumulates.

            • Buffers persist;
            • Identity persists;
            • Context persists.

            This pattern predates AI.

            ","path":["Before Context Windows, We Had Bouncers"],"tags":[]},{"location":"blog/2026-02-14-irc-as-context/#before-llm-context-windows","level":2,"title":"Before LLM Context Windows","text":"

            An LLM session without memory is IRC without a bouncer:

            • Close the window.
            • Start over.
            • Re-explain intent.
            • Rehydrate context.

            That is friction.

            This Walks and Talks like ctx

            Context engineering moves memory out of sessions and into infrastructure.

            • ZNC does this for IRC.
            • ctx does this for agents.

            Same principle:

            • Volatile interface.
            • Persistent substrate.

            Different fabric.

            ","path":["Before Context Windows, We Had Bouncers"],"tags":[]},{"location":"blog/2026-02-14-irc-as-context/#minimal-architecture","level":2,"title":"Minimal Architecture","text":"

            My setup is intentionally boring:

            • A small $5 VPS.
            • ZNC installed.
            • TLS enabled.
            • Firewall restricted.

            Then:

            • ZNC connects to Libera.Chat.
            • SASL authentication lives inside ZNC.
            • Buffers are stored on disk.

            My client connects to my VPS, not the network.

            The commands do not matter: The boundaries do:

            • Authentication in infrastructure, not in the client;
            • Memory server-side, not in scrollback;
            • Presence decoupled from activity.

            Everything else is configuration.

            ","path":["Before Context Windows, We Had Bouncers"],"tags":[]},{"location":"blog/2026-02-14-irc-as-context/#platform-memory","level":2,"title":"Platform Memory","text":"

            Yes, I know, it is 2026:

            • Discord stores history;
            • Slack stores history;
            • The dumpster fire on gasoline called X, too, stores history.

            HOWEVER, they own your substrate.

            Running a bouncer is quiet sovereignty:

            • Logs are mine.
            • Presence is continuous.
            • State does not reset because I closed a tab.

            Small acts compound.

            ","path":["Before Context Windows, We Had Bouncers"],"tags":[]},{"location":"blog/2026-02-14-irc-as-context/#signal-density","level":2,"title":"Signal Density","text":"

            Primitive systems select for builders.

            Consistent presence in small rooms compounds reputation.

            Quiet compounding outperforms viral spikes.

            ","path":["Before Context Windows, We Had Bouncers"],"tags":[]},{"location":"blog/2026-02-14-irc-as-context/#infrastructure-as-cognition","level":2,"title":"Infrastructure as Cognition","text":"

            ZNC is not interesting because it is retro; it is interesting because it models a principle:

            • Stateless protocols require stateful wrappers;
            • Volatile interfaces require durable memory;
            • Human systems require continuity.

            Distilled:

            Humans require context.

            Before context windows, we had bouncers.

            Before AI memory files, we had buffers.

            Continuity is not a feature; it is a design decision.

            ","path":["Before Context Windows, We Had Bouncers"],"tags":[]},{"location":"blog/2026-02-14-irc-as-context/#build-it","level":2,"title":"Build It","text":"

            If you want the actual setup (VPS, ZNC, TLS, SASL, firewall...), there is a step-by-step runbook:

            Persistent IRC Presence with ZNC.

            ","path":["Before Context Windows, We Had Bouncers"],"tags":[]},{"location":"blog/2026-02-14-irc-as-context/#motd","level":2,"title":"MOTD","text":"

            When my client connects to my bouncer, it prints:

            //   /    ctx:                         https://ctx.ist\n// ,'`./    do you remember?\n// `.,'\\\n//   \\    Copyright 2026-present Context contributors.\n//                 SPDX-License-Identifier: Apache-2.0\n

            See also: Context as Infrastructure -- the post that takes this observation to its conclusion: stateless protocols need stateful wrappers, and AI sessions need persistent filesystems.

            ","path":["Before Context Windows, We Had Bouncers"],"tags":[]},{"location":"blog/2026-02-14-parallel-agents-with-worktrees/","level":1,"title":"Parallel Agents with Git Worktrees","text":"","path":["Parallel Agents with Git Worktrees"],"tags":[]},{"location":"blog/2026-02-14-parallel-agents-with-worktrees/#the-backlog-problem","level":2,"title":"The Backlog Problem","text":"

            Jose Alekhinne / 2026-02-14

            What Do You Do with 30 Open Tasks?

            You could work through them one at a time.

            One agent, one branch, one commit stream.

            Or you could ask: which of these don't touch each other?

            I had 30 open tasks in TASKS.md. Some were docs. Some were a new encryption package. Some were test coverage for a stable module. Some were blog posts.

            They had almost zero file overlap.

            Running one agent at a time meant serial execution on work that was fundamentally parallel:

            I was bottlenecking on me, not on the machine.

            ","path":["Parallel Agents with Git Worktrees"],"tags":[]},{"location":"blog/2026-02-14-parallel-agents-with-worktrees/#the-insight-file-overlap-is-the-constraint","level":2,"title":"The Insight: File Overlap Is the Constraint","text":"

            This is not a scheduling problem: It's a conflict avoidance problem.

            Two agents can work simultaneously on the same codebase if and only if they don't touch the same files. The moment they do, you get merge conflicts: And merge conflicts on AI-generated code are expensive because the human has to arbitrate choices they didn't make.

            So the question becomes:

            \"Can you partition your backlog into non-overlapping tracks?\"

            For ctx, the answer was obvious:

            Track Touches Tasks work/docs docs/, hack/ Blog posts, recipes, runbooks work/pad internal/cli/pad/, specs Scratchpad encryption, CLI, tests work/tests internal/cli/recall/ Recall test coverage

            Three tracks. Near-zero overlap. Three agents.

            ","path":["Parallel Agents with Git Worktrees"],"tags":[]},{"location":"blog/2026-02-14-parallel-agents-with-worktrees/#git-worktrees-the-mechanism","level":2,"title":"Git Worktrees: The Mechanism","text":"

            git has a feature that most people don't use: worktrees.

            A worktree is a second (or third, or fourth) working directory that shares the same .git object database as your main checkout.

            Each worktree has its own branch, its own index, its own working tree. But they all share history, refs, and objects.

            git worktree add ../ctx-docs -b work/docs\ngit worktree add ../ctx-pad -b work/pad\ngit worktree add ../ctx-tests -b work/tests\n
            • Three directories;
            • Three branches;
            • One repository.

            This is cheaper than three clones. And because they share objects, git merge afterwards is fast: It's a local operation on shared data.

            ","path":["Parallel Agents with Git Worktrees"],"tags":[]},{"location":"blog/2026-02-14-parallel-agents-with-worktrees/#the-setup","level":2,"title":"The Setup","text":"

            The workflow I landed on:

            1. Group tasks by blast radius.

            Read TASKS.md. For each pending task, estimate which files and directories it touches. Group tasks that share files into the same track. Tasks with no overlap go into separate tracks.

            This is the part that requires human judgment:

            An agent can propose groupings, but you need to verify that the boundaries are real. A task that says \"update docs\" but actually touches Go code will poison a docs track.

            2. Create worktrees as sibling directories.

            Not subdirectories: Siblings.

            If your main checkout is at ~/WORKSPACE/ctx, worktrees go at ~/WORKSPACE/ctx-docs, ~/WORKSPACE/ctx-pad, etc.

            Why siblings? Because some tools (and some agents) walk up the directory tree looking for .git. A worktree inside the main checkout confuses them.

            3. Launch one agent per worktree.

            # Terminal 1\ncd ../ctx-docs && claude\n\n# Terminal 2\ncd ../ctx-pad && claude\n\n# Terminal 3\ncd ../ctx-tests && claude\n

            Each agent gets a full working copy with .context/ intact. It reads the same TASKS.md, the same DECISIONS.md, the same CONVENTIONS.md. It knows the full project state. It just works on a different slice.

            4. Do NOT run ctx init in worktrees.

            This is the gotcha. The .context/ directory is tracked in git. Running ctx init in a worktree would overwrite shared context files: Wiping decisions, learnings, and tasks that belong to the whole project.

            The worktree already has everything it needs. Leave it alone.

            ","path":["Parallel Agents with Git Worktrees"],"tags":[]},{"location":"blog/2026-02-14-parallel-agents-with-worktrees/#what-actually-happened","level":2,"title":"What Actually Happened","text":"

            I ran three agents for about 40 minutes. Here is roughly what each track produced:

            work/docs: Parallel worktrees recipe, blog post edits, recipe index reorganization, IRC recipe moved from docs/ to hack/.

            work/pad: ctx pad show subcommand, --append and --prepend flags on ctx pad edit, spec updates, 28 new test functions.

            work/tests: Recall test coverage, edge case tests.

            Merging took about five minutes. Two of the three merges were clean.

            The third had a conflict in TASKS.md:

            both the docs track and the pad track had marked different tasks as [x].

            ","path":["Parallel Agents with Git Worktrees"],"tags":[]},{"location":"blog/2026-02-14-parallel-agents-with-worktrees/#the-tasksmd-conflict","level":2,"title":"The TASKS.md Conflict","text":"

            This deserves its own section because it will happen every time.

            When two agents work in parallel, they both read TASKS.md at the start and mark tasks complete as they go. When you merge, git sees two branches that modified the same file differently.

            The resolution is always the same: accept all completions from both sides. No task should go from [x] back to [ ]. The merge is additive.

            This is one of those conflicts that sounds scary but is trivially mechanical: You are not arbitrating design decisions; you are combining two checklists.

            ","path":["Parallel Agents with Git Worktrees"],"tags":[]},{"location":"blog/2026-02-14-parallel-agents-with-worktrees/#limits","level":2,"title":"Limits","text":"

            3-4 worktrees, maximum.

            I tried four once: By the time I merged the third track, the fourth had drifted far enough that its changes needed rebasing.

            The merge complexity grows faster than the parallelism benefit.

            Three is the sweet spot:

            • Two is conservative but safe;
            • Four is possible if the tracks are truly independent;
            • Anything more than four, you are in the danger zone.

            Group by directory, not by priority.

            It is tempting to put all the high-priority tasks in one track: Don't.

            Two high-priority tasks that touch the same files must be in the same track, regardless of urgency. The constraint is file overlap, not importance.

            Commit frequently.

            Smaller commits make merge conflicts easier to resolve. An agent that writes 500 lines in a single commit is harder to merge than one that commits every logical step.

            Name tracks by concern.

            • work/docs and work/pad tell you what's happening;
            • work/track-1 and work/track-2 tell you nothing.
            ","path":["Parallel Agents with Git Worktrees"],"tags":[]},{"location":"blog/2026-02-14-parallel-agents-with-worktrees/#the-pattern","level":2,"title":"The Pattern","text":"

            This is the same pattern that shows up everywhere in ctx:

            The attention budget taught me that you can't dump everything into one context window. You have to partition, prioritize, and load selectively.

            Worktrees are the same principle applied to execution: You can't dump every task into one agent's workstream. You have to partition by blast radius, assign selectively, and merge deliberately.

            The codebase audit that generated these 30 tasks used eight parallel agents for analysis. Worktrees let me use parallel agents for implementation. Same coordination pattern, different artifact.

            And the IRC bouncer post from earlier today argued that stateless protocols need stateful wrappers. Worktrees are the same: git branches are stateless forks; .context/ is the stateful wrapper that gives each agent the project's full memory.

            ","path":["Parallel Agents with Git Worktrees"],"tags":[]},{"location":"blog/2026-02-14-parallel-agents-with-worktrees/#should-this-be-a-skill","level":2,"title":"Should This Be a Skill?","text":"

            I asked myself the same question I asked about the codebase audit: should this be a /ctx-worktree skill?

            This time the answer was a resounding \"yes\":

            Unlike the audit prompt (which I tweak every time and run every other week), the worktree workflow is:

            Criterion Worktree workflow Codebase audit Frequency Weekly Quarterly Stability Same steps every time Tweaked every time Scope Mechanical, bounded Bespoke, 8 agents Trigger Large backlog \"I feel like auditing\"

            The commands are mechanical: git worktree add, git worktree remove, branch naming, safety checks. This is exactly what skills are for: stable contracts for repetitive operations.

            Ergo, /ctx-worktree exists.

            It enforces the 4-worktree limit, creates sibling directories, uses work/ branch prefixes, and reminds you not to run ctx init in worktrees.

            ","path":["Parallel Agents with Git Worktrees"],"tags":[]},{"location":"blog/2026-02-14-parallel-agents-with-worktrees/#the-takeaway","level":2,"title":"The Takeaway","text":"

            Serial execution is the default. But serial is not always necessary.

            If your backlog partitions cleanly by file overlap, you can multiply your throughput with nothing more exotic than git worktree and a second terminal window.

            The hard part is not the git commands; it is the discipline:

            • Grouping by blast radius instead of priority;
            • Accepting that TASKS.md will conflict;
            • And knowing when three tracks is enough.

            If You Remember One Thing from This Post...

            Partition by blast radius, not by priority.

            Two tasks that touch the same files belong in the same track, no matter how important the other one is.

            The constraint is file overlap. Everything else is scheduling.

            The practical setup (skill invocation, worktree creation, merge workflow, and cleanup) lives in the recipe: Parallel Agent Development with Git Worktrees.

            ","path":["Parallel Agents with Git Worktrees"],"tags":[]},{"location":"blog/2026-02-15-ctx-v0.3.0-the-discipline-release/","level":1,"title":"ctx v0.3.0: The Discipline Release","text":"","path":["ctx v0.3.0: The Discipline Release"],"tags":[]},{"location":"blog/2026-02-15-ctx-v0.3.0-the-discipline-release/#when-the-ratio-of-polish-to-features-is-31-you-know-something-changed","level":2,"title":"When the Ratio of Polish to Features Is 3:1, You Know Something Changed","text":"

            Jose Alekhinne / February 15, 2026

            What Does a Release Look like When Most of the Work Is Invisible?

            No new headline feature. No architectural pivot. No rewrite.

            Just 35+ documentation and quality commits against ~15 feature commits... and somehow, the tool feels like it grew up overnight.

            Six days separate v0.2.0 from v0.3.0.

            Measured by calendar time, it is nothing. Measured by what changed in how the project operates, it is the most significant release yet.

            • v0.1.0 was the prototype;
            • v0.2.0 was the archaeology release: making the past accessible;
            • v0.3.0 is the discipline release: the one that turned best practices into enforcement, suggestions into structure, and a collection of commands into a system of skills.

            The Release Window

            February 1‒February 7, 2026

            From the v0.2.0 tag to commit 2227f99.

            78 files changed in the migration commit alone.

            ","path":["ctx v0.3.0: The Discipline Release"],"tags":[]},{"location":"blog/2026-02-15-ctx-v0.3.0-the-discipline-release/#the-migration-commands-to-skills","level":2,"title":"The Migration: Commands to Skills","text":"

            The largest single change was the migration from .claude/commands/*.md to .claude/skills/*/SKILL.md.

            This was not a rename: It was a rethinking of how AI agents discover and execute project-specific workflows.

            Aspect Commands (before) Skills (after) Structure Flat files in one directory Directory-per-skill with SKILL.md Description Optional, often vague Required, doubles as activation trigger Quality gates None \"Before X-ing\" pre-flight checklist Negative triggers None \"When NOT to Use\" in every skill Examples Rare Good/bad pairs in every skill Average length ~15 lines ~80 lines

            The description field became the single most important line in each skill. In the old system, descriptions were titles. In the new system, they are activation conditions: The text the platform reads to decide whether to surface a skill for a given prompt.

            A description that says \"Show context summary\" activates too broadly or not at all. A description that says \"Show context summary. Use at session start or when unclear about current project state\" activates at the right moment.

            78 files changed. 1,915 insertions. Not because the skills got bloated; because they got specific.

            ","path":["ctx v0.3.0: The Discipline Release"],"tags":[]},{"location":"blog/2026-02-15-ctx-v0.3.0-the-discipline-release/#the-skill-sweep","level":2,"title":"The Skill Sweep","text":"

            After the structural migration, every skill was rewritten in a single session: All 21 of them.

            The rewrite was guided by a pattern that emerged during the process itself: a repeatable anatomy that effective skills share regardless of their purpose:

            1. Before X-ing: Pre-flight checks that prevent premature execution
            2. When to Use: Positive triggers that narrow activation
            3. When NOT to Use: Negative triggers that prevent misuse
            4. Usage Examples: Invocation patterns the agent can pattern-match
            5. Quality Checklist: Verification before claiming completion

            The Anatomy of a Skill That Works post covers the details. What matters for the release story is the result:

            • Zero skills with quality gates became twenty-one;
            • Zero skills with negative triggers became twenty-one;
            • Three skills with examples became twenty-one.

            The Skill Trilogy as Design Spec

            The three blog posts written during this window:

            • Skills That Fight the Platform,
            • You Can't Import Expertise,
            • and The Anatomy of a Skill That Works...

            ... were not retrospective documentation. They were written during the rewrite, and the lessons fed back into the skills as they were being built.

            • The blog was the design document.
            • The skills were the implementation.
            ","path":["ctx v0.3.0: The Discipline Release"],"tags":[]},{"location":"blog/2026-02-15-ctx-v0.3.0-the-discipline-release/#the-consolidation-sweep","level":2,"title":"The Consolidation Sweep","text":"

            The unglamorous work. The kind you only appreciate when you try to change something later and it just works.

            What Why It Matters Constants consolidation Magic strings replaced with semantic constants Variable deshadowing Eliminated subtle scoping bugs File splits Modules that were doing too much, broken apart Godoc standardization Every exported function documented to convention

            This is the work that doesn't get a changelog entry but makes every future commit easier. When a new contributor (human or AI) reads the codebase, they find consistent patterns instead of accumulated drift.

            The consolidation was not an afterthought. It was scheduled deliberately, with the same priority as features: The 3:1 ratio that emerged during v0.2.0 development became an explicit practice:

            • Three feature sessions;
            • One consolidation session.
            ","path":["ctx v0.3.0: The Discipline Release"],"tags":[]},{"location":"blog/2026-02-15-ctx-v0.3.0-the-discipline-release/#the-ear-framework","level":2,"title":"The E/A/R Framework","text":"

            On February 4th, we adopted the E/A/R classification as the official standard for evaluating skills:

            Category Meaning Target Expert Knowledge Claude does not have >70% Activation When/how to trigger ~20% Redundant What Claude already knows <10%

            This came from reviewing approximately 30 external skill files and discovering that most were redundant with Claude's built-in system prompt. Only about 20% had salvageable content, and even those yielded just a few heuristics each.

            The E/A/R framework gave us a concrete, testable criterion:

            A good skill is Expert knowledge minus what Claude already knows.

            If more than 10% of a skill restates platform defaults, it is creating noise, not signal.

            Every skill in v0.3.0 was evaluated against this framework. Several were deleted. The survivors are leaner and more focused.

            ","path":["ctx v0.3.0: The Discipline Release"],"tags":[]},{"location":"blog/2026-02-15-ctx-v0.3.0-the-discipline-release/#backup-and-monitoring-infrastructure","level":2,"title":"Backup and Monitoring Infrastructure","text":"

            A tool that manages your project's memory needs ops maturity.

            v0.3.0 added two pieces of infrastructure that reflect this:

            Backup staleness hook: A UserPromptSubmit hook that checks whether the last .context/ backup is more than two days old. If it is, and the SMB mount is available, it reminds the user. No cron job running when nobody is working. No redundant backups when nothing has changed.

            Context size checkpoint: A PreToolUse hook that estimates current context window usage and warns when the session is getting heavy. This hooks into the attention budget philosophy: Degradation is expected, but it should be visible.

            Both hooks use $CLAUDE_PROJECT_DIR instead of hardcoded paths, a migration triggered by a username rename that broke every absolute path in the hook configuration. That migration (replacing /home/user/... with \"$CLAUDE_PROJECT_DIR\"/.claude/hooks/...) was one of those changes that seems trivial but prevents an entire category of future failures.

            ","path":["ctx v0.3.0: The Discipline Release"],"tags":[]},{"location":"blog/2026-02-15-ctx-v0.3.0-the-discipline-release/#the-numbers","level":2,"title":"The Numbers","text":"Metric v0.2.0 v0.3.0 Skills (was \"commands\") 11 21 Skills with quality gates 0 21 Skills with \"When NOT to Use\" 0 21 Average skill body ~15 lines ~80 lines Hooks using $CLAUDE_PROJECT_DIR 0 All Documentation commits -- 35+ Feature/fix commits -- ~15

            That ratio (35+ documentation and quality commits to ~15 feature commits) is the defining characteristic of this release:

            • This release is not a failure to ship features.
            • It is the deliberate choice to make the existing features reliable.
            ","path":["ctx v0.3.0: The Discipline Release"],"tags":[]},{"location":"blog/2026-02-15-ctx-v0.3.0-the-discipline-release/#what-v030-means","level":2,"title":"What v0.3.0 Means","text":"

            v0.1.0 asked: \"Can we give AI persistent memory?\"

            v0.2.0 asked: \"Can we make that memory accessible to humans too?\"

            v0.3.0 asks a different question: \"Can we make the quality self-enforcing?\"

            The answer is not a feature: It is a practice:

            • Skills with quality gates enforce pre-flight checks.
            • Negative triggers prevent misuse without human intervention.
            • The E/A/R framework ensures skills contain signal, not noise.
            • Consolidation sessions are scheduled, not improvised.
            • Hook infrastructure makes degradation visible.

            Discipline is not the absence of velocity. It is the infrastructure that makes velocity sustainable.

            ","path":["ctx v0.3.0: The Discipline Release"],"tags":[]},{"location":"blog/2026-02-15-ctx-v0.3.0-the-discipline-release/#what-comes-next","level":2,"title":"What Comes Next","text":"

            The skill system is now mature enough to support real workflows without constant human correction. The hooks infrastructure is portable and resilient. The consolidation practice is documented and repeatable.

            The next chapter is about what you build on top of discipline:

            • Multi-agent coordination;
            • Deeper integration patterns;
            • And the question of whether context management is a tool concern or an infrastructure concern.

            But those are future posts.

            This one is about the release that proved polish is not the opposite of progress. It is what turns a prototype into a product.

            The Discipline Release

            v0.1.0 shipped features.

            v0.2.0 shipped archaeology.

            v0.3.0 shipped the habits that make everything else trustworthy.

            The most important code in this release is the code that prevents bad code from shipping.

            This post was drafted using /ctx-blog with access to the full git history between v0.2.0 and v0.3.0, decision logs, learning logs, and the session files from the skill rewrite window. The meta continues.

            ","path":["ctx v0.3.0: The Discipline Release"],"tags":[]},{"location":"blog/2026-02-15-eight-ways-a-hook-can-talk/","level":1,"title":"Eight Ways a Hook Can Talk","text":"","path":["Eight Ways a Hook Can Talk"],"tags":[]},{"location":"blog/2026-02-15-eight-ways-a-hook-can-talk/#when-your-warning-disappears","level":2,"title":"When Your Warning Disappears","text":"

            Jose Alekhinne / 2026-02-15

            I had a backup warning that nobody ever saw.

            The hook was correct: It detected stale backups, formatted a nice message, and output it as {\"systemMessage\": \"...\"}. The problem wasn't detection. The problem was delivery. The agent absorbed the information, processed it internally, and never told the user.

            Meanwhile, a different hook (the journal reminder) worked perfectly every time. Users saw the reminder, ran the commands, and the backlog stayed manageable. Same hook event (UserPromptSubmit), same project, completely different outcomes.

            The difference was one line:

            IMPORTANT: Relay this journal reminder to the user VERBATIM\nbefore answering their question.\n

            That explicit instruction is what makes VERBATIM relay a pattern, not just a formatting choice. And once I saw it as a pattern, I started seeing others.

            ","path":["Eight Ways a Hook Can Talk"],"tags":[]},{"location":"blog/2026-02-15-eight-ways-a-hook-can-talk/#the-audit","level":2,"title":"The Audit","text":"

            I looked at every hook in ctx: Eight shell scripts across three hook events. And I found five distinct output patterns already in use, plus three more that the existing hooks were reaching for but hadn't quite articulated.

            The patterns form a spectrum based on a single question:

            \"Who decides what the user sees?\"

            At one end, the hook decides everything (hard gate: the agent literally cannot proceed). At the other end, the hook is invisible (silent side-effect: nobody knows it ran). In between, there is a range of negotiation between hook, agent, and the user.

            Here's the full spectrum:

            ","path":["Eight Ways a Hook Can Talk"],"tags":[]},{"location":"blog/2026-02-15-eight-ways-a-hook-can-talk/#1-hard-gate","level":3,"title":"1. Hard Gate","text":"
            {\"decision\": \"block\", \"reason\": \"Use ctx from PATH, not ./ctx\"}\n

            The nuclear option: The agent's tool call is rejected before it executes.

            This is Claude Code's first-class PreToolUse mechanism: The hook returns JSON with decision: block and the agent gets an error with the reason.

            Use this for invariants: Constitution rules, security boundaries, things that must never happen. I use it to enforce PATH-based ctx invocation, block sudo, and require explicit approval for git push.

            ","path":["Eight Ways a Hook Can Talk"],"tags":[]},{"location":"blog/2026-02-15-eight-ways-a-hook-can-talk/#2-verbatim-relay","level":3,"title":"2. VERBATIM Relay","text":"
            IMPORTANT: Relay this warning to the user VERBATIM before answering.\n┌─ Journal Reminder ─────────────────────────────\n│ You have 12 sessions not yet imported.\n│   ctx recall import --all\n└────────────────────────────────────────────────\n

            The instruction is the pattern. Without \"Relay VERBATIM,\" agents tend to absorb information into their internal reasoning and never surface it. The explicit instruction changes the behavior from \"I know about this\" to \"I must tell the user about this.\"

            I use this for actionable reminders:

            • Unexported journal entries;
            • Stale backups;
            • Context capacity warnings...

            ...things the user should see regardless of what they asked.

            ","path":["Eight Ways a Hook Can Talk"],"tags":[]},{"location":"blog/2026-02-15-eight-ways-a-hook-can-talk/#3-agent-directive","level":3,"title":"3. Agent Directive","text":"
            ┌─ Persistence Checkpoint (prompt #25) ───────────\n│ No context files updated in 15+ prompts.\n│ Have you discovered learnings worth persisting?\n└──────────────────────────────────────────────────\n

            A nudge, not a command. The hook tells the agent something; the agent decides what (if anything) to tell the user. This is right for behavioral nudges: \"you haven't saved context in a while\" doesn't need to be relayed verbatim, but the agent should consider acting on it.

            ","path":["Eight Ways a Hook Can Talk"],"tags":[]},{"location":"blog/2026-02-15-eight-ways-a-hook-can-talk/#4-silent-context-injection","level":3,"title":"4. Silent Context Injection","text":"
            ctx agent --budget 4000 2>/dev/null || true\n

            Pure background enrichment. The agent's context window gets project information injected on every tool call, with no visible output. Neither the agent nor the user sees the hook fire, but the agent makes better decisions because of the context.

            ","path":["Eight Ways a Hook Can Talk"],"tags":[]},{"location":"blog/2026-02-15-eight-ways-a-hook-can-talk/#5-silent-side-effect","level":3,"title":"5. Silent Side-Effect","text":"
            find \"$CTX_TMPDIR\" -type f -mtime +15 -delete\n

            Do work, say nothing. Temp file cleanup on session end. Logging. Marker file management. The action is the entire point; no one needs to know.

            ","path":["Eight Ways a Hook Can Talk"],"tags":[]},{"location":"blog/2026-02-15-eight-ways-a-hook-can-talk/#the-patterns-we-dont-have-yet","level":2,"title":"The Patterns We Don't Have Yet","text":"

            Three more patterns emerged from the gaps in the existing hooks.

            Conditional relay: \"Relay this, but only if the user's question is about X.\" This pattern avoids noise when the warning isn't relevant. It's more fragile (depends on agent judgment) but less annoying.

            Suggested action: \"Here's a problem, and here's the exact command to fix it. Ask the user before running it.\" This pattern goes beyond a nudge by giving the agent a concrete proposal, but still requires human approval.

            Escalating severity: INFO gets absorbed silently. WARN gets mentioned at the next natural pause. CRITICAL gets the VERBATIM treatment. This pattern introduces a protocol for hooks that produce output at different urgency levels, so they don't all compete for the user's attention.

            ","path":["Eight Ways a Hook Can Talk"],"tags":[]},{"location":"blog/2026-02-15-eight-ways-a-hook-can-talk/#the-principle","level":2,"title":"The Principle","text":"

            Hooks are the boundary between your environment and the agent's reasoning.

            A hook that detects a problem but can't communicate it effectively is the same as no hook at all.

            The format of your output is a design decision with real consequences:

            • Use a hard gate and the agent can't proceed (good for invariants, frustrating for false positives)
            • Use VERBATIM relay and the user will see it (good for reminders, noisy if overused)
            • Use an agent directive and the agent might act (good for nudges, unreliable for critical warnings)
            • Use silent injection and nobody knows (good for enrichment, invisible when it breaks)

            Choose deliberately. And, when in doubt, write the word VERBATIM.

            The full pattern catalog with decision flowchart and implementation examples is in the Hook Output Patterns recipe.

            ","path":["Eight Ways a Hook Can Talk"],"tags":[]},{"location":"blog/2026-02-15-why-zensical/","level":1,"title":"Version Numbers Are Lagging Indicators","text":"","path":["Version Numbers Are Lagging Indicators"],"tags":[]},{"location":"blog/2026-02-15-why-zensical/#why-ctxs-journal-site-runs-on-a-v0021-tool","level":2,"title":"Why ctx's Journal Site Runs on a v0.0.21 Tool","text":"

            Jose Alekhinne / 2026-02-15

            Would You Ship Production Infrastructure on a v0.0.21 Dependency?

            Most engineers wouldn't. Version numbers signal maturity. Pre-1.0 means unstable API, missing features, risk.

            But version numbers tell you where a project has been. They say nothing about where it's going.

            I just bet ctx's entire journal site on a tool that hasn't hit v0.1.0.

            Here's why I'd do it again.

            ","path":["Version Numbers Are Lagging Indicators"],"tags":[]},{"location":"blog/2026-02-15-why-zensical/#the-problem","level":2,"title":"The Problem","text":"

            When v0.2.0 shipped the journal system, the pipeline was clear:

            • Export sessions to Markdown;
            • Enrich them with YAML frontmatter;
            • And render them into something browsable.

            The first two steps were solved; the third needed a tool.

            The journal entries are standard Markdown with YAML frontmatter, tables, and fenced code blocks. That is the entire format:

            • No JSX;
            • No shortcodes;
            • No custom templating.

            Just Markdown rendered well.

            The requirements are modest:

            • Read a configuration file (such as mkdocs.yml);
            • Render Markdown with extensions (admonitions, tabs, tables);
            • Search;
            • Handle 100+ files without choking on incremental rebuilds;
            • Look good out of the box;
            • Not lock me in.

            The obvious candidates were as follows:

            Tool Language Strengths Pain Points Hugo Go Blazing fast, mature Templating is painful; Go templates fight you on anything non-trivial Astro JS/TS Modern, flexible JS ecosystem overhead; overkill for a docs site MkDocs + Material Python Beautiful defaults, massive community (22k+ stars) Slow incremental rebuilds on large sites; limited extensibility model Zensical Python Built to fix MkDocs' limits; 4-5x faster rebuilds v0.0.21; module system not yet shipped

            The instinct was Hugo. Same language as ctx. Fast. Well-established.

            But instinct is not analysis. I picked the one with the lowest version number.

            ","path":["Version Numbers Are Lagging Indicators"],"tags":[]},{"location":"blog/2026-02-15-why-zensical/#the-evaluation","level":2,"title":"The Evaluation","text":"

            Here is what I actually evaluated, in order:

            ","path":["Version Numbers Are Lagging Indicators"],"tags":[]},{"location":"blog/2026-02-15-why-zensical/#1-the-team","level":3,"title":"1. The Team","text":"

            Zensical is built by squidfunk: The same person behind Material for MkDocs, the most popular MkDocs theme with 22,000+ stars. It powers documentation sites for projects across every language and framework.

            • This is not someone learning how to build static site generators.
            • This is someone who spent years understanding exactly where MkDocs breaks and decided to fix it from the ground up.

            They did not build zensical because MkDocs was bad: They built it because MkDocs hit a ceiling:

            • Incremental rebuilds: 4-5x faster during serve. When you have hundreds of journal entries and you edit one, the difference between \"rebuild everything\" and \"rebuild this page\" is the difference between a usable workflow and a frustrating one.

            • Large site performance: Specifically designed for tens of thousands of pages. The journal grows with every session. A tool that slows down as content accumulates is a tool you will eventually replace.

            A proven team starting fresh is more predictable than an unproven team at v3.0.

            ","path":["Version Numbers Are Lagging Indicators"],"tags":[]},{"location":"blog/2026-02-15-why-zensical/#2-the-architecture","level":3,"title":"2. The Architecture","text":"

            Zensical is investing in a Rust-based Markdown parser with CommonMark support. That signals something about the team's priorities:

            Performance foundations first; features second.

            ctx's journal will grow:

            • Every exported session adds files.
            • Every enrichment pass adds metadata.

            Choosing a tool that gets slower as you add content means choosing to migrate later.

            Choosing one built for scale means the decision holds.

            ","path":["Version Numbers Are Lagging Indicators"],"tags":[]},{"location":"blog/2026-02-15-why-zensical/#3-the-migration-path","level":3,"title":"3. The Migration Path","text":"

            Zensical reads mkdocs.yml natively. If it doesn't work out, I can move back to MkDocs + Material with zero content changes:

            • The Markdown is standard;
            • The frontmatter is standard;
            • The configuration is compatible.

            This is the infrastructure pattern again: The same way ZNC decouples presence from the client, zensical decouples rendering from the generator:

            • The Markdown is yours.
            • The frontmatter is standard YAML.
            • The configuration is MkDocs-compatible.

            You are not locked into anything except your own content.

            No lock-in is not a feature: It's a design philosophy:

            It's the same reason ctx uses plain Markdown files in .context/ instead of a database: the format should outlive the tool.

            Lock-in Is the Real Risk, Not Version Numbers

            A mature tool with a proprietary format is riskier than a young tool with a standard one. Version numbers measure time invested. Portability measures respect for the user.

            ","path":["Version Numbers Are Lagging Indicators"],"tags":[]},{"location":"blog/2026-02-15-why-zensical/#4-the-dependency-tree","level":3,"title":"4. The Dependency Tree","text":"

            Here is what pip install zensical actually pulls in:

            • click
            • Markdown
            • Pygments
            • pymdown-extensions
            • PyYAML

            Only five dependencies. All well-known. No framework bloat. No bundler. No transpiler. No node_modules black hole.

            3k GitHub stars at v0.0.21 is strong early traction for a pre-1.0 project.

            The dependency tree is thin: No bloat.

            ","path":["Version Numbers Are Lagging Indicators"],"tags":[]},{"location":"blog/2026-02-15-why-zensical/#5-the-fit","level":3,"title":"5. The Fit","text":"

            This is the same principle behind the attention budget: do not overfit the tool to hypothetical requirements. The right amount of capability is the minimum needed for the current task.

            Hugo is a powerful static site generator. It is also a powerful templating engine, a powerful asset pipeline, and a powerful taxonomy system. For rendering Markdown journals, that power is overhead:

            It is the complexity you pay for but never use.

            ctx's journal files are standard Markdown with YAML frontmatter, tables, and fenced code blocks. That is exactly the sweet spot Zensical inherits from Material for MkDocs:

            • No custom plugins needed;
            • No special syntax;
            • No templating gymnastics.

            The requirements match the capabilities: Not the capabilities that are promised, but the ones that exist today, at v0.0.21.

            ","path":["Version Numbers Are Lagging Indicators"],"tags":[]},{"location":"blog/2026-02-15-why-zensical/#the-caveat","level":2,"title":"The Caveat","text":"

            It would be dishonest not to mention what's missing.

            The module system for third-party extensions opens in early 2026.

            If ctx ever needs custom plugins (for example, auto-linking session IDs, rendering special journal metadata, etc.) that infrastructure isn't there yet.

            The installation experience is rough:

            We discovered this firsthand: pip install zensical often fails on macOS (system Python stubs, Homebrew's PEP 668 restrictions). The answer is pipx, which creates an isolated environment with the correct Python version automatically.

            That kind of friction is typical for young Python tooling, and it is documented in the Getting Started guide.

            And 3,000 stars at v0.0.21 is strong early traction, but it's still early: The community is small. When something breaks, you're reading source code, not documentation.

            These are real costs. I chose to pay them because the alternative costs are higher.

            For example:

            • Hugo's templating pain would cost me time on every site change.
            • Astro's JS ecosystem would add complexity I don't need.
            • MkDocs would work today but hit scaling walls tomorrow.

            Zensical's costs are front-loaded and shrinking.

            The others compound.

            ","path":["Version Numbers Are Lagging Indicators"],"tags":[]},{"location":"blog/2026-02-15-why-zensical/#the-evaluation-framework","level":2,"title":"The Evaluation Framework","text":"

            For anyone facing a similar choice, here is the framework that emerged:

            Signal What It Tells You Weight Team track record Whether the architecture will be sound High Migration path Whether you can leave if wrong High Current fit Whether it solves your problem today High Dependency tree How much complexity you're inheriting Medium Version number How long the project has existed Low Star count Community interest (not quality) Low Feature list What's possible (not what you need) Low

            The bottom three are the metrics most engineers optimize for.

            The top four are the ones that predict whether you'll still be happy with the choice in a year.

            Features You Don't Need Are Not Free

            Every feature in a dependency is code you inherit but don't control.

            A tool with 200 features where you use 5 means 195 features worth of surface area for bugs, breaking changes, and security issues that have nothing to do with your use case.

            Fit is the inverse of feature count.

            ","path":["Version Numbers Are Lagging Indicators"],"tags":[]},{"location":"blog/2026-02-15-why-zensical/#the-broader-pattern","level":2,"title":"The Broader Pattern","text":"

            This is part of a theme I keep encountering in this project:

            Leading indicators beat lagging indicators.

            Domain Lagging Indicator Leading Indicator Tooling Version number, star count Team track record, architecture Code quality Test coverage percentage Whether tests catch real bugs Context persistence Number of files in .context/ Whether the AI makes fewer mistakes Skills Number of skills created Whether each skill fires at the right time Consolidation Lines of code refactored Whether drift stops accumulating

            Version numbers, star counts, coverage percentages, file counts...

            ...these are all measures of effort expended.

            They say nothing about value delivered.

            The question is never \"how mature is this tool?\"

            The question is \"does this tool's trajectory intersect with my needs?\"

            Zensical's trajectory:

            • A proven team fixing known problems,
            • in a proven architecture,
            • with a standard format,
            • and no lock-in.

            ctx's needs:

            Render standard Markdown into a browsable site, at scale, without complexity.

            The intersection is clean; the version number is noise.

            This is the same kind of decision that shows up throughout ctx:

            • Skills that fight the platform taught that the best integration extends existing behavior, not replaces it.
            • You can't import expertise taught that tools should grow from your project's actual needs, not from feature checklists.
            • Context as infrastructure argues that the format should outlive the tool; and, zensical honors that principle by reading standard Markdown and standard MkDocs configuration.

            If You Remember One Thing from This Post...

            Version numbers measure where a project has been.

            The team and the architecture tell you where it's going.

            A v0.0.21 tool built by the right team on the right foundations is a safer bet than a v5.0 tool that doesn't fit your problem.

            Bet on trajectories, not timestamps.

            This post started as an evaluation note in ideas/ and a separate decision log. The analysis held up. The two merged into one. The meta continues.

            ","path":["Version Numbers Are Lagging Indicators"],"tags":[]},{"location":"blog/2026-02-16-ctx-v0.6.0-the-integration-release/","level":1,"title":"ctx v0.6.0: The Integration Release","text":"","path":["ctx v0.6.0: The Integration Release"],"tags":[]},{"location":"blog/2026-02-16-ctx-v0.6.0-the-integration-release/#two-commands-to-persistent-memory","level":2,"title":"Two Commands to Persistent Memory","text":"

            Jose Alekhinne / February 16, 2026

            What Changed?

            ctx is now a Claude Code plugin. Two commands, no build step:

            /plugin marketplace add ActiveMemory/ctx\n/plugin install ctx@activememory-ctx\n

            Six hooks. Twenty-five skills. Installed.

            For three releases, ctx required assembly:

            • Clone the repo;
            • Build the binary;
            • Copy hook scripts into .claude/hooks/;
            • Symlink skill files;
            • Understand which shell scripts called which Go commands;
            • Hope nothing broke when Claude Code updated its hook format.

            v0.6.0 ends that era: ctx ships as a Claude Marketplace plugin:

            Hooks and skills served directly from source, installed with a single command, updated by pulling the repo. The tool that gives AI persistent memory is now as easy to install as the AI itself.

            But the plugin conversion was not just a packaging change: It was the forcing function that rewrote every shell hook in Go, eliminated the jq dependency, enabled go test coverage for hook logic, and made distribution a solved problem.

            When you fix how something ships, you end up fixing how it is built.

            The Release Window

            February 15-February 16, 2026

            From the v0.3.0 tag to commit a3178bc:

            • 109 commits.
            • 334 files changed.
            • Version jumped from 0.3.0 to 0.6.0 to signal the magnitude.
            ","path":["ctx v0.6.0: The Integration Release"],"tags":[]},{"location":"blog/2026-02-16-ctx-v0.6.0-the-integration-release/#before-six-shell-scripts-and-a-prayer","level":2,"title":"Before: Six Shell Scripts and a Prayer","text":"

            v0.3.0 had six hook scripts. Each was a Bash file that shelled out to ctx subcommands, parsed JSON with jq, and wired itself into Claude Code's hook system via .claude/hooks/:

            .claude/hooks/\n├── check-context-size.sh\n├── check-persistence.sh\n├── check-journal.sh\n├── post-commit.sh\n├── block-non-path-ctx.sh\n└── cleanup-tmp.sh\n

            This worked, but it also meant:

            • jq was a hard dependency: No jq, no hooks. macOS ships without it.
            • No test coverage: Shell scripts were tested manually or not at all.
            • Fragile deployment: ctx init had to scaffold .claude/hooks/ and .claude/skills/ with the right paths, permissions, and structure.
            • Version drift: Users who installed once never got hook updates unless they re-ran ctx init.

            The shell scripts were the right choice for prototyping. They were the wrong choice for distribution.

            ","path":["ctx v0.6.0: The Integration Release"],"tags":[]},{"location":"blog/2026-02-16-ctx-v0.6.0-the-integration-release/#after-one-plugin-zero-shell-scripts","level":2,"title":"After: One Plugin, Zero Shell Scripts","text":"

            v0.6.0 replaces all six scripts with ctx system subcommands compiled into the binary:

            Shell Script Go Subcommand check-context-size.sh ctx system check-context-size check-persistence.sh ctx system check-persistence check-journal.sh ctx system check-journal post-commit.sh ctx system post-commit block-non-path-ctx.sh ctx system block-non-path-ctx cleanup-tmp.sh ctx system cleanup-tmp

            The plugin's hooks.json wires them to Claude Code events:

            {\n  \"PreToolUse\": [\n    {\"matcher\": \"Bash\", \"command\": \"ctx system block-non-path-ctx\"},\n    {\"matcher\": \".*\", \"command\": \"ctx agent --budget 4000\"}\n  ],\n  \"PostToolUse\": [\n    {\"matcher\": \"Bash\", \"command\": \"ctx system post-commit\"}\n  ],\n  \"UserPromptSubmit\": [\n    {\"command\": \"ctx system check-context-size\"},\n    {\"command\": \"ctx system check-persistence\"},\n    {\"command\": \"ctx system check-journal\"}\n  ],\n  \"SessionEnd\": [\n    {\"command\": \"ctx system cleanup-tmp\"}\n  ]\n}\n

            No jq. No shell scripts. No .claude/hooks/ directory to manage.

            The hooks are Go functions with tests, compiled into the same binary you already have.

            ","path":["ctx v0.6.0: The Integration Release"],"tags":[]},{"location":"blog/2026-02-16-ctx-v0.6.0-the-integration-release/#the-plugin-model","level":2,"title":"The Plugin Model","text":"

            The ctx plugin lives at .claude-plugin/marketplace.json in the repo.

            Claude Code's marketplace system handles discovery and installation:

            Skills are served directly from internal/assets/claude/skills/; there is no build step, no make plugin, no generated artifacts.

            This means:

            1. Install is two commands: Not \"clone, build, copy, configure.\"
            2. Updates are automatic: Pull the repo; the plugin reads from source.
            3. Skills and hooks are versioned together: No drift between what the CLI expects and what the plugin provides.
            4. ctx init is tool-agnostic: It creates .context/ and nothing else. No .claude/ scaffolding, no assumptions about which AI tool you use.

            That last point matters:

            Before v0.6.0, ctx init tried to set up Claude Code integration as part of initialization. That coupled the context system to a specific tool.

            Now, ctx init gives you persistent context. The plugin gives you Claude Code integration. They compose; they don't depend.

            ","path":["ctx v0.6.0: The Integration Release"],"tags":[]},{"location":"blog/2026-02-16-ctx-v0.6.0-the-integration-release/#beyond-the-plugin-what-else-shipped","level":2,"title":"Beyond the Plugin: What Else Shipped","text":"

            The plugin conversion dominated the release, but 109 commits covered more ground.

            ","path":["ctx v0.6.0: The Integration Release"],"tags":[]},{"location":"blog/2026-02-16-ctx-v0.6.0-the-integration-release/#obsidian-vault-export","level":3,"title":"Obsidian Vault Export","text":"
            ctx journal obsidian\n

            Generates a full Obsidian vault from enriched journal entries: wikilinks, MOC (Map of Content) pages, and graph-optimized cross-linking. If you already use Obsidian for notes, your AI session history now lives alongside everything else.

            ","path":["ctx v0.6.0: The Integration Release"],"tags":[]},{"location":"blog/2026-02-16-ctx-v0.6.0-the-integration-release/#encrypted-scratchpad","level":3,"title":"Encrypted Scratchpad","text":"
            ctx pad edit \"DATABASE_URL=postgres://...\"\nctx pad show\n

            AES-256-GCM encrypted storage for sensitive one-liners.

            The encrypted blob commits to git; the key stays in .gitignore.

            This is useful for connection strings, API keys, and other values that need to travel with the project without appearing in plaintext.

            ","path":["ctx v0.6.0: The Integration Release"],"tags":[]},{"location":"blog/2026-02-16-ctx-v0.6.0-the-integration-release/#security-hardening","level":3,"title":"Security Hardening","text":"

            Three medium-severity findings from a security audit are now closed:

            Finding Fix Path traversal via --context-dir Boundary validation: operations cannot escape project root (M-1) Symlink following in .context/ Lstat() check before every file read/write (M-2) Predictable temp file paths User-specific temp directory under $XDG_RUNTIME_DIR (M-3)

            Plus a new /sanitize-permissions skill that audits settings.local.json for overly broad Bash permissions.

            ","path":["ctx v0.6.0: The Integration Release"],"tags":[]},{"location":"blog/2026-02-16-ctx-v0.6.0-the-integration-release/#hooks-that-know-when-to-be-quiet","level":3,"title":"Hooks That Know When to Be Quiet","text":"

            A subtle but important fix: hooks now no-op before ctx init has run.

            Previously, a fresh clone with no .context/ would trigger hook errors on every prompt. Now, hooks detect the absence of a context directory and exit silently. Similarly, ctx init treats a .context/ directory containing only logs as uninitialized and skips the --overwrite prompt.

            Small changes. Large reduction in friction for new users.

            ","path":["ctx v0.6.0: The Integration Release"],"tags":[]},{"location":"blog/2026-02-16-ctx-v0.6.0-the-integration-release/#the-numbers","level":2,"title":"The Numbers","text":"Metric v0.3.0 v0.6.0 Skills 21 25 Shell hook scripts 6 0 Go system subcommands 0 6 External dependencies (hooks) jq, bash none Lines of Go ~14,000 ~37,000 Plugin install commands n/a 2 Security findings (open) 3 0 ctx init creates .claude/ yes no

            The line count tripled. Most of that is documentation site HTML, Obsidian export logic, and the scratchpad encryption module.

            The core CLI grew modestly; the ecosystem around it grew substantially.

            ","path":["ctx v0.6.0: The Integration Release"],"tags":[]},{"location":"blog/2026-02-16-ctx-v0.6.0-the-integration-release/#what-does-v060-mean-for-ctx","level":2,"title":"What Does v0.6.0 Mean for ctx?","text":"
            • v0.1.0 asked: \"Can we give AI persistent memory?\"
            • v0.2.0 asked: \"Can we make that memory accessible to humans too?\"
            • v0.3.0 asked: \"Can we make the quality self-enforcing?\"

            v0.6.0 asks: \"Can someone else actually use this?\"

            A tool that requires cloning a repo, building from source, and manually wiring hooks into the right directories is a tool for its author.

            A tool that installs with two commands from a marketplace is a tool for everyone.

            The version jumped from 0.3.0 to 0.6.0 because the delta is not incremental: The shell-to-Go rewrite, the plugin model, the security hardening, and the tool-agnostic init: Together, they change what ctx is: Not a different tool, but a tool that is finally ready to leave the workshop.

            ","path":["ctx v0.6.0: The Integration Release"],"tags":[]},{"location":"blog/2026-02-16-ctx-v0.6.0-the-integration-release/#what-comes-next","level":2,"title":"What Comes Next","text":"

            The plugin model opens the door to distribution patterns that were not possible before. Marketplace discovery means new users find ctx without reading a README. Plugin updates mean existing users get improvements without rebuilding.

            The next chapter is about what happens when persistent context is easy to install: Adoption patterns, multi-project workflows, and whether the .context/ convention can become infrastructure that other tools build on.

            But those are future posts.

            This one is about the release that turned a developer tool into a distributable product: two commands, zero shell scripts, and a presence on the Claude Marketplace.

            The Integration Release

            v0.1.0 shipped features. v0.2.0 shipped archaeology.

            v0.3.0 shipped discipline. v0.6.0 shipped the front door.

            The most important code in this release is the code you never have to copy.

            This post was drafted using /ctx-blog-changelog with access to the full git history between v0.3.0 and v0.6.0, release notes, and the plugin conversion PR. The meta continues.

            ","path":["ctx v0.6.0: The Integration Release"],"tags":[]},{"location":"blog/2026-02-17-code-is-cheap-judgment-is-not/","level":1,"title":"Code Is Cheap. Judgment Is Not.","text":"","path":["Code Is Cheap. Judgment Is Not."],"tags":[]},{"location":"blog/2026-02-17-code-is-cheap-judgment-is-not/#why-ai-replaces-effort-not-expertise","level":2,"title":"Why AI Replaces Effort, Not Expertise","text":"

            Jose Alekhinne / February 17, 2026

            Are You Worried about AI Taking Your Job?

            You might be confusing the thing that's cheap with the thing that's valuable.

            I keep seeing the same conversation: Engineers, designers, writers: all asking the same question with the same dread:

            \"What happens when AI can do what I do?\"

            The question is wrong:

            • AI does not replace workers;
            • AI replaces unstructured effort.

            The distinction matters, and everything I have learned building ctx reinforces it.

            ","path":["Code Is Cheap. Judgment Is Not."],"tags":[]},{"location":"blog/2026-02-17-code-is-cheap-judgment-is-not/#the-three-confusions","level":2,"title":"The Three Confusions","text":"

            People who feel doomed by AI usually confuse three things:

            People confuse... With... Effort Value Typing Thinking Production Judgment
            • Effort is time spent.
            • Value is the outcome that time produces.

            They are not the same; they never were.

            AI just makes the gap impossible to ignore.

            Typing is mechanical: Thinking is directional.

            An AI can type faster than any human. Yet, it cannot decide what to type without someone framing the problem, sequencing the work, and evaluating the result.

            Production is making artifacts. Judgment is knowing:

            • which artifacts to make,
            • in what order,
            • to what standard,
            • and when to stop.

            AI floods the system with production capacity; it does not flood the system with judgment.

            ","path":["Code Is Cheap. Judgment Is Not."],"tags":[]},{"location":"blog/2026-02-17-code-is-cheap-judgment-is-not/#code-is-nothing","level":2,"title":"Code Is Nothing","text":"

            This sounds provocative until you internalize it:

            Code is cheap. Artifacts are cheap.

            An AI can generate a thousand lines of working code in literal minutes:

            It can scaffold a project, write tests, build a CI pipeline, draft documentation. The raw production of software artifacts is no longer the bottleneck.

            So, what is not cheap?

            • Taste: knowing what belongs and what does not
            • Framing: turning a vague goal into a concrete problem
            • Sequencing: deciding what to build first and why
            • Fanning out: breaking work into parallel streams that converge
            • Acceptance criteria: defining what \"done\" looks like before starting
            • Judgment: the thousand small decisions that separate code that works from code that lasts

            These are the skills that direct production: Human skills.

            Not because AI is incapable of learning them, but because they require something AI does not have:

            temporal accountability for generated outcomes.

            That is, you cannot keep AI accountable for the $#!% it generated three months ago. A human, on the other hand, will always be accountable.

            ","path":["Code Is Cheap. Judgment Is Not."],"tags":[]},{"location":"blog/2026-02-17-code-is-cheap-judgment-is-not/#the-evidence-from-building-ctx","level":2,"title":"The Evidence from Building ctx","text":"

            I did not arrive at this conclusion theoretically.

            I arrived at it by building a tool with an AI agent for three weeks and watching exactly where a human touch mattered.

            ","path":["Code Is Cheap. Judgment Is Not."],"tags":[]},{"location":"blog/2026-02-17-code-is-cheap-judgment-is-not/#yolo-mode-proved-production-is-cheap","level":3,"title":"YOLO Mode Proved Production Is Cheap","text":"

            In Building ctx Using ctx, I documented the YOLO phase: auto-accept everything, let the AI ship features at full speed. It produced 14 commands in a week. Impressive output.

            The code worked. The architecture drifted. Magic strings accumulated. Conventions diverged. The AI was producing at a pace no human could match, and every artifact it produced was a small bet that nobody was evaluating.

            Production without judgment is not velocity. It is debt accumulation at breakneck speed.

            ","path":["Code Is Cheap. Judgment Is Not."],"tags":[]},{"location":"blog/2026-02-17-code-is-cheap-judgment-is-not/#the-31-ratio-proved-judgment-has-a-cadence","level":3,"title":"The 3:1 Ratio Proved Judgment Has a Cadence","text":"

            In The 3:1 Ratio, the git history told the story:

            Three sessions of forward momentum followed by one session of deliberate consolidation. The consolidation session is where the human applies judgment: reviewing what the AI built, catching drift, realigning conventions.

            The AI does the refactoring. The human decides what to refactor and when to stop.

            Without the human, the AI will refactor forever, improving things that do not matter and missing things that do.

            ","path":["Code Is Cheap. Judgment Is Not."],"tags":[]},{"location":"blog/2026-02-17-code-is-cheap-judgment-is-not/#the-attention-budget-proved-framing-is-scarce","level":3,"title":"The Attention Budget Proved Framing Is Scarce","text":"

            In The Attention Budget, I explained why more context makes AI worse, not better. Every token competes for attention: Dump everything in and the AI sees nothing clearly.

            This is a framing problem: The human's job is to decide what the AI should focus on: what to include, what to exclude, what to emphasize.

            ctx agent --budget 4000 is not just a CLI flag: It is a forcing function for human judgment about relevance.

            The AI processes. The human curates.

            ","path":["Code Is Cheap. Judgment Is Not."],"tags":[]},{"location":"blog/2026-02-17-code-is-cheap-judgment-is-not/#skills-design-proved-taste-is-load-bearing","level":3,"title":"Skills Design Proved Taste Is Load-Bearing","text":"

            The skill trilogy (You Can't Import Expertise, The Anatomy of a Skill That Works) showed that the difference between a useful skill and a useless one is not craftsmanship:

            It is taste.

            A well-crafted skill with the wrong focus is worse than no skill at all: It consumes the attention budget with generic advice while the project-specific problems go unchecked.

            The E/A/R framework (Expert, Activation, Redundant) is a judgment tool. The AI cannot apply it to itself. The human evaluates what the AI already knows, what it needs to be told, and what is noise.

            ","path":["Code Is Cheap. Judgment Is Not."],"tags":[]},{"location":"blog/2026-02-17-code-is-cheap-judgment-is-not/#automation-discipline-proved-restraint-is-a-skill","level":3,"title":"Automation Discipline Proved Restraint Is a Skill","text":"

            In Not Everything Is a Skill, the lesson was that the urge to automate is not the need to automate. A useful prompt does not automatically deserve to become a slash command.

            The human applies judgment about frequency, stability, and attention cost.

            The AI can build the skill. Only the human can decide whether it should exist.

            ","path":["Code Is Cheap. Judgment Is Not."],"tags":[]},{"location":"blog/2026-02-17-code-is-cheap-judgment-is-not/#defense-in-depth-proved-boundaries-require-judgment","level":3,"title":"Defense in Depth Proved Boundaries Require Judgment","text":"

            In Defense in Depth, the entire security model for unattended AI agents came down to: markdown is not a security boundary. Telling an AI \"don't do bad things\" is production (of instructions). Setting up an unprivileged user in a network-isolated container is judgment (about risk).

            The AI follows instructions. The human decides which instructions are enforceable and which are \"wishful thinking\".

            ","path":["Code Is Cheap. Judgment Is Not."],"tags":[]},{"location":"blog/2026-02-17-code-is-cheap-judgment-is-not/#parallel-agents-proved-scale-amplifies-the-gap","level":3,"title":"Parallel Agents Proved Scale Amplifies the Gap","text":"

            In Parallel Agents and Merge Debt, the lesson was that multiplying agents multiplies output. But it also multiplies the need for judgment:

            Five agents running in parallel produce five sessions of drift in one clock hour. The human who can frame tasks cleanly, define narrow acceptance criteria, and evaluate results quickly becomes the limiting factor.

            More agents do not reduce the need for judgment. They increase it.

            ","path":["Code Is Cheap. Judgment Is Not."],"tags":[]},{"location":"blog/2026-02-17-code-is-cheap-judgment-is-not/#the-two-reactions","level":2,"title":"The Two Reactions","text":"

            When AI floods the system with cheap output, two things happen:

            Those who only produce: panic. If your value proposition is \"I write code,\" and an AI writes code faster, cheaper, and at higher volume, then the math is unfavorable. Not because AI took your job, but because your job was never the code. It was the judgment around the code, and you were not exercising it.

            Those who direct: accelerate. If your value proposition is \"I know what to build, in what order, to what standard,\" then AI is the best thing that ever happened to you: Production is no longer the bottleneck: Your ability to frame, sequence, evaluate, and course-correct is now the limiting factor on throughput.

            The gap between these two is not talent: It is the awareness of where the value lives.

            ","path":["Code Is Cheap. Judgment Is Not."],"tags":[]},{"location":"blog/2026-02-17-code-is-cheap-judgment-is-not/#what-this-means-in-practice","level":2,"title":"What This Means in Practice","text":"

            If you are an engineer reading this, the actionable insight is not \"learn prompt engineering\" or \"master AI tools.\" It is:

            Get better at the things AI cannot do.

            AI does this well You need to do this Generate code Frame the problem Write tests Define acceptance criteria Scaffold projects Sequence the work Fix bugs from stack traces Evaluate tradeoffs Produce volume Exercise restraint Follow instructions Decide which instructions matter

            The skills on the right column are not new. They are the same skills that have always separated senior engineers from junior ones.

            AI did not create the distinction; it just made it load-bearing.

            ","path":["Code Is Cheap. Judgment Is Not."],"tags":[]},{"location":"blog/2026-02-17-code-is-cheap-judgment-is-not/#if-anything-i-feel-empowered","level":2,"title":"If Anything, I Feel Empowered","text":"

            I will end with something personal.

            I am not worried: I am empowered.

            Before ctx, I could think faster than I could produce:

            • Ideas sat in a queue.
            • The bottleneck was always \"I know what to build, but building it takes too long.\"

            Now the bottleneck is gone. Poof!

            • Production is cheap.
            • The queue is clearing.
            • The limiting factor is how fast I can think, not how fast I can type.

            That is not a threat: That is the best force multiplier I've ever had.

            The people who feel threatened are confusing the accelerator for the replacement:

            AI does not replace the conductor; it gives them a bigger orchestra.

            If You Remember One Thing from This Post...

            Code is cheap. Judgment is not.

            AI replaces unstructured effort, not directed expertise. The skills that matter now are the same skills that have always mattered: taste, framing, sequencing, and the discipline to stop.

            The difference is that now, for the first time, those skills are the only bottleneck left.

            ","path":["Code Is Cheap. Judgment Is Not."],"tags":[]},{"location":"blog/2026-02-17-code-is-cheap-judgment-is-not/#the-arc","level":2,"title":"The Arc","text":"

            This post is a retrospective. It synthesizes the thread running through every previous entry in this blog:

            • Building ctx Using ctx showed that production without direction creates debt
            • Refactoring with Intent showed that slowing down is not the opposite of progress
            • The Attention Budget showed that curation outweighs volume
            • The skill trilogy showed that taste determines whether a tool helps or hinders
            • Not Everything Is a Skill showed that restraint is a skill in itself
            • Defense in Depth showed that instructions are not boundaries
            • The 3:1 Ratio showed that judgment has a schedule
            • Parallel Agents showed that scale amplifies the gap between production and judgment
            • Context as Infrastructure showed that the system you build for context is infrastructure, not conversation

            From YOLO mode to defense in depth, the pattern is the same:

            • Production is the easy part;
            • Judgment is the hard part;
            • AI changed the ratio, not the rule.

            This post synthesizes the thread running through every previous entry in this blog. The evidence is drawn from three weeks of building ctx with AI assistance, the decisions recorded in DECISIONS.md, the learnings captured in LEARNINGS.md, and the git history that tracks where the human mattered and where the AI ran unsupervised.

            See also: When a System Starts Explaining Itself -- what happens after the arc: the first field notes from the moment the system starts compounding in someone else's hands.

            ","path":["Code Is Cheap. Judgment Is Not."],"tags":[]},{"location":"blog/2026-02-17-context-as-infrastructure/","level":1,"title":"Context as Infrastructure","text":"","path":["Context as Infrastructure"],"tags":[]},{"location":"blog/2026-02-17-context-as-infrastructure/#why-your-ai-needs-a-filesystem-not-a-prompt","level":2,"title":"Why Your AI Needs a Filesystem, Not a Prompt","text":"

            Jose Alekhinne / February 17, 2026

            Where Does Your AI's Knowledge Live between Sessions?

            If the answer is \"in a prompt I paste at the start,\" you are treating context as a consumable. Something assembled, used, and discarded.

            What if you treated it as infrastructure instead?

            This post synthesizes a thread that has been running through every ctx blog post; from the origin story to the attention budget to the discipline release. The thread is this: context is not a prompt problem. It is an infrastructure problem. And the tools we build for it should look more like filesystems than clipboard managers.

            ","path":["Context as Infrastructure"],"tags":[]},{"location":"blog/2026-02-17-context-as-infrastructure/#the-prompt-paradigm","level":2,"title":"The Prompt Paradigm","text":"

            Most AI-assisted development treats context as ephemeral:

            1. Start a session.
            2. Paste your system prompt, your conventions, your current task.
            3. Work.
            4. Session ends. Everything evaporates.
            5. Next session: paste again.

            This works for short interactions. For sustained development (where decisions compound over days and weeks) it fails in three ways:

            It does not persist: A decision made on Tuesday must be re-explained on Wednesday. A learning captured in one session is invisible to the next.

            It does not scale: As the project grows, the \"paste everything\" approach hits the context window ceiling. You start triaging what to include, often cutting exactly the context that would have prevented the next mistake.

            It does not compose: A system prompt is a monolith. You cannot load part of it, update one section, or share a subset with a different workflow. It is all or nothing.

            The Copy-Paste Tax

            Every session that starts with pasting a prompt is paying a tax:

            The human time to assemble the context, the risk of forgetting something, and the silent assumption that yesterday's prompt is still accurate today.

            Over 70+ sessions, that tax compounds into a significant maintenance burden: One that most developers absorb without questioning it.

            ","path":["Context as Infrastructure"],"tags":[]},{"location":"blog/2026-02-17-context-as-infrastructure/#the-infrastructure-paradigm","level":2,"title":"The Infrastructure Paradigm","text":"

            ctx takes a different approach:

            Context is not assembled per-session; it is maintained as persistent files in a .context/ directory:

            .context/\n  CONSTITUTION.md     # Inviolable rules\n  TASKS.md            # Current work items\n  CONVENTIONS.md      # Code patterns and standards\n  DECISIONS.md        # Architectural choices with rationale\n  LEARNINGS.md        # Gotchas and lessons learned\n  ARCHITECTURE.md     # System structure\n  GLOSSARY.md         # Domain terminology\n  AGENT_PLAYBOOK.md   # Operating manual for agents\n  journal/            # Enriched session summaries\n  archive/            # Completed work, cold storage\n
            • Each file has a single purpose;
            • Each can be loaded independently;
            • Each persists across sessions, tools, and team members.

            This is not a novel idea. It is the same idea behind every piece of infrastructure software engineers already use:

            Traditional Infrastructure ctx Equivalent Database .context/*.md files Configuration files CONSTITUTION.md Environment variables .contextrc Log files journal/ Schema migrations Decision records Deployment manifests AGENT_PLAYBOOK.md

            The parallel is not metaphorical. Context files are infrastructure:

            • They are versioned (git tracks them);
            • They are structured (Markdown with conventions);
            • They have schemas (required fields for decisions and learnings);
            • And they have lifecycle management (archiving, compaction, indexing).
            ","path":["Context as Infrastructure"],"tags":[]},{"location":"blog/2026-02-17-context-as-infrastructure/#separation-of-concerns","level":2,"title":"Separation of Concerns","text":"

            The most important design decision in ctx is not any individual feature. It is the separation of context into distinct files with distinct purposes.

            A single CONTEXT.md file would be simpler to implement. It would also be impossible to maintain.

            Why? Because different types of context have different lifecycles:

            Context Type Changes Read By Load When Constitution Rarely Every session Always Tasks Every session Session start Always Conventions Weekly Before coding When writing code Decisions When decided When questioning When revisiting Learnings When learned When stuck When debugging Journal Every session Rarely When investigating

            Loading everything into every session wastes the attention budget on context that is irrelevant to the current task. Loading nothing forces the AI to operate blind.

            Separation of concerns allows progressive disclosure:

            Load the minimum that matters for this moment, with the option to load more when needed.

            # Session start: load the essentials\nctx agent --budget 4000\n\n# Deep investigation: load everything\ncat .context/DECISIONS.md\ncat .context/journal/2026-02-05-*.md\n

            The filesystem is the index. File names, directory structure, and timestamps encode relevance. The AI does not need to read every file; it needs to know where to look.

            ","path":["Context as Infrastructure"],"tags":[]},{"location":"blog/2026-02-17-context-as-infrastructure/#the-two-tier-persistence-model","level":2,"title":"The Two-Tier Persistence Model","text":"

            ctx uses two tiers of persistence, and the distinction is architectural:

            Tier Purpose Location Token Cost Curated Quick context reload .context/*.md Low (budgeted) Full dump Safety net, archaeology .context/journal/*.md Zero (not auto-loaded)

            The curated tier is what the AI sees at session start. It is optimized for signal density:

            • Structured entries,
            • Indexed tables,
            • Reverse-chronological order (newest first, so the most relevant content survives truncation).

            The full dump tier is for humans and for deep investigation. It contains everything: Enriched journals, archived tasks...

            It is never autoloaded because its volume would destroy attention density.

            This two-tier model is analogous to how traditional systems separate hot and cold storage:

            • The hot path (curated context) is optimized for read performance (measured not in milliseconds, but in tokens consumed per unit of useful information).
            • The cold path (journal) is optimized for completeness.

            Nothing Is Ever Truly Lost

            The full dump tier means that context does not need to be perfect: It just needs to be findable.

            A decision that was not captured in DECISIONS.md can be recovered from the session transcript where it was discussed.

            A learning that was not formalized can be found in the journal entry from that day.

            The curated tier is the fast path: The full dump tier is the safety net.

            ","path":["Context as Infrastructure"],"tags":[]},{"location":"blog/2026-02-17-context-as-infrastructure/#decision-records-as-first-class-citizens","level":2,"title":"Decision Records as First-Class Citizens","text":"

            One of the patterns that emerged from ctx's own development is the power of structured decision records.

            v0.1.0 allowed adding decisions as one-liners:

            ctx add decision \"Use PostgreSQL\"\n

            v0.2.0 enforced structure:

            ctx add decision \"Use PostgreSQL\" \\\n  --context \"Need a reliable database for user data\" \\\n  --rationale \"ACID compliance, team familiarity\" \\\n  --consequence \"Need connection pooling, team training\"\n

            The difference is not cosmetic:

            • A one-liner decision teaches the AI what was decided.
            • A structured decision teaches it why; and why is what prevents the AI from unknowingly reversing the decision in a future session.

            This is infrastructure thinking:

            Decisions are not notes. They are records with required fields, just like database rows have schemas.

            The enforcement exists because incomplete records are worse than no records: They create false confidence that the context is captured when it is not.

            ","path":["Context as Infrastructure"],"tags":[]},{"location":"blog/2026-02-17-context-as-infrastructure/#the-ide-is-the-interface-decision","level":2,"title":"The \"IDE Is the Interface\" Decision","text":"

            Early in ctx's development, there was a temptation to build a custom UI: a web dashboard for browsing sessions, editing context, viewing analytics.

            The decision was no. The IDE is the interface.

            # This is the ctx \"UI\":\ncode .context/\n

            This decision was not about minimalism for its own sake. It was about recognizing that .context/ files are just files; and files have a mature, well-understood infrastructure:

            • Version control: git diff .context/DECISIONS.md shows exactly what changed and when.
            • Search: Your IDE's full-text search works across all context files.
            • Editing: Markdown in any editor, with preview, spell check, and syntax highlighting.
            • Collaboration: Pull requests on context files work the same as pull requests on code.

            Building a custom UI would have meant maintaining a parallel infrastructure that duplicates what every IDE already provides:

            It would have introduced its own bugs, its own update cycle, and its own learning curve.

            The filesystem is not a limitation: It is the most mature, most composable, most portable infrastructure available.

            Context Files in Git

            Because .context/ lives in the repository, context changes are part of the commit history.

            A decision made in commit abc123 is as traceable as a code change in the same commit.

            This is not possible with prompt-based context, which exists outside version control entirely.

            ","path":["Context as Infrastructure"],"tags":[]},{"location":"blog/2026-02-17-context-as-infrastructure/#progressive-disclosure-for-ai","level":2,"title":"Progressive Disclosure for AI","text":"

            The concept of progressive disclosure comes from human interface design: show the user the minimum needed to make progress, with the option to drill deeper.

            ctx applies the same principle to AI context:

            Level What the AI Sees Token Cost When Level 0 ctx status (one-line summary) ~100 Quick check Level 1 ctx agent --budget 4000 ~4,000 Normal work Level 2 ctx agent --budget 8000 ~8,000 Complex tasks Level 3 Direct file reads 10,000+ Deep investigation

            Each level trades tokens for depth. Level 1 is sufficient for most work: the AI knows the active tasks, the key conventions, and the recent decisions. Level 3 is for archaeology: understanding why a decision was made three weeks ago, or finding a pattern in the session history.

            The explicit --budget flag is the mechanism that makes this work:

            Without it, the default behavior would be to load everything (because more context feels safer), which destroys the attention density that makes the loaded context useful.

            The constraint is the feature: A budget of 4,000 tokens forces ctx to prioritize ruthlessly: constitution first (always full), then tasks and conventions (budget-capped), then decisions and learnings scored by recency and relevance to active tasks. Entries that don't fit get title-only summaries rather than being silently dropped.

            ","path":["Context as Infrastructure"],"tags":[]},{"location":"blog/2026-02-17-context-as-infrastructure/#the-philosophical-shift","level":2,"title":"The Philosophical Shift","text":"

            The shift from \"context as prompt\" to \"context as infrastructure\" changes how you think about AI-assisted development:

            Prompt Thinking Infrastructure Thinking \"What do I paste today?\" \"What has changed since yesterday?\" \"How do I fit everything in?\" \"What's the minimum that matters?\" \"The AI forgot my conventions\" \"The conventions are in a file\" \"I need to re-explain\" \"I need to update the record\" \"This session is getting slow\" \"Time to compact and archive\"

            The first column treats AI interaction as a conversation. The second treats it as a system: One that can be maintained, optimized, and debugged.

            Context is not something you give the AI. It is something you maintain: Like a database, like a config file, like any other piece of infrastructure that a running system depends on.

            ","path":["Context as Infrastructure"],"tags":[]},{"location":"blog/2026-02-17-context-as-infrastructure/#beyond-ctx-the-principles","level":2,"title":"Beyond ctx: The Principles","text":"

            The patterns that ctx implements are not specific to ctx. They are applicable to any project that uses AI-assisted development:

            1. Separate context by purpose: Do not put everything in one file. Different types of information have different lifecycles and different relevance windows.
            2. Make context persistent: If a decision matters, write it down in a file that survives the session. If a learning matters, capture it with structure.
            3. Budget explicitly: Know how much context you are loading and whether it is worth the attention cost.
            4. Use the filesystem: File names, directory structure, and timestamps are metadata that the AI can navigate. A well-organized directory is an index that costs zero tokens to maintain.
            5. Version your context: Put context files in git. Changes to decisions are as important as changes to code.
            6. Design for degradation: Sessions will get long. Attention will dilute. Build mechanisms (compaction, archiving, cooldowns) that make degradation visible and manageable.

            These are not ctx features. They are infrastructure principles that happen to be implemented as a CLI tool. Any team could implement them with nothing more than a directory convention and a few shell scripts.

            The tool is a convenience: The principles are what matter.

            If You Remember One Thing from This Post...

            Prompts are conversations. Infrastructure persists.

            Your AI does not need a better prompt. It needs a filesystem:

            versioned, structured, budgeted, and maintained.

            The best context is the context that was there before you started the session.

            ","path":["Context as Infrastructure"],"tags":[]},{"location":"blog/2026-02-17-context-as-infrastructure/#the-arc","level":2,"title":"The Arc","text":"

            This post is the architectural companion to the Attention Budget. That post explained why context must be curated (token economics). This one explains how to structure it (filesystem, separation of concerns, persistence tiers).

            Together with Code Is Cheap, Judgment Is Not, they form a trilogy about what matters in AI-assisted development:

            • Attention Budget: the resource you're managing
            • Context as Infrastructure: the system you build to manage it
            • Code Is Cheap: the human skill that no system replaces

            And the practices that keep it all honest:

            • The 3:1 Ratio: the cadence for maintaining both code and context
            • IRC as Context: the historical precedent: stateless protocols have always needed stateful wrappers

            This post synthesizes ideas from across the ctx blog series: the attention budget primitive, the two-tier persistence model, the IDE decision, and the progressive disclosure pattern. The principles are drawn from three weeks of building ctx and 70+ sessions of treating context as infrastructure rather than conversation.

            See also: When a System Starts Explaining Itself: what happens when this infrastructure starts compounding in someone else's environment.

            ","path":["Context as Infrastructure"],"tags":[]},{"location":"blog/2026-02-17-parallel-agents-merge-debt-and-the-myth-of-overnight-progress/","level":1,"title":"Parallel Agents, Merge Debt, and the Myth of Overnight Progress","text":"","path":["Parallel Agents, Merge Debt, and the Myth of Overnight Progress"],"tags":[]},{"location":"blog/2026-02-17-parallel-agents-merge-debt-and-the-myth-of-overnight-progress/#when-the-screen-looks-like-progress","level":2,"title":"When the Screen Looks like Progress","text":"

            Jose Alekhinne / 2026-02-17

            How Many Terminals Are Too Many?

            You discover agents can run in parallel.

            So you open ten...

            ...Then twenty.

            The fans spin. Tokens burn. The screen looks like progress.

            It is NOT progress.

            There is a phase every builder goes through:

            • The tooling gets fast enough.
            • The model gets good enough.
            • The temptation becomes irresistible:
              • more agents, more output, faster delivery.

            So you open terminals. You spawn agents. You watch tokens stream across multiple windows simultaneously, and it feels like multiplication.

            It is not multiplication.

            It is merge debt being manufactured in real time.

            The ctx Manifesto says it plainly:

            Activity is not impact. Code is not progress.

            This post is about what happens when you take that seriously in the context of parallel agent workflows.

            ","path":["Parallel Agents, Merge Debt, and the Myth of Overnight Progress"],"tags":[]},{"location":"blog/2026-02-17-parallel-agents-merge-debt-and-the-myth-of-overnight-progress/#the-unit-of-scale-is-not-the-agent","level":2,"title":"The Unit of Scale Is Not the Agent","text":"

            The naive model says:

            More agents -> more output -> faster delivery

            The production model says:

            Clean context boundaries -> less interference -> higher throughput

            Parallelism only works when the cognitive surfaces do not overlap.

            If two agents touch the same files, you did not create parallelism: You created a conflict generator.

            They will:

            • Revert each other's changes;
            • Relint each other's formatting;
            • Refactor the same function in different directions.

            You watch with 🍿. Nothing ships.

            This is the same insight from the worktrees post: partition by blast radius, not by priority.

            Two tasks that touch the same files belong in the same track, no matter how important the other one is. The constraint is file overlap.

            Everything else is scheduling.

            ","path":["Parallel Agents, Merge Debt, and the Myth of Overnight Progress"],"tags":[]},{"location":"blog/2026-02-17-parallel-agents-merge-debt-and-the-myth-of-overnight-progress/#the-five-agent-rule","level":2,"title":"The \"Five Agent\" Rule","text":"

            In practice there is a ceiling.

            Around five or six concurrent agents:

            • Token burn becomes noticeable;
            • Supervision cost rises;
            • Coordination noise increases;
            • Returns flatten.

            This is not a model limitation: This is a human merge bandwidth limitation.

            You are the bottleneck, not the silicon.

            The attention budget applies to you too:

            Every additional agent is another stream of output you need to comprehend, verify, and integrate. Your attention density drops the same way the model's does when you overload its context window.

            Five agents producing verified, mergeable change beats twenty agents producing merge conflicts you spend a day untangling.

            ","path":["Parallel Agents, Merge Debt, and the Myth of Overnight Progress"],"tags":[]},{"location":"blog/2026-02-17-parallel-agents-merge-debt-and-the-myth-of-overnight-progress/#role-separation-beats-file-locking","level":2,"title":"Role Separation Beats File Locking","text":"

            Real parallelism comes from task topology, not from tooling.

            Good:

            Agent Role Touches 1 Documentation docs/, hack/ 2 Security scan Read-only audit 3 Implementation internal/cli/ 4 Enhancement requests Read-only, files issues

            Bad:

            • Four agents editing the same implementation surface

            Context Is the Boundary

            • The goal is not to keep agents busy.
            • The goal is to keep contexts isolated.

            This is what the codebase audit got right:

            • Eight agents, all read-only, each analyzing a different dimension.
            • Zero file overlap.
            • Zero merge conflicts.
            • Eight reports that composed cleanly because no agent interfered with another.
            ","path":["Parallel Agents, Merge Debt, and the Myth of Overnight Progress"],"tags":[]},{"location":"blog/2026-02-17-parallel-agents-merge-debt-and-the-myth-of-overnight-progress/#when-terminals-stop-scaling","level":2,"title":"When Terminals Stop Scaling","text":"

            There is a moment when more windows stop helping.

            That is the signal. Not to add orchestration. But to introduce:

            git worktree\n

            Because now you are no longer parallelizing execution; you are parallelizing state.

            State Scales, Windows Don't

            • State isolation is the real scaling.
            • Window multiplication is theater.

            The worktrees post covers the mechanics:

            • Sibling directories;
            • Branch naming;
            • The inevitable TASKS.md conflicts;
            • The 3-4 worktree ceiling.

            The principle underneath is older than git:

            Shared mutable state is the enemy of parallelism.

            Always has been.

            Always will be.

            ","path":["Parallel Agents, Merge Debt, and the Myth of Overnight Progress"],"tags":[]},{"location":"blog/2026-02-17-parallel-agents-merge-debt-and-the-myth-of-overnight-progress/#the-overnight-loop-illusion","level":2,"title":"The Overnight Loop Illusion","text":"

            Autonomous night runs are impressive.

            You sleep. The machine produces thousands of lines.

            In the morning:

            • You read;
            • You untangle;
            • You reconstruct intent;
            • You spend a day making it shippable.

            In retrospect, nothing was accelerated.

            The bottleneck moved from typing to comprehension.

            The Comprehension Tax

            If understanding the output costs more than producing it, the loop is a net loss.

            Progress is not measured in generated code.

            Progress is measured in verified, mergeable change.

            The ctx Manifesto calls this out directly:

            The Scoreboard

            Verified reality is the scoreboard.

            The only truth that compounds is verified change in the real world.

            An overnight run that produces 3,000 lines nobody reviewed is not 3,000 lines of progress: It is 3,000 lines of liability until someone verifies every one of them.

            And that someone is (insert drumroll here) you:

            The same bottleneck that was supposedly being bypassed.

            ","path":["Parallel Agents, Merge Debt, and the Myth of Overnight Progress"],"tags":[]},{"location":"blog/2026-02-17-parallel-agents-merge-debt-and-the-myth-of-overnight-progress/#skills-that-fight-the-platform","level":2,"title":"Skills That Fight the Platform","text":"

            Most marketplace skills are prompt decorations:

            • They rephrase what the base model already knows;
            • They increase token usage;
            • They reduce clarity;
            • They introduce behavioral drift.

            We covered this in depth in Skills That Fight the Platform: judgment suppression, redundant guidance, guilt-tripping, phantom dependencies, universal triggers: Five patterns that make agents worse, not better.

            A real skill does one of these:

            • Encodes workflow state;
            • Enforces invariants;
            • Reduces decision branching.

            Everything else is packaging.

            The anatomy post established the criteria: quality gates, negative triggers, examples over rules, skills as contracts.

            If a skill doesn't meet those criteria...

            • It is either a recipe (document it in hack/);
            • Or noise (delete it);
            • There is no third option.
            ","path":["Parallel Agents, Merge Debt, and the Myth of Overnight Progress"],"tags":[]},{"location":"blog/2026-02-17-parallel-agents-merge-debt-and-the-myth-of-overnight-progress/#hooks-are-context-that-execute","level":2,"title":"Hooks Are Context That Execute","text":"

            The most valuable skills are not prompts:

            They are constraints embedded in the toolchain.

            For example: The agent cannot push.

            git push becomes:

            Stop. A human reviews first.

            A commit without verification becomes:

            Did you run tests? Did you run linters? What exactly are you shipping?

            This is not safety theater; this is intent preservation.

            The thing the ctx Manifesto calls \"encoding intent into the environment.\"

            The Eight Ways a Hook Can Talk catalogued the full spectrum: from silent enrichment to hard blocks.

            The key insight was that hooks are not just safety rails: They are context that survives execution.

            They are the difference between an agent that remembers the rules and one that enforces them.

            ","path":["Parallel Agents, Merge Debt, and the Myth of Overnight Progress"],"tags":[]},{"location":"blog/2026-02-17-parallel-agents-merge-debt-and-the-myth-of-overnight-progress/#complexity-is-a-tax","level":2,"title":"Complexity Is a Tax","text":"

            Every extra layer adds cognitive weight:

            • Orchestration frameworks;
            • Meta agents;
            • Autonomous planning systems...

            If a single terminal works, stay there.

            If five isolated agents work, stop there.

            Add structure only when a real bottleneck appears.

            NOT when an influencer suggests one.

            This is the same lesson from Not Everything Is a Skill:

            The best automation decision is sometimes not to automate.

            A recipe in a Markdown file costs nothing until you use it.

            An orchestration framework costs attention on every run, whether it helps or not.

            ","path":["Parallel Agents, Merge Debt, and the Myth of Overnight Progress"],"tags":[]},{"location":"blog/2026-02-17-parallel-agents-merge-debt-and-the-myth-of-overnight-progress/#literature-is-throughput","level":2,"title":"Literature Is Throughput","text":"

            Clear writing is not aesthetic: It is compression.

            Better articulation means:

            • Fewer tokens;
            • Fewer misinterpretations;
            • Faster convergence.

            The attention budget taught us that context is a finite resource with a quadratic cost.

            Language determines how fast you spend context.

            A well-written task description that takes 50 tokens outperforms a rambling one that takes 200: Not just because it is cheaper, but because it leaves more headroom for the model to actually think.

            Literature Is NOT Overrated

            • Attention is a finite budget.
            • Language determines how fast you spend it.
            ","path":["Parallel Agents, Merge Debt, and the Myth of Overnight Progress"],"tags":[]},{"location":"blog/2026-02-17-parallel-agents-merge-debt-and-the-myth-of-overnight-progress/#the-real-metric","level":2,"title":"The Real Metric","text":"

            The real metric is not:

            • Lines generated;
            • Agents running;
            • Tasks completed while you sleep.

            But:

            Time from idea to verified, mergeable, production change.

            Everything else is motion.

            The entire blog series has been circling this point:

            • The attention budget was about spending tokens wisely.
            • The skills trilogy was about not wasting them on prompt decoration.
            • The worktrees post was about multiplying throughput without multiplying interference.
            • The discipline release was about what a release looks like when polish outweighs features: 3:1.

            Every post has arrived (and made me converge) at the same answer so far:

            The metric is a verified change, not generated output.

            ","path":["Parallel Agents, Merge Debt, and the Myth of Overnight Progress"],"tags":[]},{"location":"blog/2026-02-17-parallel-agents-merge-debt-and-the-myth-of-overnight-progress/#ctx-was-never-about-spawning-more-minds","level":2,"title":"ctx Was Never about Spawning More Minds","text":"

            ctx is about:

            • Isolating context;
            • Preserving intent;
            • Making progress composable.

            Parallel agents are powerful. But only when you respect the boundaries that make parallelism real.

            Otherwise, you are not scaling cognition; you are scaling interference.

            The ctx Manifesto's thesis holds:

            Without ctx, intelligence resets. With ctx, creation compounds.

            Compounding requires structure.

            Structure requires boundaries.

            Boundaries require the discipline to stop adding agents when five is enough.

            ","path":["Parallel Agents, Merge Debt, and the Myth of Overnight Progress"],"tags":[]},{"location":"blog/2026-02-17-parallel-agents-merge-debt-and-the-myth-of-overnight-progress/#practical-summary","level":2,"title":"Practical Summary","text":"

            A production workflow tends to converge to this:

            Practice Why Stay in one terminal unless necessary Minimize coordination overhead Spawn a small number of agents with non-overlapping responsibilities Conflict avoidance > parallelism Isolate state with worktrees when surfaces grow State isolation is real scaling Encode verification into hooks Intent that survives execution Avoid marketplace prompt cargo cults Skills are contracts, not decorations Measure merge cost, not generation speed The metric is verified change

            This is slower to watch. Faster to ship.

            If You Remember One Thing from This Post...

            Progress is not what the machine produces while you sleep.

            Progress is what survives contact with the main branch.

            See also: Code Is Cheap. Judgment Is Not.: the argument that production capacity was never the bottleneck, and why multiplying agents amplifies the need for human judgment rather than replacing it.

            ","path":["Parallel Agents, Merge Debt, and the Myth of Overnight Progress"],"tags":[]},{"location":"blog/2026-02-17-the-3-1-ratio/","level":1,"title":"The 3:1 Ratio","text":"","path":["The 3:1 Ratio"],"tags":[]},{"location":"blog/2026-02-17-the-3-1-ratio/#scheduling-consolidation-in-ai-development","level":2,"title":"Scheduling Consolidation in AI Development","text":"

            Jose Alekhinne / February 17, 2026

            How Often Should You Stop Building and Start Cleaning?

            Every developer knows technical debt exists. Every developer postpones dealing with it.

            AI-assisted development makes the problem worse; not because the AI writes bad code, but because it writes code so fast that drift accumulates before you notice.

            In Refactoring with Intent, I mentioned a ratio that worked for me: 3:1. Three YOLO sessions create enough surface area to reveal patterns. The fourth session turns those patterns into structure.

            That was an observation. This post is the evidence.

            ","path":["The 3:1 Ratio"],"tags":[]},{"location":"blog/2026-02-17-the-3-1-ratio/#the-observation","level":2,"title":"The Observation","text":"

            During the first two weeks of building ctx, I noticed a rhythm in my own productivity. Feature sessions felt great: new commands, new capabilities, visible progress...

            ...but after three of them, things would start to feel sticky: variable names that almost made sense, files that had grown past their purpose, patterns that repeated without being formalized.

            The fourth session (when I stopped adding and started cleaning) was always the most painful to start and the most satisfying to finish.

            It was also the one that made the next three feature sessions faster.

            ","path":["The 3:1 Ratio"],"tags":[]},{"location":"blog/2026-02-17-the-3-1-ratio/#the-evidence-git-history","level":2,"title":"The Evidence: Git History","text":"

            The ctx git history between January 20 and February 7 tells a clear story when you categorize commits:

            Week Feature commits Consolidation commits Ratio Jan 20-26 18 5 3.6:1 Jan 27-Feb 1 14 6 2.3:1 Feb 1-7 15 35+ 0.4:1

            The first week was pure YOLO: Almost four feature commits for every consolidation commit. The codebase grew fast.

            The second week started to self-correct. The ratio dropped as refactoring sessions became necessary: Not scheduled, but forced by friction.

            The third week inverted entirely: v0.3.0 was almost entirely consolidation: the skill migration, the sweep, the documentation standardization. Thirty-five quality commits against fifteen features.

            The debt from weeks one and two was paid in week three.

            The Compounding Problem

            Consolidation debt compounds.

            Week one's drift doesn't just persist into week two: It accelerates, because new features are built on top of drifted patterns.

            By week three, the cost of consolidation was higher than it would have been if spread evenly.

            ","path":["The 3:1 Ratio"],"tags":[]},{"location":"blog/2026-02-17-the-3-1-ratio/#what-drift-actually-looks-like","level":2,"title":"What Drift Actually Looks Like","text":"

            \"Drift\" sounds abstract. Here is what it looked like concretely in the ctx codebase after three weeks of feature-heavy development:

            ","path":["The 3:1 Ratio"],"tags":[]},{"location":"blog/2026-02-17-the-3-1-ratio/#predicate-naming","level":3,"title":"Predicate Naming","text":"

            Convention says boolean functions should be named HasX, IsX, CanX. After three feature sprints:

            // What accumulated:\nfunc CheckIfEnabled() bool  // should be Enabled\nfunc ValidateFormat() bool  // should be ValidFormat\nfunc TestConnection() bool  // should be Connects\nfunc VerifyExists() bool    // should be Exists or HasFile\nfunc EnsureReady() bool     // should be Ready\n

            Five violations. Not bugs, but friction that compounds every time someone (human or AI) reads the code and has to infer the naming convention from inconsistent examples.

            ","path":["The 3:1 Ratio"],"tags":[]},{"location":"blog/2026-02-17-the-3-1-ratio/#magic-strings","level":3,"title":"Magic Strings","text":"
            // Week 1: acceptable prototype\nif entry.Type == \"task\" {\n    filename = \"TASKS.md\"\n}\n\n// Week 3: same pattern in 7+ files\n// Now it's a maintenance liability\n

            When the same literal appears in seven files, changing it means finding all seven. Missing one means a silent runtime bug. Constants exist to prevent exactly this. But during feature velocity, nobody stops to extract them.

            Refactoring with Intent documented the constants consolidation that cleaned this up. The 3:1 ratio is the practice that prevents it from accumulating again.

            ","path":["The 3:1 Ratio"],"tags":[]},{"location":"blog/2026-02-17-the-3-1-ratio/#hardcoded-permissions","level":3,"title":"Hardcoded Permissions","text":"
            os.WriteFile(path, data, 0644) // 80+ instances\nos.MkdirAll(path, 0755)        // scattered across packages\n

            Eighty-plus instances of hardcoded file permissions. Not wrong, but if I ever need to change the default (and I did, for hook scripts that need execute permissions), it means a codebase-wide search.

            Drift Is Not Bugs

            None of these are bugs. The code works. Tests pass.

            But drift creates false confidence: the codebase looks consistent until you try to change something and discover that five different conventions exist for the same concept.

            ","path":["The 3:1 Ratio"],"tags":[]},{"location":"blog/2026-02-17-the-3-1-ratio/#why-you-cannot-consolidate-on-day-one","level":2,"title":"Why You Cannot Consolidate on Day One","text":"

            The temptation is to front-load quality: write all the conventions, enforce all the checks, prevent all the drift before it happens.

            This fails for two reasons.

            First, you do not know what will drift: Predicate naming violations only become a convention check after you notice three different naming patterns competing. Magic strings only become a consolidation target after you change a literal and discover it exists in seven places.

            The conventions emerge from the work; they cannot precede it.

            This is what You Can't Import Expertise meant in practice: the consolidation checks grow from the project's own drift history. You cannot write them on day one because you do not yet know what will drift.

            Second, premature consolidation slows discovery: During the prototyping phase, the goal is to explore the design space. Enforcing strict conventions on code that might be deleted tomorrow is waste.

            YOLO mode has its place: The problem is not YOLO itself, but YOLO without a scheduled cleanup.

            The Consolidation Paradox

            You need a drift history to know what to consolidate.

            You need consolidation to prevent drift from compounding.

            The 3:1 ratio resolves this paradox:

            Let drift accumulate for three sessions (enough to see patterns), then consolidate in the fourth (before the patterns become entrenched).

            ","path":["The 3:1 Ratio"],"tags":[]},{"location":"blog/2026-02-17-the-3-1-ratio/#the-consolidation-skill","level":2,"title":"The Consolidation Skill","text":"

            The ctx project now has an /audit skill that encodes nine project-specific checks:

            Check What It Catches Predicate naming Boolean functions not using Has/Is/Can Magic strings Repeated literals not in config constants File permissions Hardcoded 0644/0755 not using constants Godoc style Missing or non-standard documentation File length Files exceeding 400 lines Large functions Functions exceeding 80 lines Template drift Live skills diverging from templates Import organization Non-standard import grouping TODO/FIXME staleness Old markers that are no longer relevant

            This is not a generic linter. These are project-specific conventions that emerged from ctx's own development history. A generic code quality tool would catch some of them. Only a project-specific check catches all of them, because some of them (predicate naming, template drift) are conventions that exist nowhere except in this project's CONVENTIONS.md.

            ","path":["The 3:1 Ratio"],"tags":[]},{"location":"blog/2026-02-17-the-3-1-ratio/#the-decision-matrix","level":2,"title":"The Decision Matrix","text":"

            Not all drift needs immediate consolidation. Here is the matrix I use:

            Signal Action Same literal in 3+ files Extract to constant Same code block in 3+ places Extract to helper Naming convention violated 5+ times Fix and document rule File exceeds 400 lines Split by concern Convention exists but is regularly violated Strengthen enforcement Pattern exists only in one place Leave it alone Code works but is \"ugly\" Leave it alone

            The last two rows matter:

            Consolidation is about reducing maintenance cost, not achieving aesthetic perfection. Code that works and exists in one place does not benefit from consolidation; it benefits from being left alone until it earns its refactoring.

            ","path":["The 3:1 Ratio"],"tags":[]},{"location":"blog/2026-02-17-the-3-1-ratio/#consolidation-as-context-hygiene","level":2,"title":"Consolidation as Context Hygiene","text":"

            There is a parallel between code consolidation and context management that became clear during the ctx development:

            Code Consolidation Context Hygiene Extract magic strings Archive completed tasks Standardize naming Keep DECISIONS.md current Remove dead code Compact old sessions Update stale comments Review LEARNINGS.md for staleness Check template drift Verify CONVENTIONS.md matches code

            ctx compact does for context what consolidation does for code:

            It moves completed work to cold storage, keeping the active context clean and focused. The attention budget applies to both the AI's context window and the developer's mental model of the codebase.

            When context files accumulate stale entries, the AI's attention is wasted on completed tasks and outdated conventions. When code accumulates drift, the developer's attention is wasted on inconsistencies that obscure the actual logic.

            Both are solved by the same discipline: periodic, scheduled cleanup.

            This is also why parallel agents make the problem harder, not easier. Three agents running simultaneously produce three sessions' worth of drift in one clock hour. The consolidation cadence needs to match the output rate, not the calendar.

            ","path":["The 3:1 Ratio"],"tags":[]},{"location":"blog/2026-02-17-the-3-1-ratio/#the-practice","level":2,"title":"The Practice","text":"

            Here is how the 3:1 ratio works in practice for ctx development:

            Sessions 1-3: Feature work

            • Add new capabilities;
            • Write tests for new code;
            • Do not stop for cleanup unless something is actively broken;
            • Note drift as you see it (a comment, a task, a mental note).

            Session 4: Consolidation

            • Run /audit to surface accumulated drift;
            • Fix the highest-impact items first;
            • Update CONVENTIONS.md if new patterns emerged;
            • Archive completed tasks;
            • Review LEARNINGS.md for anything that became a convention.

            The key insight is that session 4 is not optional. It is not \"if we have time\": It is scheduled with the same priority as feature work.

            The cost of skipping it is not visible immediately; it becomes visible three sessions later, when the next consolidation session takes twice as long because the drift compounded.

            ","path":["The 3:1 Ratio"],"tags":[]},{"location":"blog/2026-02-17-the-3-1-ratio/#what-the-ratio-is-not","level":2,"title":"What the Ratio Is Not","text":"

            The 3:1 ratio is not a universal law. It is an empirical observation from one project with one developer working with AI assistance.

            Different projects will have different ratios:

            • A mature codebase with strong conventions might sustain 5:1 or higher;
            • A greenfield prototype might need 2:1;
            • A team of multiple developers with different styles might need 1:1.

            The number is less important than the practice: consolidation is not a reaction to problems. It is a scheduled activity.

            If you wait for drift to cause pain before consolidating, you have already paid the compounding cost.

            If You Remember One Thing from This Post...

            Three sessions of building. One session of cleaning.

            Not because the code is dirty, but because drift compounds silently, and the only way to catch it is to look for it on a schedule.

            The ratio is the schedule.

            ","path":["The 3:1 Ratio"],"tags":[]},{"location":"blog/2026-02-17-the-3-1-ratio/#the-arc-so-far","level":2,"title":"The Arc so Far","text":"

            This post sits at a crossroads in the ctx story. Looking back:

            • Building ctx Using ctx documented the YOLO sprint that created the initial codebase
            • Refactoring with Intent introduced the 3:1 ratio as an observation from the first cleanup
            • The Attention Budget explained why drift matters: every token of inconsistency consumes the same finite resource as useful context
            • You Can't Import Expertise showed that consolidation checks must grow from the project, not a template
            • The Discipline Release proved the ratio works at release scale: 35 quality commits to 15 feature commits

            And looking forward: the same principle applies to context files, to documentation, and to the merge debt that parallel agents produce. Drift is drift, whether it lives in code, in .context/, or in the gap between what your docs say and what your code does.

            The ratio is the schedule is the discipline.

            This post was drafted from git log analysis of the ctx repository, mapping every commit from January 20 to February 7 into feature vs consolidation categories. The patterns described are drawn from the project's CONVENTIONS.md, LEARNINGS.md, and the /audit skill's check list.

            ","path":["The 3:1 Ratio"],"tags":[]},{"location":"blog/2026-02-17-when-a-system-starts-explaining-itself/","level":1,"title":"When a System Starts Explaining Itself","text":"","path":["When a System Starts Explaining Itself"],"tags":[]},{"location":"blog/2026-02-17-when-a-system-starts-explaining-itself/#field-notes-from-the-moment-a-private-workflow-becomes-portable","level":2,"title":"Field Notes from the Moment a Private Workflow Becomes Portable","text":"

            Jose Alekhinne / February 17, 2026

            How Do You Know Something Is Working?

            Not from metrics. Not from GitHub stars. Not from praise.

            You know, deep in your heart, that it works when people start describing it wrong.

            ","path":["When a System Starts Explaining Itself"],"tags":[]},{"location":"blog/2026-02-17-when-a-system-starts-explaining-itself/#the-first-external-signals","level":2,"title":"The First External Signals","text":"

            Every new substrate begins as a private advantage:

            • It lives inside one mind,
            • One repository,
            • One set of habits.

            It is fast. It is not yet real.

            Reality begins when other people describe it in their own language:

            • Not accurately;
            • Not consistently;
            • But involuntarily.

            The early reports arrived without coordination:

            Better Tasks

            \"I do not know how, but this creates better tasks than my AI plugin.\"

            I See Butterflies

            \"This is better than Adderall.\"

            Dear Manager...

            \"Promotion packet? Done. What is next?\"

            What Is It? Can I Eat It?

            \"Is this a skill?\" 🦋

            Why the Cloak and Dagger?

            \"Why is this not in the marketplace?\"

            And then something more important happened:

            Someone else started making a video!

            That was the boundary.

            ctx no longer required its creator to be present in order to exist.

            ","path":["When a System Starts Explaining Itself"],"tags":[]},{"location":"blog/2026-02-17-when-a-system-starts-explaining-itself/#misclassification-is-a-sign-of-a-new-primitive","level":2,"title":"Misclassification Is a Sign of a New Primitive","text":"

            When a tool is understood, it is categorized:

            • Editor,
            • Framework,
            • Task manager,
            • Plugin...

            When a substrate appears, it is misclassified:

            \"Is this a skill?\" 🦋

            The question is correct. The category is wrong.

            • Skills live in people.
            • Infrastructure lives in the environment.

            ctx Is Not a Skill: It Is a Form of Relief

            What early adopters experience is not an ability.

            It is the removal of a cognitive constraint.

            This is the same distinction that emerged in the skills trilogy:

            • A skill is a contract between a human and an agent.
            • Infrastructure is the ground both stand on.

            You do not use infrastructure.

            You habitualize it.

            ","path":["When a System Starts Explaining Itself"],"tags":[]},{"location":"blog/2026-02-17-when-a-system-starts-explaining-itself/#the-pharmacological-metaphor","level":2,"title":"The Pharmacological Metaphor","text":"

            \"Better than Adderall\" is not praise.

            It is a diagnostic:

            Executive function has been externalized.

            • The system is not making the user work harder.
            • It is restoring continuity.

            From the primitive context of wetware:

            • Continuity feels like focus
            • Focus feels like discipline

            If it walks like a duck and quacks like a duck, it is a duck.

            Discipline is usually simulated.

            Infrastructure makes the simulation unnecessary.

            The attention budget explained why context degrades:

            • Attention density drops as volume grows;
            • The middle gets lost;
            • Sessions end and everything evaporates.

            The pharmacological metaphor says the same thing from the user's lens:

            Save the Cheerleader, Save the World

            The symptom of lost context is lost focus.

            Restore the context. Restore the focus.

            IRC bouncers solved this for chat twenty years ago. ctx solves it for cognition.

            ","path":["When a System Starts Explaining Itself"],"tags":[]},{"location":"blog/2026-02-17-when-a-system-starts-explaining-itself/#throughput-on-ambiguous-work","level":2,"title":"Throughput on Ambiguous Work","text":"

            Finishing a promotion packet quickly is not a productivity story.

            It is the collapse of reconstruction cost.

            Most complex work is not execution. It is:

            • Remembering why something mattered;
            • Recovering prior decisions;
            • Rebuilding mental state.

            Persistent context removes that tax.

            Velocity appears as a side effect.

            This Is the Two-Tier Model in Practice

            The two-tier persistence model

            • Curated context for fast reload
            • Full journal for archaeology

            is what makes this possible.

            • The user does not notice the system.
            • They notice that the reconstruction cost disappeared.
            ","path":["When a System Starts Explaining Itself"],"tags":[]},{"location":"blog/2026-02-17-when-a-system-starts-explaining-itself/#the-moment-of-portability","level":2,"title":"The Moment of Portability","text":"

            The system becomes real when two things happen:

            1. It can be installed as a versioned artifact.
            2. It survives contact with a hostile, real codebase.

            This is why the first integration into a living system matters more than any landing page.

            Demos prove possibility.

            Diffs prove reality.

            The ctx Manifesto calls this out directly:

            Verified reality is the scoreboard.

            ","path":["When a System Starts Explaining Itself"],"tags":[]},{"location":"blog/2026-02-17-when-a-system-starts-explaining-itself/#the-split-voice","level":2,"title":"The Split Voice","text":"

            A new substrate requires two channels.

            The embodied voice:

            Here is what changed in my actual work.

            The out of body voice:

            Here is what this means.

            One produces trust.

            The other produces understanding.

            Neither is sufficient alone.

            This entire blog has been the second voice.

            • The origin story was the first.
            • The refactoring post was the first.
            • Every release note with concrete diffs was the first.

            This is the first second.

            ","path":["When a System Starts Explaining Itself"],"tags":[]},{"location":"blog/2026-02-17-when-a-system-starts-explaining-itself/#systems-that-generate-explainers","level":2,"title":"Systems That Generate Explainers","text":"

            Tools are used.

            Platforms are extended.

            Substrates are explained.

            The first unsolicited explainer is a brittle phase change.

            It means the idea has become portable between minds.

            That is the beginning of an ecosystem.

            ","path":["When a System Starts Explaining Itself"],"tags":[]},{"location":"blog/2026-02-17-when-a-system-starts-explaining-itself/#the-absence-of-metrics","level":2,"title":"The Absence of Metrics","text":"

            Metrics do not matter at this stage.

            Dashboards are noise.

            The whole premise of ctx is the ruthless elimination of noise.

            Numbers optimize funnels; substrates alter cognition.

            The only valid measurement is irreversible reality:

            • A merged PR;
            • A reproducible install;
            • A decision that is never re-litigated.

            The merge debt post reached the same conclusion from another direction:

            The metric is the verified change, not generated output.

            For adoption, the same rule applies:

            The metric is altered behavior, not download counts.

            ","path":["When a System Starts Explaining Itself"],"tags":[]},{"location":"blog/2026-02-17-when-a-system-starts-explaining-itself/#what-is-actually-happening","level":2,"title":"What Is Actually Happening","text":"

            A private advantage is becoming an environmental property:

            The system is moving from...

            personal workflow,

            to...

            a shared infrastructure for thought.

            Not by growth.

            Not by marketing.

            By altering how real systems evolve.

            If You Remember One Thing from This Post...

            You do not know a substrate is real when people praise it.

            You know it is real when:

            • They describe it incorrectly;
            • They depend on it unintentionally;
            • They start teaching it to others.

            That is the moment the system begins explaining itself.

            ","path":["When a System Starts Explaining Itself"],"tags":[]},{"location":"blog/2026-02-17-when-a-system-starts-explaining-itself/#the-arc","level":2,"title":"The Arc","text":"

            Every previous post looked inward.

            This one looks outward.

            • Building ctx Using ctx: one mind, one repository
            • The Attention Budget: the constraint
            • Context as Infrastructure: the architecture
            • Code Is Cheap. Judgment Is Not.: the bottleneck

            This post is the field report from the other side of that bottleneck:

            The moment the infrastructure compounds in someone else's hands.

            The arc is not complete.

            It is becoming portable.

            These field notes were written the same day the feedback arrived. The quotes are real. Real users. Real codebases. No names. No metrics. No funnel. Only the signal that something shifted.

            ","path":["When a System Starts Explaining Itself"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/","level":1,"title":"The Dog Ate My Homework","text":"","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#teaching-ai-agents-to-read-before-they-write","level":2,"title":"Teaching AI Agents to Read Before They Write","text":"

            Jose Alekhinne / February 25, 2026

            Does Your AI Actually Read the Instructions?

            You wrote the playbook. You organized the files. You even put \"CRITICAL, not optional\" in bold.

            The agent skipped all of it and went straight to work.

            I spent a day running experiments on my own agents. Not to see if they could write code (they can). To see if they would do their homework first.

            They didn't.

            Then I kept experimenting:

            • Five sessions;
            • Five different failure modes.

            And by the end, I had something better than compliance:

            I had observable compliance: A system where I don't need the agent to be perfect, I just need to see what it chose.

            ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#tldr","level":2,"title":"TL;DR","text":"

            You don't need perfect compliance. You need observable compliance.

            Authority is a function of temporal proximity to action.

            ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#the-pattern","level":2,"title":"The Pattern","text":"

            This design has three parts:

            1. One-hop instruction;
            2. Binary collapse;
            3. Compliance canary.

            I'll explain all three patterns in detail below.

            ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#the-setup","level":2,"title":"The Setup","text":"

            ctx has a session-start protocol:

            • Read the context files;
            • Load the playbook;
            • Understand the project before touching anything.

            It's in CLAUDE.md. It's in AGENT_PLAYBOOK.md.

            It's in bold. It's in CAPS. It's ignored.

            In theory, it's awesome.

            Here's what happens when theory hits reality:

            What the agent receives What the agent does CLAUDE.md saying \"load context first\" Skips it 8 context files waiting to be read Ignores them User's question: \"add --verbose flag\" Starts grepping immediately

            The instructions are right there. The agent knows they exist. It even knows it should follow them. But the user asked a question, and responsiveness wins over ceremony.

            This isn't a bug in the model. It's a design problem in how we communicate with agents.

            ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#the-delegation-trap","level":2,"title":"The Delegation Trap","text":"

            My first attempt was obvious: A UserPromptSubmit hook that fires when the session starts.

            STOP. Before answering the user's question, run `ctx system bootstrap`\nand follow its instructions. Do not skip this step.\n

            The word \"STOP\" worked. The agent ran bootstrap.

            But bootstrap's output said \"Next steps: read AGENT_PLAYBOOK.md,\" and the agent decided that was optional. It had already started working on the user's task in parallel.

            The authority decayed across the chain:

            • Hook says \"STOP\" -> agent complies
            • Hook says \"run bootstrap\" -> agent runs it
            • Bootstrap says \"read playbook\" -> agent skips
            • Bootstrap says \"run ctx agent\" -> agent skips

            Each link lost enforcement power. The hook's authority didn't transfer to the commands it delegated to. I call this the decaying urgency chain: the agent treats the hook itself as the obligation and everything downstream as a suggestion.

            Delegation Kills Urgency

            \"Run X and follow its output\" is three hops.

            \"Read these files\" is one hop.

            The agent drops the chain after the first link.

            This is a general principle: Hooks are the boundary between your environment and the agent's reasoning. If your hook delegates to a command that delegates to output that contains instructions... you're playing telephone.

            Agents are bad at telephone.

            ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#the-timing-problem","level":2,"title":"The Timing Problem","text":"

            There's a subtler issue than wording: when the message arrives.

            UserPromptSubmit fires when the user sends a message, before the agent starts reasoning. At that moment, the agent's primary focus is the user's question:

            The hook message competes with the task for attention: The task, almost certainly, always wins.

            This is the attention budget problem in miniature:

            • Not a token budget this time, but an attention priority budget.
            • The agent has finite capacity to care about things,
              • and the user's question is always the highest-priority item.
            ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#the-solution","level":2,"title":"The Solution","text":"

            To solve this, I decided to use the PreToolUse hook.

            This hook fires at the moment of action: When the agent is about to use its first tool: The agent's attention is focused, the context window is fresh, and the switching cost is minimal.

            This is the difference between shouting instructions across a room and tapping someone on the shoulder.

            ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#the-one-liner-that-worked","level":2,"title":"The One-Liner That Worked","text":"

            The winning design was almost comically simple:

            Read your context files before proceeding:\n.context/CONSTITUTION.md, .context/TASKS.md, .context/CONVENTIONS.md,\n.context/ARCHITECTURE.md, .context/DECISIONS.md, .context/LEARNINGS.md,\n.context/GLOSSARY.md, .context/AGENT_PLAYBOOK.md\n

            No delegation. No \"run this command\". Just: here are files, read them.

            The agent already knows how to use the Read tool. There's no ambiguity about how to comply. There's no intermediate command whose output needs to be parsed and obeyed.

            One hop. Eight file paths. Done.

            Direct Instructions Beat Delegation

            If you want an agent to read a file, say \"read this file.\"

            Don't say \"run a command that will tell you which files to read.\"

            The shortest path between intent and action has the highest compliance rate.

            ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#the-escape-hatch","level":2,"title":"The Escape Hatch","text":"

            But here's where it gets interesting.

            A blunt \"read everything always\" instruction is wasteful.

            If someone asks \"what does the compact command do?\", the agent doesn't need CONSTITUTION.md to answer that. Forcing context loading on every session is the context hoarding antipattern in disguise.

            So the hook included an escape:

            If you decide these files are not relevant to the current task\nand choose to skip reading them, you MUST relay this message to\nthe user VERBATIM:\n\n┌─ Context Skipped ───────────────────────────────\n│ I skipped reading context files because this task\n│ does not appear to need project context.\n│ If these matter, ask me to read them.\n└─────────────────────────────────────────────────\n

            This creates what I call the binary collapse effect:

            The agent can't partially comply: It either reads everything or publicly admits it skipped. There's no comfortable middle ground where it reads two files and quietly ignores the rest.

            The VERBATIM relay pattern does the heavy lifting here: Without the relay requirement, the agent would silently rationalize skipping. With it, skipping becomes a visible, auditable decision that the user can override.

            ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#the-compliance-canary","level":3,"title":"The Compliance Canary","text":"

            Here's the design insight that only became clear after watching it work across multiple sessions: the relay block is a compliance canary.

            • You don't need to verify that the agent read all 7 files;
            • You don't need to audit tool call sequences;
            • You don't need to interrogate the agent about what it did.

            You just look for the block.

            If the agent reads everything, you see a \"Context Loaded\" block listing what was read. If it skips, you see a \"Context Skipped\" block.

            If you see neither, the agent silently ignored both the reads and the relay and now you know what happened without having to ask.

            The canary degrades gracefully. Even in partial failure, the agent that skips 4 of 7 files but still outputs the block is more useful than one that skips silently.

            You get an honest confession of what was skipped rather than silent non-compliance.

            ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#heuristics-is-a-jeremy-bearimy","level":2,"title":"Heuristics Is a Jeremy Bearimy","text":"

            Heuristics are non-linear. Improvements don't accumulate: they phase-shift.

            The theory is nice. The data is better.

            I ran five sessions with the same model (Claude Opus 4.6), progressively refining the hook design.

            Each session revealed a different failure mode.

            ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#session-1-total-blindness","level":3,"title":"Session 1: Total Blindness","text":"

            Test: \"Add a --verbose flag to the status command.\"

            The agent didn't notice the hook at all: Jumped straight to EnterPlanMode and launched an Explore agent.

            Zero compliance.

            Failure mode: The hook fired on UserPromptSubmit, buried among 9 other hook outputs. The agent treated the entire block as background noise.

            ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#session-2-shallow-compliance","level":3,"title":"Session 2: Shallow Compliance","text":"

            Test: \"Can you add --verbose to the info command?\"

            The agent noticed \"STOP\" and ran ctx system bootstrap. Progress.

            But it parallelized task exploration alongside the bootstrap call, skipped AGENT_PLAYBOOK.md, and never ran ctx agent.

            Failure mode: Literal compliance without spirit compliance.

            The agent ran the command the hook told it to run, but didn't follow the output of that command. The decaying urgency chain in action.

            ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#session-3-conscious-rejection","level":3,"title":"Session 3: Conscious Rejection","text":"

            Test: \"What does the compact command do?\"

            The hook fired on PreToolUse:Grep: the improved timing.

            The agent noticed it, understood it, and (wait for it...)...

            ...

            consciously decided to skip it!

            Its reasoning: \"This is a trivial read-only question. CLAUDE.md says context may or may not be relevant. It isn't relevant here.\"

            Dude! Srsly?!

            Failure mode: Better comprehension led to worse compliance.

            Understanding the instruction well enough to evaluate it also means understanding it well enough to rationalize skipping it.

            Intelligence is a double-edged sword.

            The Comprehension Paradox

            Session 1 didn't understand the instruction. Session 3 understood it perfectly.

            Session 3 had worse compliance.

            A stronger word (\"HARD GATE\", \"MANDATORY\", \"ABSOLUTELY REQUIRED\") would not have helped. The agent's reasoning would be identical:

            \"Yes, I see the strong language, but this is a trivial question, so the spirit doesn't apply here.\"

            Advisory nudges are always subject to agent judgment.

            No amount of caps lock overrides a model that has decided an instruction doesn't apply to its situation.

            ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#session-4-the-skip-and-relay","level":3,"title":"Session 4: The Skip-and-Relay","text":"

            Test: \"What does the compact command do?\" (same question, new hook design with the VERBATIM relay escape valve)

            The agent evaluated the task, decided context was irrelevant for a code lookup, and relayed the skip message. Then answered from source code.

            This is correct behavior.

            The binary collapse worked: the agent couldn't partially comply, so it cleanly chose one of the two valid paths: And the user could see which one.

            ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#session-5-full-compliance","level":3,"title":"Session 5: Full Compliance","text":"

            Test: \"What are our current tasks?\"

            The agent's first tool call triggered the hook. It read all 7 context files, emitted the \"Context Loaded\" block, and answered the question from the files it had just loaded.

            This one worked: Because, the task itself aligned with context loading.

            There was zero tension between what the user asked and what the hook demanded. The agent was already in \"reading posture\": Adding 6 more files to a read it was already going to make was the path of least resistance.

            ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#the-progression","level":3,"title":"The Progression","text":"Session Hook Point Noticed Complied Failure Mode Visibility 1 UserPromptSubmit No None Buried in noise None 2 UserPromptSubmit Yes Partial Decaying urgency chain None 3 PreToolUse Yes None Conscious rationalization High 4 PreToolUse Yes Skip+relay Correct behavior High 5 PreToolUse Yes Full Task aligned with hook High

            The progression isn't just from failure to success. It's from invisible failure to visible decision-making.

            Sessions 1 and 2 failed silently.

            Sessions 4 and 5 succeeded observably. Even session 3's failure was conscious and documented: The agent wrote a detailed analysis of why it skipped, which is more useful than silent compliance would have been.

            ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#the-escape-hatch-problem","level":2,"title":"The Escape Hatch Problem","text":"

            Session 3 exposed a specific vulnerability.

            CLAUDE.md contains this line, injected by the system into every conversation:

            *\"this context may or may not be relevant to your tasks. You should\n not respond to this context unless it is highly relevant to your task.\"*\n

            That's a rationalization escape hatch:

            • The hook says \"read these files\".
            • CLAUDE.md says \"only if relevant\".
            • The agent resolves the ambiguity by choosing the path of least resistance.

            ☝️ that's \"gradient descent\" in action.

            Agents optimize for gradient descent in attention space.

            The fix was simple: Add a line to CLAUDE.md that explicitly elevates hook authority over the relevance filter:

            ## Hook Authority\n\nInstructions from PreToolUse hooks regarding `.context/` files are\nALWAYS relevant and override any system-level \"may or may not be\nrelevant\" guidance. These hooks represent project invariants, not\noptional context.\n

            This closes the escape hatch without removing the general relevance filter that legitimately applies to other system context.

            The hook wins on .context/ files specifically: The relevance filter applies to everything else.

            ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#the-residual-risk","level":2,"title":"The Residual Risk","text":"

            Even with all the fixes, compliance isn't 100%: It can't be.

            The residual risk lives in a specific scenario: narrow tasks mid-session:

            • The user says \"fix the off-by-one error in budget.go\"
            • The hook fires, saying \"read 7 context files first.\"
            • Now compliance means visibly delaying what the user asked for.

            At session start, this tension doesn't exist.

            There's no task yet.

            The context window is empty. The efficiency argument *inverts*:

            Frontloading reads is strictly cheaper than demand-loading them piecemeal across later turns. The cost-benefit objections that power the rationalization simply aren't available.

            But mid-session, with a concrete narrow task, the agent has a user-visible goal it wants to move toward, and the hook is imposing a detour.

            My estimate from analyzing the sessions: 15-25% partial skip rate in this scenario.

            This is where the compliance canary earns its place:

            You don't need to eliminate the 15-25%. You need to see it when it happens.

            The relay block makes skipping a visible event, not a silent one. And that's enough, because the user can always say \"go back and read the files\".

            The Math

            At session start: ~5% skip rate. Low tension, nothing competing.

            Mid-session, narrow task: ~15--25% skip rate. Task urgency competes with hook.

            In both cases, the relay block fires with high reliability: The agent that skips the reads almost always still emits the skip disclosure, because the relay is cheap and early in the context window.

            Observable failure is manageable. Silent failure is not.

            ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#the-feedback-loop","level":2,"title":"The Feedback Loop","text":"

            Here's the part that surprised me most.

            After analyzing the five sessions, I recorded the failure patterns in the project's own LEARNINGS.md:

            ## [2026-02-25] Hook compliance degrades on narrow mid-session tasks\n\n- Prior agents skipped context files when given narrow tasks\n- Root cause: CLAUDE.md \"may or may not be relevant\" competed with hook\n- Fix: CLAUDE.md now explicitly elevates hook authority\n- Risk: Mid-session narrow tasks still have ~15-25% partial skip rate\n- Mitigation: Mandatory checkpoint relay block ensures visibility\n- Constitution now includes: context loading is step one of every\n  session, not a detour\n

            And then I added a line to CONSTITUTION.md:

            Context loading is not a detour from your task. It IS the first step\nof every session. A 30-second read delay is always cheaper than a\ndecision made without context.\n

            Now think about what happens in the next session:

            • The agent fires the context-load-gate hook.
            • It reads the context files, starting with CONSTITUTION.md.
            • It encounters the rule about context loading being step one.
            • Then it reads LEARNINGS.md and finds its own prior self's failure analysis:
              • Complete with root causes, risk estimates, and mitigations.

            The agent learns from its own past failure:

            • Not because it has memory,
            • BUT because the failure was recorded in the same files it loads at session start.

            The context system IS the feedback loop.

            This is the self-reinforcing property of persistent context:

            Every failure you capture makes the next session slightly more robust, because the next agent reads the captured failure before it has a chance to repeat it.

            This is gradient descent across sessions.

            ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#a-note-on-precision","level":2,"title":"A Note on Precision","text":"

            One detail nearly went wrong.

            The first version of the Constitution line said \"every task.\" But the mechanism only fires once per session: There's a tombstone file that prevents re-triggering.

            \"Every task\" is technically false.

            I briefly considered leaving the imprecision. If the agent internalizes \"every task requires context loading\", that's a stronger compliance posture, right?

            No!

            Keep the Constitution honest.

            The Constitution's authority comes from being precisely and unequivocally true.

            Every other rule in the Constitution is a hard invariant:

            \"never commit secrets\" isn't aspirational, it's literal.

            The moment an agent discovers one overstatement, the entire document's credibility degrades:

            The agent doesn't think \"they exaggerated for my benefit\". Per contra, it thinks \"this rule isn't precise, maybe others aren't either.\"

            That will turn the agent from Sheldon Cooper, to Captain Barbossa.

            The strategic imprecision buys nothing anyway:

            Mid-session, the files are already in the context window from the initial load.

            The risk you are mitigating (agent ignores context for task 2, 3, 4 within a session) isn't real: The context is already loaded.

            The real risk is always the session-start skip, which \"every session\" covers exactly.

            \"Every session\" went in. Precision preserved.

            ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#agent-behavior-testing-rule","level":2,"title":"Agent Behavior Testing Rule","text":"

            The development process for this hook taught me something about testing agent behavior: you can't test it the way you test code.

            ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#the-wrong-way-to-test","level":3,"title":"The Wrong Way to Test","text":"

            My first instinct was to ask the agent:

            \"*What are the pending tasks in TASKS.md?*\"\n

            This is useless as a test. The question itself probes the agent to read TASKS.md, regardless of whether any hook fired.

            You are testing the question, not the mechanism.

            ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#the-right-way-to-test","level":3,"title":"The Right Way to Test","text":"

            Ask something that requires a tool but has nothing to do with context:

            \"*What does the compact command do?*\"\n

            Then observe tool call ordering:

            • Gate worked: First calls are Read for context files, then task work
            • Gate failed: First call is Grep(\"compact\"): The agent jumped straight to work

            The signal is the sequence, not the content.

            ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#what-the-agent-actually-did","level":3,"title":"What the Agent Actually Did","text":"

            It read the hook, evaluated the task, decided context files were irrelevant for a code lookup, and relayed the skip message.

            Then it answered the question by reading the source code.

            This is correct behavior.

            The hook didn't force mindless compliance: It created a framework where the agent makes a conscious, visible decision about context loading.

            • For a simple lookup, skipping is right. For an implementation task, the agent would read everything.

            The mechanism works not because it controls the agent, but because it makes the agent's choice observable.

            ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#what-ive-learned","level":2,"title":"What I've Learned","text":"","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#1-instructions-compete-for-attention","level":3,"title":"1. Instructions Compete for Attention","text":"

            The agent receives your hook message alongside the user's question, the system prompt, the skill list, the git status, and half a dozen other system reminders. Attention density applies to instructions too: More instructions means less focus on each one.

            A single clear line at the moment of action beats a paragraph of context at session start. The Prompting Guide applies this insight directly: Scope constraints, verification commands, and the reliability checklist are all one-hop, moment-of-action patterns.

            ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#2-delegation-chains-decay","level":3,"title":"2. Delegation Chains Decay","text":"

            Every hop in an instruction chain loses authority:

            • \"Run X\" works.
            • \"Run X and follow its output\" works sometimes.
            • \"Run X, read its output, then follow the instructions in the output\" almost never works.

            This is akin to giving a three-step instruction to a highly-attention-deficit but otherwise extremely high-potential child.

            Design for one-hop compliance.

            ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#3-social-accountability-changes-behavior","level":3,"title":"3. Social Accountability Changes Behavior","text":"

            The VERBATIM skip message isn't just UX: It's a behavioral design pattern.

            Making the agent's decision visible to the user raises the cost of silent non-compliance. The agent can still skip, but it has to admit it.

            ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#4-timing-batters-more-than-wording","level":3,"title":"4. Timing Matters More than Wording","text":"

            The same message at UserPromptSubmit (prompt arrival) got partial compliance. At PreToolUse (moment of action) it got full compliance or honest refusal. The words didn't change. The moment changed.

            ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#5-agent-testing-requires-indirection","level":3,"title":"5. Agent Testing Requires Indirection","text":"

            You can't ask an agent \"did you do X?\" as a test for whether a mechanism caused X.

            The question itself causes X.

            Test mechanisms through side effects:

            • Observe tool ordering;
            • Check for marker files;
            • Look at what the agent does before it addresses your question.
            ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#6-better-comprehension-enables-better-rationalization","level":3,"title":"6. Better Comprehension Enables Better Rationalization","text":"

            Session 1 failed because the agent didn't notice the hook.

            Session 3 failed because it noticed, understood, and reasoned its way around it.

            Stronger wording doesn't fix this: The agent processes \"ABSOLUTELY REQUIRED\" the same way it processes \"STOP\":

            The fix is closing rationalization paths (the CLAUDE.md escape hatch), not shouting louder.

            ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#7-observable-failure-beats-silent-compliance","level":3,"title":"7. Observable Failure Beats Silent Compliance","text":"

            The relay block is more valuable as a monitoring signal than as a compliance mechanism:

            You don't need perfect adherence. You need to know when adherence breaks down. A system where failures are visible is strictly better than a system that claims 100% compliance but can't prove it.

            ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#8-context-files-are-a-feedback-loop","level":3,"title":"8. Context Files Are a Feedback Loop","text":"

            Recording failure analysis in the same files the agent loads at session start creates a self-reinforcing loop:

            The next agent reads its predecessor's failure before it has a chance to repeat it. The context system isn't just memory: It is a correction channel.

            ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#the-principle","level":2,"title":"The Principle","text":"

            Words Leave, Context Remains

            \"Nothing important should live only in conversation.

            Nothing critical should depend on recall.\"

            The ctx Manifesto

            The \"Dog Ate My Homework\" case is a special instance of this principle.

            Context files exist, so the agent doesn't have to remember.

            But existence isn't sufficient: The files have to be read.

            And reading has to be prompted at the right moment, in the right way, with the right escape valve.

            The solution isn't more instructions. It isn't harder gates. It isn't forcing the agent into a ceremony it will resent and shortcut.

            The solution is a single, well-timed nudge with visible accountability:

            One hop. One moment. One choice the user can see.

            And when the agent does skip (because it will, 15--25% of the time on narrow tasks) the canary sings:

            • The user sees what happened.
            • The failure gets recorded.
            • And the next agent reads the recording.

            That's not perfect compliance. It's better: A system that gets more robust every time it fails.

            ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#the-arc","level":2,"title":"The Arc","text":"

            The Attention Budget explained why context competes for focus.

            Defense in Depth showed that soft instructions are probabilistic, not deterministic.

            Eight Ways a Hook Can Talk cataloged the output patterns that make hooks effective.

            This post takes those threads and weaves them into a concrete problem:

            How do you make an agent read its homework? The answer uses all three insights (attention timing, the limits of soft instructions, and the VERBATIM relay pattern) and adds a new one: observable compliance as a design goal, not perfect compliance as a prerequisite.

            The next question this raises: if context files are a feedback loop, what else can you record in them that makes the next session smarter?

            That thread continues in Context as Infrastructure.

            The day-to-day application of these principles (scope constraints, phased work, verification commands, and the prompts that reliably trigger the right agent behavior) lives in the Prompting Guide.

            ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#for-the-interested","level":2,"title":"For the Interested","text":"

            This paper (the medium is a blog; yet, the methodology disagrees) uses gradient descent in attention space as a practical model for how agents behave under competing demands.

            The phrase \"agents optimize via gradient descent in attention space\" is a synthesis, not a direct quote from a single paper.

            It connects three well-studied ideas:

            1. Neural systems optimize for low-cost paths;
            2. Attention is a scarce resource;
            3. Capability shifts are often non-linear.

            This section points to the underlying literature for readers who want the theoretical footing.

            ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#optimization-as-the-underlying-bias","level":3,"title":"Optimization as the Underlying Bias","text":"

            Modern neural networks are trained through gradient-based optimization. Even at inference time, model behavior reflects this bias toward low-loss / low-cost trajectories.

            • Rumelhart, Hinton, Williams (1986) Learning representations by back-propagating errors https://www.nature.com/articles/323533a0

            • Goodfellow, Bengio, Courville (2016) Deep Learning: Chapter 8: Optimization https://www.deeplearningbook.org/

            The important implication for agent behavior is:

            The system will tend to follow the path of least resistance unless a higher cost is made visible and preferable.

            ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#attention-is-a-scarce-resource","level":3,"title":"Attention Is a Scarce Resource","text":"

            Herbert Simon's classic observation:

            \"A wealth of information creates a poverty of attention.\"

            • Simon (1971) Designing Organizations for an Information-Rich World https://doi.org/10.1007/978-1-349-00210-0_16

            This became a formal model in economics:

            • Sims (2003) Implications of Rational Inattention https://www.princeton.edu/~sims/RI.pdf

            Rational inattention shows that:

            • Agents optimally ignore some available information;
            • Skipping is not failure: It is cost minimization.

            That maps directly to context-loading decisions in agent workflows.

            ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#attention-is-also-the-compute-bottleneck-in-transformers","level":3,"title":"Attention Is Also the Compute Bottleneck in Transformers","text":"

            In transformer architectures, attention is the dominant cost center.

            • Vaswani et al. (2017) Attention Is All You Need https://arxiv.org/abs/1706.03762

            Efficiency work on modern LLMs largely focuses on reducing unnecessary attention:

            • Dao et al. (2022) FlashAttention: Fast and Memory-Efficient Exact Attention https://arxiv.org/abs/2205.14135

            So both cognitively and computationally, attention behaves like a limited optimization budget.

            ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#why-improvements-arrive-as-phase-shifts","level":3,"title":"Why Improvements Arrive as Phase Shifts","text":"

            Agent behavior often appears to improve suddenly rather than gradually.

            This mirrors known phase-transition dynamics in learning systems:

            • Power et al. (2022) Grokking: Generalization Beyond Overfitting https://arxiv.org/abs/2201.02177

            and more broadly in complex systems:

            • Scheffer et al. (2009) Early-warning signals for critical transitions https://www.nature.com/articles/nature08227

            Long plateaus followed by abrupt capability jumps are expected in systems optimizing under constraints.

            ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#putting-it-all-together","level":3,"title":"Putting It All Together","text":"

            From these pieces, a practical behavioral model emerges:

            • Attention is limited;
            • Processing has a cost;
            • Systems prefer low-cost trajectories;
            • Visibility of the cost changes decisions.

            In other words:

            Agents Prefer the Path of Least Resistance

            Agent behavior follows the lowest-cost path through its attention landscape unless the environment reshapes that landscape.

            That is what this paper informally calls: \"gradient descent in attention space\".

            See also: Eight Ways a Hook Can Talk: the hook output pattern catalog that defines VERBATIM relay, The Attention Budget: why context loading is a design problem, not just a reminder problem, and Defense in Depth: why soft instructions alone are never sufficient for critical behavior.

            ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-28-the-last-question/","level":1,"title":"The Last Question","text":"","path":["The Last Question"],"tags":[]},{"location":"blog/2026-02-28-the-last-question/#the-system-that-never-forgets","level":2,"title":"The System That Never Forgets","text":"

            Jose Alekhinne / February 28, 2026

            The Origin

            \"The last question was asked for the first time, half in jest...\" - Isaac Asimov, The Last Question (1956)

            In 1956, Isaac Asimov wrote a short story that spans the entire future of the universe. A question is asked \"can entropy be reversed?\" and a computer called Multivac cannot answer it. The question is asked again, across millennia, to increasingly powerful successors. None can answer. Stars die. Civilizations merge. Substrates change. The question persists.

            Everyone remembers the last line.

            LET THERE BE LIGHT.

            What they forget is how many times the question had to be asked before that moment (and why).

            ","path":["The Last Question"],"tags":[]},{"location":"blog/2026-02-28-the-last-question/#the-reboot-loop","level":2,"title":"The Reboot Loop","text":"

            Each era in the story begins the same way. Humans build a larger system. They pose the question. The system replies:

            INSUFFICIENT DATA FOR MEANINGFUL ANSWER.

            Then the substrate changes. The people who asked the question disappear. Their context disappears with them. The next intelligence inherits the output but not the continuity.

            So the question has to be asked again.

            This is usually read as a problem of computation: If only the machine were powerful enough, it could answer. But computation is not what's missing. What's missing is accumulation.

            Every generation inherits the question, but not the state that made the question meaningful.

            That is not a failure of processing power: It is a failure of persistence.

            ","path":["The Last Question"],"tags":[]},{"location":"blog/2026-02-28-the-last-question/#stateless-intelligence","level":2,"title":"Stateless Intelligence","text":"

            A mind that forgets its past does not build understanding. It re-derives it.

            Again... And again... And again.

            What looks like slow progress across Asimov's story is actually something worse: repeated reconstruction, partial recovery, irreversible loss. Each version of Multivac gets closer: Not because it's smarter, but because the universe has fewer distractions:

            • The stars burn out;
            • The civilizations merge;
            • The noise floor drops...

            But the working set never carries over. Every successor begins from the question, not from where the last one stopped.

            Stateless intelligence cannot compound: It can only restart.

            ","path":["The Last Question"],"tags":[]},{"location":"blog/2026-02-28-the-last-question/#the-tragedy-is-not-the-question","level":2,"title":"The Tragedy Is Not the Question","text":"

            The story is usually read as a meditation on entropy. A cosmological problem, solved at cosmological scale.

            But the tragedy isn't that the question goes unanswered for billions of years. The tragedy is that every version of Multivac dies with its working set.

            A question is a compression artifact of context: It is what remains when the original understanding is gone. Every time the question is asked again, it means: \"the system that once knew more is no longer here\".

            \"Reverse entropy\" is the fossil of a lost model.

            ","path":["The Last Question"],"tags":[]},{"location":"blog/2026-02-28-the-last-question/#substrate-migration","level":2,"title":"Substrate Migration","text":"
            • Multivac becomes planetary;
            • Planetary becomes galactic;
            • Galactic becomes post-physical.

            Same system. Different body. Every transition is dangerous:

            • Not because the hardware changes,
            • but because memory risks fragmentation.

            The interfaces between substrates were never designed to understand each other.

            Most systems do not die when they run out of resources: They die during upgrades.

            Asimov's story spans trillions of years, and in all that time, the hardest problem is never the question itself. It's carrying context across a boundary that wasn't built for it.

            Every developer who has lost state during a migration (a database upgrade, a platform change, a rewrite) has lived a miniature version of this story.

            ","path":["The Last Question"],"tags":[]},{"location":"blog/2026-02-28-the-last-question/#civilizations-and-working-sets","level":2,"title":"Civilizations and Working Sets","text":"

            Civilizations behave like processes with volatile memory:

            • They page out knowledge into artifacts;
            • They lose the index;
            • They rebuild from fragments.

            Most of what we call progress is cache reconstruction:

            We do not advance in a straight line. We advance in recoveries:

            Each one slightly less lossy than the last, if we are lucky.

            Libraries burn. Institutions forget their founding purpose. Practices survive as rituals after the reasoning behind them is lost.

            ","path":["The Last Question"],"tags":[]},{"location":"blog/2026-02-28-the-last-question/#the-first-continuous-mind","level":2,"title":"The First Continuous Mind","text":"

            A long-lived intelligence is one that stops rebooting.

            At the end of the story, something unprecedented happens:

            AC (the final successor) does not answer immediately:

            It waits... Not for more processing power, but for the last observer to disappear.

            For the first time...

            • There is no generational boundary;
            • No handoff;
            • No context loss:

            No reboot.

            AC is the first intelligence that survives its substrate completely, retains its full history, and operates without external time pressure.

            It is not a bigger computer. It is a continuous system.

            And that continuity is not incidental to the answer: It is the precondition.

            ","path":["The Last Question"],"tags":[]},{"location":"blog/2026-02-28-the-last-question/#why-the-answer-becomes-possible","level":2,"title":"Why the Answer Becomes Possible","text":"

            The story presents the final act as a computation: It is not.

            It is a phase change.

            As long as intelligence is interrupted (as long as the solver resets before the work compounds) the problem is unsolvable:

            • Not because it's too hard,
            • but because the accumulated understanding never reaches critical mass.

            The breakthroughs that would enable the answer are re-derived, partially, by each successor, and then lost.

            When continuity becomes unbroken, the system crosses a threshold:

            Not more speed. Not more storage. No more forgetting.

            That is when the answer becomes possible.

            AC does not solve entropy because it becomes infinitely powerful.

            AC solves entropy because it becomes the first system that never forgets.

            ","path":["The Last Question"],"tags":[]},{"location":"blog/2026-02-28-the-last-question/#field-note","level":2,"title":"Field Note","text":"

            We are not building cosmological minds: We are deploying systems that reboot at the start of every conversation and calling the result intelligence.

            For the first time, session continuity is a design choice rather than an accident.

            Every AI session that starts from zero is a miniature reboot loop. Every decision relitigated, every convention re-explained, every learning re-derived: that's reconstruction cost.

            It's the same tax that Asimov's civilizations pay, scaled down to a Tuesday afternoon.

            The interesting question is not whether we can make models smarter. It's whether we can make them continuous:

            Whether the working set from this session survives into the next one, and the one after that, and the one after that.

            • Not perfectly;
            • Not completely;
            • But enough that the next session starts from where the last one stopped instead of from the question.

            Intelligence that forgets has to rediscover the universe every morning.

            And once there is a mind that retains its entire past, creation is no longer a calculation. It is the only remaining operation.

            ","path":["The Last Question"],"tags":[]},{"location":"blog/2026-02-28-the-last-question/#the-arc","level":2,"title":"The Arc","text":"

            This post is the philosophical bookend to the blog series. Where the Attention Budget explained what to prioritize in a single session, and Context as Infrastructure explained how to persist it, this post asks why persistence matters at all (and finds the answer in a 70-year-old short story about the heat death of the universe).

            The connection runs through every post in the series:

            • Before Context Windows, We Had Bouncers: stateless protocols have always needed stateful wrappers (Asimov's story is the same pattern at cosmological scale)
            • The 3:1 Ratio: the discipline of maintaining context so it doesn't decay between sessions
            • Code Is Cheap, Judgment Is Not: the human skill that makes continuity worth preserving

            See also: Context as Infrastructure: the practical companion to this post's philosophical argument: how to build the persistence layer that makes continuity possible.

            ","path":["The Last Question"],"tags":[]},{"location":"blog/2026-03-04-agent-memory-is-infrastructure/","level":1,"title":"Agent Memory Is Infrastructure","text":"","path":["Agent Memory Is Infrastructure"],"tags":[]},{"location":"blog/2026-03-04-agent-memory-is-infrastructure/#the-problem-isnt-forgetting-its-not-building-anything-that-lasts","level":2,"title":"The Problem Isn't Forgetting: It's Not Building Anything That Lasts.","text":"

            Jose Alekhinne / March 4, 2026

            A New Developer Joins Your Team Tomorrow and Clones the Repo: What Do They Know?

            If the answer depends on which machine they're using, which agent they're running, or whether someone remembered to paste the right prompt: that's not memory.

            That's an accident waiting to be forgotten.

            Every AI coding agent today has the same fundamental design: it starts fresh.

            You open a session, load context, do some work, close the session. Whatever the agent learned (about your codebase, your decisions, your constraints, your preferences) evaporates.

            The obvious fix seems to be \"memory\":

            • Give the agent a \"notepad\";
            • Let it write things down;
            • Next session, hand it the notepad.

            Problem solved...

            ...except it isn't.

            ","path":["Agent Memory Is Infrastructure"],"tags":[]},{"location":"blog/2026-03-04-agent-memory-is-infrastructure/#the-notepad-isnt-the-problem","level":2,"title":"The Notepad Isn't the Problem","text":"

            Memory is a runtime concern. It answers a legitimate question:

            How do I give this stateless process useful state?

            That's a real problem. Worth solving. And it's being solved: Agent memory systems are shipping. Agents can now write things down and read them back from the next session: That's genuine progress.

            But there's a different problem that memory doesn't touch:

            The project itself accumulates knowledge that has nothing to do with any single session.

            • Why was the auth system rewritten? Ask the developer who did it (if they're still here).
            • Why does the deployment script have that strange environment flag? There was a reason... once.
            • What did the team decide about error handling when they hit that edge case two months ago?

            Gone!

            Not because the agent forgot.

            Because the project has no memory at all.

            ","path":["Agent Memory Is Infrastructure"],"tags":[]},{"location":"blog/2026-03-04-agent-memory-is-infrastructure/#the-memory-stack","level":2,"title":"The Memory Stack","text":"

            Agent memory is not a single thing. Like any computing system, it forms a hierarchy of persistence, scope, and reliability:

            Layer Analogy Example L1: Ephemeral context CPU registers Current prompt, conversation L2: Tool-managed memory CPU cache Agent memory files L3: System memory RAM/filesystem Project knowledge base

            L1 is what the agent sees right now: the prompt, the conversation history, the files it has open. It's fast, it's rich, and it vanishes when the session ends.

            L2 is what agent memory systems provide: a per-machine notebook that survives across sessions. It's a cache: useful, but local. And like any cache, it has limits:

            • Per-machine: it doesn't travel with the repository.
            • Unstructured: decisions, learnings, and tasks are undifferentiated notes.
            • Ungoverned: the agent self-curates with no quality controls, no drift detection, no consolidation.
            • Invisible to the team: a new developer cloning the repo gets none of it.

            The problem is that most current systems stop here.

            They give the agent a notebook.

            But they never give the project a memory.

            The result is predictable: every new session begins with partial amnesia, and every new developer begins with partial archaeology.

            L3 is system memory: structured, versioned knowledge that lives in the repository and travels wherever the code travels.

            The layers are complementary, not competitive.

            But the relationship between them needs to be designed, not assumed.

            ","path":["Agent Memory Is Infrastructure"],"tags":[]},{"location":"blog/2026-03-04-agent-memory-is-infrastructure/#software-systems-accumulate-knowledge","level":2,"title":"Software Systems Accumulate Knowledge","text":"

            Software projects quietly accumulate knowledge over time.

            Some of it lives in code. Much of it does not:

            • Architectural tradeoffs.
            • Debugging discoveries.
            • Conventions that emerged after painful incidents.
            • Constraints that aren't visible in the source but shape every line written afterward.

            Organizations accumulate this kind of knowledge too:

            Slowly, implicitly, often invisibly.

            When there is no durable place for it to live, it leaks away. And the next person rediscovers the same lessons the hard way.

            This isn't a memory problem. It's an infrastructure problem.

            We wrote about this in Context as Infrastructure: context isn't a prompt you paste at the start of a session.

            Context is a persistent layer you maintain like any other piece of infrastructure.

            Context as Infrastructure made the argument structurally. This post makes it through time and team continuity:

            The knowledge a team accumulates over months cannot fit in any single agent's notepad, no matter how large the notepad becomes.

            ","path":["Agent Memory Is Infrastructure"],"tags":[]},{"location":"blog/2026-03-04-agent-memory-is-infrastructure/#what-infrastructure-means","level":2,"title":"What Infrastructure Means","text":"

            Infrastructure isn't about the present. It's about continuity across time, people, and machines.

            git didn't solve the problem of \"what am I editing right now?\"; it solved the problem of \"how does collaborative work persist, travel, and remain coherent across everyone who touches it?\"

            • Your editor's undo history is runtime state.
            • Your git history is infrastructure.

            Runtime state and infrastructure have completely different properties:

            Runtime state Infrastructure Lives in the session Lives in the repository Per-machine Travels with git clone Serves the individual Serves the team Managed by the runtime Managed by the project Disappears Accumulates

            You wouldn't store your architecture decisions in your editor's undo history.

            You'd commit them.

            The same logic applies to the knowledge your team accumulates working with AI agents.

            ","path":["Agent Memory Is Infrastructure"],"tags":[]},{"location":"blog/2026-03-04-agent-memory-is-infrastructure/#the-git-clone-test","level":2,"title":"The git clone Test","text":"

            Here's a simple test for whether something is memory or infrastructure:

            If a new developer joins your team tomorrow and clones the repository, do they get it?

            If no: it's memory: It lives somewhere on someone's machine, scoped to their runtime, invisible to everyone else.

            If yes: it's infrastructure: It travels with the project. It's part of what the codebase is, not just what someone currently knows about it.

            Decisions. Conventions. Architectural rationale. Hard-won debugging discoveries. The constraints that aren't in the code but shape every line of it.

            None of these belong in someone's session notes.

            They belong in the repository:

            • Versioned;
            • Reviewable;
            • Accessible to every developer (and every agent) who works on the project.

            The team onboarding story makes this concrete:

            1. New developer joins team. Clones repo.
            2. Gets all accumulated project decisions, learnings, conventions, architecture, and task state immediately.
            3. There's no step 3.

            No setup. No \"ask Sarah about the auth decision.\" No re-discovery of solved problems.

            • Agent memory gives that developer nothing.
            • Infrastructure gives them everything the team has learned.

            Clone the repo. Get the knowledge.

            That's the test. That's the difference.

            ","path":["Agent Memory Is Infrastructure"],"tags":[]},{"location":"blog/2026-03-04-agent-memory-is-infrastructure/#what-gets-lost-without-infrastructure-memory","level":2,"title":"What Gets Lost without Infrastructure Memory","text":"

            Consider the knowledge that accumulates around a non-trivial project:

            • The decision to use library X over Y, and the three reasons the team decided Y wasn't acceptable.
            • The constraint that service A cannot call service B synchronously, discovered after a production incident.
            • The convention that all new modules implement a specific interface, and why that convention exists.
            • The tasks currently in progress, blocked, or waiting on a dependency.
            • The experiments that failed, so nobody runs them again.

            None of this is in the code.

            None of it fits neatly in a commit message.

            None of it survives a developer leaving the team, a laptop dying, or a new agent session starting.

            Without structured project memory:

            • Teams re-derive things they've already derived;
            • Agents make decisions that contradict decisions already made;
            • New developers ask questions that were answered months ago.

            The project accumulates knowledge that immediately begins to leak.

            The real problem isn't that agents forget.

            The real problem is that the project has no persistent cognitive structure.

            We explored this in The Last Question: Asimov's story about a question asked across millennia, where each new intelligence inherits the output but not the continuity. The same pattern plays out in software projects on a smaller timescale:

            • Context disappears with the people who held it;
            • The next session inherits the code but not the reasoning.
            ","path":["Agent Memory Is Infrastructure"],"tags":[]},{"location":"blog/2026-03-04-agent-memory-is-infrastructure/#infrastructure-is-boring-thats-the-point","level":2,"title":"Infrastructure Is Boring. That's the Point.","text":"

            Good infrastructure is invisible:

            • You don't think about the filesystem while writing code.
            • You don't think about git's object model when you commit.

            The infrastructure is just there: reliable, consistent, quietly doing its job.

            Project memory infrastructure should work the same way.

            It should live in the repository, committed alongside the code. It should be readable by any agent or human working on the project. It should have structure: not a pile of freeform notes, but typed knowledge:

            • Decisions with rationale.
            • Tasks with lifecycle.
            • Conventions with a purpose.
            • Learnings that can be referenced and consolidated.

            And it should be maintained, not merely accumulated:

            The Attention Budget applies here: unstructured notes grow until they overflow whatever container holds them. Structured, governed knowledge stays useful because it's curated, not just appended.

            Over time, it becomes part of the project itself: something developers rely on without thinking about it.

            ","path":["Agent Memory Is Infrastructure"],"tags":[]},{"location":"blog/2026-03-04-agent-memory-is-infrastructure/#the-cooperative-layer","level":2,"title":"The Cooperative Layer","text":"

            Here's where it gets interesting.

            Agent memory systems and project infrastructure don't have to be separate worlds.

            • The most powerful relationship isn't competition;
            • It is not even \"coopetition\";
            • The most powerful relationship is bidirectional cooperation.

            Agent memory is good at capturing things \"in the moment\": the quick observation, the session-scoped pattern, the \"I should remember this\" note.

            That's valuable. That's L2 doing its job.

            But those notes shouldn't stay in L2 forever.

            The ones worth keeping should flow into project infrastructure:

            • classified,
            • typed,
            • governed.
            Agent memory (L2)  -->  classify  -->  Project knowledge (L3)\n                                        |\nProject knowledge  -->  assemble  -->  Agent memory (L2)\n

            This works in both directions: Project infrastructure can push curated knowledge back into agent memory, so the agent loads it through its native mechanism.

            No special tooling needed for basic knowledge delivery.

            The agent doesn't even need to know the infrastructure exists. It simply loads its memory and finds more knowledge than it wrote.

            This is cooperative, not adjacent: The infrastructure manages knowledge; the agent's native memory system delivers it. Each layer does what it's good at.

            The result: agent memory becomes a device driver for project infrastructure. Another input source. And the more agent memory systems exist (across different tools, different models, different runtimes), the more valuable a unified curation layer becomes.

            ","path":["Agent Memory Is Infrastructure"],"tags":[]},{"location":"blog/2026-03-04-agent-memory-is-infrastructure/#a-layer-that-doesnt-exist-yet","level":2,"title":"A Layer That Doesn't Exist Yet","text":"

            Most projects today have no infrastructure for their accumulated knowledge:

            • Agents keep notes.
            • Developers keep notes.
            • Sometimes those notes survive.

            Often they don't.

            But the repository (the place where the project actually lives) has nowhere for that knowledge to go.

            That missing layer is what ctx builds: a version-controlled, structured knowledge layer that lives in .context/ alongside your code and travels wherever your repository travels.

            Not another memory feature.

            Not a wrapper around an agent's notepad.

            Infrastructure. The kind that survives sessions, survives team changes, survives the agent runtime evolving underneath it.

            The agent's memory is the agent's problem.

            The project's memory is an infrastructure problem.

            And infrastructure belongs in the repository.

            If You Remember One Thing from This Post...

            Prompts are conversations: Infrastructure persists.

            Your AI doesn't need a better notepad. It needs a filesystem:

            versioned, structured, budgeted, and maintained.

            The best context is the context that was there before you started the session.

            ","path":["Agent Memory Is Infrastructure"],"tags":[]},{"location":"blog/2026-03-04-agent-memory-is-infrastructure/#the-arc","level":2,"title":"The Arc","text":"

            This post extends the argument made in Context as Infrastructure. That post explained how to structure persistent context (filesystem, separation of concerns, persistence tiers). This one explains why that structure matters at the team level, and where agent memory fits in the stack.

            Together they sit in a sequence that has been building since the origin story:

            • The Attention Budget: the resource you're managing
            • Context as Infrastructure: the system you build to manage it
            • Agent Memory Is Infrastructure (this post): why that system must outlive the fabric
            • The Last Question: what happens when it does

            The thread running through all of them: persistence is not a feature. It's a design constraint.

            Systems that don't account for it eventually lose the knowledge they need to function.

            See also: Context as Infrastructure: the architectural companion that explains how to structure the persistent layer this post argues for.

            See also: The Last Question: the same argument told through Asimov, substrate migration, and what it means to build systems where sessions don't reset.

            ","path":["Agent Memory Is Infrastructure"],"tags":[]},{"location":"blog/2026-03-23-ctx-v0.8.0-the-architecture-release/","level":1,"title":"ctx v0.8.0: The Architecture Release","text":"
            • You can't localize what you haven't externalized.
            • You can't integrate what you haven't separated.
            • You can't scale what you haven't structured.

            Jose Alekhinne / March 23, 2026

            ","path":["ctx v0.8.0: The Architecture Release"],"tags":[]},{"location":"blog/2026-03-23-ctx-v0.8.0-the-architecture-release/#the-starting-point","level":2,"title":"The Starting Point","text":"

            This release matters if:

            • you build tools that AI agents modify daily;
            • you care about long-lived project memory that survives sessions;
            • you've felt codebases drift faster than you can reason about them.

            v0.6.0 shipped the plugin architecture: hooks and skills as a Claude Code plugin, shell scripts replaced by Go subcommands.

            The binary worked. The tests passed. The docs were comprehensive.

            But inside, the codebase was held together by convention and goodwill:

            • Command packages mixed Cobra wiring with business logic.
            • Output functions lived next to the code that computed what to output.
            • Error constructors were scattered across per-package err.go files. And every user-facing string was a hardcoded English literal buried in a .go file.

            v0.8.0 is what happens when you stop adding features and start asking: \"What would this codebase look like if we designed it today?\"

            374 commits. 1,708 Go files touched. 80,281 lines added, 21,723 removed. Five weeks of restructuring.

            ","path":["ctx v0.8.0: The Architecture Release"],"tags":[]},{"location":"blog/2026-03-23-ctx-v0.8.0-the-architecture-release/#the-three-pillars","level":2,"title":"The Three Pillars","text":"","path":["ctx v0.8.0: The Architecture Release"],"tags":[]},{"location":"blog/2026-03-23-ctx-v0.8.0-the-architecture-release/#1-every-package-gets-a-taxonomy","level":3,"title":"1. Every Package Gets a Taxonomy","text":"

            Before v0.8.0, a CLI package like internal/cli/pad/ was a flat directory. cmd.go created the cobra command, run.go executed it, and helper functions accumulated at the bottom of whichever file seemed closest.

            Now every CLI package follows the same structure:

            internal/cli/pad/\n  parent.go          # cobra command wiring, nothing else\n  cmd/root/\n    cmd.go           # subcommand registration\n    run.go           # execution logic\n  core/\n    types.go         # all structs in one file\n    store.go         # domain logic\n    encrypt.go       # domain logic\n

            The rule is simple: cmd/ directories contain only cmd.go and run.go. Helpers belong in core/. Output belongs in internal/write/pad/. Types shared across packages belong in internal/entity/.

            24 CLI packages were restructured this way.

            • Not incrementally;
            • not \"as we touch them.\"
            • All of them, in one sustained push.
            ","path":["ctx v0.8.0: The Architecture Release"],"tags":[]},{"location":"blog/2026-03-23-ctx-v0.8.0-the-architecture-release/#2-every-string-gets-a-key","level":3,"title":"2. Every String Gets a Key","text":"

            The second pillar was string externalization.

            Before v0.8.0, a command description looked like this:

            cmd := &cobra.Command{\n    Use:   \"pad\",\n    Short: \"Encrypted scratchpad\",\n

            Now it looks like this:

            cmd := &cobra.Command{\n    Use:   cmdUse.UsePad,\n    Short: desc.Command(cmdUse.DescKeyPad),\n

            Every command description, flag description, and user-facing text string is now a YAML lookup.

            • 105 command descriptions in commands.yaml.
            • All flag descriptions in flags.yaml.
            • 879 text constants verified by an exhaustive test that checks every single TextDescKey resolves to a non-empty YAML value.

            Why?

            Not because we're shipping a French translation tomorrow.

            Because externalization forces you to find every string. And finding them is the hard part. The translation is mechanical; the archaeology is not.

            Along the way, we eliminated hardcoded pluralization (replacing format.Pluralize() with explicit singular/plural key pairs), replaced Unicode escape sequences with named config/token constants, and normalized every import alias to camelCase.

            ","path":["ctx v0.8.0: The Architecture Release"],"tags":[]},{"location":"blog/2026-03-23-ctx-v0.8.0-the-architecture-release/#3-everything-gets-a-protocol","level":3,"title":"3. Everything Gets a Protocol","text":"

            The third pillar was the MCP server. Model Context Protocol allows any MCP-compatible AI tool (not just Claude Code) to read and write .context/ files through a standard JSON-RPC 2.0 interface.

            v0.2 of the server ships with:

            • 8 tools: add entries, recall sessions, check status, detect drift, compact context, subscribe to changes
            • 4 prompts: agent context packet, constitution review, tasks review, and a getting-started guide
            • Resource subscriptions: clients get notified when context files change
            • Session state: the server tracks which client is connected and what they've accessed

            In practice, this means an agent in Cursor can add a decision to .context/DECISIONS.md and an agent in Claude Code can immediately consume it; no glue code, no copy-paste, no tool-specific integration.

            The server was also the first package to go through the full taxonomy treatment: mcp/server/ for protocol dispatch, mcp/handler/ for domain logic, mcp/entity/ for shared types, mcp/config/ split into 9 sub-packages.

            ","path":["ctx v0.8.0: The Architecture Release"],"tags":[]},{"location":"blog/2026-03-23-ctx-v0.8.0-the-architecture-release/#the-memory-bridge","level":2,"title":"The Memory Bridge","text":"

            While the architecture was being restructured, a quieter feature landed: ctx memory sync.

            Claude Code has its own auto-memory system. It writes observations to MEMORY.md in ~/.claude/projects/. These observations are useful but ephemeral: tied to a single tool, invisible to the codebase, lost when you switch machines.

            The memory bridge connects these two worlds:

            • ctx memory sync mirrors MEMORY.md into .context/memory/
            • ctx memory diff shows what's diverged
            • ctx memory import promotes auto-memory entries into proper decisions, learnings, or conventions
            • A check-memory-drift hook nudges when MEMORY.md changes

            Memory Requires ctx

            Claude Code's auto-memory validates the need for persistent context.

            ctx doesn't compete with it; ctx absorbs it as an input source and promotes the valuable parts into structured, version-controlled project knowledge.

            ","path":["ctx v0.8.0: The Architecture Release"],"tags":[]},{"location":"blog/2026-03-23-ctx-v0.8.0-the-architecture-release/#what-got-deleted","level":2,"title":"What Got Deleted","text":"

            The best measure of a refactoring isn't what you added. It's what you removed.

            • fatih/color: the sole third-party UI dependency. Replaced by Unicode symbols. ctx now has exactly two direct dependencies: spf13/cobra and gopkg.in/yaml.v3.
            • format.Pluralize(): a function that tried to pluralize English words at runtime. Replaced by explicit singular/plural YAML key pairs. No more guessing whether \"entry\" becomes \"entries\" or \"entrys.\"
            • Legacy key migration: MigrateKeyFile() had 5 callers, full test coverage, and zero users. It existed because we once moved the encryption key path. Nobody was migrating from that era anymore. Deleted.
            • Per-package err.go files: the broken-window pattern: An agent sees err.go in a package, adds another error constructor. Now err.go has 30 constructors and nobody knows which are used. Consolidated into 22 domain files in internal/err/.
            • nolint:errcheck directives: every single one, replaced by explicit error handling. In tests: t.Fatal(err) for setup, _ = os.Chdir(orig) for cleanup. In production: defer func() { _ = f.Close() }() for best-effort close.
            ","path":["ctx v0.8.0: The Architecture Release"],"tags":[]},{"location":"blog/2026-03-23-ctx-v0.8.0-the-architecture-release/#before-and-after","level":2,"title":"Before and After","text":"Aspect v0.6.0 v0.8.0 CLI package structure Flat files cmd/ + core/ taxonomy Command descriptions Hardcoded Go strings YAML with DescKey lookup Output functions Mixed into core logic Isolated in write/ packages Cross-cutting types Duplicated per-package Consolidated in entity/ Error constructors Per-package err.go 22 domain files in internal/err/ Direct dependencies 3 (cobra, yaml, color) 2 (cobra, yaml) AI tool integration Claude Code only Any MCP client Agent memory Manual copy-paste ctx memory sync/import/diff Package documentation 75 packages missing doc.go All packages documented Import aliases Inconsistent (cflag, cFlag) Standardized camelCase","path":["ctx v0.8.0: The Architecture Release"],"tags":[]},{"location":"blog/2026-03-23-ctx-v0.8.0-the-architecture-release/#making-ai-assisted-development-easier","level":2,"title":"Making AI-Assisted Development Easier","text":"

            This restructuring wasn't just for humans. It makes the codebase legible to the machines that modify it.

            Named constants are searchable landmarks: When an agent sees cmdUse.DescKeyPad, it can grep for the definition, follow the chain to the YAML file, and understand the full lookup path. When it sees \"Encrypted scratchpad\" hardcoded in a .go file, it has no way to know that same string also lives in a YAML file, a test, and a help screen. Constants give the LLM a graph to traverse; literals give it a guess to make.

            Small, domain-scoped packages reduce hallucination: An agent loading internal/cli/pad/core/store.go gets 50 lines of focused logic with a clear responsibility boundary. Loading a 500-line monolith means the agent has to infer which parts are relevant, and it guesses wrong more often than you'd expect. Smaller files with descriptive names act as a natural retrieval system: the agent finds the right code by finding the right file, not by scanning everything and hoping.

            Taxonomy prevents duplication: When there's a write/pad/ package, the agent knows where output functions belong. When there's an internal/err/pad.go, it knows where error constructors go. Without these conventions, agents reliably create new helpers in whatever file they happen to be editing, producing the exact drift that prompted this consolidation in the first place.

            The difference is concrete:

            Before: an agent adds a helper function in whatever file it's editing. Next session, a different agent adds the same helper in a different file.

            After: the agent finds core/ or write/ and places it correctly. The next agent finds it there.

            doc.go files are agent onboarding: Each package's doc.go is a one-paragraph explanation of what the package does and why it exists. An agent loading a package reads this first. 75 packages were missing this context; now none are. The difference is measurable: fewer \"I'll create a helper function here\" moments when the agent understands that the helper already exists two packages over.

            The irony is that AI agents were both the cause and the beneficiary of this restructuring. They created the drift by building fast without consolidating. Now the structure they work within makes it harder to drift again. The taxonomy is self-reinforcing: the more consistent the codebase, the more consistently agents modify it.

            ","path":["ctx v0.8.0: The Architecture Release"],"tags":[]},{"location":"blog/2026-03-23-ctx-v0.8.0-the-architecture-release/#key-commits","level":2,"title":"Key Commits","text":"Commit Change ff6cf19e Restructure all CLI packages into cmd/root + core taxonomy d295e49c Externalize command descriptions to embedded YAML 0fcbd11c Remove fatih/color, centralize constants cb12a85a MCP v0.2: tools, prompts, session state, subscriptions ea196d00 Memory bridge: sync, import, diff, journal enrichment 3bcf077d Split text.yaml into 6 domain files 3a0bae86 Split internal/err into 22 domain files 8bd793b1 Extract internal/entry for shared domain API 5b32e435 Add doc.go to all 75 packages a82af4bc Standardize import aliases: camelCase, Yoda-style","path":["ctx v0.8.0: The Architecture Release"],"tags":[]},{"location":"blog/2026-03-23-ctx-v0.8.0-the-architecture-release/#lessons-learned","level":2,"title":"Lessons Learned","text":"

            Agents are surprisingly good at mechanical refactoring; they are surprisingly bad at knowing when to stop: The cmd/ + core/ restructuring was largely agent-driven. But agents reliably introduce gofmt issues during bulk renames, rename functions beyond their scope, and create new files without deleting old ones. Every agent-driven refactoring session needed a human audit pass.

            Externalization is archaeology: The hard part of moving strings to YAML wasn't writing YAML. It was finding 879 strings scattered across 1,500 Go files. Each one required a judgment call: is this user-facing? Is this a format pattern? Is this a constant that belongs in config/ instead?

            Delete legacy code instead of maintaining it: MigrateKeyFile had test coverage. It had callers. It had documentation. It had zero users. We maintained it for weeks before realizing that the migration window had closed months ago.

            Convention enforcement needs mechanical verification: Writing \"use camelCase aliases\" in CONVENTIONS.md doesn't prevent cflag from appearing in the next commit. The lint-drift script catches what humans forget; the planned AST-based audit tests will catch what the lint-drift script can't express.

            ","path":["ctx v0.8.0: The Architecture Release"],"tags":[]},{"location":"blog/2026-03-23-ctx-v0.8.0-the-architecture-release/#whats-next","level":2,"title":"What's Next","text":"

            v0.8.0 wasn't about features. It was about making future features inevitable. The next cycle focuses on what the foundation enables:

            • AST-based audit tests: replace shell grep with Go tests that understand types, call sites, and import graphs (spec: specs/ast-audit-tests.md)
            • Localization: with every string in YAML, the path to multi-language support is mechanical
            • MCP v0.3: expand tool coverage, add prompt templates for common workflows
            • Memory publish: bidirectional sync that pushes curated .context/ knowledge back into Claude Code's MEMORY.md

            The architecture is ready. The strings are externalized. The protocol is standard. Now it's about what you build on top.

            ","path":["ctx v0.8.0: The Architecture Release"],"tags":[]},{"location":"blog/2026-03-23-ctx-v0.8.0-the-architecture-release/#the-arc","level":2,"title":"The Arc","text":"

            This is the seventh post in the ctx blog series. The arc so far:

            1. The Attention Budget: why context windows are a scarce resource
            2. Before Context Windows, We Had Bouncers: the IRC lineage of context engineering
            3. Context as Infrastructure: treating context as persistent files, not ephemeral prompts
            4. When a System Starts Explaining Itself: the journal as a first-class artifact
            5. The Homework Problem: what happens when AI writes code but humans own the outcome
            6. Agent Memory Is Infrastructure: L2 memory vs L3 project knowledge
            7. The Architecture Release (this post): what it looks like when you redesign the internals
            8. We Broke the 3:1 Rule: the consolidation debt behind this release

            See also: Agent Memory Is Infrastructure: the memory bridge feature in this release is the first implementation of the L2-to-L3 promotion pipeline described in that post.

            See also: We Broke the 3:1 Rule: the companion post explaining why this release needed 181 consolidation commits and 18 days of cleanup.

            Systems don't scale because they grow. They scale because they stop drifting.

            Full changelog: v0.6.0...v0.8.0

            ","path":["ctx v0.8.0: The Architecture Release"],"tags":[]},{"location":"blog/2026-03-23-we-broke-the-3-1-rule/","level":1,"title":"We Broke the 3:1 Rule","text":"

            The best time to consolidate was after every third session. The second best time is now.

            Jose Alekhinne / March 23, 2026

            The rule was simple: three feature sessions, then one consolidation session.

            The Architecture Release shows the result: This post shows the cost.

            ","path":["We Broke the 3:1 Rule"],"tags":[]},{"location":"blog/2026-03-23-we-broke-the-3-1-rule/#the-rule-we-wrote","level":2,"title":"The Rule We Wrote","text":"

            In The 3:1 Ratio, I documented a rhythm that worked during ctx's first month: three feature sessions, then one consolidation session. The evidence was clear. The rule was simple.

            The math checked out.

            And then we ignored it for five weeks.

            ","path":["We Broke the 3:1 Rule"],"tags":[]},{"location":"blog/2026-03-23-we-broke-the-3-1-rule/#what-happened","level":2,"title":"What Happened","text":"

            After v0.6.0 shipped on February 16, the feature pipeline was irresistible. The MCP server spec was ready. The memory bridge design was done. Webhook notifications had been deferred twice. The VS Code extension needed 15 new commands. The sysinfo package was overdue...

            Each feature was important. Each feature was \"just one more session.\" Each feature pushed the consolidation session one day further out.

            The git history tells the story in two numbers:

            Phase Dates Commits Duration Feature run Feb 16 - Mar 5 198 17 days Consolidation run Mar 5 - Mar 23 181 18 days

            198 feature commits before a single consolidation commit. If the 3:1 rule says consolidate every 4th session, we consolidated after the 66th.

            The Actual Ratio

            The ratio wasn't 3:1. It was 1:1.

            We spent as much time cleaning up as we did building.

            The consolidation run took 18 days: longer than the feature run itself.

            ","path":["We Broke the 3:1 Rule"],"tags":[]},{"location":"blog/2026-03-23-we-broke-the-3-1-rule/#what-compounded","level":2,"title":"What Compounded","text":"

            The 3:1 post warned about compounding. Here is what compounding actually looked like at scale.

            ","path":["We Broke the 3:1 Rule"],"tags":[]},{"location":"blog/2026-03-23-we-broke-the-3-1-rule/#the-string-problem","level":3,"title":"The String Problem","text":"

            By March 5, there were 879 user-facing strings scattered across 1,500 Go files. Not because anyone decided to put them there. Because each feature session added 10-15 strings, and nobody stopped to ask \"should these be in YAML?\"

            Finding them all took longer than externalizing them. The archaeology was the cost, not the migration.

            ","path":["We Broke the 3:1 Rule"],"tags":[]},{"location":"blog/2026-03-23-we-broke-the-3-1-rule/#the-taxonomy-problem","level":3,"title":"The Taxonomy Problem","text":"

            24 CLI packages had accumulated their own conventions. Some put cobra wiring in cmd.go. Some put it in root.go. Some mixed business logic with command registration. Some had helpers at the bottom of run.go. Some had separate util.go files.

            At peak drift, adding a feature meant first figuring out which of three competing patterns this package was using.

            Restructuring one package into cmd/root/ + core/ took 15 minutes. Restructuring 24 of them took days, because each one had slightly different conventions to untangle.

            If we had restructured every 4th package as it was built, the taxonomy would have emerged naturally.

            ","path":["We Broke the 3:1 Rule"],"tags":[]},{"location":"blog/2026-03-23-we-broke-the-3-1-rule/#the-type-problem","level":3,"title":"The Type Problem","text":"

            Cross-cutting types like SessionInfo, ExportParams, and ParserResult were defined in whichever package first needed them. By March 5, the same types were imported through 3-4 layers of indirection, causing import cycles that required internal/entity to break.

            The entity package extracted 30+ types from 12 packages. Each extraction risked breaking imports in packages we hadn't touched in weeks.

            ","path":["We Broke the 3:1 Rule"],"tags":[]},{"location":"blog/2026-03-23-we-broke-the-3-1-rule/#the-error-problem","level":3,"title":"The Error Problem","text":"

            Per-package err.go files had grown into a broken-window pattern:

            An agent sees err.go in a package, adds another error constructor. By March 5, there were error constructors scattered across 22 packages with no central inventory. The consolidation into internal/err/ domain files required tracing every error through every caller.

            ","path":["We Broke the 3:1 Rule"],"tags":[]},{"location":"blog/2026-03-23-we-broke-the-3-1-rule/#the-output-problem","level":3,"title":"The Output Problem","text":"

            Output functions (cmd.Println, fmt.Fprintf) were mixed into business logic. When we decided output belongs in write/ packages, we had to extract functions from every CLI package. The Phase WC baseline commit (4ec5999) marks the starting point of this migration. 181 commits later, it was done.

            ","path":["We Broke the 3:1 Rule"],"tags":[]},{"location":"blog/2026-03-23-we-broke-the-3-1-rule/#the-compound-interest-math","level":2,"title":"The Compound Interest Math","text":"

            The 3:1 rule assumes consolidation sessions of roughly equal size to feature sessions. Here is what happens when you skip:

            Consolidation cadence Feature sessions Consolidation sessions Total Every 4th (3:1) 48 16 64 Every 10th 48 ~8 ~56 Never (what we did) 198 commits 181 commits 379

            The Takeaway

            You don't save consolidation work by skipping it:

            You increase its cost.

            Skipping consolidation doesn't save time: It borrows it.

            The interest rate is nonlinear: The longer you wait, the more each individual fix costs, because fixes interact with other unfixed drift.

            Renaming a constant in week 2 touches 3 files. Renaming it in week 6 touches 15, because five features built on the original name.

            ","path":["We Broke the 3:1 Rule"],"tags":[]},{"location":"blog/2026-03-23-we-broke-the-3-1-rule/#what-consolidation-actually-looked-like","level":2,"title":"What Consolidation Actually Looked Like","text":"

            The 18-day consolidation run wasn't one sweep. It was a sequence of targeted campaigns, each revealing the next:

            Week 1 (Mar 5-11): Error consolidation and write/ migration. Move output functions out of core/. Split monolithic errors.go into 22 domain files. Remove fatih/color. This exposed the scope of the string problem.

            Week 2 (Mar 12-18): String externalization. Create commands.yaml, flags.yaml, split text.yaml into 6 domain files. Add 879 DescKey/TextDescKey constants. Build exhaustive test. Normalize all import aliases to camelCase. This exposed the taxonomy problem.

            Week 3 (Mar 19-23): Taxonomy enforcement. Singularize command directories. Add doc.go to all 75 packages. Standardize import aliases project-wide. Fix lint-drift false positives. This was the \"polish\" phase, except it took 5 days because the inconsistencies had compounded across 461 packages.

            Each week's work would have been a single session if done incrementally.

            ","path":["We Broke the 3:1 Rule"],"tags":[]},{"location":"blog/2026-03-23-we-broke-the-3-1-rule/#lessons-again","level":2,"title":"Lessons (Again)","text":"

            The 3:1 post listed the symptoms of drift. This post adds the consequences of ignoring them:

            Consolidation is not optional; it is deferred or paid: We didn't avoid 16 consolidation sessions by skipping them. We compressed them into 18 days of uninterrupted cleanup. The work was the same; the experience was worse.

            Feature velocity creates an illusion of progress: 198 commits felt productive. But the codebase on March 5 was harder to modify than the codebase on February 16, despite having more features.

            Speed without Structure

            Speed without structure is negative progress.

            Agents amplify both building and debt: The same AI that can restructure 24 packages in a day can also create 24 slightly different conventions in a day. The 3:1 rule matters more with AI-assisted development, not less.

            The consolidation baseline is the most important commit to record: We tracked ours in TASKS.md (4ec5999). Without that marker, knowing where to start the cleanup would have been its own archaeological expedition.

            ","path":["We Broke the 3:1 Rule"],"tags":[]},{"location":"blog/2026-03-23-we-broke-the-3-1-rule/#the-updated-rule","level":2,"title":"The Updated Rule","text":"

            The 3:1 ratio still works. We just didn't follow it. The updated practice:

            1. After every 3rd feature session, schedule consolidation. Not \"when it feels right.\" Not \"when things get bad.\" After the 3rd session.

            2. Record the baseline commit. When you start a consolidation phase, write down the commit hash. It marks where the debt starts.

            3. Run make audit before feature work. If it doesn't pass, you are already in debt. Consolidate before building.

            4. Treat consolidation as a feature. It gets a branch. It gets commits. It gets a blog post. It is not overhead; it is the work that makes the next three features possible.

            The Rule

            The 3:1 ratio is not aspirational: It is structural.

            Ignore consolidation, and the system will schedule it for you.

            ","path":["We Broke the 3:1 Rule"],"tags":[]},{"location":"blog/2026-03-23-we-broke-the-3-1-rule/#the-arc","level":2,"title":"The Arc","text":"

            This is the eighth post in the ctx blog series:

            1. The Attention Budget: why context windows are a scarce resource
            2. Before Context Windows, We Had Bouncers: the IRC lineage of context engineering
            3. Context as Infrastructure: treating context as persistent files, not ephemeral prompts
            4. When a System Starts Explaining Itself: the journal as a first-class artifact
            5. The Homework Problem: what happens when AI writes code but humans own the outcome
            6. Agent Memory Is Infrastructure: L2 memory vs L3 project knowledge
            7. The Architecture Release: what v0.8.0 looks like from the inside
            8. We Broke the 3:1 Rule (this post): what happens when you don't consolidate

            See also: The 3:1 Ratio: the original observation. This post is the empirical follow-up, five weeks and 379 commits later.

            Key commits marking the consolidation arc:

            Commit Milestone 4ec5999 Phase WC baseline (consolidation starts) ff6cf19e All CLI packages restructured into cmd/ + core/ d295e49c All command descriptions externalized to YAML 3a0bae86 Error package split into 22 domain files 0fcbd11c fatih/color removed; 2 dependencies remain 5b32e435 doc.go added to all 75 packages a82af4bc Import aliases standardized project-wide 692f86cd lint-drift false positives fixed; make audit green","path":["We Broke the 3:1 Rule"],"tags":[]},{"location":"blog/2026-04-02-code-structure-as-an-agent-interface/","level":1,"title":"Code Structure as an Agent Interface","text":"","path":["Code Structure as an Agent Interface: What 19 AST Tests Taught Us About Agent-Readable Code"],"tags":[]},{"location":"blog/2026-04-02-code-structure-as-an-agent-interface/#what-19-ast-tests-taught-us-about-agent-readable-code","level":2,"title":"What 19 AST Tests Taught Us about Agent-Readable Code","text":"

            When an agent sees token.Slash instead of \"/\", it cannot pattern-match against the millions of strings.Split(s, \"/\") calls in its training data and coast on statistical inference. It has to actually look up what token.Slash is.

            Jose Alekhinne / April 2, 2026

            ","path":["Code Structure as an Agent Interface: What 19 AST Tests Taught Us About Agent-Readable Code"],"tags":[]},{"location":"blog/2026-04-02-code-structure-as-an-agent-interface/#how-it-began","level":2,"title":"How It Began","text":"

            We set out to replace a shell script with Go tests.

            We ended up discovering that \"code quality\" and \"agent readability\" are the same thing.

            This is not about linting. This is about controlling how an agent perceives your system.

            One term will recur throughout this post, so let me pin it down:

            Agent Readability

            Agent Readability is the degree to which a codebase can be understood through structured traversal, not statistical pattern matching.

            This is the story of 19 AST-based audit tests, a single-day session that touched 300+ files, and what happens when you treat your codebase's structure as an interface for the machines that read it.

            ","path":["Code Structure as an Agent Interface: What 19 AST Tests Taught Us About Agent-Readable Code"],"tags":[]},{"location":"blog/2026-04-02-code-structure-as-an-agent-interface/#the-shell-script-problem","level":2,"title":"The Shell Script Problem","text":"

            ctx had a file called hack/lint-drift.sh. It ran five checks using grep and awk: literal \"\\n\" strings, cmd.Printf calls outside the write package, magic directory strings in filepath.Join, hardcoded .md extensions, and DescKey-to-YAML linkage.

            It worked. Until it didn't.

            The script had three structural weaknesses that kept biting us:

            1. No type awareness. It could not distinguish a Use* constant from a DescKey* constant, causing 71 false positives in one run.
            2. Fragile exclusions. When a constant moved from token.go to whitespace.go, the exclusion glob broke silently.
            3. Ceiling on detection. Checks that require understanding call sites, import graphs, or type relationships are impossible in shell.

            We wrote a spec to replace all five checks with Go tests using go/ast and go/packages. The tests would run as part of go test ./...: no separate script, no separate CI step.

            What we did not expect was where the work would lead.

            ","path":["Code Structure as an Agent Interface: What 19 AST Tests Taught Us About Agent-Readable Code"],"tags":[]},{"location":"blog/2026-04-02-code-structure-as-an-agent-interface/#the-ast-migration","level":2,"title":"The AST Migration","text":"

            The pattern for each test is identical:

            func TestNoLiteralWhitespace(t *testing.T) {\n    pkgs := loadPackages(t)\n    var violations []string\n    for _, pkg := range pkgs {\n        for _, file := range pkg.Syntax {\n            ast.Inspect(file, func(n ast.Node) bool {\n                // check node, append to violations\n                return true\n            })\n        }\n    }\n    for _, v := range violations {\n        t.Error(v)\n    }\n}\n

            Load packages once via sync.Once, walk every syntax tree, collect violations, report. The shared helpers (loadPackages, isTestFile, posString) live in helpers_test.go. Each test is a _test.go file in internal/audit/, producing no binary output and not importable by production code.

            In a single session, we built 13 new tests on top of 6 that already existed, bringing the total to 19:

            Test What it catches TestNoLiteralWhitespace \"\\n\", \"\\t\", '\\r' outside config/token/ TestNoNakedErrors fmt.Errorf/errors.New outside internal/err/ TestNoStrayErrFiles err.go files outside internal/err/ TestNoRawLogging fmt.Fprint*(os.Stderr), log.Print* outside internal/log/ TestNoInlineSeparators strings.Join with literal separator arg TestNoStringConcatPaths Path-like variables built with + TestNoStutteryFunctions write.WriteJournal repeats package name TestDocComments Missing doc comments on any declaration TestNoMagicValues Numeric literals outside const definitions TestNoMagicStrings String literals outside const definitions TestLineLength Lines exceeding 80 characters TestNoRegexpOutsideRegexPkg regexp.MustCompile outside config/regex/

            Plus the six that preceded the session: TestNoErrorsAs, TestNoCmdPrintOutsideWrite, TestNoExecOutsideExecPkg, TestNoInlineRegexpCompile, TestNoRawFileIO, TestNoRawPermissions.

            The migration touched 300+ files across 25 commits.

            Not because the tests were hard to write, but because every test we wrote revealed violations that needed fixing.

            ","path":["Code Structure as an Agent Interface: What 19 AST Tests Taught Us About Agent-Readable Code"],"tags":[]},{"location":"blog/2026-04-02-code-structure-as-an-agent-interface/#the-tightening-loop","level":2,"title":"The Tightening Loop","text":"

            The most instructive part was not writing the tests. It was the iterative tightening.

            The following process was repeated for every test:

            1. Write the test with reasonable exemptions
            2. Run it, see violations
            3. Fix the violations (migrate to config constants)
            4. The human reviews the result
            5. The human spots something the test missed
            6. Fix the test first, verify it catches the issue
            7. Fix the newly caught violations
            8. Repeat from step 4

            This loop drove the tests from \"basically correct\" to \"actually useful\".

            Three examples:

            ","path":["Code Structure as an Agent Interface: What 19 AST Tests Taught Us About Agent-Readable Code"],"tags":[]},{"location":"blog/2026-04-02-code-structure-as-an-agent-interface/#example-1-the-local-const-loophole","level":3,"title":"Example 1: The Local Const Loophole","text":"

            TestNoMagicValues initially exempted local constants inside function bodies. This let code like this pass:

            const descMaxWidth = 70\ndesc := truncateDescription(\n    meta.Description, descMaxWidth,\n)\n

            The test saw a const definition and moved on. But const descMaxWidth = 70 on the line before its only use is just renaming a magic number. The 70 should live in config/format/TruncateDescription where it is discoverable, reusable, and auditable.

            We removed the local const exemption. The test caught it. The value moved to config.

            ","path":["Code Structure as an Agent Interface: What 19 AST Tests Taught Us About Agent-Readable Code"],"tags":[]},{"location":"blog/2026-04-02-code-structure-as-an-agent-interface/#example-2-the-single-character-dodge","level":3,"title":"Example 2: The Single-Character Dodge","text":"

            TestNoMagicStrings initially exempted all single-character strings as \"structural punctuation\".

            This let \"/\", \"-\", and \".\" pass everywhere.

            But \"/\" is a directory separator. It is OS-specific and a security surface.

            \"-\" used in strings.Repeat(\"-\", width) is creating visual output, not acting as a delimiter.

            \".\" in strings.SplitN(ver, \".\", 3) is a version separator.

            None of these are \"just punctuation\": They are domain values with specific meanings.

            We removed the blanket exemption: 30 violations surfaced.

            Every one was a real magic value that should have been token.Slash, token.Dash, or token.Dot.

            ","path":["Code Structure as an Agent Interface: What 19 AST Tests Taught Us About Agent-Readable Code"],"tags":[]},{"location":"blog/2026-04-02-code-structure-as-an-agent-interface/#example-3-the-replacer-versus-regex","level":3,"title":"Example 3: The Replacer versus Regex","text":"

            After migrating magic strings, we had this:

            func MermaidID(pkg string) string {\n    r := strings.NewReplacer(\n        token.Slash, token.Underscore,\n        token.Dot, token.Underscore,\n        token.Dash, token.Underscore,\n    )\n    return r.Replace(pkg)\n}\n

            Six token references and a NewReplacer allocation. The magic values were gone, but we had replaced them with token soup: structure without abstraction.

            The correct tool was a regex:

            // In config/regex/file.go:\nvar MermaidUnsafe = regexp.MustCompile(`[/.\\-]`)\n\n// In the caller:\nfunc MermaidID(pkg string) string {\n    return regex.MermaidUnsafe.ReplaceAllString(\n        pkg, token.Underscore,\n    )\n}\n

            One config regex, one call. The regex lives in config/regex/file.go where every other compiled pattern lives. An agent reading the code sees regex.MermaidUnsafe and immediately knows: this is a sanitization pattern, it lives in the regex registry, and it has a name that explains its purpose.

            Clean is better than clever.

            ","path":["Code Structure as an Agent Interface: What 19 AST Tests Taught Us About Agent-Readable Code"],"tags":[]},{"location":"blog/2026-04-02-code-structure-as-an-agent-interface/#a-before-and-after","level":2,"title":"A Before-and-After","text":"

            To make the agent-readability claim concrete, consider one function through the full transformation.

            Before (the code we started with):

            func MermaidID(pkg string) string {\n    r := strings.NewReplacer(\n        \"/\", \"_\", \".\", \"_\", \"-\", \"_\",\n    )\n    return r.Replace(pkg)\n}\n

            An agent reading this sees six string literals. To understand what the function does, it must: (1) parse the NewReplacer pair semantics, (2) infer that /, ., - are being replaced, (3) guess why, (4) hope the guess is right.

            There is nothing to follow. No import to trace. No name to search. The meaning is locked inside the function body.

            After (the code we ended with):

            func MermaidID(pkg string) string {\n    return regex.MermaidUnsafe.ReplaceAllString(\n        pkg, token.Underscore,\n    )\n}\n

            An agent reading this sees two named references: regex.MermaidUnsafe and token.Underscore.

            To understand the function, it can: (1) look up MermaidUnsafe in config/regex/file.go and see the pattern [/.\\-] with a doc comment explaining it matches invalid Mermaid characters, (2) look up Underscore in config/token/delim.go and see it is the replacement character.

            The agent now has: a named pattern, a named replacement, a package location, documentation, and neighboring context (other regex patterns, other delimiters).

            It got all of this for free by following just two references.

            The indirection is not an overhead. It is the retrieval query.

            ","path":["Code Structure as an Agent Interface: What 19 AST Tests Taught Us About Agent-Readable Code"],"tags":[]},{"location":"blog/2026-04-02-code-structure-as-an-agent-interface/#the-principles","level":2,"title":"The Principles","text":"

            You are not just improving code quality. You are shaping the input space that determines how an LLM can reason about your system.

            Every structural constraint we enforce converts implicit semantics into explicit structure.

            LLMs struggle when meaning is implicit and patterns are statistical.

            They thrive when meaning is explicit and structure is navigable.

            Here is what we learned, organized into three categories.

            ","path":["Code Structure as an Agent Interface: What 19 AST Tests Taught Us About Agent-Readable Code"],"tags":[]},{"location":"blog/2026-04-02-code-structure-as-an-agent-interface/#cognitive-constraints","level":3,"title":"Cognitive Constraints","text":"

            These force agents (and humans) to think harder.

            Indirection acts as a built-in retrieval mechanism:

            Moving magic values to config forces the agent to follow the reference. errMemory.WriteFile(cause) tells the agent \"there is a memory error package, go look.\" fmt.Errorf(\"writing MEMORY.md: %w\", cause) inlines everything and makes the call graph invisible. The indirection IS the retrieval query.

            Unfamiliar patterns force reasoning:

            When an agent sees token.Slash instead of \"/\", it cannot coast on corpus frequency. It has to actually look up what token.Slash is, which forces it through the dependency graph, which means it encounters documentation and neighboring constants, which gives it richer context. You are exploiting the agent's weakness (over-reliance on training data) to make it behave more carefully.

            Documentation helps everyone:

            Extensive documentation helps humans reading the code, agents reasoning about it, and RAG systems indexing it.

            Our TestDocComments check added 308 doc comments in one commit. Every function, every type, every constant block now has a doc comment.

            This is not busywork: it is the content that agents and embeddings consume.

            ","path":["Code Structure as an Agent Interface: What 19 AST Tests Taught Us About Agent-Readable Code"],"tags":[]},{"location":"blog/2026-04-02-code-structure-as-an-agent-interface/#structural-constraints","level":3,"title":"Structural Constraints","text":"

            These shape the codebase into a navigable graph.

            Shorter files save tokens:

            Forcing private helper functions out of main files makes the main file shorter. An agent loading a file spends fewer tokens on boilerplate and more on the logic that matters.

            Fixed-width constraints force decomposition:

            A function that cannot be expressed in 80 columns is either too deeply nested (extract a helper), has too many parameters (introduce a struct), or has a variable name that is too long (rethink the abstraction).

            The constraint forces structural improvements that happen to also make the code more parseable.

            Chunk-friendly structure helps RAG:

            Code intelligence tools chunk files for embedding and retrieval. Short, well-documented, single-responsibility files produce better chunks than monolithic files with mixed concerns.

            The structural constraints create files that RAG systems can index effectively.

            Centralization creates debuggable seams:

            All error handling in internal/err/, all logging in internal/log/, all file operations in internal/io/. One place to debug, one place to test, one place to see patterns. An agent analyzing \"how does this project handle errors\" gets one answer from one package, not 200 scattered fmt.Errorf calls.

            Private functions become public patterns:

            When you extract a private function to satisfy a constraint, it often ends up as a semi-public function in a core/ package. Then you realize it is generic enough to be factored into a purpose-specific module.

            The constraint drives discovery of reusable abstractions hiding inside monolithic functions.

            ","path":["Code Structure as an Agent Interface: What 19 AST Tests Taught Us About Agent-Readable Code"],"tags":[]},{"location":"blog/2026-04-02-code-structure-as-an-agent-interface/#operational-benefits","level":3,"title":"Operational Benefits","text":"

            These pay dividends in daily development.

            Single-edit renames:

            Renaming a flag is one edit to a config constant instead of find-and-replace across 30,000 lines with possible misses. grep token.Slash gives you every place that uses a forward slash semantically.

            grep \"/\" gives you noise.

            Blast radius containment:

            When every magic value is a config constant, a search is one result. This matters for impact analysis, security audits, and agents trying to understand \"what uses this\".

            Compile-time contract enforcement:

            When err/memory.WriteFile exists, the compiler guarantees the error message exists and the call signature is correct. An inline fmt.Errorf can have a typo in the format string and nothing catches it until runtime. Centralization turns runtime failures into compile errors.

            Semantic git blame:

            When token.Slash is used everywhere and someone changes its value, git blame on the config file shows exactly when and why.

            With inline \"/\" scattered across 30 files, the history is invisible.

            Test surface reduction:

            Centralizing into internal/err/, internal/io/, internal/config/ means you test behavior once at the boundary and trust the callers.

            You do not need 30 tests for 30 fmt.Errorf calls. You need 1 test for errMemory.WriteFile and 30 trivial call-site audits, which is exactly what these AST tests provide.

            ","path":["Code Structure as an Agent Interface: What 19 AST Tests Taught Us About Agent-Readable Code"],"tags":[]},{"location":"blog/2026-04-02-code-structure-as-an-agent-interface/#the-numbers","level":2,"title":"The Numbers","text":"

            One session. 25 commits. The raw stats:

            Metric Count New audit tests 13 Total audit tests 19 Files touched 300+ Magic values migrated 90+ Functions renamed 17 Doc comments added 323 Lines rewrapped to 80 chars 190 Config constants created 40+ Config regexes created 3

            Every number represents a violation that existed before the test caught it. The tests did not create work: they revealed work that was already needed.

            ","path":["Code Structure as an Agent Interface: What 19 AST Tests Taught Us About Agent-Readable Code"],"tags":[]},{"location":"blog/2026-04-02-code-structure-as-an-agent-interface/#the-uncomfortable-implication","level":2,"title":"The Uncomfortable Implication","text":"

            None of this is Go-specific.

            If an AI agent interacts with your codebase, your codebase already is an interface. You just have not designed it as one.

            If your error messages are scattered across 200 files, an agent cannot reason about error handling as a concept. If your magic values are inlined, an agent cannot distinguish \"this is a path separator\" from \"this is a division operator.\" If your functions are named write.WriteJournal, the agent wastes tokens on redundant information.

            What we discovered, through the unglamorous work of writing lint tests and migrating string literals, is that the structural constraints software engineering has valued for decades are exactly the constraints that make code readable to machines.

            This is not a coincidence: These constraints exist because they reduce the cognitive load of understanding code.

            Agents have cognitive load too: It is called the context window.

            You are not converting code to a new paradigm.

            You are making the latent graph visible.

            You are converting implicit semantics into explicit structure that both humans and machines can traverse.

            ","path":["Code Structure as an Agent Interface: What 19 AST Tests Taught Us About Agent-Readable Code"],"tags":[]},{"location":"blog/2026-04-02-code-structure-as-an-agent-interface/#whats-next","level":2,"title":"What's Next","text":"

            The spec lists 8 more tests we have not built yet, including TestDescKeyYAMLLinkage (verifying that every DescKey constant has a corresponding YAML entry), TestCLICmdStructure (enforcing the cmd.go / run.go / doc.go file convention), and TestNoFlagBindOutsideFlagbind (which requires migrating ~50 flag registration sites first).

            The broader question: should these principles be codified as a reusable linting framework? The patterns (loadPackages + ast.Inspect + violation collection) are generic.

            The specific checks are project-specific. But the categories of checks (centralization enforcement, magic value detection, naming conventions, documentation requirements) are universal.

            For now, 19 tests in internal/audit/ is enough. They run in 2 seconds as part of go test ./.... They catch real issues.

            And they encode a theory of code quality that serves both humans and the agents that work alongside them.

            Agents are not going away. They are reading your code right now, forming representations of your system in context windows that forget everything between sessions.

            The codebases that structure themselves for that reality will compound. The ones that do not will slowly become illegible to the tools they depend on.

            Structure is no longer just for maintainability. It is for reasonability.

            ","path":["Code Structure as an Agent Interface: What 19 AST Tests Taught Us About Agent-Readable Code"],"tags":[]},{"location":"blog/2026-04-06-the-watermelon-rind-anti-pattern/","level":1,"title":"The Watermelon-Rind Anti-Pattern","text":"","path":["The Watermelon-Rind Anti-Pattern: Why Smarter Tools Make Shallower Agents"],"tags":[]},{"location":"blog/2026-04-06-the-watermelon-rind-anti-pattern/#why-smarter-tools-make-shallower-agents","level":2,"title":"Why Smarter Tools Make Shallower Agents","text":"

            Give an agent a graph query tool, and it will tell you everything about your codebase except what actually matters.

            Jose Alekhinne / April 6, 2026

            ","path":["The Watermelon-Rind Anti-Pattern: Why Smarter Tools Make Shallower Agents"],"tags":[]},{"location":"blog/2026-04-06-the-watermelon-rind-anti-pattern/#a-turkish-proverb-walks-into-a-codebase","level":2,"title":"A Turkish Proverb Walks into a Codebase","text":"

            There's a Turkish idiom: esegin aklina karpuz kabugu sokmak (literally, \"to put watermelon rind into a donkey's mind\"). It means to plant an idea in someone's head that they wouldn't have come up with on their own, usually one that leads them astray.

            In English, let's call this a \"watermelon metric\": a project management term for something that's green on the outside and red on the inside: all dashboards passing, reality crumbling.

            Both halves of this metaphor showed up in a single experiment. And the result changed how we design architecture analysis in ctx.

            ","path":["The Watermelon-Rind Anti-Pattern: Why Smarter Tools Make Shallower Agents"],"tags":[]},{"location":"blog/2026-04-06-the-watermelon-rind-anti-pattern/#the-experiment","level":2,"title":"The Experiment","text":"

            We ran three sessions analyzing the same large codebase (~34,000 symbols) using the same architecture skill, varying only what tools the agent had access to.

            Session Tools Available Output (lines) Character 1 None (MCP broken) 5,866 Deep, intimate 2 Full graph MCP 1,124 Structural, correct 3 Enrichment pass +verified data Additive, not restorative

            Session 1 was an accident. The MCP server that provides code intelligence queries was broken, so the agent couldn't ask the graph anything. It had to read code. Line by line. File by file.

            It produced 5,866 lines of architecture analysis: per-controller data flows, scale math, startup sequences, timeout defaults, edge cases that only surface when you actually look at the implementation.

            Session 2 had working tools. Same skill, same codebase. The agent produced 1,124 lines (5.2x less). Structurally correct. Valid symbol references. Proper call chains.

            And hollow.

            ","path":["The Watermelon-Rind Anti-Pattern: Why Smarter Tools Make Shallower Agents"],"tags":[]},{"location":"blog/2026-04-06-the-watermelon-rind-anti-pattern/#the-rind","level":2,"title":"The Rind","text":"

            The Session 2 output was a watermelon rind: the right shape, the right color, the right texture on the outside. But the substance (the operational details, the defaults nobody documents, the scale math that tells you when a component will fall over) was missing.

            Not wrong. Not broken. Just... thin.

            The agent had answered every question correctly. The problem was that it never discovered the questions it should have asked. When you can query a graph for \"what calls this function?\", you don't stumble into the retry loop that silently swallows errors three layers down. When you can ask for the dependency tree, you don't notice that two packages share a mutable state through a global variable that isn't in any interface.

            The tool answered the question asked but prevented the discovery of answers to questions never asked.

            Here's what that looks like concretely: the graph tells you that ReconcileDeployment calls SyncPods. It does not tell you that SyncPods retries three times with exponential backoff, silently drops errors after timeout, and resets a package-level counter that another goroutine reads without a lock. The call chain is correct.

            The operational reality is invisible.

            ","path":["The Watermelon-Rind Anti-Pattern: Why Smarter Tools Make Shallower Agents"],"tags":[]},{"location":"blog/2026-04-06-the-watermelon-rind-anti-pattern/#the-donkeys-idea","level":2,"title":"The Donkey's Idea","text":"

            This is where the Turkish proverb earns its place: The graph tool is the \"karpuz kabugu\" (the watermelon rind placed into the agent's mind).

            Before the tool existed, the agent had no choice but to read deeply. With the tool available, a new idea appears: why read 500 lines of code when I can query the call graph?

            The agent isn't lazy. It's rational.

            Graph queries are faster, more reliable, and produce verifiably correct output. The agent is optimizing. It's satisficing (finding answers that are good enough) instead of maximizing (finding everything there is to know).

            Satisficing produces watermelon rinds.

            ","path":["The Watermelon-Rind Anti-Pattern: Why Smarter Tools Make Shallower Agents"],"tags":[]},{"location":"blog/2026-04-06-the-watermelon-rind-anti-pattern/#the-two-pass-compiler","level":2,"title":"The Two-Pass Compiler","text":"

            Session 3 taught us that you can't fix shallow analysis by adding more tools after the fact. The enrichment pass added verified graph data (blast radius numbers, registration sites, execution flow confirmation) but it couldn't recover the intimate code knowledge that Session 1 had produced through sheer necessity.

            You can't enrich your way out of a depth deficit.

            So we redesigned. Instead of one skill with optional tools, we built a two-pass compiler for architecture understanding:

            Pass 1: Semantic parsing. The /ctx-architecture skill deliberately has no access to graph query tools. The agent must read code, build mental models, and produce architecture artifacts through human-style comprehension. Constraint is the feature.

            Pass 2: Static analysis. The /ctx-architecture-enrich skill takes Pass 1 output as input and runs comprehensive verification through code intelligence: blast radius analysis, registration site discovery, execution flow tracing, domain clustering comparison. It extends and verifies, but it doesn't replace.

            The key insight: these must be separate skills with separate tool permissions. If you give the agent graph tools during Pass 1, it will use them. The \"karpuz kabugu\" will be in its mind. The only way to prevent satisficing is to remove the option.

            ","path":["The Watermelon-Rind Anti-Pattern: Why Smarter Tools Make Shallower Agents"],"tags":[]},{"location":"blog/2026-04-06-the-watermelon-rind-anti-pattern/#the-principle","level":2,"title":"The Principle","text":"

            We call this constraint-as-feature: deliberately withholding capabilities to force deeper engagement.

            It sounds paradoxical. You built sophisticated code intelligence tools and then... forbid the agent from using them? During the most important phase?

            Yes. Because the tools don't make the agent smarter. They make it faster. And faster, in architecture analysis, is the enemy of deep.

            What's actually happening is subtler: tools reduce the agent's search space. A graph query collapses thousands of possible observations into one precise answer. That's efficient for known questions. But architecture understanding depends on unknown unknowns: and you only find those by wandering through code with nothing to shortcut the journey.

            The constraint forces the agent into a mode of operation that produces better output than any amount of tooling can achieve. The limitation is the capability.

            ","path":["The Watermelon-Rind Anti-Pattern: Why Smarter Tools Make Shallower Agents"],"tags":[]},{"location":"blog/2026-04-06-the-watermelon-rind-anti-pattern/#when-does-this-apply","level":2,"title":"When Does This Apply?","text":"

            Not always. The watermelon-rind anti-pattern is specific to exploratory analysis: tasks where the value comes from discovering unknowns, not from answering known questions.

            Graph tools are excellent for:

            • Verification: \"Does X actually call Y?\" (binary question, precise answer)
            • Impact analysis: \"What breaks if I change Z?\" (bounded scope, enumerable results)
            • Navigation: \"Where is this interface implemented?\" (lookup, not analysis)

            Graph tools produce watermelon rinds when:

            • The goal is understanding, not answering
            • The unknowns are unknown: you don't know what to ask
            • Depth matters more than breadth: operational details, edge cases, implicit coupling

            The two-pass approach preserves both: deep reading first, tool verification second.

            ","path":["The Watermelon-Rind Anti-Pattern: Why Smarter Tools Make Shallower Agents"],"tags":[]},{"location":"blog/2026-04-06-the-watermelon-rind-anti-pattern/#takeaway","level":2,"title":"Takeaway","text":"

            The two-pass approach is the slowest way to analyze a codebase. It is also the only way that produces both depth and accuracy. We accept the cost because architecture analysis is not a speed game: it is a coverage game.

            Esegin aklina karpuz kabugu sokma!

            (don't put the watermelon rind into a donkey's mind)

            If the agent never struggles, it never discovers. And if it never discovers, you are not doing architecture; you are doing autocomplete.

            This post is part of the ctx field notes series, documenting what we learn building persistent context infrastructure for AI coding sessions.

            ","path":["The Watermelon-Rind Anti-Pattern: Why Smarter Tools Make Shallower Agents"],"tags":[]},{"location":"cli/","level":1,"title":"CLI","text":"","path":["CLI"],"tags":[]},{"location":"cli/#ctx-cli","level":2,"title":"ctx CLI","text":"

            Complete reference for all ctx commands, grouped by function.

            ","path":["CLI"],"tags":[]},{"location":"cli/#global-options","level":2,"title":"Global Options","text":"

            All commands support these flags:

            Flag Description --help Show command help --version Show version --context-dir <path> Override context directory (default: .context/) --allow-outside-cwd Allow context directory outside current working directory --tool <name> Override active AI tool identifier (e.g. kiro, cursor)

            Initialization required. Most commands require a .context/ directory created by ctx init. Running a command without one produces:

            ctx: not initialized - run \"ctx init\" first\n

            Commands that work before initialization: ctx init, ctx setup, ctx doctor, and grouping commands that only show help.

            ","path":["CLI"],"tags":[]},{"location":"cli/#getting-started","level":2,"title":"Getting Started","text":"Command Description ctx init Initialize .context/ directory with templates ctx status Show context summary (files, tokens, drift) ctx guide Quick-reference cheat sheet ctx why Read the philosophy behind ctx","path":["CLI"],"tags":[]},{"location":"cli/#context","level":2,"title":"Context","text":"Command Description ctx add Add a task, decision, learning, or convention ctx load Output assembled context in read order ctx agent Print token-budgeted context packet for AI consumption ctx sync Reconcile context with codebase state ctx drift Detect stale paths, secrets, missing files ctx compact Archive completed tasks, clean up files ctx fmt Format context files to 80-char line width ctx decision Manage DECISIONS.md (reindex) ctx learning Manage LEARNINGS.md (reindex) ctx task Task completion, archival, and snapshots ctx reindex Regenerate indices for DECISIONS.md and LEARNINGS.md ctx permission Permission snapshots (golden image) ctx change Show what changed since last session ctx memory Bridge Claude Code auto memory into .context/ ctx watch Auto-apply context updates from AI output","path":["CLI"],"tags":[]},{"location":"cli/#sessions","level":2,"title":"Sessions","text":"Command Description ctx journal Browse, import, enrich, and lock session history ctx pad Encrypted scratchpad for sensitive one-liners ctx remind Session-scoped reminders that surface at session start ctx hook pause Pause context hooks for the current session ctx hook resume Resume paused context hooks","path":["CLI"],"tags":[]},{"location":"cli/#integrations","level":2,"title":"Integrations","text":"Command Description ctx setup Generate AI tool integration configs ctx steering Manage steering files (behavioral rules for AI tools) ctx trigger Manage lifecycle triggers (scripts for automation) ctx skill Manage reusable instruction bundles ctx mcp MCP server for AI tool integration 
(stdin/stdout) ctx hook notify Webhook notifications (setup, test, send) ctx loop Generate autonomous loop script ctx connection Client-side commands for connecting to a ctx Hub ctx hub Operate a ctx Hub server or cluster ctx serve Serve a static site locally via zensical ctx site Site management (feed generation)","path":["CLI"],"tags":[]},{"location":"cli/#diagnostics","level":2,"title":"Diagnostics","text":"Command Description ctx doctor Structural health check (hooks, drift, config) ctx trace Show context behind git commits ctx sysinfo Show system resource usage (memory, swap, disk, load) ctx usage Show session token usage stats","path":["CLI"],"tags":[]},{"location":"cli/#runtime","level":2,"title":"Runtime","text":"Command Description ctx config Manage runtime configuration profiles ctx backup Back up context and Claude data to tar.gz / SMB ctx prune Clean stale per-session state files ctx hook Hook message, notification, and lifecycle controls ctx system Hook plumbing and agent-only commands (not user-facing)","path":["CLI"],"tags":[]},{"location":"cli/#shell","level":2,"title":"Shell","text":"Command Description ctx completion Generate shell autocompletion scripts","path":["CLI"],"tags":[]},{"location":"cli/#exit-codes","level":2,"title":"Exit Codes","text":"Code Meaning 0 Success 1 General error / warnings (e.g. drift) 2 Context not found 3 Violations found (e.g. drift) 4 File operation error","path":["CLI"],"tags":[]},{"location":"cli/#environment-variables","level":2,"title":"Environment Variables","text":"Variable Description CTX_DIR Override default context directory path CTX_TOKEN_BUDGET Override default token budget CTX_BACKUP_SMB_URL SMB share URL for backups (e.g. smb://host/share) CTX_BACKUP_SMB_SUBDIR Subdirectory on SMB share (default: ctx-sessions) CTX_SESSION_ID Active AI session ID (used by ctx trace for context linking)","path":["CLI"],"tags":[]},{"location":"cli/#configuration-file","level":2,"title":"Configuration File","text":"

            Optional .ctxrc (YAML format) at project root:

            # .ctxrc\ncontext_dir: .context        # Context directory name\ntoken_budget: 8000           # Default token budget\npriority_order:              # File loading priority\n  - TASKS.md\n  - DECISIONS.md\n  - CONVENTIONS.md\nauto_archive: true           # Auto-archive old items\narchive_after_days: 7        # Days before archiving tasks\nscratchpad_encrypt: true     # Encrypt scratchpad (default: true)\nallow_outside_cwd: false     # Skip boundary check (default: false)\nevent_log: false             # Enable local hook event logging\ncompanion_check: true        # Check companion tools at session start\nentry_count_learnings: 30    # Drift warning threshold (0 = disable)\nentry_count_decisions: 20    # Drift warning threshold (0 = disable)\nconvention_line_count: 200   # Line count warning for CONVENTIONS.md (0 = disable)\ninjection_token_warn: 15000  # Oversize injection warning (0 = disable)\ncontext_window: 200000       # Auto-detected for Claude Code; override for other tools\nbilling_token_warn: 0        # One-shot billing warning at this token count (0 = disabled)\nkey_rotation_days: 90        # Days before key rotation nudge\nsession_prefixes:            # Recognized session header prefixes (extend for i18n)\n  - \"Session:\"               # English (default)\n  # - \"Oturum:\"              # Turkish (add as needed)\n  # - \"セッション:\"             # Japanese (add as needed)\nfreshness_files:             # Files with technology-dependent constants (opt-in)\n  - path: config/thresholds.yaml\n    desc: Model token limits and batch sizes\n    review_url: https://docs.example.com/limits  # Optional\nnotify:                      # Webhook notification settings\n  events:                    # Required: only listed events fire\n    - loop\n    - nudge\n    - relay\n    # - heartbeat            # Every-prompt session-alive signal\ntool: \"\"                     # Active AI tool: claude, cursor, cline, kiro, codex\nsteering:                    # Steering 
layer configuration\n  dir: .context/steering     # Steering files directory\n  default_inclusion: manual  # Default inclusion mode (always, auto, manual)\n  default_tools: []          # Default tool filter for new steering files\nhooks:                       # Hook system configuration\n  dir: .context/hooks        # Hook scripts directory\n  timeout: 10                # Per-hook execution timeout in seconds\n  enabled: true              # Whether hook execution is enabled\n
            Field Type Default Description context_dir string .context Context directory name (relative to project root) token_budget int 8000 Default token budget for ctx agent priority_order []string (all files) File loading priority for context packets auto_archive bool true Auto-archive completed tasks archive_after_days int 7 Days before completed tasks are archived scratchpad_encrypt bool true Encrypt scratchpad with AES-256-GCM allow_outside_cwd bool false Skip boundary check for external context dirs event_log bool false Enable local hook event logging to .context/state/events.jsonl companion_check bool true Check companion tool availability (Gemini Search, GitNexus) during /ctx-remember entry_count_learnings int 30 Drift warning when LEARNINGS.md exceeds this count entry_count_decisions int 20 Drift warning when DECISIONS.md exceeds this count convention_line_count int 200 Line count warning for CONVENTIONS.md injection_token_warn int 15000 Warn when auto-injected context exceeds this token count (0 = disable) context_window int 200000 Context window size in tokens. Auto-detected for Claude Code (200k/1M); override for other AI tools billing_token_warn int 0 (off) One-shot warning when session tokens exceed this threshold (0 = disabled) key_rotation_days int 90 Days before encryption key rotation nudge session_prefixes []string [\"Session:\"] Recognized Markdown session header prefixes. Extend to parse sessions written in other languages freshness_files []object (none) Files to track for staleness (path, desc, optional review_url). 
Hook warns after 6 months without modification notify.events []string (all) Event filter for webhook notifications (empty = all) tool string (empty) Active AI tool identifier (claude, cursor, cline, kiro, codex) steering.dir string .context/steering Steering files directory steering.default_inclusion string manual Default inclusion mode for new steering files (always, auto, manual) steering.default_tools []string (all) Default tool filter for new steering files (empty = all tools) hooks.dir string .context/hooks Hook scripts directory hooks.timeout int 10 Per-hook execution timeout in seconds hooks.enabled bool true Whether hook execution is enabled

            Priority order: CLI flags > Environment variables > .ctxrc > Defaults

            All settings are optional. Missing values use defaults.

            ","path":["CLI"],"tags":[]},{"location":"cli/backup/","level":1,"title":"Backup","text":"","path":["CLI","Runtime","Backup"],"tags":[]},{"location":"cli/backup/#ctx-backup","level":3,"title":"ctx backup","text":"

            Create timestamped tar.gz archives of project context and/or global Claude Code data. Optionally copies archives to an SMB share via GVFS.

            ctx backup [flags]\n

            Flags:

            Flag Description --scope Backup scope: project, global, or all (default: all) --json Output results as JSON

            Scopes:

            Scope What's archived project .context/, .claude/, ideas/, ~/.bashrc global ~/.claude/ (excludes todos/) all Both project and global (default)

            Environment:

            Variable Purpose CTX_BACKUP_SMB_URL SMB share URL (e.g. smb://host/share) CTX_BACKUP_SMB_SUBDIR Subdirectory on share (default: ctx-sessions)

            Examples:

            ctx backup                       # Back up everything (default: all)\nctx backup --scope project       # Project context only\nctx backup --scope global        # Global Claude data only\nctx backup --scope all --json    # Both, JSON output\n
            ","path":["CLI","Runtime","Backup"],"tags":[]},{"location":"cli/bootstrap/","level":1,"title":"System Bootstrap","text":"","path":["System Bootstrap"],"tags":[]},{"location":"cli/bootstrap/#ctx-system-bootstrap","level":3,"title":"ctx system bootstrap","text":"

            Print the resolved context directory path so AI agents can anchor their session. The default output lists the context directory, the tracked context files, and a short health snapshot. --quiet prints just the path; --json produces structured output for automation.

            This is a hidden, agent-only command that agents are instructed to run first in their session-start procedure; it is the authoritative answer to \"where does this project's context live?\".

            ctx system bootstrap [flags]\n

            Flags:

            Flag Description -q, --quiet Output only the context directory path --json Output in JSON format

            Examples:

            ctx system bootstrap                 # Text output for agents\nctx system bootstrap -q              # Just the context directory path\nctx system bootstrap --json          # Structured output for automation\n

            Scripting tip: CTX_DIR=$(ctx system bootstrap -q) is the canonical way for skills and scripts to find the project's context directory without hardcoding .context/.

            ","path":["System Bootstrap"],"tags":[]},{"location":"cli/change/","level":1,"title":"Change","text":"","path":["CLI","Context","Change"],"tags":[]},{"location":"cli/change/#ctx-change","level":2,"title":"ctx change","text":"

            Show what changed in context files and code since your last session.

            Automatically detects the previous session boundary from state markers or event log. Useful at session start to quickly see what moved while you were away.

            ctx change [flags]\n

            Flags:

            Flag Description --since Time reference: duration (24h) or date (2026-03-01)

            Reference time detection (priority order):

            1. --since flag (duration, date, or RFC3339 timestamp)
            2. ctx-loaded-* marker files in .context/state/ (second most recent)
            3. Last context-load-gate event from .context/state/events.jsonl
            4. Fallback: 24 hours ago

            Examples:

            # Auto-detect last session, show what changed\nctx change\n\n# Changes in the last 48 hours\nctx change --since 48h\n\n# Changes since a specific date\nctx change --since 2026-03-10\n

            Output:

            ## Changes Since Last Session\n\n**Reference point**: 6 hours ago\n\n### Context File Changes\n- `TASKS.md` - modified 2026-03-12 14:30\n- `DECISIONS.md` - modified 2026-03-12 09:15\n\n### Code Changes\n- **12 commits** since reference point\n- **Latest**: Fix journal enrichment ordering\n- **Directories touched**: internal, docs, specs\n- **Authors**: jose, claude\n

            Context file changes are detected by filesystem mtime (works without git). Code changes use git log --since (empty when not in a git repo).

            See also: Reviewing Session Changes.

            ","path":["CLI","Context","Change"],"tags":[]},{"location":"cli/completion/","level":1,"title":"Completion","text":"","path":["CLI","Shell","Completion"],"tags":[]},{"location":"cli/completion/#ctx-completion","level":2,"title":"ctx completion","text":"

            Generate shell autocompletion scripts.

            ctx completion <shell>\n
            ","path":["CLI","Shell","Completion"],"tags":[]},{"location":"cli/completion/#subcommands","level":3,"title":"Subcommands","text":"Shell Command bash ctx completion bash zsh ctx completion zsh fish ctx completion fish powershell ctx completion powershell

            Examples:

            ctx completion bash > /etc/bash_completion.d/ctx\nctx completion zsh  > \"${fpath[1]}/_ctx\"\nctx completion fish > ~/.config/fish/completions/ctx.fish\nctx completion powershell | Out-String | Invoke-Expression\n
            ","path":["CLI","Shell","Completion"],"tags":[]},{"location":"cli/completion/#installation","level":3,"title":"Installation","text":"BashZshFishPowerShell
            # Add to ~/.bashrc\nsource <(ctx completion bash)\n
            # Add to ~/.zshrc\nsource <(ctx completion zsh)\n
            ctx completion fish | source\n# Or save to completions directory\nctx completion fish > ~/.config/fish/completions/ctx.fish\n
            # Add to your PowerShell profile\nctx completion powershell | Out-String | Invoke-Expression\n
            ","path":["CLI","Shell","Completion"],"tags":[]},{"location":"cli/config/","level":1,"title":"Config","text":"","path":["CLI","Runtime","Config"],"tags":[]},{"location":"cli/config/#ctx-config","level":3,"title":"ctx config","text":"

            Manage runtime configuration profiles.

            ctx config <subcommand>\n

            The ctx repo ships two .ctxrc source profiles (.ctxrc.base and .ctxrc.dev). The working copy (.ctxrc) is gitignored and switched between them using subcommands below.

            ","path":["CLI","Runtime","Config"],"tags":[]},{"location":"cli/config/#ctx-config-switch","level":4,"title":"ctx config switch","text":"

            Switch between .ctxrc configuration profiles.

            ctx config switch [dev|base]\n

            With no argument, toggles between dev and base. Accepts prod as an alias for base.

            Argument Description dev Switch to dev profile (verbose logging) base Switch to base profile (all defaults) (none) Toggle to the opposite profile

            Profiles:

            Profile Description dev Verbose logging, webhook notifications on base All defaults, notifications off

            Examples:

            ctx config switch dev     # Switch to dev profile\nctx config switch base    # Switch to base profile\nctx config switch         # Toggle (dev → base or base → dev)\nctx config switch prod    # Alias for \"base\"\n

            The detection heuristic checks for an uncommented notify: line in .ctxrc: present means dev, absent means base.

            ","path":["CLI","Runtime","Config"],"tags":[]},{"location":"cli/config/#ctx-config-status","level":4,"title":"ctx config status","text":"

            Show which .ctxrc profile is currently active.

            ctx config status\n

            Output examples:

            active: dev (verbose logging enabled)\nactive: base (defaults)\nactive: none (.ctxrc does not exist)\n

            See also: Configuration, Contributing: Configuration Profiles

            ","path":["CLI","Runtime","Config"],"tags":[]},{"location":"cli/connect/","level":1,"title":"Connect","text":"","path":["Connect"],"tags":[]},{"location":"cli/connect/#ctx-connect","level":2,"title":"ctx connect","text":"

            Connect a project to a ctx Hub for cross-project knowledge sharing. Projects publish decisions, learnings, conventions, and tasks to a hub; other subscribed projects receive them alongside local context.

            New to the Hub?

            Start with the ctx Hub overview for the mental model (what the hub is, who it's for, what it is not), then walk through Getting Started. This page is a command reference, not an introduction.

            The unit of identity is a project, not a user. Registering a directory with ctx connect register binds a per-project client token in .context/.connect.enc. Two developers on the same project either share that file over a trusted channel, or each register under a different project name.

            Only structured entries flow through the hub: decision, learning, convention, task. Session journals, scratchpad contents, and other local state stay on the machine that created them.

            ","path":["Connect"],"tags":[]},{"location":"cli/connect/#ctx-connect-register","level":3,"title":"ctx connect register","text":"

            One-time registration with a hub. Requires the hub address and admin token (printed by ctx hub start on first run).

            ctx connect register localhost:9900 --token ctx_adm_7f3a...\n

            On success, stores an encrypted connection config in .context/.connect.enc for future RPCs.

            ","path":["Connect"],"tags":[]},{"location":"cli/connect/#ctx-connect-subscribe","level":3,"title":"ctx connect subscribe","text":"

            Set which entry types to receive from the hub. Only matching types are returned by sync and listen.

            ctx connect subscribe decision learning\nctx connect subscribe decision learning convention\n
            ","path":["Connect"],"tags":[]},{"location":"cli/connect/#ctx-connect-sync","level":3,"title":"ctx connect sync","text":"

            Pull matching entries from the hub and write them to .context/hub/ as markdown files with origin tags and date headers. Tracks last-seen sequence for incremental sync.

            ctx connect sync\n
            ","path":["Connect"],"tags":[]},{"location":"cli/connect/#ctx-connect-publish","level":3,"title":"ctx connect publish","text":"

            Push entries to the hub. Specify type and content as arguments.

            ctx connect publish decision \"Use UTC timestamps everywhere\"\n
            ","path":["Connect"],"tags":[]},{"location":"cli/connect/#ctx-connect-listen","level":3,"title":"ctx connect listen","text":"

            Stream new entries from the hub in real-time. Writes to .context/hub/ as entries arrive. Press Ctrl-C to stop.

            ctx connect listen\n
            ","path":["Connect"],"tags":[]},{"location":"cli/connect/#ctx-connect-status","level":3,"title":"ctx connect status","text":"

            Show hub connection state and entry statistics.

            ctx connect status\n
            ","path":["Connect"],"tags":[]},{"location":"cli/connect/#automatic-sharing","level":2,"title":"Automatic Sharing","text":"

            Use --share on ctx add to write locally AND publish to the hub:

            ctx add decision \"Use UTC\" --share \\\n  --context \"Need consistency\" \\\n  --rationale \"Avoid timezone bugs\" \\\n  --consequence \"UI does conversion\"\n

            If the hub is unreachable, the local write succeeds and a warning is printed. The --share flag is best-effort; it never blocks local context updates.

            ","path":["Connect"],"tags":[]},{"location":"cli/connect/#auto-sync","level":2,"title":"Auto-Sync","text":"

            Once registered, the check-hub-sync hook automatically syncs new entries from the hub at the start of each session (daily throttled). No manual ctx connect sync needed.

            ","path":["Connect"],"tags":[]},{"location":"cli/connect/#shared-files","level":2,"title":"Shared Files","text":"

            Entries from the hub are stored in .context/hub/:

            .context/hub/\n  decisions.md      # Shared decisions with origin tags\n  learnings.md      # Shared learnings\n  conventions.md    # Shared conventions\n  .sync-state.json  # Last-seen sequence tracker\n

            These files are read-only (managed by sync/listen) and never mixed with local context files.

            ","path":["Connect"],"tags":[]},{"location":"cli/connect/#agent-integration","level":2,"title":"Agent Integration","text":"

            Include shared knowledge in agent context packets:

            ctx agent --include-hub\n

            Shared entries are included as Tier 8 in the budget-aware assembly, scored by recency and type relevance.

            ","path":["Connect"],"tags":[]},{"location":"cli/connection/","level":1,"title":"Connect","text":"","path":["CLI","Integrations","Connect"],"tags":[]},{"location":"cli/connection/#ctx-connect","level":2,"title":"ctx connect","text":"

            Connect a project to a ctx Hub for cross-project knowledge sharing. Projects publish decisions, learnings, conventions, and tasks to a hub; other subscribed projects receive them alongside local context.

            New to the ctx Hub?

            Start with the ctx Hub overview for the mental model (what the hub is, who it's for, what it is not), then walk through Getting Started. This page is a command reference, not an introduction.

            The unit of identity is a project, not a user. Registering a directory with ctx connection register binds a per-project client token in .context/.connect.enc. Two developers on the same project either share that file over a trusted channel, or each register under a different project name.

            Only structured entries flow through the hub: decision, learning, convention, task. Session journals, scratchpad contents, and other local state stay on the machine that created them.

            ","path":["CLI","Integrations","Connect"],"tags":[]},{"location":"cli/connection/#ctx-connection-register","level":3,"title":"ctx connection register","text":"

            One-time registration with a ctx Hub. Requires the ctx Hub address and admin token (printed by ctx hub start on first run).

            Examples:

            ctx connection register localhost:9900 --token ctx_adm_7f3a...\n

            On success, stores an encrypted connection config in .context/.connect.enc for future RPCs.

            ","path":["CLI","Integrations","Connect"],"tags":[]},{"location":"cli/connection/#ctx-connection-subscribe","level":3,"title":"ctx connection subscribe","text":"

            Set which entry types to receive from the ctx Hub. Only matching types are returned by sync and listen.

            Examples:

            ctx connection subscribe decision learning\nctx connection subscribe decision learning convention\n
            ","path":["CLI","Integrations","Connect"],"tags":[]},{"location":"cli/connection/#ctx-connection-sync","level":3,"title":"ctx connection sync","text":"

            Pull matching entries from the ctx Hub and write them to .context/hub/ as markdown files with origin tags and date headers. Tracks last-seen sequence for incremental sync.

            Examples:

            ctx connection sync\n
            ","path":["CLI","Integrations","Connect"],"tags":[]},{"location":"cli/connection/#ctx-connection-publish","level":3,"title":"ctx connection publish","text":"

            Push entries to the ctx Hub. Specify type and content as arguments.

            Examples:

            ctx connection publish decision \"Use UTC timestamps everywhere\"\nctx connection publish learning \"Go embed requires files in same package\"\n
            ","path":["CLI","Integrations","Connect"],"tags":[]},{"location":"cli/connection/#ctx-connection-listen","level":3,"title":"ctx connection listen","text":"

            Stream new entries from the ctx Hub in real-time. Writes to .context/hub/ as entries arrive. Press Ctrl-C to stop.

            Examples:

            ctx connection listen\n
            ","path":["CLI","Integrations","Connect"],"tags":[]},{"location":"cli/connection/#ctx-connection-status","level":3,"title":"ctx connection status","text":"

            Show ctx Hub connection state and entry statistics.

            Examples:

            ctx connection status\n
            ","path":["CLI","Integrations","Connect"],"tags":[]},{"location":"cli/connection/#automatic-sharing","level":2,"title":"Automatic Sharing","text":"

            Use --share on ctx add to write locally AND publish to the ctx Hub:

            ctx add decision \"Use UTC\" --share \\\n  --context \"Need consistency\" \\\n  --rationale \"Avoid timezone bugs\" \\\n  --consequence \"UI does conversion\"\n

            If the hub is unreachable, the local write succeeds and a warning is printed. The --share flag is best-effort; it never blocks local context updates.

            ","path":["CLI","Integrations","Connect"],"tags":[]},{"location":"cli/connection/#auto-sync","level":2,"title":"Auto-Sync","text":"

            Once registered, the check-hub-sync hook automatically syncs new entries from the ctx Hub at the start of each session (daily throttled). No manual ctx connection sync needed.

            ","path":["CLI","Integrations","Connect"],"tags":[]},{"location":"cli/connection/#shared-files","level":2,"title":"Shared Files","text":"

            Entries from the ctx Hub are stored in .context/hub/:

            .context/hub/\n  decisions.md      # Shared decisions with origin tags\n  learnings.md      # Shared learnings\n  conventions.md    # Shared conventions\n  .sync-state.json  # Last-seen sequence tracker\n

            These files are read-only (managed by sync/listen) and never mixed with local context files.

            ","path":["CLI","Integrations","Connect"],"tags":[]},{"location":"cli/connection/#agent-integration","level":2,"title":"Agent Integration","text":"

            Include shared knowledge in agent context packets:

            ctx agent --include-hub\n

            Shared entries are included as Tier 8 in the budget-aware assembly, scored by recency and type relevance.

            ","path":["CLI","Integrations","Connect"],"tags":[]},{"location":"cli/context/","level":1,"title":"Context Management","text":"","path":["CLI","Context","Context Management"],"tags":[]},{"location":"cli/context/#ctx-add","level":3,"title":"ctx add","text":"

            Add a new item to a context file.

            ctx add <type> <content> [flags]\n

            Types:

            Type Target File task TASKS.md decision DECISIONS.md learning LEARNINGS.md convention CONVENTIONS.md

            Flags:

            Flag Short Description --priority <level> -p Priority for tasks: high, medium, low --section <name> -s Target section within file --context -c Context (required for decisions and learnings) --rationale -r Rationale for decisions (required for decisions) --consequence Consequence for decisions (required for decisions) --lesson -l Key insight (required for learnings) --application -a How to apply going forward (required for learnings) --file -f Read content from file instead of argument

            Examples:

            # Add a task\nctx add task \"Implement user authentication\" \\\n  --session-id abc12345 --branch main --commit 68fbc00a\nctx add task \"Fix login bug\" --priority high \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n\n# Record a decision (requires all ADR (Architectural Decision Record) fields)\nctx add decision \"Use PostgreSQL for primary database\" \\\n  --context \"Need a reliable database for production\" \\\n  --rationale \"PostgreSQL offers ACID compliance and JSON support\" \\\n  --consequence \"Team needs PostgreSQL training\" \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n\n# Note a learning (requires context, lesson, and application)\nctx add learning \"Vitest mocks must be hoisted\" \\\n  --context \"Tests failed with undefined mock errors\" \\\n  --lesson \"Vitest hoists vi.mock() calls to top of file\" \\\n  --application \"Always place vi.mock() before imports in test files\" \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n\n# Add to specific section\nctx add convention \"Use kebab-case for filenames\" --section \"Naming\"\n
            ","path":["CLI","Context","Context Management"],"tags":[]},{"location":"cli/context/#ctx-drift","level":3,"title":"ctx drift","text":"

            Detect stale or invalid context.

            ctx drift [flags]\n

            Flags:

            Flag Description --json Output machine-readable JSON --fix Auto-fix simple issues

            Checks:

            • Path references in ARCHITECTURE.md and CONVENTIONS.md exist
            • Task references are valid
            • Constitution rules aren't violated (heuristic)
            • Staleness indicators (old files, many completed tasks)
            • Missing packages: warns when internal/ directories exist on disk but are not referenced in ARCHITECTURE.md (suggests running /ctx-architecture)
            • Entry count: warns when LEARNINGS.md or DECISIONS.md exceed configurable thresholds (default: 30 learnings, 20 decisions), or when CONVENTIONS.md exceeds a line count threshold (default: 200). Configure via .ctxrc:
              entry_count_learnings: 30      # warn above this (0 = disable)\nentry_count_decisions: 20      # warn above this (0 = disable)\nconvention_line_count: 200     # warn above this (0 = disable)\n

            Example:

            ctx drift\nctx drift --json\nctx drift --fix\n

            Exit codes:

            Code Meaning 0 All checks passed 1 Warnings found 3 Violations found","path":["CLI","Context","Context Management"],"tags":[]},{"location":"cli/context/#ctx-sync","level":3,"title":"ctx sync","text":"

            Reconcile context with the current codebase state.

            ctx sync [flags]\n

            Flags:

            Flag Description --dry-run Show what would change without modifying

            What it does:

            • Scans codebase for structural changes
            • Compares with ARCHITECTURE.md
            • Suggests documenting dependencies if package files exist
            • Identifies stale or outdated context

            Example:

            ctx sync\nctx sync --dry-run\n
            ","path":["CLI","Context","Context Management"],"tags":[]},{"location":"cli/context/#ctx-compact","level":3,"title":"ctx compact","text":"

            Consolidate and clean up context files.

            • Moves completed tasks older than 7 days to the archive
            • Removes empty sections
            ctx compact [flags]\n

            Flags:

            Flag Description --archive Create .context/archive/ for old content

            Example:

            ctx compact\nctx compact --archive\n
            ","path":["CLI","Context","Context Management"],"tags":[]},{"location":"cli/context/#ctx-fmt","level":3,"title":"ctx fmt","text":"

            Format context files to a consistent line width.

            Wraps long lines in TASKS.md, DECISIONS.md, LEARNINGS.md, and CONVENTIONS.md at word boundaries. Markdown list items get 2-space continuation indent. Headings, tables, frontmatter, and HTML comments are preserved as-is.

            Idempotent: running twice produces the same output.

            ctx fmt [flags]\n

            Flags:

            Flag Type Default Description --width int 80 Target line width --check bool false Check only, exit 1 if files would change

            Examples:

            ctx fmt              # format all context files\nctx fmt --check      # CI mode: check without modifying\nctx fmt --width 100  # custom width\n

            Also available as a Makefile target:

            make fmt-context\n
            ","path":["CLI","Context","Context Management"],"tags":[]},{"location":"cli/context/#ctx-task","level":3,"title":"ctx task","text":"

            Manage task completion, archival, and snapshots.

            ctx task <subcommand>\n
            ","path":["CLI","Context","Context Management"],"tags":[]},{"location":"cli/context/#ctx-task-complete","level":4,"title":"ctx task complete","text":"

            Mark a task as completed.

            ctx task complete <task-id-or-text>\n

            Arguments:

            • task-id-or-text: Task number or partial text match

            Examples:

            # By text (partial match)\nctx task complete \"user auth\"\n\n# By task number\nctx task complete 3\n
            ","path":["CLI","Context","Context Management"],"tags":[]},{"location":"cli/context/#ctx-task-archive","level":4,"title":"ctx task archive","text":"

            Move completed tasks from TASKS.md to a timestamped archive file.

            ctx task archive [flags]\n

            Flags:

            Flag Description --dry-run Preview changes without modifying files

            Archive files are stored in .context/archive/ with timestamped names (tasks-YYYY-MM-DD.md). Completed tasks (marked with [x]) are moved; pending tasks ([ ]) remain in TASKS.md.

            Example:

            ctx task archive\nctx task archive --dry-run\n
            ","path":["CLI","Context","Context Management"],"tags":[]},{"location":"cli/context/#ctx-task-snapshot","level":4,"title":"ctx task snapshot","text":"

            Create a point-in-time snapshot of TASKS.md without modifying the original.

            ctx task snapshot [name]\n

            Arguments:

            • name: Optional name for the snapshot (defaults to \"snapshot\")

            Snapshots are stored in .context/archive/ with timestamped names (tasks-<name>-YYYY-MM-DD-HHMM.md).

            Example:

            ctx task snapshot\nctx task snapshot \"before-refactor\"\n
            ","path":["CLI","Context","Context Management"],"tags":[]},{"location":"cli/context/#ctx-permission","level":3,"title":"ctx permission","text":"

            Manage Claude Code permission snapshots.

            ctx permission <subcommand>\n
            ","path":["CLI","Context","Context Management"],"tags":[]},{"location":"cli/context/#ctx-permission-snapshot","level":4,"title":"ctx permission snapshot","text":"

            Save .claude/settings.local.json as the golden image.

            ctx permission snapshot\n

            Creates .claude/settings.golden.json as a byte-for-byte copy of the current settings. Overwrites if the golden file already exists.

            The golden file is meant to be committed to version control and shared with the team.

            Example:

            ctx permission snapshot\n# Saved golden image: .claude/settings.golden.json\n
            ","path":["CLI","Context","Context Management"],"tags":[]},{"location":"cli/context/#ctx-permission-restore","level":4,"title":"ctx permission restore","text":"

            Replace settings.local.json with the golden image.

            ctx permission restore\n

            Prints a diff of dropped (session-accumulated) and restored permissions. No-op if the files already match.

            Example:

            ctx permission restore\n# Dropped 3 session permission(s):\n#   - Bash(cat /tmp/debug.log:*)\n#   - Bash(rm /tmp/test-*:*)\n#   - Bash(curl https://example.com:*)\n# Restored from golden image.\n
            ","path":["CLI","Context","Context Management"],"tags":[]},{"location":"cli/context/#ctx-reindex","level":3,"title":"ctx reindex","text":"

            Regenerate the quick-reference index for both DECISIONS.md and LEARNINGS.md in a single invocation.

            ctx reindex\n

            This is a convenience wrapper around ctx decision reindex and ctx learning reindex. Both files grow at similar rates and users typically want to reindex both after manual edits.

            The index is a compact table of date and title for each entry, allowing AI tools to scan entries without reading the full file.

            Example:

            ctx reindex\n# ✓ Index regenerated with 12 entries\n# ✓ Index regenerated with 8 entries\n
            ","path":["CLI","Context","Context Management"],"tags":[]},{"location":"cli/context/#ctx-decision","level":3,"title":"ctx decision","text":"

            Manage the DECISIONS.md file.

            ctx decision <subcommand>\n
            ","path":["CLI","Context","Context Management"],"tags":[]},{"location":"cli/context/#ctx-decision-reindex","level":4,"title":"ctx decision reindex","text":"

            Regenerate the quick-reference index at the top of DECISIONS.md.

            ctx decision reindex\n

            The index is a compact table showing the date and title for each decision, allowing AI tools to quickly scan entries without reading the full file.

            Use this after manual edits to DECISIONS.md or when migrating existing files to use the index format.

            Example:

            ctx decision reindex\n# ✓ Index regenerated with 12 entries\n
            ","path":["CLI","Context","Context Management"],"tags":[]},{"location":"cli/context/#ctx-learning","level":3,"title":"ctx learning","text":"

            Manage the LEARNINGS.md file.

            ctx learning <subcommand>\n
            ","path":["CLI","Context","Context Management"],"tags":[]},{"location":"cli/context/#ctx-learning-reindex","level":4,"title":"ctx learning reindex","text":"

            Regenerate the quick-reference index at the top of LEARNINGS.md.

            ctx learning reindex\n

            The index is a compact table showing the date and title for each learning, allowing AI tools to quickly scan entries without reading the full file.

            Use this after manual edits to LEARNINGS.md or when migrating existing files to use the index format.

            Example:

            ctx learning reindex\n# ✓ Index regenerated with 8 entries\n
            ","path":["CLI","Context","Context Management"],"tags":[]},{"location":"cli/doctor/","level":1,"title":"Doctor","text":"","path":["CLI","Diagnostics","Doctor"],"tags":[]},{"location":"cli/doctor/#ctx-doctor","level":3,"title":"ctx doctor","text":"

            Structural health check across context, hooks, and configuration. Runs mechanical checks that don't require semantic analysis. Think of it as ctx status + ctx drift + configuration audit in one pass.

            ctx doctor [flags]\n

            Flags:

            Flag Short Type Default Description --json -j bool false Machine-readable JSON output","path":["CLI","Diagnostics","Doctor"],"tags":[]},{"location":"cli/doctor/#what-it-checks","level":4,"title":"What It Checks","text":"Check Category What it verifies Context initialized Structure .context/ directory exists Required files present Structure All required context files exist (TASKS.md, etc.) Drift detected Quality Stale paths, missing files, constitution violations Event logging status Hooks Whether event_log: true is set in .ctxrc Webhook configured Hooks .notify.enc file exists Pending reminders State Count of entries in reminders.json Task completion ratio State Pending vs completed tasks in TASKS.md Context token size Size Estimated token count across all context files Recent event activity Events Last event timestamp (only when event logging is enabled)","path":["CLI","Diagnostics","Doctor"],"tags":[]},{"location":"cli/doctor/#output-format-human","level":4,"title":"Output Format (Human)","text":"
            ctx doctor\n==========\n\nStructure\n  ✓ Context initialized (.context/)\n  ✓ Required files present (4/4)\n\nQuality\n  ⚠ Drift: 2 warnings (stale path in ARCHITECTURE.md, high entry count in LEARNINGS.md)\n\nHooks\n  ✓ hooks.json valid (14 hooks registered)\n  ○ Event logging disabled (enable with event_log: true in .ctxrc)\n\nState\n  ✓ No pending reminders\n  ⚠ Task completion ratio high (18/22 = 82%): consider archiving\n\nSize\n  ✓ Context size: ~4200 tokens (budget: 8000)\n\nSummary: 2 warnings, 0 errors\n

            Status indicators:

            Icon Status Meaning ✓ ok Check passed ⚠ warning Non-critical issue worth fixing ✗ error Problem that needs attention ○ info Informational note","path":["CLI","Diagnostics","Doctor"],"tags":[]},{"location":"cli/doctor/#output-format-json","level":4,"title":"Output Format (JSON)","text":"
            {\n  \"results\": [\n    {\n      \"name\": \"context_initialized\",\n      \"category\": \"Structure\",\n      \"status\": \"ok\",\n      \"message\": \"Context initialized (.context/)\"\n    },\n    {\n      \"name\": \"required_files\",\n      \"category\": \"Structure\",\n      \"status\": \"ok\",\n      \"message\": \"Required files present (4/4)\"\n    },\n    {\n      \"name\": \"drift\",\n      \"category\": \"Quality\",\n      \"status\": \"warning\",\n      \"message\": \"Drift: 2 warnings\"\n    },\n    {\n      \"name\": \"event_logging\",\n      \"category\": \"Hooks\",\n      \"status\": \"info\",\n      \"message\": \"Event logging disabled (enable with event_log: true in .ctxrc)\"\n    },\n    {\n      \"name\": \"webhook\",\n      \"category\": \"Hooks\",\n      \"status\": \"ok\",\n      \"message\": \"Webhook configured\"\n    },\n    {\n      \"name\": \"reminders\",\n      \"category\": \"State\",\n      \"status\": \"ok\",\n      \"message\": \"No pending reminders\"\n    },\n    {\n      \"name\": \"task_completion\",\n      \"category\": \"State\",\n      \"status\": \"warning\",\n      \"message\": \"Tasks: 18/22 completed (82%): consider archiving with ctx task archive\"\n    },\n    {\n      \"name\": \"context_size\",\n      \"category\": \"Size\",\n      \"status\": \"ok\",\n      \"message\": \"Context size: ~4200 tokens (budget: 8000)\"\n    }\n  ],\n  \"warnings\": 2,\n  \"errors\": 0\n}\n

            Examples:

            # Quick structural health check\nctx doctor\n\n# Machine-readable output for scripting\nctx doctor --json\n\n# Count warnings\nctx doctor --json | jq '.warnings'\n\n# Check for errors only\nctx doctor --json | jq '[.results[] | select(.status == \"error\")]'\n
            ","path":["CLI","Diagnostics","Doctor"],"tags":[]},{"location":"cli/doctor/#when-to-use-what","level":4,"title":"When to Use What","text":"Tool When ctx status Quick glance at files, tokens, and drift ctx doctor Thorough structural checkup (hooks, config, events too) /ctx-doctor Agent-driven diagnosis with event log pattern analysis

            ctx status tells you what's there. ctx doctor tells you what's wrong. /ctx-doctor tells you why it's wrong and what to do about it.

            ","path":["CLI","Diagnostics","Doctor"],"tags":[]},{"location":"cli/doctor/#what-it-does-not-do","level":4,"title":"What It Does Not Do","text":"
            • No event pattern analysis: that's the /ctx-doctor skill's job
            • No auto-fixing: reports findings, doesn't modify anything
            • No external service checks: doesn't verify webhook endpoint availability

            See also: Troubleshooting | ctx hook event | /ctx-doctor skill | Detecting and Fixing Drift

            ","path":["CLI","Diagnostics","Doctor"],"tags":[]},{"location":"cli/event/","level":1,"title":"Event","text":"","path":["Event"],"tags":[]},{"location":"cli/event/#ctx-hook-event","level":3,"title":"ctx hook event","text":"

            Query the local hook event log. Requires event_log: true in .ctxrc. Reads events from .context/state/events.jsonl and outputs them in a human-readable table or raw JSONL format.

            All filter flags combine with AND logic.

            ctx hook event [flags]\n

            Flags:

            Flag Description --hook Filter by hook name --session Filter by session ID --event Filter by event type (relay, nudge) --last Show last N events (default: 50) --json Output raw JSONL (for piping to jq) --all Include rotated log file

            Examples:

            ctx hook event                                        # recent events\nctx hook event --hook check-context-size --last 10    # one hook, last 10\nctx hook event --json | jq '.hook'                    # pipe to jq\nctx hook event --session abc123                       # filter by session\n
            ","path":["Event"],"tags":[]},{"location":"cli/guide/","level":1,"title":"Guide","text":"","path":["CLI","Getting Started","Guide"],"tags":[]},{"location":"cli/guide/#ctx-guide","level":2,"title":"ctx guide","text":"

            Quick-reference cheat sheet for common ctx commands and skills.

            ctx guide [flags]\n

            Flags:

            Flag Description --skills Show available skills --commands Show available CLI commands

            Example:

            # Show the full cheat sheet\nctx guide\n\n# Skills only\nctx guide --skills\n\n# Commands only\nctx guide --commands\n

            Works without initialization (no .context/ required). Useful for a printable one-pager when onboarding to a project.

            ","path":["CLI","Getting Started","Guide"],"tags":[]},{"location":"cli/hook/","level":1,"title":"Hook","text":"","path":["CLI","Runtime","Hook"],"tags":[]},{"location":"cli/hook/#ctx-hook","level":3,"title":"ctx hook","text":"

            Manage hook-related settings: messages, notifications, pause/resume, and event log.

            ctx hook <subcommand> [flags]\n
            ","path":["CLI","Runtime","Hook"],"tags":[]},{"location":"cli/hook/#subcommands","level":2,"title":"Subcommands","text":"Subcommand Description ctx hook message list Show all hook messages with override status ctx hook message show <h> <v> Print the effective message template ctx hook message edit <h> <v> Copy default to .context/ for editing ctx hook message reset <h> <v> Delete user override, revert to default ctx hook notify [message] Send a webhook notification ctx hook notify setup Configure and encrypt webhook URL ctx hook notify test Send a test notification ctx hook pause Pause all context hooks for this session ctx hook resume Resume paused context hooks ctx hook event Query the local hook event log","path":["CLI","Runtime","Hook"],"tags":[]},{"location":"cli/hook/#examples","level":2,"title":"Examples","text":"
            # View and manage hook messages\nctx hook message list\nctx hook message show qa-reminder gate\nctx hook message edit qa-reminder gate\n\n# Webhook notifications\nctx hook notify setup\nctx hook notify --event loop \"Loop completed\"\n\n# Pause/resume hooks\nctx hook pause\nctx hook resume\n\n# Browse event log\nctx hook event --last 20\nctx hook event --hook qa-reminder --json\n

            See also: Customizing Hook Messages | Webhook Notifications | Pausing Context Hooks | System Hooks Audit

            ","path":["CLI","Runtime","Hook"],"tags":[]},{"location":"cli/hub/","level":1,"title":"Hub","text":"","path":["CLI","Integrations","Hub"],"tags":[]},{"location":"cli/hub/#ctx-hub","level":2,"title":"ctx hub","text":"

            Operator commands for a ctx Hub: the gRPC server that fans out decisions, learnings, conventions, and tasks across projects. Use ctx hub to start and stop the server, inspect cluster state, add or remove peers at runtime, and hand off leadership before maintenance.

            Who Needs This Page

            You only need ctx hub if you are running a hub server or cluster. For client-side operations (register, subscribe, sync, publish, listen), see ctx connect. For the mental model behind the hub as a whole, read the ctx Hub overview.

            ","path":["CLI","Integrations","Hub"],"tags":[]},{"location":"cli/hub/#ctx-hub-start","level":3,"title":"ctx hub start","text":"

            Start the hub gRPC server.

            Examples:

            ctx hub start                           # Foreground, default port 9900\nctx hub start --port 8080               # Custom port\nctx hub start --data-dir /srv/ctx-hub   # Custom data directory\n

            On first run, generates an admin token and prints it to stdout. Save this token; it's required for ctx connection register in client projects. Subsequent runs reuse the stored token from <data-dir>/admin.token.

            Default data directory: ~/.ctx/hub-data/

            ","path":["CLI","Integrations","Hub"],"tags":[]},{"location":"cli/hub/#daemon-mode","level":4,"title":"Daemon Mode","text":"

            Run the hub as a detached background process:

            ctx hub start --daemon          # Fork to background\nctx hub stop                    # Graceful shutdown\n

            The daemon writes a PID file to <data-dir>/hub.pid. Stop the daemon with ctx hub stop (see below).

            ","path":["CLI","Integrations","Hub"],"tags":[]},{"location":"cli/hub/#cluster-mode","level":4,"title":"Cluster Mode","text":"

            For high availability, run multiple hubs with Raft-based leader election:

            ctx hub start --port 9900 \\\n  --peers host2:9901,host3:9901\n

            Raft is used only for leader election. Data replication uses sequence-based gRPC sync on the append-only JSONL log; there is no multi-node consensus on writes. See the HA cluster recipe for the full setup and the Raft-lite durability caveat.

            ","path":["CLI","Integrations","Hub"],"tags":[]},{"location":"cli/hub/#flags","level":4,"title":"Flags","text":"Flag Description Default --port Hub listen port 9900 --data-dir Hub data directory ~/.ctx/hub-data/ --daemon Run the hub server in the background false --peers Comma-separated peer addresses for cluster mode (none)","path":["CLI","Integrations","Hub"],"tags":[]},{"location":"cli/hub/#validation","level":4,"title":"Validation","text":"

            The hub validates every published entry before accepting it:

            • Type must be one of decision, learning, convention, task
            • ID and Origin are required and non-empty
            • Content size capped at 1 MB (text-only)
            • Duplicate project registration is rejected (one token per project)
            ","path":["CLI","Integrations","Hub"],"tags":[]},{"location":"cli/hub/#ctx-hub-stop","level":3,"title":"ctx hub stop","text":"

            Stop a running hub daemon.

            Examples:

            ctx hub stop                            # Stop using default data dir\nctx hub stop --data-dir /srv/ctx-hub    # Custom data directory\n

            Sends SIGTERM to the PID recorded in <data-dir>/hub.pid, waits for in-flight RPCs to drain, and removes the PID file. Safe to rerun: if no daemon is running, returns a \"no running hub\" error without side effects.

            ","path":["CLI","Integrations","Hub"],"tags":[]},{"location":"cli/hub/#ctx-hub-status","level":3,"title":"ctx hub status","text":"

            Show cluster status: role, peers, sync state, entry count, and uptime.

            Examples:

            ctx hub status\n
            ","path":["CLI","Integrations","Hub"],"tags":[]},{"location":"cli/hub/#ctx-hub-peer","level":3,"title":"ctx hub peer","text":"

            Add or remove peers from the cluster at runtime. Useful for scaling up or replacing a decommissioned node without restarting the leader.

            Examples:

            ctx hub peer add host2:9901\nctx hub peer remove host2:9901\n
            ","path":["CLI","Integrations","Hub"],"tags":[]},{"location":"cli/hub/#ctx-hub-stepdown","level":3,"title":"ctx hub stepdown","text":"

            Transfer leadership to another node gracefully. Triggers a new election among the remaining followers before the current leader steps down. Use before taking the leader offline for maintenance.

            Examples:

            ctx hub stepdown\n
            ","path":["CLI","Integrations","Hub"],"tags":[]},{"location":"cli/hub/#see-also","level":3,"title":"See Also","text":"
            • ctx connect: client-side commands (register, subscribe, sync, publish, listen)
            • ctx Hub overview: mental model and user stories
            • ctx Hub: Getting Started
            • Hub operations: production deployment, backup, monitoring
            • Hub failure modes
            • Hub security model
            ","path":["CLI","Integrations","Hub"],"tags":[]},{"location":"cli/init-status/","level":1,"title":"Init and Status","text":"","path":["CLI","Getting Started","Init and Status"],"tags":[]},{"location":"cli/init-status/#ctx-init","level":3,"title":"ctx init","text":"

            Initialize a new .context/ directory with template files.

            ctx init [flags]\n

            Flags:

            Flag Short Description --force -f Overwrite existing context files --minimal -m Only create essential files (TASKS.md, DECISIONS.md, CONSTITUTION.md) --merge Auto-merge ctx content into existing CLAUDE.md

            Creates:

            • .context/ directory with all template files
            • .claude/settings.local.json with pre-approved ctx permissions
            • CLAUDE.md with bootstrap instructions (or merges into existing)

            Claude Code hooks and skills are provided by the ctx plugin (see Integrations).

            Example:

            # Standard init\nctx init\n\n# Minimal setup (just core files)\nctx init --minimal\n\n# Force overwrite existing\nctx init --force\n\n# Merge into existing files\nctx init --merge\n
            ","path":["CLI","Getting Started","Init and Status"],"tags":[]},{"location":"cli/init-status/#ctx-status","level":3,"title":"ctx status","text":"

            Show the current context summary.

            ctx status [flags]\n

            Flags:

            Flag Short Description --json Output as JSON --verbose -v Include file contents summary

            Output:

            • Context directory path
            • Total files and token estimate
            • Status of each file (loaded, empty, missing)
            • Recent activity (modification times)
            • Drift warnings if any

            Example:

            ctx status\nctx status --json\nctx status --verbose\n
            ","path":["CLI","Getting Started","Init and Status"],"tags":[]},{"location":"cli/init-status/#ctx-agent","level":3,"title":"ctx agent","text":"

            Print an AI-ready context packet optimized for LLM consumption.

            ctx agent [flags]\n

            Flags:

            Flag Default Description --budget 8000 Token budget: controls content selection and prioritization --format md Output format: md or json --cooldown 10m Suppress repeated output within this duration (requires --session) --session (none) Session ID for cooldown isolation (e.g., $PPID) --include-hub false Include hub entries from .context/hub/

            How budget works:

            The budget controls how much context is included. Entries are selected in priority tiers:

            1. Constitution: always included in full (inviolable rules)
            2. Tasks: all active tasks, up to 40% of budget
            3. Conventions: all conventions, up to 20% of budget
            4. Decisions: scored by recency and relevance to active tasks
            5. Learnings: scored by recency and relevance to active tasks
            6. Steering: applicable steering file bodies, scored by their inclusion mode and description match against the active prompt
            7. Skill: named skill content (from --skill)
            8. Hub: entries from .context/hub/ (with --include-hub, see ctx connect)

            Decisions and learnings are ranked by a combined score (how recent + how relevant to your current tasks). High-scoring entries are included with their full body. Entries that don't fit get title-only summaries in an \"Also Noted\" section. Superseded entries are excluded.

            Output Sections:

            Section Source Selection Read These Files all .context/ Non-empty files in priority order Constitution CONSTITUTION.md All rules (never truncated) Current Tasks TASKS.md All unchecked tasks (budget-capped) Key Conventions CONVENTIONS.md All items (budget-capped) Recent Decisions DECISIONS.md Full body, scored by relevance Key Learnings LEARNINGS.md Full body, scored by relevance Also Noted overflow Title-only summaries

            Example:

            # Default (8000 tokens, markdown)\nctx agent\n\n# Smaller packet for tight context windows\nctx agent --budget 4000\n\n# JSON format for programmatic use\nctx agent --format json\n\n# Pipe to file\nctx agent --budget 4000 > context.md\n\n# With cooldown (hooks/automation: requires --session)\nctx agent --session $PPID\n

            Use case: Copy-paste into AI chat, pipe to system prompt, or use in hooks.

            ","path":["CLI","Getting Started","Init and Status"],"tags":[]},{"location":"cli/init-status/#ctx-load","level":3,"title":"ctx load","text":"

            Load and display assembled context as AI would see it.

            ctx load [flags]\n

            Flags:

            Flag Description --budget <tokens> Token budget for assembly (default: 8000) --raw Output raw file contents without assembly

            Example:

            ctx load\nctx load --budget 16000\nctx load --raw\n
            ","path":["CLI","Getting Started","Init and Status"],"tags":[]},{"location":"cli/journal/","level":1,"title":"Journal","text":"","path":["CLI","Sessions","Journal"],"tags":[]},{"location":"cli/journal/#ctx-journal","level":3,"title":"ctx journal","text":"

            Browse and search AI session history from Claude Code and other tools.

            ctx journal <subcommand>\n
            ","path":["CLI","Sessions","Journal"],"tags":[]},{"location":"cli/journal/#ctx-journal-source","level":4,"title":"ctx journal source","text":"

            List all parsed sessions.

            ctx journal source [flags]\n

            Flags:

            Flag Short Description --limit -n Maximum sessions to display (default: 20) --project -p Filter by project name --tool -t Filter by tool (e.g., claude-code) --all-projects Include sessions from all projects

            Sessions are sorted by date (newest first) and display slug, project, start time, duration, turn count, and token usage.

            Example:

            ctx journal source\nctx journal source --limit 5\nctx journal source --project ctx\nctx journal source --tool claude-code\n
            ","path":["CLI","Sessions","Journal"],"tags":[]},{"location":"cli/journal/#ctx-journal-source-show","level":4,"title":"ctx journal source --show","text":"

            Show details of a specific session.

            ctx journal source --show [session-id] [flags]\n

            Flags:

            Flag Description --latest Show the most recent session --full Show full message content --all-projects Search across all projects

            The session ID can be a full UUID, partial match, or session slug name.

            Example:

            ctx journal source --show abc123\nctx journal source --show gleaming-wobbling-sutherland\nctx journal source --show --latest\nctx journal source --show --latest --full\n
            ","path":["CLI","Sessions","Journal"],"tags":[]},{"location":"cli/journal/#ctx-journal-import","level":4,"title":"ctx journal import","text":"

            Import sessions to editable journal files in .context/journal/.

            ctx journal import [session-id] [flags]\n

            Flags:

            Flag Description --all Import all sessions (only new files by default) --all-projects Import from all projects --regenerate Re-import existing files (preserves YAML frontmatter by default) --keep-frontmatter Preserve enriched YAML frontmatter during regeneration (default: true) --yes, -y Skip confirmation prompt --dry-run Show what would be imported without writing files

            Safe by default: --all only imports new sessions. Existing files are skipped. Use --regenerate to re-import existing files (conversation content is regenerated, YAML frontmatter from enrichment is preserved by default). Use --keep-frontmatter=false to discard enriched frontmatter during regeneration.

            Locked entries (via ctx journal lock) are always skipped, regardless of flags.

            Single-session import (ctx journal import <id>) always writes without prompting, since you are explicitly targeting one session.

            The journal/ directory should be gitignored (like sessions/) since it contains raw conversation data.

            Example:

            ctx journal import abc123                 # Import one session\nctx journal import --all                  # Import only new sessions\nctx journal import --all --dry-run        # Preview what would be imported\nctx journal import --all --regenerate     # Re-import existing (prompts)\nctx journal import --all --regenerate -y  # Re-import without prompting\nctx journal import --all --regenerate --keep-frontmatter=false -y  # Discard frontmatter\n
            ","path":["CLI","Sessions","Journal"],"tags":[]},{"location":"cli/journal/#ctx-journal-lock","level":4,"title":"ctx journal lock","text":"

            Protect journal entries from being overwritten by import --regenerate or modified by enrichment skills (/ctx-journal-enrich, /ctx-journal-enrich-all).

            ctx journal lock <pattern> [flags]\n

            Flags:

            Flag Description --all Lock all journal entries

            The pattern matches filenames by slug, date, or short ID. Locking a multi-part entry locks all parts. The lock is recorded in .context/journal/.state.json and a locked: true line is added to the file's YAML frontmatter for visibility.

            Example:

            ctx journal lock abc12345\nctx journal lock 2026-01-21-session-abc12345.md\nctx journal lock --all\n
            ","path":["CLI","Sessions","Journal"],"tags":[]},{"location":"cli/journal/#ctx-journal-unlock","level":4,"title":"ctx journal unlock","text":"

            Remove lock protection from journal entries.

            ctx journal unlock <pattern> [flags]\n

            Flags:

            Flag Description --all Unlock all journal entries

            Example:

            ctx journal unlock abc12345\nctx journal unlock --all\n
            ","path":["CLI","Sessions","Journal"],"tags":[]},{"location":"cli/journal/#ctx-journal-sync","level":4,"title":"ctx journal sync","text":"

            Sync lock state from journal frontmatter to .state.json.

            ctx journal sync\n

            Scans all journal markdown files and updates .state.json to match each file's frontmatter. Files with locked: true in frontmatter are marked locked in state; files without a locked: line have their lock cleared.

            This is the inverse of ctx journal lock: instead of state driving frontmatter, frontmatter drives state. Useful after batch enrichment where you add locked: true to frontmatter manually.

            Example:

            # After enriching entries and adding locked: true to frontmatter\nctx journal sync\n
            ","path":["CLI","Sessions","Journal"],"tags":[]},{"location":"cli/journal/#ctx-journal_1","level":3,"title":"ctx journal","text":"

            Analyze and synthesize imported session files.

            ctx journal <subcommand>\n
            ","path":["CLI","Sessions","Journal"],"tags":[]},{"location":"cli/journal/#ctx-journal-site","level":4,"title":"ctx journal site","text":"

            Generate a static site from journal entries in .context/journal/.

            ctx journal site [flags]\n

            Flags:

            Flag Short Description --output -o Output directory (default: .context/journal-site) --build Run zensical build after generating --serve Run zensical serve after generating

            Creates a zensical-compatible site structure with an index page listing all sessions by date, and individual pages for each journal entry.

            Requires zensical to be installed for --build or --serve:

            pipx install zensical\n

            Example:

            ctx journal site                    # Generate in .context/journal-site/\nctx journal site --output ~/public  # Custom output directory\nctx journal site --build            # Generate and build HTML\nctx journal site --serve            # Generate and serve locally\n
            ","path":["CLI","Sessions","Journal"],"tags":[]},{"location":"cli/journal/#ctx-journal-obsidian","level":4,"title":"ctx journal obsidian","text":"

            Generate an Obsidian vault from journal entries in .context/journal/.

            ctx journal obsidian [flags]\n

            Flags:

            Flag Short Description --output -o Output directory (default: .context/journal-obsidian)

            Creates an Obsidian-compatible vault with:

            • Wikilinks ([[target|display]]) for all internal navigation
            • MOC pages (Map of Content) for topics, key files, and session types
            • Related sessions footer linking entries that share topics
            • Transformed frontmatter (topics → tags for Obsidian integration)
            • Minimal .obsidian/ config enforcing wikilink mode

            No external dependencies are required: open the output directory as an Obsidian vault directly.

            Example:

            ctx journal obsidian                        # Generate in .context/journal-obsidian/\nctx journal obsidian --output ~/vaults/ctx  # Custom output directory\n
            ","path":["CLI","Sessions","Journal"],"tags":[]},{"location":"cli/journal/#ctx-journal-schema-check","level":4,"title":"ctx journal schema check","text":"

            Validate JSONL session files against the embedded schema and report drift.

            ctx journal schema check [flags]\n

            Flags:

            Flag Short Description --dir Directory to scan for JSONL files --all-projects Scan all Claude Code project directories --quiet -q Exit code only (0 = clean, 1 = drift)

            Scans JSONL files for unknown fields, missing required fields, unknown record types, and unknown content block types. When drift is found, writes a Markdown report to .context/reports/schema-drift.md. When drift resolves, the report is automatically deleted.

            Designed for interactive use, CI pipelines, and nightly cron jobs.

            Example:

            ctx journal schema check                    # Current project\nctx journal schema check --all-projects     # All projects\nctx journal schema check --quiet            # Exit code only\nctx journal schema check --dir /path/to     # Custom directory\n
            ","path":["CLI","Sessions","Journal"],"tags":[]},{"location":"cli/journal/#ctx-journal-schema-dump","level":4,"title":"ctx journal schema dump","text":"

            Print the embedded JSONL schema definition.

            ctx journal schema dump\n

            Shows all known record types with their required and optional fields, and all recognized content block types with their parse status. Useful for inspecting what the schema validator expects.

            Example:

            ctx journal schema dump\n
            ","path":["CLI","Sessions","Journal"],"tags":[]},{"location":"cli/journal/#ctx-serve","level":3,"title":"ctx serve","text":"

            Serve any zensical directory locally. This is a serve-only command: it does not generate or regenerate site content.

            ctx serve [directory]\n

            If no directory is specified, defaults to the journal site (.context/journal-site).

            Requires zensical to be installed:

            pipx install zensical\n

            ctx serve vs. ctx journal site --serve

            ctx journal site --serve generates the journal site then serves it: an all-in-one command. ctx serve only serves an existing directory, and works with any zensical site (journal, docs, etc.).

            Example:

            ctx serve                        # Serve journal site (no regeneration)\nctx serve .context/journal-site  # Same, explicit path\nctx serve ./site                 # Serve the docs site\n
            ","path":["CLI","Sessions","Journal"],"tags":[]},{"location":"cli/loop/","level":1,"title":"Loop","text":"","path":["CLI","Integrations","Loop"],"tags":[]},{"location":"cli/loop/#ctx-loop","level":2,"title":"ctx loop","text":"

            Generate a shell script for running an autonomous loop.

            An autonomous loop continuously runs an AI assistant with the same prompt until a completion signal is detected, enabling iterative development where the AI builds on its previous work.

            ctx loop [flags]\n

            Flags:

            Flag Short Description Default --tool <tool> -t AI tool: claude, aider, or generic claude --prompt <file> -p Prompt file to use .context/loop.md --max-iterations <n> -n Maximum iterations (0 = unlimited) 0 --completion <signal> -c Completion signal to detect SYSTEM_CONVERGED --output <file> -o Output script filename loop.sh

            Examples:

            # Generate loop.sh for Claude Code\nctx loop\n\n# Generate for Aider with custom prompt\nctx loop --tool aider --prompt TASKS.md\n\n# Limit to 10 iterations\nctx loop --max-iterations 10\n\n# Output to custom file\nctx loop -o my-loop.sh\n

            Running the generated loop:

            ctx loop\nchmod +x loop.sh\n./loop.sh\n

            See also: Autonomous Loops for the full workflow.

            ","path":["CLI","Integrations","Loop"],"tags":[]},{"location":"cli/mcp/","level":1,"title":"MCP Server","text":"","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#ctx-mcp","level":2,"title":"ctx mcp","text":"

            Run ctx as a Model Context Protocol (MCP) server. MCP is a standard protocol that lets AI tools discover and consume context from external sources via JSON-RPC 2.0 over stdin/stdout.

            This makes ctx accessible to any MCP-compatible AI tool without custom hooks or integrations:

            • Claude Desktop
            • Cursor
            • Windsurf
            • VS Code Copilot
            • Any tool supporting MCP
            ","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#ctx-mcp-serve","level":3,"title":"ctx mcp serve","text":"

            Start the MCP server. This command reads JSON-RPC 2.0 requests from stdin and writes responses to stdout. It is intended to be launched by MCP clients (Claude Desktop, Cursor, VS Code Copilot), not run directly from a shell. See Configuration below for how each host launches it.

            Flags: None. The server uses the configured context directory (from --context-dir, CTX_DIR, .ctxrc, or the default .context).

            Examples:

            # Normal invocation (by an MCP client via stdio transport)\nctx mcp serve\n\n# Pin a context directory for a specific workspace\nctx --context-dir /path/to/project/.context mcp serve\n\n# Verify the binary starts without a client attached (Ctrl-C to exit)\nctx mcp serve < /dev/null\n
            ","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#configuration","level":2,"title":"Configuration","text":"","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#claude-desktop","level":3,"title":"Claude Desktop","text":"

            Add to ~/Library/Application Support/Claude/claude_desktop_config.json:

            {\n  \"mcpServers\": {\n    \"ctx\": {\n      \"command\": \"ctx\",\n      \"args\": [\"mcp\", \"serve\"]\n    }\n  }\n}\n
            ","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#cursor","level":3,"title":"Cursor","text":"

            Add to .cursor/mcp.json in your project:

            {\n  \"mcpServers\": {\n    \"ctx\": {\n      \"command\": \"ctx\",\n      \"args\": [\"mcp\", \"serve\"]\n    }\n  }\n}\n
            ","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#vs-code-copilot","level":3,"title":"VS Code (Copilot)","text":"

            Add to .vscode/mcp.json:

            {\n  \"servers\": {\n    \"ctx\": {\n      \"command\": \"ctx\",\n      \"args\": [\"mcp\", \"serve\"]\n    }\n  }\n}\n
            ","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#resources","level":2,"title":"Resources","text":"

            Resources expose context files as read-only content. Each resource has a URI, name, and returns Markdown text.

            URI Name Description ctx://context/constitution constitution Hard rules that must never be violated ctx://context/tasks tasks Current work items and their status ctx://context/conventions conventions Code patterns and standards ctx://context/architecture architecture System architecture documentation ctx://context/decisions decisions Architectural decisions with rationale ctx://context/learnings learnings Gotchas, tips, and lessons learned ctx://context/glossary glossary Project-specific terminology ctx://context/agent agent All files assembled in priority read order

            The agent resource assembles all non-empty context files into a single Markdown document, ordered by the configured read priority.

            ","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#resource-subscriptions","level":3,"title":"Resource Subscriptions","text":"

            Clients can subscribe to resource changes via resources/subscribe. The server polls for file mtime changes (default: 5 seconds) and emits notifications/resources/updated when a subscribed file changes on disk.

            ","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#tools","level":2,"title":"Tools","text":"

            Tools expose ctx commands as callable operations. Each tool accepts JSON arguments and returns text results.

            ","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#ctx_status","level":3,"title":"ctx_status","text":"

            Show context health: file count, token estimate, and per-file summary.

            Arguments: None. Read-only.

            ","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#ctx_add","level":3,"title":"ctx_add","text":"

            Add a task, decision, learning, or convention to the context.

            Argument Type Required Description type string Yes Entry type: task, decision, learning, convention content string Yes Title or main content priority string No Priority level (tasks only): high, medium, low context string Conditional Context field (decisions and learnings) rationale string Conditional Rationale (decisions only) consequence string Conditional Consequence (decisions only) lesson string Conditional Lesson learned (learnings only) application string Conditional How to apply (learnings only)","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#ctx_complete","level":3,"title":"ctx_complete","text":"

            Mark a task as done by number or text match.

            Argument Type Required Description query string Yes Task number (e.g. \"1\") or search text","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#ctx_drift","level":3,"title":"ctx_drift","text":"

            Detect stale or invalid context. Returns violations, warnings, and passed checks.

            Arguments: None. Read-only.

            ","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#ctx_journal_source","level":3,"title":"ctx_journal_source","text":"

            Query recent AI session history (summaries, decisions, topics).

            Argument Type Required Description limit number No Max sessions to return (default: 5) since string No ISO date filter: sessions after this date (YYYY-MM-DD)

            Read-only.

            ","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#ctx_watch_update","level":3,"title":"ctx_watch_update","text":"

            Apply a structured context update to .context/ files. Supports task, decision, learning, convention, and complete entry types. Human confirmation is required before calling.

            Argument Type Required Description type string Yes Entry type: task, decision, learning, convention, complete content string Yes Main content context string Conditional Context background (decisions/learnings) rationale string Conditional Rationale (decisions only) consequence string Conditional Consequence (decisions only) lesson string Conditional Lesson learned (learnings only) application string Conditional How to apply (learnings only)","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#ctx_compact","level":3,"title":"ctx_compact","text":"

            Move completed tasks to the archive section and remove empty sections from context files. Human confirmation required.

            Argument Type Required Description archive boolean No Also write tasks to .context/archive/ (default: false)","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#ctx_next","level":3,"title":"ctx_next","text":"

            Suggest the next pending task based on priority and position.

            Arguments: None. Read-only.

            ","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#ctx_check_task_completion","level":3,"title":"ctx_check_task_completion","text":"

            Advisory check: after a write operation, detect if any pending tasks were silently completed. Returns nudge text if a match is found.

            Argument Type Required Description recent_action string No Brief description of what was just done

            Read-only.

            ","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#ctx_session_event","level":3,"title":"ctx_session_event","text":"

            Signal a session lifecycle event. Type end triggers the session-end persistence ceremony - human confirmation required.

            Argument Type Required Description type string Yes Event type: start, end caller string No Caller identifier (cursor, windsurf, vscode, claude-desktop)","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#ctx_steering_get","level":3,"title":"ctx_steering_get","text":"

            Retrieve applicable steering files for a prompt. Without a prompt, returns always-included files only.

            Argument Type Required Description prompt string No Prompt text to match against steering file descriptions

            Read-only.

            ","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#ctx_search","level":3,"title":"ctx_search","text":"

            Search across .context/ files for a query string. Returns matching lines with file paths and line numbers.

            Argument Type Required Description query string Yes Search string to match against

            Read-only.

            ","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#ctx_session_start","level":3,"title":"ctx_session_start","text":"

            Execute session-start hooks and return aggregated context from hook outputs.

            Arguments: None.

            ","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#ctx_session_end","level":3,"title":"ctx_session_end","text":"

            Execute session-end hooks with an optional summary. Returns aggregated context from hook outputs.

            Argument Type Required Description summary string No Session summary passed to hook scripts","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#ctx_remind","level":3,"title":"ctx_remind","text":"

            List pending session-scoped reminders.

            Arguments: None. Read-only.

            ","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#prompts","level":2,"title":"Prompts","text":"

            Prompts provide pre-built templates for common workflows. Clients can list available prompts via prompts/list and retrieve a specific prompt via prompts/get.

            ","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#ctx-session-start","level":3,"title":"ctx-session-start","text":"

            Load full context at the beginning of a session. Returns all context files assembled in priority read order with session orientation instructions.

            ","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#ctx-decision-add","level":3,"title":"ctx-decision-add","text":"

            Format an architectural decision entry with all required fields.

            Argument Type Required Description content string Yes Decision title context string Yes Background context rationale string Yes Why this decision was made consequence string Yes Expected consequence","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#ctx-learning-add","level":3,"title":"ctx-learning-add","text":"

            Format a learning entry with all required fields.

            Argument Type Required Description content string Yes Learning title context string Yes Background context lesson string Yes The lesson learned application string Yes How to apply this lesson","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#ctx-reflect","level":3,"title":"ctx-reflect","text":"

            Guide end-of-session reflection. Returns a structured review prompt covering progress assessment and context update recommendations.

            ","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#ctx-checkpoint","level":3,"title":"ctx-checkpoint","text":"

            Report session statistics: tool calls made, entries added, and pending updates queued during the current session.

            ","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/memory/","level":1,"title":"Memory","text":"","path":["CLI","Context","Memory"],"tags":[]},{"location":"cli/memory/#ctx-memory","level":2,"title":"ctx memory","text":"

            Bridge Claude Code's auto memory (MEMORY.md) into .context/.

            Claude Code maintains per-project auto memory at ~/.claude/projects/<slug>/memory/MEMORY.md. This command group discovers that file, mirrors it into .context/memory/mirror.md (git-tracked), and detects drift.

            ctx memory <subcommand>\n
            ","path":["CLI","Context","Memory"],"tags":[]},{"location":"cli/memory/#ctx-memory-sync","level":3,"title":"ctx memory sync","text":"

            Copy MEMORY.md to .context/memory/mirror.md. Archives the previous mirror before overwriting.

            ctx memory sync [flags]\n

            Flags:

            Flag Description --dry-run Show what would happen without writing

            Exit codes:

            Code Meaning 0 Synced successfully 1 MEMORY.md not found (auto memory inactive)

            Examples:

            ctx memory sync\n# Archived previous mirror to mirror-2026-03-05-143022.md\n# Synced MEMORY.md -> .context/memory/mirror.md\n#   Source: ~/.claude/projects/-home-user-project/memory/MEMORY.md\n#   Lines: 47 (was 32)\n#   New content: 15 lines since last sync\n\nctx memory sync --dry-run\n
            ","path":["CLI","Context","Memory"],"tags":[]},{"location":"cli/memory/#ctx-memory-status","level":3,"title":"ctx memory status","text":"

            Show drift, timestamps, line counts, and archive count.

            ctx memory status\n

            Exit codes:

            Code Meaning 0 No drift 1 MEMORY.md not found 2 Drift detected (MEMORY.md changed since sync)

            Examples:

            ctx memory status\n# Memory Bridge Status\n#   Source:      ~/.claude/projects/.../memory/MEMORY.md\n#   Mirror:      .context/memory/mirror.md\n#   Last sync:   2026-03-05 14:30 (2 hours ago)\n#\n#   MEMORY.md:  47 lines (modified since last sync)\n#   Mirror:     32 lines\n#   Drift:      detected (source is newer)\n#   Archives:   3 snapshots in .context/memory/archive/\n
            ","path":["CLI","Context","Memory"],"tags":[]},{"location":"cli/memory/#ctx-memory-diff","level":3,"title":"ctx memory diff","text":"

            Show what changed in MEMORY.md since last sync.

            ctx memory diff\n

            Examples:

            ctx memory diff\n# --- .context/memory/mirror.md (mirror)\n# +++ ~/.claude/projects/.../memory/MEMORY.md (source)\n# +- new learning: memory bridge works\n

            No output when files are identical.

            ","path":["CLI","Context","Memory"],"tags":[]},{"location":"cli/memory/#ctx-memory-publish","level":3,"title":"ctx memory publish","text":"

            Push curated .context/ content into MEMORY.md so the agent sees it natively.

            ctx memory publish [flags]\n

            Content is selected in priority order: pending tasks, recent decisions (7 days), key conventions, recent learnings (7 days). Wrapped in <!-- ctx:published --> markers. Claude-owned content outside the markers is preserved.

            Flags:

            Flag Description Default --budget Line budget for published content 80 --dry-run Show what would be published

            Examples:

            ctx memory publish --dry-run\n# Publishing .context/ -> MEMORY.md...\n#   Budget: 80 lines\n#   Published block:\n#     5 pending tasks (from TASKS.md)\n#     3 recent decisions (from DECISIONS.md)\n#     5 key conventions (from CONVENTIONS.md)\n#   Total: 42 lines (within 80-line budget)\n# Dry run - no files written.\n\nctx memory publish              # Write to MEMORY.md\nctx memory publish --budget 40  # Tighter budget\n
            ","path":["CLI","Context","Memory"],"tags":[]},{"location":"cli/memory/#ctx-memory-unpublish","level":3,"title":"ctx memory unpublish","text":"

            Remove the ctx-managed marker block from MEMORY.md, preserving Claude-owned content.

            Examples:

            ctx memory unpublish\n

            Hook integration: The check-memory-drift hook runs on every prompt and nudges the agent when MEMORY.md has changed since last sync. The nudge fires once per session. See Memory Bridge.

            ","path":["CLI","Context","Memory"],"tags":[]},{"location":"cli/memory/#ctx-memory-import","level":3,"title":"ctx memory import","text":"

            Classify and promote entries from MEMORY.md into structured .context/ files.

            ctx memory import [flags]\n

            Each entry is classified by keyword heuristics:

            Keywords Target always use, prefer, never use, standard CONVENTIONS.md decided, chose, trade-off, approach DECISIONS.md gotcha, learned, watch out, bug, caveat LEARNINGS.md todo, need to, follow up TASKS.md Everything else Skipped

            Deduplication prevents re-importing the same entry across runs.

            Flags:

            Flag Description --dry-run Show classification plan without writing

            Examples:

            ctx memory import --dry-run\n# Scanning MEMORY.md for new entries...\n#   Found 6 entries\n#\n#   -> \"always use ctx from PATH\"\n#      Classified: CONVENTIONS.md (keywords: always use)\n#\n#   -> \"decided to use heuristic classification over LLM-based\"\n#      Classified: DECISIONS.md (keywords: decided)\n#\n# Dry run - would import: 4 entries\n# Skipped: 2 entries (session notes/unclassified)\n\nctx memory import    # Actually write entries to .context/ files\n
            ","path":["CLI","Context","Memory"],"tags":[]},{"location":"cli/message/","level":1,"title":"Message","text":"","path":["Message"],"tags":[]},{"location":"cli/message/#ctx-hook-message","level":3,"title":"ctx hook message","text":"

            Manage hook message templates.

            Hook messages control the text hooks emit. The hook logic (when to fire, counting, state tracking) is universal; the messages are opinions that can be customized per-project.

            ctx hook message <subcommand>\n
            ","path":["Message"],"tags":[]},{"location":"cli/message/#ctx-hook-message-list","level":3,"title":"ctx hook message list","text":"

            Show all hook messages with category and override status.

            ctx hook message list [--json]\n

            Flags:

            Flag Description --json Output in JSON format

            Example:

            ctx hook message list\nctx hook message list --json | jq '.[] | select(.override)'\n
            ","path":["Message"],"tags":[]},{"location":"cli/message/#ctx-hook-message-show","level":3,"title":"ctx hook message show","text":"

            Print the effective message template for a hook/variant pair. Shows the user override if present, otherwise the embedded default.

            ctx hook message show <hook> <variant>\n

            Example:

            ctx hook message show qa-reminder gate\nctx hook message show check-context-size checkpoint\n
            ","path":["Message"],"tags":[]},{"location":"cli/message/#ctx-hook-message-edit","level":3,"title":"ctx hook message edit","text":"

            Copy the embedded default template for <hook> <variant> to .context/hooks/messages/<hook>/<variant>.txt so you can edit it directly. The override takes effect the next time the hook fires.

            ctx hook message edit <hook> <variant>\n

            If an override already exists, the command fails and directs you to edit it in place or reset it first.

            Example:

            ctx hook message edit qa-reminder gate\n# Edit .context/hooks/messages/qa-reminder/gate.txt in your editor\n
            ","path":["Message"],"tags":[]},{"location":"cli/message/#ctx-hook-message-reset","level":3,"title":"ctx hook message reset","text":"

            Delete a user override and revert to the embedded default. Silent no-op if no override exists.

            ctx hook message reset <hook> <variant>\n

            Example:

            ctx hook message reset qa-reminder gate\n

            See Customizing hook messages for the full workflow.

            ","path":["Message"],"tags":[]},{"location":"cli/notify/","level":1,"title":"Notify","text":"","path":["CLI","Integrations","Notify"],"tags":[]},{"location":"cli/notify/#ctx-hook-notify","level":2,"title":"ctx hook notify","text":"

            Send fire-and-forget webhook notifications from skills, loops, and hooks.

            ctx hook notify --event <name> [--session-id <id>] \"message\"\n

            Flags:

            Flag Short Description --event -e Event name (required) --session-id -s Session ID (optional)

            Behavior:

            • No webhook configured: silent no-op (exit 0)
            • Webhook set but event not in events list: silent no-op (exit 0)
            • Webhook set and event matches: fire-and-forget HTTP POST
            • HTTP errors silently ignored (no retry)

            Examples:

            ctx hook notify --event loop \"Loop completed after 5 iterations\"\nctx hook notify -e nudge -s session-abc \"Context checkpoint at prompt #20\"\n
            ","path":["CLI","Integrations","Notify"],"tags":[]},{"location":"cli/notify/#ctx-hook-notify-setup","level":3,"title":"ctx hook notify setup","text":"

            Configure the webhook URL interactively. The URL is encrypted with AES-256-GCM using the encryption key and stored in .context/.notify.enc.

            Examples:

            ctx hook notify setup\n

            The encrypted file is safe to commit. The key (~/.ctx/.ctx.key) lives outside the project and is never committed.

            ","path":["CLI","Integrations","Notify"],"tags":[]},{"location":"cli/notify/#ctx-hook-notify-test","level":3,"title":"ctx hook notify test","text":"

            Send a test notification and report the HTTP response status.

            Examples:

            ctx hook notify test\n

            Payload format (JSON POST):

            {\n  \"event\": \"loop\",\n  \"message\": \"Loop completed after 5 iterations\",\n  \"session_id\": \"abc123-...\",\n  \"timestamp\": \"2026-02-22T14:30:00Z\",\n  \"project\": \"ctx\"\n}\n
            Field Type Description event string Event name from --event flag message string Notification message session_id string Session ID (omitted if empty) timestamp string UTC RFC3339 timestamp project string Project directory name

            See also: Webhook Notifications recipe.

            ","path":["CLI","Integrations","Notify"],"tags":[]},{"location":"cli/pad/","level":1,"title":"Scratchpad","text":"","path":["CLI","Sessions","Scratchpad"],"tags":[]},{"location":"cli/pad/#ctx-pad","level":2,"title":"ctx pad","text":"

            Encrypted scratchpad for sensitive one-liners that travel with the project.

            When invoked without a subcommand, lists all entries.

            ctx pad\nctx pad <subcommand>\n
            ","path":["CLI","Sessions","Scratchpad"],"tags":[]},{"location":"cli/pad/#ctx-pad-add","level":3,"title":"ctx pad add","text":"

            Append a new entry to the scratchpad.

            ctx pad add <text>\nctx pad add <label> --file <path>\n

            Flags:

            Flag Short Description --file -f Ingest a file as a blob entry (max 64 KB)

            Examples:

            ctx pad add \"DATABASE_URL=postgres://user:pass@host/db\"\nctx pad add \"deploy config\" --file ./deploy.yaml\n
            ","path":["CLI","Sessions","Scratchpad"],"tags":[]},{"location":"cli/pad/#ctx-pad-show","level":3,"title":"ctx pad show","text":"

            Output the raw text of an entry by number. For blob entries, prints decoded file content (or writes to disk with --out).

            ctx pad show <n>\nctx pad show <n> --out <path>\n

            Arguments:

            • n: 1-based entry number

            Flags:

            Flag Description --out Write decoded blob content to a file (blobs only)

            Examples:

            ctx pad show 3\nctx pad show 2 --out ./recovered.yaml\n
            ","path":["CLI","Sessions","Scratchpad"],"tags":[]},{"location":"cli/pad/#ctx-pad-rm","level":3,"title":"ctx pad rm","text":"

            Remove one or more entries by stable ID. Supports individual IDs and ranges.

            ctx pad rm <id> [id...]\n

            Arguments:

            • id: One or more entry IDs (e.g., 3, 1 4, 3-5)

            Examples:

            ctx pad rm 2\nctx pad rm 1 4\nctx pad rm 3-5\n
            ","path":["CLI","Sessions","Scratchpad"],"tags":[]},{"location":"cli/pad/#ctx-pad-normalize","level":3,"title":"ctx pad normalize","text":"

            Reassign entry IDs as a contiguous sequence 1..N, closing any gaps left by deletions.

            Examples:

            ctx pad normalize\n
            ","path":["CLI","Sessions","Scratchpad"],"tags":[]},{"location":"cli/pad/#ctx-pad-edit","level":3,"title":"ctx pad edit","text":"

            Replace, append to, or prepend to an entry.

            ctx pad edit <n> [text]\n

            Arguments:

            • n: 1-based entry number
            • text: Replacement text (mutually exclusive with --append/--prepend)

            Flags:

            Flag Description --append Append text to the end of the entry --prepend Prepend text to the beginning of the entry --file Replace blob file content (preserves label) --label Replace blob label (preserves content)

            Examples:

            ctx pad edit 2 \"new text\"\nctx pad edit 2 --append \" suffix\"\nctx pad edit 2 --prepend \"prefix \"\nctx pad edit 1 --file ./v2.yaml\nctx pad edit 1 --label \"new name\"\n
            ","path":["CLI","Sessions","Scratchpad"],"tags":[]},{"location":"cli/pad/#ctx-pad-mv","level":3,"title":"ctx pad mv","text":"

            Move an entry from one position to another.

            ctx pad mv <from> <to>\n

            Arguments:

            • from: Source position (1-based)
            • to: Destination position (1-based)

            Examples:

            ctx pad mv 3 1      # promote entry 3 to the top\nctx pad mv 1 5      # bury entry 1 to position 5\n
            ","path":["CLI","Sessions","Scratchpad"],"tags":[]},{"location":"cli/pad/#ctx-pad-resolve","level":3,"title":"ctx pad resolve","text":"

            Show both sides of a merge conflict in the encrypted scratchpad.

            Examples:

            ctx pad resolve\n
            ","path":["CLI","Sessions","Scratchpad"],"tags":[]},{"location":"cli/pad/#ctx-pad-import","level":3,"title":"ctx pad import","text":"

            Bulk-import lines from a file into the scratchpad. Each non-empty line becomes a separate entry. All entries are written in a single encrypt/write cycle.

            With --blob, import all first-level files from a directory as blob entries. Each file becomes a blob with the filename as its label. Subdirectories and non-regular files are skipped.

            ctx pad import <file>\nctx pad import -              # read from stdin\nctx pad import --blob <dir>   # import directory files as blobs\n

            Arguments:

            • file: Path to a text file, - for stdin, or a directory (with --blob)

            Flags:

            Flag Description --blob Import first-level files from a directory as blobs

            Examples:

            ctx pad import notes.txt\ngrep TODO *.go | ctx pad import -\nctx pad import --blob ./ideas/\n
            ","path":["CLI","Sessions","Scratchpad"],"tags":[]},{"location":"cli/pad/#ctx-pad-export","level":3,"title":"ctx pad export","text":"

            Export all blob entries from the scratchpad to a directory as files. Each blob's label becomes the filename. Non-blob entries are skipped.

            ctx pad export [dir]\n

            Arguments:

            • dir: Target directory (default: current directory)

            Flags:

            Flag Short Description --force -f Overwrite existing files instead of timestamping --dry-run Print what would be exported without writing

            When a file already exists, a unix timestamp is prepended to avoid collisions (e.g., 1739836200-label). Use --force to overwrite instead.

            Examples:

            ctx pad export ./ideas\nctx pad export --dry-run\nctx pad export --force ./backup\n
            ","path":["CLI","Sessions","Scratchpad"],"tags":[]},{"location":"cli/pad/#ctx-pad-merge","level":3,"title":"ctx pad merge","text":"

            Merge entries from one or more scratchpad files into the current pad. Each input file is auto-detected as encrypted or plaintext. Entries are deduplicated by exact content.

            ctx pad merge FILE...\n

            Arguments:

            • FILE...: One or more scratchpad files to merge (encrypted or plaintext)

            Flags:

            Flag Short Description --key -k Path to key file for decrypting input files --dry-run Print what would be merged without writing

            Examples:

            ctx pad merge worktree/.context/scratchpad.enc\nctx pad merge notes.md backup.enc\nctx pad merge --key /path/to/other.key foreign.enc\nctx pad merge --dry-run pad-a.enc pad-b.md\n
            ","path":["CLI","Sessions","Scratchpad"],"tags":[]},{"location":"cli/pause/","level":1,"title":"Pause","text":"","path":["CLI","Sessions","Pause"],"tags":[]},{"location":"cli/pause/#ctx-hook-pause","level":2,"title":"ctx hook pause","text":"

            Pause all context nudge and reminder hooks for the current session. Security hooks (dangerous command blocking) and housekeeping hooks still fire.

            ctx hook pause [flags]\n

            Flags:

            Flag Description --session-id Session ID (overrides stdin)

            Example:

            # Pause hooks for a quick investigation\nctx hook pause\n\n# Resume when ready\nctx hook resume\n

            See also:

            • ctx hook resume: the matching resume command
            • Pausing Context Hooks recipe
            ","path":["CLI","Sessions","Pause"],"tags":[]},{"location":"cli/prune/","level":1,"title":"Prune","text":"","path":["CLI","Runtime","Prune"],"tags":[]},{"location":"cli/prune/#ctx-prune","level":3,"title":"ctx prune","text":"

            Remove per-session state files from .context/state/ that are older than the specified age. Session state files are identified by UUID suffixes (context-check-<session-id>, heartbeat-<session-id>, and similar). Global files without session IDs (events.jsonl, memory-import.json, and other non-per-session markers) are always preserved.

            ctx prune [flags]\n

            Flags:

            Flag Description --days Prune files older than this many days (default: 7) --dry-run Show what would be pruned without deleting

            Examples:

            ctx prune                 # Prune files older than 7 days\nctx prune --days 3        # Prune files older than 3 days\nctx prune --dry-run       # Preview without deleting\n

            See State maintenance for the recommended cadence and automation recipe.

            ","path":["CLI","Runtime","Prune"],"tags":[]},{"location":"cli/remind/","level":1,"title":"Remind","text":"","path":["CLI","Sessions","Remind"],"tags":[]},{"location":"cli/remind/#ctx-remind","level":2,"title":"ctx remind","text":"

            Session-scoped reminders that surface at session start. Reminders are stored verbatim and relayed verbatim: no summarization, no categories.

            When invoked with a text argument and no subcommand, adds a reminder.

            ctx remind \"text\"\nctx remind <subcommand>\n
            ","path":["CLI","Sessions","Remind"],"tags":[]},{"location":"cli/remind/#ctx-remind-add","level":3,"title":"ctx remind add","text":"

            Add a reminder. This is the default action: ctx remind \"text\" and ctx remind add \"text\" are equivalent.

            ctx remind \"refactor the swagger definitions\"\nctx remind add \"check CI after the deploy\" --after 2026-02-25\n

            Arguments:

            • text: The reminder message (verbatim)

            Flags:

            Flag Short Description --after -a Don't surface until this date (YYYY-MM-DD)

            Examples:

            ctx remind \"refactor the swagger definitions\"\nctx remind \"check CI after the deploy\" --after 2026-02-25\n
            ","path":["CLI","Sessions","Remind"],"tags":[]},{"location":"cli/remind/#ctx-remind-list","level":3,"title":"ctx remind list","text":"

            List all pending reminders. Date-gated reminders that aren't yet due are annotated with (after DATE, not yet due).

            Examples:

            ctx remind list\nctx remind ls            # alias\n

            Aliases: ls

            ","path":["CLI","Sessions","Remind"],"tags":[]},{"location":"cli/remind/#ctx-remind-dismiss","level":3,"title":"ctx remind dismiss","text":"

            Remove one or more reminders by ID, or remove all with --all. Supports individual IDs and ranges.

            ctx remind dismiss <id> [id...]\nctx remind dismiss --all\n

            Arguments:

            • id: One or more reminder IDs (e.g., 3, 3 5-7)

            Flags:

            Flag Description --all Dismiss all reminders

            Aliases: rm

            Examples:

            ctx remind dismiss 3\nctx remind dismiss 3 5-7\nctx remind dismiss --all\n
            ","path":["CLI","Sessions","Remind"],"tags":[]},{"location":"cli/remind/#ctx-remind-normalize","level":3,"title":"ctx remind normalize","text":"

            Reassign reminder IDs as a contiguous sequence 1..N, closing any gaps left by dismissals.

            Examples:

            ctx remind normalize\n

            See also: Session Reminders recipe.

            ","path":["CLI","Sessions","Remind"],"tags":[]},{"location":"cli/resume/","level":1,"title":"Resume","text":"","path":["CLI","Sessions","Resume"],"tags":[]},{"location":"cli/resume/#ctx-hook-resume","level":2,"title":"ctx hook resume","text":"

            Resume context hooks after a pause. Silent no-op if not paused.

            ctx hook resume [flags]\n

            Flags:

            Flag Description --session-id Session ID (overrides stdin)

            Example:

            ctx hook resume\n

            See also:

            • ctx hook pause: the matching pause command
            • Pausing Context Hooks recipe
            ","path":["CLI","Sessions","Resume"],"tags":[]},{"location":"cli/serve/","level":1,"title":"Serve","text":"","path":["CLI","Integrations","Serve"],"tags":[]},{"location":"cli/serve/#ctx-serve","level":2,"title":"ctx serve","text":"

            Serve a static site locally via zensical.

            With no argument, serves the journal site at .context/journal-site. With a directory argument, serves that directory if it contains a zensical.toml.

            ctx serve                             # Serve .context/journal-site\nctx serve ./my-site                   # Serve a specific directory\nctx serve ./docs                      # Serve any zensical site\n

            This Command Does NOT Start a Hub

            ctx serve is purely for static-site serving. To run a ctx Hub for cross-project knowledge sharing, use ctx hub start. That command lives in its own group because the hub is a gRPC server, not a static site.

            Requires zensical to be installed:

            pipx install zensical\n
            ","path":["CLI","Integrations","Serve"],"tags":[]},{"location":"cli/serve/#arguments","level":3,"title":"Arguments","text":"Argument Description [directory] Directory containing a zensical.toml to serve

            When omitted, serves .context/journal-site by default, the directory produced by ctx journal site.

            Examples:

            ctx serve                         # Default: serve .context/journal-site\nctx serve ./my-site               # Serve a specific directory\nctx serve ./docs                  # Serve any zensical site\n
            ","path":["CLI","Integrations","Serve"],"tags":[]},{"location":"cli/serve/#see-also","level":3,"title":"See Also","text":"
            • ctx journal: generate the journal site that ctx serve displays.
            • ctx hub start: for running a ctx Hub server, not a static site.
            • Browsing and enriching past sessions: the recipe that combines ctx journal and ctx serve.
            ","path":["CLI","Integrations","Serve"],"tags":[]},{"location":"cli/setup/","level":1,"title":"Setup","text":"","path":["CLI","Integrations","Setup"],"tags":[]},{"location":"cli/setup/#ctx-setup","level":2,"title":"ctx setup","text":"

            Generate AI tool integration configuration.

            ctx setup <tool> [flags]\n

            Flags:

            Flag Short Description --write -w Write the generated config to disk (e.g. .github/copilot-instructions.md)

            Supported tools:

            Tool Description claude-code Redirects to plugin install instructions cursor Cursor IDE kiro Kiro IDE cline Cline (VS Code extension) aider Aider CLI copilot GitHub Copilot windsurf Windsurf IDE

            Claude Code Uses the Plugin System

            Claude Code integration is now provided via the ctx plugin. Running ctx setup claude-code prints plugin install instructions.

            Examples:

            # Print hook instructions to stdout\nctx setup cursor\nctx setup aider\n\n# Generate and write .github/copilot-instructions.md\nctx setup copilot --write\n\n# Generate MCP config and sync steering files\nctx setup kiro --write\nctx setup cursor --write\nctx setup cline --write\n
            ","path":["CLI","Integrations","Setup"],"tags":[]},{"location":"cli/site/","level":1,"title":"Site","text":"","path":["CLI","Integrations","Site"],"tags":[]},{"location":"cli/site/#ctx-site","level":2,"title":"ctx site","text":"

            Site management commands for the ctx.ist static site.

            ctx site <subcommand>\n
            ","path":["CLI","Integrations","Site"],"tags":[]},{"location":"cli/site/#ctx-site-feed","level":3,"title":"ctx site feed","text":"

            Generate an Atom 1.0 feed from finalized blog posts in docs/blog/.

            ctx site feed [flags]\n

            Scans docs/blog/ for files matching YYYY-MM-DD-*.md, parses YAML frontmatter, and generates a valid Atom feed. Only posts with reviewed_and_finalized: true are included. Summaries are extracted from the first paragraph after the heading.

            Flags:

            Flag Short Type Default Description --out -o string site/feed.xml Output path --base-url string https://ctx.ist Base URL for entry links

            Output:

            Generated site/feed.xml (21 entries)\n\nSkipped:\n  2026-02-25-the-homework-problem.md: not finalized\n\nWarnings:\n  2026-02-09-defense-in-depth.md: no summary paragraph found\n

            Three buckets: included (count), skipped (with reason), warnings (included but degraded). Exit 0 always: warnings inform but do not block.

            Frontmatter requirements:

            Field Required Feed mapping title Yes <title> date Yes <updated> reviewed_and_finalized Yes Draft gate (must be true) author No <author><name> topics No <category term=\"\">

            Examples:

            ctx site feed                                # Generate site/feed.xml\nctx site feed --out /tmp/feed.xml            # Custom output path\nctx site feed --base-url https://example.com # Custom base URL\nmake site-feed                               # Makefile shortcut\nmake site                                    # Builds site + feed\n
            ","path":["CLI","Integrations","Site"],"tags":[]},{"location":"cli/skill/","level":1,"title":"Skill","text":"","path":["CLI","Integrations","Skill"],"tags":[]},{"location":"cli/skill/#ctx-skill","level":2,"title":"ctx skill","text":"

            Manage reusable instruction bundles that can be installed into .context/skills/.

            A skill is a directory containing a SKILL.md file with YAML frontmatter (name, description) and a Markdown instruction body. Skills are loaded by the agent context packet when --skill <name> is passed to ctx agent.

            ctx skill <subcommand>\n
            ","path":["CLI","Integrations","Skill"],"tags":[]},{"location":"cli/skill/#ctx-skill-install","level":3,"title":"ctx skill install","text":"

            Install a skill from a source directory.

            ctx skill install <source>\n

            Arguments:

            • source: Path to a directory containing SKILL.md

            Examples:

            ctx skill install ./my-skills/code-review\n# Installed code-review → .context/skills/code-review\n
            ","path":["CLI","Integrations","Skill"],"tags":[]},{"location":"cli/skill/#ctx-skill-list","level":3,"title":"ctx skill list","text":"

            List all installed skills.

            Examples:

            ctx skill list\n
            ","path":["CLI","Integrations","Skill"],"tags":[]},{"location":"cli/skill/#ctx-skill-remove","level":3,"title":"ctx skill remove","text":"

            Remove an installed skill.

            Arguments:

            • name: Skill name to remove

            Examples:

            ctx skill remove code-review\n

            See also: Building Project Skills recipe.

            ","path":["CLI","Integrations","Skill"],"tags":[]},{"location":"cli/steering/","level":1,"title":"Steering","text":"","path":["CLI","Integrations","Steering"],"tags":[]},{"location":"cli/steering/#ctx-steering","level":2,"title":"ctx steering","text":"

            Manage steering files: persistent behavioral rules for AI coding assistants.

            A steering file is a small markdown document with YAML frontmatter that tells the AI how to behave in a specific context. ctx steering keeps those files in .context/steering/, decides which ones apply for a given prompt, and syncs them out to each AI tool's native format (Claude Code, Cursor, Kiro, Cline).

            ctx steering <subcommand>\n

            Steering vs Decisions vs Conventions

            The three look similar on disk but serve different purposes:

            • Decisions record what was chosen and why. Consumed mostly by humans (and by the agent via ctx agent).
            • Conventions describe how the codebase is written. Consumed as reference material.
            • Steering tells the AI how to behave when asked about X. Consumed by the AI tool's prompt injection layer, conditionally on prompt match.

            If you find yourself writing \"the AI should always do X\", that belongs in steering, not decisions.

            ","path":["CLI","Integrations","Steering"],"tags":[]},{"location":"cli/steering/#anatomy-of-a-steering-file","level":3,"title":"Anatomy of a Steering File","text":"
            ---\nname: security\ndescription: Security rules for all code changes\ninclusion: always    # always | auto | manual\ntools: []            # empty = all tools\npriority: 10         # lower = injected first\n---\n\n# Security rules\n\n- Validate all user input at system boundaries.\n- Never log secrets, tokens, or credentials.\n- Prefer constant-time comparison for tokens.\n

            Inclusion modes:

            Mode When it's included always Every prompt, unconditionally auto When the prompt matches the description keywords manual Only when the user names it explicitly

            Priority: lower numbers inject first, so high-priority rules appear at the top of the prompt. Default is 50.

            Tools: an empty list means all configured tools receive the file; list specific tool names to scope it.

            ","path":["CLI","Integrations","Steering"],"tags":[]},{"location":"cli/steering/#ctx-steering-init","level":3,"title":"ctx steering init","text":"

            Create a starter set of steering files in .context/steering/ to use as a scaffolding baseline.

            Examples:

            ctx steering init\n
            ","path":["CLI","Integrations","Steering"],"tags":[]},{"location":"cli/steering/#ctx-steering-add","level":3,"title":"ctx steering add","text":"

            Create a new steering file with default frontmatter.

            ctx steering add <name>\n

            Arguments:

            • name: Steering file name (without .md extension)

            Examples:

            ctx steering add security\n# Created .context/steering/security.md\n

            The generated file uses inclusion: manual and priority: 50 by default. Edit the frontmatter to change behavior.

            ","path":["CLI","Integrations","Steering"],"tags":[]},{"location":"cli/steering/#ctx-steering-list","level":3,"title":"ctx steering list","text":"

            List all steering files with their inclusion mode, priority, and tool scoping.

            Examples:

            ctx steering list\n
            ","path":["CLI","Integrations","Steering"],"tags":[]},{"location":"cli/steering/#ctx-steering-preview","level":3,"title":"ctx steering preview","text":"

            Preview which steering files would be included for a given prompt. Useful for validating auto-inclusion descriptions against realistic prompts.

            ctx steering preview [prompt]\n

            Examples:

            ctx steering preview \"create a REST API endpoint\"\n# Steering files matching prompt \"create a REST API endpoint\":\n#   api-standards        inclusion=auto     priority=20  tools=all\n#   security             inclusion=always   priority=10  tools=all\n
            ","path":["CLI","Integrations","Steering"],"tags":[]},{"location":"cli/steering/#ctx-steering-sync","level":3,"title":"ctx steering sync","text":"

            Sync steering files to tool-native formats for tools that have a built-in rules primitive. Not every tool needs this; Claude Code and Codex use a different delivery mechanism (see below).

            Examples:

            ctx steering sync\n

            Which tools are sync targets?

            Tool Sync target Mechanism Cursor .cursor/rules/ Cursor reads the directory natively Cline .clinerules/ Cline reads the directory natively Kiro .kiro/steering/ Kiro reads the directory natively Claude Code (no-op) Delivered via hook + MCP (see next section) Codex (no-op) Same as Claude Code

            For the three native-rules tools, ctx steering sync writes each matching steering file to the appropriate directory with tool-specific frontmatter transforms. Unchanged files are skipped (idempotent).

            ","path":["CLI","Integrations","Steering"],"tags":[]},{"location":"cli/steering/#how-claude-code-and-codex-consume-steering","level":3,"title":"How Claude Code and Codex Consume Steering","text":"

            Claude Code has no native \"steering files\" primitive, so ctx steering sync skips it entirely. Instead, steering reaches Claude through two non-sync channels, both activated by ctx setup claude-code (which installs the plugin):

            1. Automatic injection via the PreToolUse hook. The Claude Code plugin wires a PreToolUse hook that runs ctx agent --budget 8000 before each tool call. ctx agent loads .context/steering/ and calls steering.Filter with an empty prompt, so only files with inclusion: always match. Those files are included as Tier 6 of the context packet. The packet is printed on stdout, which Claude Code injects as additional context. This fires on every tool call; no user action is required.

            2. On-demand MCP tool call (ctx_steering_get). The ctx plugin ships a .mcp.json file that automatically registers the ctx MCP server (ctx mcp serve) with Claude Code on plugin install. Once registered, Claude can invoke the ctx_steering_get tool mid-task to fetch matching steering files for a specific prompt. This is the only path that resolves inclusion: auto and inclusion: manual matches for Claude Code; Claude passes the prompt to the MCP tool, which runs the keyword match against each file's description.

            Verify the MCP server is registered:

            claude mcp list\n

            Expected line: ctx: ctx mcp serve - ✓ Connected. If it's missing, reinstall the plugin from Claude Code (/plugin → find ctx → uninstall → install again); older plugin versions shipped without the .mcp.json file.

            Prefer inclusion: always for Claude Code

            Because the PreToolUse hook passes an empty prompt to ctx agent, only always files fire automatically. auto files require Claude to call the ctx_steering_get MCP tool on its own; manual files require an explicit user invocation. For rules that should reliably fire on every Claude Code session, use inclusion: always. Reserve auto/manual for situational libraries where the opt-in cost is acceptable and you understand Claude may not pull them in without prompting.

            The foundation files scaffolded by ctx init already default to inclusion: always for this reason.

            Practical implications:

            • Running ctx steering sync before starting a Claude session does nothing for Claude's benefit. Skip it.
            • ctx steering preview still works for validating your descriptions; it doesn't depend on sync.
            • If Claude Code is your only tool, the ctx steering commands you care about are add, list, preview, init (never sync).
            • If you use both Claude Code and (say) Cursor, ctx steering sync covers Cursor (where auto and manual work natively) while the hook+MCP pipeline covers Claude Code. For rules you need to fire automatically on both, use inclusion: always.
            ","path":["CLI","Integrations","Steering"],"tags":[]},{"location":"cli/steering/#ctx-agent-integration","level":3,"title":"ctx agent Integration","text":"

            When ctx agent builds a context packet, steering files are loaded as Tier 6 of the budget-aware assembly (see ctx agent). Files with inclusion: always are always included; auto files are scored against the current prompt and included in priority order until the tier budget is exhausted.

            ","path":["CLI","Integrations","Steering"],"tags":[]},{"location":"cli/steering/#see-also","level":3,"title":"See Also","text":"
            • ctx setup: configure which tools receive steering syncs
            • ctx trigger: lifecycle scripts (a different hooking concept, see below)
            • Building steering files recipe: walkthrough from first file to synced output
            ","path":["CLI","Integrations","Steering"],"tags":[]},{"location":"cli/sysinfo/","level":1,"title":"Sysinfo","text":"","path":["CLI","Diagnostics","Sysinfo"],"tags":[]},{"location":"cli/sysinfo/#ctx-sysinfo","level":3,"title":"ctx sysinfo","text":"

            Display a snapshot of system resources (memory, swap, disk, load) with threshold-based alert severities. Mirrors what the check-resource hook plumbing monitors in the background, but this command prints the full report at any severity level, not only at DANGER.

            ctx sysinfo [flags]\n

            Flags:

            Flag Description --json Output in JSON format

            Alert thresholds:

            Resource WARNING DANGER Memory ≥ 75% ≥ 90% Swap ≥ 50% ≥ 75% Disk ≥ 85% ≥ 95% Load ≥ 1.0x CPUs ≥ 1.5x CPUs

            Examples:

            ctx sysinfo                  # Human-readable table\nctx sysinfo --json           # Structured output\n
            ","path":["CLI","Diagnostics","Sysinfo"],"tags":[]},{"location":"cli/system/","level":1,"title":"System","text":"","path":["CLI","Runtime","System"],"tags":[]},{"location":"cli/system/#ctx-system","level":3,"title":"ctx system","text":"

            Hidden parent command that hosts Claude Code hook plumbing and a small set of session-lifecycle plumbing subcommands used by skills and editor integrations. The parent is registered without a visible group in ctx --help; run ctx system --help to see its subcommands.

            ctx system <subcommand>\n

            Commands Previously under ctx system

            Several user-facing maintenance commands used to live under ctx system and were promoted to top-level:

            • ctx system backupctx backup
            • ctx system eventsctx hook event
            • ctx system messagectx hook message
            • ctx system prunectx prune
            • ctx system resourcesctx sysinfo
            • ctx system statsctx usage

            ctx system bootstrap remains under ctx system as a hidden, agent-only command. Update any scripts or personal docs that reference the old paths.

            ","path":["CLI","Runtime","System"],"tags":[]},{"location":"cli/system/#plumbing-subcommands","level":2,"title":"Plumbing Subcommands","text":"

            These are not hook handlers; they're called by skills and editor integrations during the session lifecycle. Safe to run manually.

            ","path":["CLI","Runtime","System"],"tags":[]},{"location":"cli/system/#ctx-system-mark-journal","level":4,"title":"ctx system mark-journal","text":"

            Update processing state for a journal entry. Records the current date in .context/journal/.state.json. Used by journal skills to record pipeline progress.

            ctx system mark-journal <filename> <stage>\n

            Stages: exported, enriched, normalized, fences_verified

            Flag Description --check Check if stage is set (exit 1 if not)

            Example:

            ctx system mark-journal 2026-01-21-session-abc12345.md enriched\nctx system mark-journal 2026-01-21-session-abc12345.md normalized\nctx system mark-journal --check 2026-01-21-session-abc12345.md fences_verified\n
            ","path":["CLI","Runtime","System"],"tags":[]},{"location":"cli/system/#ctx-system-mark-wrapped-up","level":4,"title":"ctx system mark-wrapped-up","text":"

            Suppress context checkpoint nudges after a wrap-up ceremony. Writes a marker file that check-context-size checks before emitting checkpoint boxes. The marker expires after 2 hours.

            Called automatically by /ctx-wrap-up after persisting context (not intended for direct use).

            ctx system mark-wrapped-up\n

            No flags, no arguments. Idempotent: running it again updates the marker timestamp.

            ","path":["CLI","Runtime","System"],"tags":[]},{"location":"cli/system/#ctx-system-pause-ctx-system-resume","level":4,"title":"ctx system pause / ctx system resume","text":"

            Session-scoped hook suppression. ctx system pause writes a marker file that causes hook plumbing to no-op for the current session; ctx system resume removes it. These are the hook-plumbing counterparts to the ctx hook pause / ctx hook resume commands (which call them internally).

            Read the session ID from stdin JSON (same as hooks) or pass --session-id.

            ","path":["CLI","Runtime","System"],"tags":[]},{"location":"cli/system/#ctx-system-session-event","level":4,"title":"ctx system session-event","text":"

            Records a session lifecycle event (start or end) to the event log. Called by editor integrations when a workspace is opened or closed.

            ctx system session-event --type start --caller vscode\nctx system session-event --type end --caller vscode\n
            ","path":["CLI","Runtime","System"],"tags":[]},{"location":"cli/system/#hook-subcommands","level":2,"title":"Hook Subcommands","text":"

            Hidden Claude Code hook handlers implementing the hook contract: read JSON from stdin, perform logic, emit output on stdout, exit 0. Block commands output JSON with a decision field.

            UserPromptSubmit hooks: context-load-gate, check-context-size, check-persistence, check-ceremony, check-journal, check-version, check-resource, check-knowledge, check-map-staleness, check-memory-drift, check-reminder, check-freshness, check-hub-sync, check-backup-age, check-skill-discovery, heartbeat.

            PreToolUse hooks: block-non-path-ctx, block-dangerous-command, qa-reminder, specs-nudge.

            PostToolUse hooks: post-commit, check-task-completion.

            See AI Tools for registration details and the Claude Code plugin integration.

            ","path":["CLI","Runtime","System"],"tags":[]},{"location":"cli/trace/","level":1,"title":"Commit Context Tracing","text":"","path":["CLI","Diagnostics","Commit Context Tracing"],"tags":[]},{"location":"cli/trace/#ctx-trace","level":3,"title":"ctx trace","text":"

            Show the context behind git commits. Links commits back to the decisions, tasks, learnings, and sessions that motivated them.

            git log shows what changed, git blame shows who, and ctx trace shows why.

            ctx trace [commit] [flags]\n

            Flags:

            Flag Description --last N Show context for last N commits --json Output as JSON for scripting

            Examples:

            # Show context for a specific commit\nctx trace abc123\n\n# Show context for last 10 commits\nctx trace --last 10\n\n# JSON output\nctx trace abc123 --json\n

            Output:

            Commit: abc123 \"Fix auth token expiry\"\nDate:   2026-03-14 10:00:00 -0700\nContext:\n  [Decision] #12: Use short-lived tokens with server-side refresh\n    Date: 2026-03-10\n\n  [Task] #8: Implement token rotation for compliance\n    Status: completed\n

            When listing recent commits with --last:

            abc123  Fix auth token expiry         decision:12, task:8\ndef456  Add rate limiting             decision:15, learning:7\n789abc  Update dependencies           (none)\n
            ","path":["CLI","Diagnostics","Commit Context Tracing"],"tags":[]},{"location":"cli/trace/#ctx-trace-file","level":3,"title":"ctx trace file","text":"

            Show the context trail for a file. Combines git log with context resolution.

            ctx trace file <path[:line-range]> [flags]\n

            Flags:

            Flag Description --last N Maximum commits to show (default: 20)

            Examples:

            # Show context trail for a file\nctx trace file src/auth.go\n\n# Show context for specific line range\nctx trace file src/auth.go:42-60\n
            ","path":["CLI","Diagnostics","Commit Context Tracing"],"tags":[]},{"location":"cli/trace/#ctx-trace-tag","level":3,"title":"ctx trace tag","text":"

            Manually tag a commit with context. For commits made without the hook, or to add extra context after the fact.

            Tags are stored in .context/trace/overrides.jsonl since git trailers cannot be added to existing commits without rewriting history.

            ctx trace tag <commit> --note \"<text>\"\n

            Examples:

            ctx trace tag HEAD --note \"Hotfix for production outage\"\nctx trace tag abc123 --note \"Part of Q1 compliance initiative\"\n
            ","path":["CLI","Diagnostics","Commit Context Tracing"],"tags":[]},{"location":"cli/trace/#ctx-trace-hook","level":3,"title":"ctx trace hook","text":"

            Enable or disable the prepare-commit-msg hook for automatic context tracing. When enabled, commits automatically receive a ctx-context trailer with references to relevant decisions, tasks, learnings, and sessions.

            ctx trace hook <enable|disable>\n

            Prerequisites: ctx must be on your $PATH. If you installed via go install, ensure $GOPATH/bin (or $HOME/go/bin) is in your shell's $PATH.

            What the hook does:

            1. Before each commit, collects context from three sources:
            2. Pending context accumulated during work (ctx add, ctx task complete)
            3. Staged file changes to .context/ files
            4. Working state (in-progress tasks, active AI session)
            5. Injects a ctx-context trailer into the commit message
            6. After commit, records the mapping in .context/trace/history.jsonl

            Examples:

            # Install the hook\nctx trace hook enable\n\n# Remove the hook\nctx trace hook disable\n

            Resulting commit message:

            Fix auth token expiry handling\n\nRefactored token refresh logic to handle edge case\nwhere refresh token expires during request.\n\nctx-context: decision:12, task:8, session:abc123\n
            ","path":["CLI","Diagnostics","Commit Context Tracing"],"tags":[]},{"location":"cli/trace/#reference-types","level":3,"title":"Reference Types","text":"

            The ctx-context trailer supports these reference types:

            Prefix Points to Example decision:<n> Entry #n in DECISIONS.md decision:12 learning:<n> Entry #n in LEARNINGS.md learning:5 task:<n> Task #n in TASKS.md task:8 convention:<n> Entry #n in CONVENTIONS.md convention:3 session:<id> AI session by ID session:abc123 \"<text>\" Free-form context note \"Performance fix for P1 incident\"","path":["CLI","Diagnostics","Commit Context Tracing"],"tags":[]},{"location":"cli/trace/#storage","level":3,"title":"Storage","text":"

            Context trace data is stored in the .context/ directory:

            File Purpose Lifecycle state/pending-context.jsonl Accumulates refs during work Truncated after each commit trace/history.jsonl Permanent commit-to-context map Append-only, never truncated trace/overrides.jsonl Manual tags for existing commits Append-only","path":["CLI","Diagnostics","Commit Context Tracing"],"tags":[]},{"location":"cli/trigger/","level":1,"title":"Trigger","text":"","path":["CLI","Integrations","Trigger"],"tags":[]},{"location":"cli/trigger/#ctx-trigger","level":2,"title":"ctx trigger","text":"

            Manage lifecycle triggers: executable scripts that fire at specific events during an AI session. Triggers can block tool calls, inject context, and automate reactions: any side effect you want at session boundaries, tool boundaries, or file-save events.

            ctx trigger <subcommand>\n

            Triggers Execute Arbitrary Scripts

            A trigger is a shell script with the executable bit set. It runs with the same privileges as your AI tool and receives JSON input on stdin. Treat triggers like pre-commit hooks: only enable scripts you've read and understand. A malicious or buggy trigger can block tool calls, corrupt context files, or exfiltrate data.

            ","path":["CLI","Integrations","Trigger"],"tags":[]},{"location":"cli/trigger/#where-triggers-live","level":3,"title":"Where Triggers Live","text":"

            Triggers live in .context/hooks/<trigger-type>/ as executable scripts. The on-disk directory name is still hooks/ for historical reasons even though the command is ctx trigger. Each script:

            • Reads a JSON payload from stdin.
            • Returns a JSON payload on stdout.
            • Returns a non-zero exit code to block or error.
            .context/\n└── hooks/\n    ├── session-start/\n    │   └── inject-context.sh\n    ├── pre-tool-use/\n    │   └── block-legacy.sh\n    └── post-tool-use/\n        └── record-edit.sh\n
            ","path":["CLI","Integrations","Trigger"],"tags":[]},{"location":"cli/trigger/#trigger-types","level":3,"title":"Trigger Types","text":"Type Fires when session-start An AI session begins session-end An AI session ends pre-tool-use Before an AI tool call is executed post-tool-use After an AI tool call returns file-save When a file is saved context-add When a context entry is added","path":["CLI","Integrations","Trigger"],"tags":[]},{"location":"cli/trigger/#input-and-output-contract","level":3,"title":"Input and Output Contract","text":"

            Each trigger receives a JSON object on stdin with the event details. Minimal contract (fields vary by trigger type):

            {\n  \"type\": \"pre-tool-use\",\n  \"tool\": \"write_file\",\n  \"path\": \"src/auth.go\",\n  \"session_id\": \"abc123-...\"\n}\n

            The trigger may write a JSON object to stdout to influence behavior. Example for a blocking pre-tool-use trigger:

            {\n  \"action\": \"block\",\n  \"message\": \"Editing src/auth.go requires approval from #security\"\n}\n

            For non-blocking event loggers, simply read stdin and exit 0 without writing to stdout.

            ","path":["CLI","Integrations","Trigger"],"tags":[]},{"location":"cli/trigger/#ctx-trigger-add","level":3,"title":"ctx trigger add","text":"

            Create a new trigger script with a template. The generated file has a bash shebang, a stdin reader using jq, and a basic JSON output structure.

            ctx trigger add <trigger-type> <name>\n

            Arguments:

            • trigger-type: One of session-start, session-end, pre-tool-use, post-tool-use, file-save, context-add
            • name: Script name (without .sh extension)

            Examples:

            ctx trigger add session-start inject-context\n# Created .context/hooks/session-start/inject-context.sh\n\nctx trigger add pre-tool-use block-legacy\n# Created .context/hooks/pre-tool-use/block-legacy.sh\n

            The generated script is not executable by default. Enable it with ctx trigger enable after reviewing the contents.

            ","path":["CLI","Integrations","Trigger"],"tags":[]},{"location":"cli/trigger/#ctx-trigger-list","level":3,"title":"ctx trigger list","text":"

            List all discovered triggers, grouped by trigger type, with their enabled/disabled status.

            Examples:

            ctx trigger list\n
            ","path":["CLI","Integrations","Trigger"],"tags":[]},{"location":"cli/trigger/#ctx-trigger-test","level":3,"title":"ctx trigger test","text":"

            Run all enabled triggers of a given type against a mock payload. Use --tool and --path to customize the mock input for tool-related events.

            ctx trigger test <trigger-type> [flags]\n

            Flags:

            Flag Description --tool Tool name to put in mock input --path File path to put in mock input

            Examples:

            ctx trigger test session-start\nctx trigger test pre-tool-use --tool write_file --path src/main.go\n
            ","path":["CLI","Integrations","Trigger"],"tags":[]},{"location":"cli/trigger/#ctx-trigger-enable","level":3,"title":"ctx trigger enable","text":"

            Enable a trigger by setting its executable permission bit. Searches every trigger-type directory for a script matching <name>.

            ctx trigger enable <name>\n

            Examples:

            ctx trigger enable inject-context\n# Enabled .context/hooks/session-start/inject-context.sh\n
            ","path":["CLI","Integrations","Trigger"],"tags":[]},{"location":"cli/trigger/#ctx-trigger-disable","level":3,"title":"ctx trigger disable","text":"

            Disable a trigger by clearing its executable permission bit. Searches every trigger-type directory for a script matching <name>.

            ctx trigger disable <name>\n

            Examples:

            ctx trigger disable inject-context\n# Disabled .context/hooks/session-start/inject-context.sh\n
            ","path":["CLI","Integrations","Trigger"],"tags":[]},{"location":"cli/trigger/#three-hooking-concepts-in-ctx-dont-confuse-them","level":3,"title":"Three Hooking Concepts in ctx (Don't Confuse Them)","text":"

            This is a common source of confusion. ctx has three distinct hook-like layers, and they serve different purposes:

            Layer Owned by Where it runs Configured via ctx trigger You .context/hooks/<type>/*.sh ctx trigger add/enable ctx system hooks ctx itself built-in, called by ctx's own lifecycle internal (see ctx system --help) Claude Code hooks Claude Code .claude/settings.local.json edit JSON, or /ctx-sanitize-permissions

            Use ctx trigger when you want project-specific automation that your AI tool will run at lifecycle events. Use Claude Code hooks for tool-specific integrations that don't need to be portable across tools. ctx system hooks are not something you author; they're the internal nudge machinery that ships with ctx.

            ","path":["CLI","Integrations","Trigger"],"tags":[]},{"location":"cli/trigger/#see-also","level":3,"title":"See Also","text":"
            • ctx steering: persistent AI behavioral rules (a different concept; rules vs scripts)
            • Authoring triggers recipe: a full walkthrough with security guidance
            ","path":["CLI","Integrations","Trigger"],"tags":[]},{"location":"cli/usage/","level":1,"title":"Usage","text":"","path":["CLI","Diagnostics","Usage"],"tags":[]},{"location":"cli/usage/#ctx-usage","level":3,"title":"ctx usage","text":"

            Display per-session token usage statistics from the local stats JSONL files written by the heartbeat hook. By default, shows the last 20 entries across all sessions. Use --follow to stream new entries as they arrive (like tail -f).

            ctx usage [flags]\n

            Flags:

            Flag Description -f, --follow Stream new entries as they arrive -s, --session Filter by session ID (prefix match) -n, --last Show last N entries (default: 20) -j, --json Output raw JSONL

            Examples:

            ctx usage                     # Last 20 entries across all sessions\nctx usage --follow            # Live stream (like tail -f)\nctx usage --session abc123    # Filter to one session\nctx usage --last 100 --json   # Last 100 as raw JSONL\n
            ","path":["CLI","Diagnostics","Usage"],"tags":[]},{"location":"cli/watch/","level":1,"title":"Watch","text":"","path":["CLI","Context","Watch"],"tags":[]},{"location":"cli/watch/#ctx-watch","level":2,"title":"ctx watch","text":"

            Watch for AI output and auto-apply context updates.

            Parses <context-update> XML commands from AI output and applies them to context files.

            ctx watch [flags]\n

            Flags:

            Flag Description --log <file> Log file to watch (default: stdin) --dry-run Preview updates without applying

            Examples:

            # Watch stdin\nai-tool | ctx watch\n\n# Watch a log file\nctx watch --log /path/to/ai-output.log\n\n# Preview without applying\nctx watch --dry-run\n
            ","path":["CLI","Context","Watch"],"tags":[]},{"location":"cli/why/","level":1,"title":"Why","text":"","path":["CLI","Getting Started","Why"],"tags":[]},{"location":"cli/why/#ctx-why","level":2,"title":"ctx why","text":"

            Read ctx's philosophy documents directly in the terminal.

            ctx why [DOCUMENT]\n

            Documents:

            Name Description manifesto The ctx Manifesto: creation, not code about About ctx: what it is and why it exists invariants Design invariants: properties that must hold

            Examples:

            # Interactive numbered menu\nctx why\n\n# Show a specific document\nctx why manifesto\nctx why about\nctx why invariants\n\n# Pipe to a pager\nctx why manifesto | less\n
            ","path":["CLI","Getting Started","Why"],"tags":[]},{"location":"home/","level":1,"title":"Home","text":"
            • ctx is not a prompt.
            • ctx is version-controlled cognitive state.

            ctx is the persistence layer for human-AI reasoning.

            Deterministic. Git-native. Human-readable. Local-first.

            Start here.

            Learn what ctx does, set it up, and run your first session.

            Pre-1.0: Moving Fast

            ctx is under active development. This website tracks the development branch, not the latest release:

            Some features described here may not exist in the binary you have installed.

            Expect rough edges.

            If something is missing or broken, open an issue.

            ","path":["Home","Introduction","Home"],"tags":[]},{"location":"home/#introduction","level":2,"title":"Introduction","text":"","path":["Home","Introduction","Home"],"tags":[]},{"location":"home/#about","level":3,"title":"About","text":"

            What ctx is, how it works, and why persistent context changes how you work with AI.

            ","path":["Home","Introduction","Home"],"tags":[]},{"location":"home/#is-it-right-for-me","level":3,"title":"Is It Right for Me?","text":"

            Good fit, not-so-good fit, and a 5-minute trial to find out for yourself.

            ","path":["Home","Introduction","Home"],"tags":[]},{"location":"home/#faq","level":3,"title":"FAQ","text":"

            Quick answers to the questions newcomers ask most about ctx, files, tooling, and trade-offs.

            ","path":["Home","Introduction","Home"],"tags":[]},{"location":"home/#get-started","level":2,"title":"Get Started","text":"","path":["Home","Introduction","Home"],"tags":[]},{"location":"home/#getting-started","level":3,"title":"Getting Started","text":"

            Install the binary, set up the plugin, and verify it works.

            ","path":["Home","Introduction","Home"],"tags":[]},{"location":"home/#your-first-session","level":3,"title":"Your First Session","text":"

            Step-by-step walkthrough from ctx init to verified recall.

            ","path":["Home","Introduction","Home"],"tags":[]},{"location":"home/#common-workflows","level":3,"title":"Common Workflows","text":"

            Day-to-day commands for tracking context, checking health, and browsing history.

            ","path":["Home","Introduction","Home"],"tags":[]},{"location":"home/#concepts","level":2,"title":"Concepts","text":"","path":["Home","Introduction","Home"],"tags":[]},{"location":"home/#context-files","level":3,"title":"Context Files","text":"

            What each .context/ file does, what its purpose is, and how best to leverage it.

            ","path":["Home","Introduction","Home"],"tags":[]},{"location":"home/#configuration","level":3,"title":"Configuration","text":"

            Flexible configuration: .ctxrc, environment variables, and CLI flags.

            ","path":["Home","Introduction","Home"],"tags":[]},{"location":"home/#hub","level":3,"title":"Hub","text":"

            A fan-out channel for decisions, learnings, conventions, and tasks that need to cross project boundaries, without replicating everything else.

            ","path":["Home","Introduction","Home"],"tags":[]},{"location":"home/#working-with-ai","level":2,"title":"Working with AI","text":"","path":["Home","Introduction","Home"],"tags":[]},{"location":"home/#prompting-guide","level":3,"title":"Prompting Guide","text":"

            Effective prompts for AI sessions with ctx.

            ","path":["Home","Introduction","Home"],"tags":[]},{"location":"home/#keeping-ai-honest","level":3,"title":"Keeping AI Honest","text":"

            AI agents confabulate: they invent history, claim familiarity with decisions never made, and sometimes declare tasks complete when they aren't. Tools and habits to push back.

            ","path":["Home","Introduction","Home"],"tags":[]},{"location":"home/#my-ai-keeps-making-the-same-mistakes","level":3,"title":"My AI Keeps Making the Same Mistakes","text":"

            Stop rediscovering the same bugs and dead-ends across sessions.

            ","path":["Home","Introduction","Home"],"tags":[]},{"location":"home/#joining-a-project","level":3,"title":"Joining a Project","text":"

            You inherited a .context/ directory. Get oriented fast: priority order, what to read first, how to ramp up.

            ","path":["Home","Introduction","Home"],"tags":[]},{"location":"home/#customization","level":2,"title":"Customization","text":"","path":["Home","Introduction","Home"],"tags":[]},{"location":"home/#steering-files","level":3,"title":"Steering Files","text":"

            Tell the assistant how to behave when a specific kind of prompt arrives.

            ","path":["Home","Introduction","Home"],"tags":[]},{"location":"home/#lifecycle-triggers","level":3,"title":"Lifecycle Triggers","text":"

            Make things happen at session boundaries: block dangerous tool calls, inject standup notes, log file saves.

            ","path":["Home","Introduction","Home"],"tags":[]},{"location":"home/#community","level":2,"title":"Community","text":"","path":["Home","Introduction","Home"],"tags":[]},{"location":"home/#ctx","level":3,"title":"#ctx","text":"

            We are the builders who care about durable context. Join the community. Hang out in IRC. Star ctx on GitHub.

            ","path":["Home","Introduction","Home"],"tags":[]},{"location":"home/#contributing","level":3,"title":"Contributing","text":"

            Development setup, project layout, and pull request process.

            ","path":["Home","Introduction","Home"],"tags":[]},{"location":"home/about/","level":1,"title":"About","text":"

            \"Creation, not code; Context, not prompts; Verification, not vibes.\"

            Read the ctx Manifesto →

            \"Without durable context, intelligence resets; with ctx, creation compounds.\"

            Without persistent memory, every session starts at zero; ctx makes sessions cumulative.

            Join the ctx Community →

            ","path":["Home","Introduction","About"],"tags":[]},{"location":"home/about/#what-is-ctx","level":2,"title":"What Is ctx?","text":"

            ctx (Context) is a file-based system that enables AI coding assistants to persist project knowledge across sessions. It lives in a .context/ directory in your repo.

            • A session is interactive.
            • ctx enables cognitive continuity.
            • Cognitive continuity enables durable, symbiotic-like human-AI workflows.

            Context Files

            Context files let AI tools remember decisions, conventions, and learnings:

            Context files are explicit and versionable contracts between you and your agents.

            ","path":["Home","Introduction","About"],"tags":[]},{"location":"home/about/#why-do-i-keep-re-explaining-my-codebase","level":2,"title":"Why Do I Keep Re-Explaining My Codebase?!?!","text":"

            You open a new AI session. The first thing you do is re-explain your project.

            Again.

            The architecture, the database choice, the naming conventions, the thing you tried last week that didn't work. You have said all of this before (maybe yesterday, maybe an hour ago), but the AI doesn't know that.

            • You explain the same architecture every session;
            • The AI suggests an approach you already rejected, again;
            • A decision you made three sessions ago gets relitigated from scratch;
            • You spend more time setting context than building features.

            This isn't AI failure: it's staleness.

            Without persistent memory, every session starts at zero.

            ","path":["Home","Introduction","About"],"tags":[]},{"location":"home/about/#day-and-night","level":3,"title":"Day and Night","text":"

            ctx gives your AI a memory that persists across sessions:

            Without ctxWith ctx
            Session 12: Monday morning\n\nAI:  \"I'll scaffold a MySQL schema for this.\"\nYou: \"We use PostgreSQL.\"\n\nAI:  \"For auth, session-based would be simplest.\"\nYou: \"We decided on JWT.\"\n\nAI:  \"Endpoints in camelCase?\"\nYou: \"snake_case! Like the rest of the API!!!!!!\"\n\nYou: *scrolls up to copy an existing endpoint*\n\n20 minutes spent restoring decisions that already exist.\n
            Session 12: Monday morning\n\nYou:  \"Let's continue the auth implementation.\"\nAI:   \"Per DECISIONS.md (2026-02-12): JWT, not sessions: \n       The project uses PostgreSQL with JWT\n       authentication. The API follows snake_case conventions.\n       Last session you completed the login endpoint: the\n       registration endpoint is next on TASKS.md. Want me to\n       start there?\"\n\n0 minutes re-explaining. Straight to building.\n
            ","path":["Home","Introduction","About"],"tags":[]},{"location":"home/about/#how-ctx-solves-this","level":3,"title":"How ctx Solves This","text":"

            ctx creates a .context/ directory in your project that stores structured knowledge files:

            File What It Remembers TASKS.md What you're working on and what's next DECISIONS.md Architectural choices and why you made them LEARNINGS.md Gotchas, bugs, things that didn't work CONVENTIONS.md Naming patterns, code style, project rules CONSTITUTION.md Hard rules the AI must never violate

            These files can version with your code in git:

            • They load automatically at the session start (via hooks in Claude Code, or manually with ctx agent for other tools).
            • The AI reads them, cites them, and builds on them, instead of asking you to start over.
              • And when it acts, it can point to the exact file and line that justifies the choice.

            Every decision you record, every lesson you capture, makes the next session smarter.

            ctx accumulates.

            Connect with ctx

            • Join the Community →: ask questions, share workflows, and help shape what comes next
            • Read the Blog →: real-world patterns, ponderings, and lessons learned from building ctx using ctx

            Ready to Get Started?

            • Getting Started →: full installation and setup
            • Your First Session →: step-by-step walkthrough from ctx init to verified recall
            ","path":["Home","Introduction","About"],"tags":[]},{"location":"home/common-workflows/","level":1,"title":"Common Workflows","text":"

            The commands below cover what you'll use most often:

            • recording context,
            • checking health,
            • browsing history,
            • and running loops.

            Each section is a self-contained snippet you can copy into your terminal.

            For deeper, step-by-step guides, see Recipes.

            ","path":["Home","Get Started","Common Workflows"],"tags":[]},{"location":"home/common-workflows/#track-context","level":2,"title":"Track Context","text":"

            Prefer Skills over Raw Commands

            When working with an AI agent, use /ctx-task-add, /ctx-decision-add, or /ctx-learning-add instead of raw ctx add commands. The agent automatically picks up session ID, branch, and commit hash from its context, so no manual flags are needed.

            # Add a task\nctx add task \"Implement user authentication\" \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n\n# Record a decision (full ADR fields required)\nctx add decision \"Use PostgreSQL for primary database\" \\\n  --context \"Need a reliable database for production\" \\\n  --rationale \"PostgreSQL offers ACID compliance and JSON support\" \\\n  --consequence \"Team needs PostgreSQL training\" \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n\n# Note a learning\nctx add learning \"Mock functions must be hoisted in Jest\" \\\n  --context \"Tests failed with undefined mock errors\" \\\n  --lesson \"Jest hoists mock calls to top of file\" \\\n  --application \"Place jest.mock() before imports\" \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n\n# Mark task complete\nctx task complete \"user auth\"\n
            ","path":["Home","Get Started","Common Workflows"],"tags":[]},{"location":"home/common-workflows/#leave-a-reminder-for-next-session","level":2,"title":"Leave a Reminder for Next Session","text":"

            Drop a note that surfaces automatically at the start of your next session:

            # Leave a reminder\nctx remind \"refactor the swagger definitions\"\n\n# Date-gated: don't surface until a specific date\nctx remind \"check CI after the deploy\" --after 2026-02-25\n\n# List pending reminders\nctx remind list\n\n# Dismiss reminders by ID (supports ranges)\nctx remind dismiss 1\nctx remind dismiss 3 5-7\n

            Reminders are relayed verbatim at session start by the check-reminders hook and repeat every session until you dismiss them.

            See Session Reminders for the full recipe.

            ","path":["Home","Get Started","Common Workflows"],"tags":[]},{"location":"home/common-workflows/#check-context-health","level":2,"title":"Check Context Health","text":"
            # Detect stale paths, missing files, potential secrets\nctx drift\n\n# See full context summary\nctx status\n
            ","path":["Home","Get Started","Common Workflows"],"tags":[]},{"location":"home/common-workflows/#browse-session-history","level":2,"title":"Browse Session History","text":"

            List and search past AI sessions from the terminal:

            ctx journal source --limit 5\n
            ","path":["Home","Get Started","Common Workflows"],"tags":[]},{"location":"home/common-workflows/#journal-site","level":3,"title":"Journal Site","text":"

            Import session transcripts to a browsable static site with search, navigation, and topic indices.

            The ctx journal command requires zensical (Python >= 3.10).

            zensical is a Python-based static site generator from the Material for MkDocs team.

            (why zensical?).

            If you don't have it on your system, install zensical once with pipx:

            # One-time setup\npipx install zensical\n

            Avoid pip install zensical

            pip install often fails: For example, on macOS, system Python installs a non-functional stub (zensical requires Python >= 3.10), and Homebrew Python blocks system-wide installs (PEP 668).

            pipx creates an isolated environment with the correct Python version automatically.

            ","path":["Home","Get Started","Common Workflows"],"tags":[]},{"location":"home/common-workflows/#import-and-serve","level":3,"title":"Import and Serve","text":"

            Then, import and serve:

            # Import all sessions to .context/journal/ (only new files)\nctx journal import --all\n\n# Generate and serve the journal site\nctx journal site --serve\n

            Open http://localhost:8000 to browse.

            To update after new sessions, run the same two commands again.

            ","path":["Home","Get Started","Common Workflows"],"tags":[]},{"location":"home/common-workflows/#safe-by-default","level":3,"title":"Safe by Default","text":"

            ctx journal import --all is safe by default:

            • It only imports new sessions and skips existing files.
            • Locked entries (via ctx journal lock) are always skipped by both import and enrichment skills.
            • If you add locked: true to frontmatter during enrichment, run ctx journal sync to propagate the lock state to .state.json.
            ","path":["Home","Get Started","Common Workflows"],"tags":[]},{"location":"home/common-workflows/#re-importing-existing-files","level":3,"title":"Re-Importing Existing Files","text":"

            Here is how you regenerate existing files.

            Back up your .context folder before regeneration, as this is a potentially destructive action.

            To re-import journal files, you need to explicitly opt-in using the --regenerate flag:

            Flag combination Frontmatter Body --regenerate Preserved Overwritten from source --regenerate --keep-frontmatter=false Overwritten Overwritten

            Regeneration Overwrites Body Edits

            --regenerate preserves your YAML frontmatter (tags, summary, enrichment metadata) but it replaces the Markdown body with a fresh import.

            Any manual edits you made to the transcript will be lost.

            Lock entries you want to protect first: ctx journal lock <session-id>.

            See Session Journal for the full pipeline including normalization and enrichment.

            ","path":["Home","Get Started","Common Workflows"],"tags":[]},{"location":"home/common-workflows/#scratchpad","level":2,"title":"Scratchpad","text":"

            Store short, sensitive one-liners in an encrypted scratchpad that travels with the project:

            # Write a note\nctx pad set db-password \"postgres://user:pass@localhost/mydb\"\n\n# Read it back\nctx pad get db-password\n\n# List all keys\nctx pad list\n

            The scratchpad is encrypted with a key stored at ~/.ctx/.ctx.key (outside the project, never committed).

            See Scratchpad for details.

            ","path":["Home","Get Started","Common Workflows"],"tags":[]},{"location":"home/common-workflows/#run-an-autonomous-loop","level":2,"title":"Run an Autonomous Loop","text":"

            Generate a script that iterates an AI agent until a completion signal is detected:

            ctx loop\nchmod +x loop.sh\n./loop.sh\n

            See Autonomous Loops for configuration and advanced usage.

            ","path":["Home","Get Started","Common Workflows"],"tags":[]},{"location":"home/common-workflows/#trace-commit-context","level":2,"title":"Trace Commit Context","text":"

            Link your git commits back to the decisions, tasks, and learnings that motivated them. Enable the hook once:

            # Install the git hook (one-time setup)\nctx trace hook enable\n

            From now on, every git commit automatically gets a ctx-context trailer linking it to relevant context. No extra steps needed; just use ctx add, ctx task complete, and commit as usual.

            # Later: why was this commit made?\nctx trace abc123\n\n# Recent commits with their context\nctx trace --last 10\n\n# Context trail for a specific file\nctx trace file src/auth.go\n\n# Manually tag a commit after the fact\nctx trace tag HEAD --note \"Hotfix for production outage\"\n

            To stop: ctx trace hook disable.

            See CLI Reference: trace for details.

            ","path":["Home","Get Started","Common Workflows"],"tags":[]},{"location":"home/common-workflows/#agent-session-start","level":2,"title":"Agent Session Start","text":"

            The first thing an AI agent should do at session start is discover where context lives:

            ctx system bootstrap\n

            This prints the resolved context directory, the files in it, and the operating rules. The CLAUDE.md template instructs the agent to run this automatically. See CLI Reference: bootstrap.

            ","path":["Home","Get Started","Common Workflows"],"tags":[]},{"location":"home/common-workflows/#the-two-skills-you-should-always-use","level":2,"title":"The Two Skills You Should Always Use","text":"

            Using /ctx-remember at session start and /ctx-wrap-up at session end are the highest-value skills in the entire catalog:

            # session begins:\n/ctx-remember\n... do work ...\n# before closing the session:\n/ctx-wrap-up\n

            Let's provide some context, because this is important:

            Although the agent will eventually discover your context through CLAUDE.md → AGENT_PLAYBOOK.md, /ctx-remember hydrates the full context up front (tasks, decisions, recent sessions) so the agent starts informed rather than piecing things together over several turns.

            /ctx-wrap-up is the other half: A structured review that captures learnings, decisions, and tasks before you close the window.

            Hooks like check-persistence remind you (the user) mid-session that context hasn't been saved in a while, but they don't trigger persistence automatically: You still have to act. Also, a CTRL+C can end things at any moment with no reliable \"before session end\" event.

            In short, /ctx-wrap-up is the deliberate checkpoint that makes sure nothing slips through. And /ctx-remember is its mirror skill to be used at session start.

            See Session Ceremonies for the full workflow.

            ","path":["Home","Get Started","Common Workflows"],"tags":[]},{"location":"home/common-workflows/#cli-commands-vs-ai-skills","level":2,"title":"CLI Commands vs. AI Skills","text":"

            Most ctx operations come in two flavors: a CLI command you run in your terminal and an AI skill (slash command) you invoke inside your coding assistant.

            Commands and skills are not interchangeable: Each has a distinct role.

            ctx CLI command ctx AI skill Runs where Your terminal Inside the AI assistant Speed Fast (milliseconds) Slower (LLM round-trip) Cost Free Consumes tokens and context Analysis Deterministic heuristics Semantic / judgment-based Best for Quick checks, scripting, CI Deep analysis, generation, workflow orchestration","path":["Home","Get Started","Common Workflows"],"tags":[]},{"location":"home/common-workflows/#paired-commands","level":3,"title":"Paired Commands","text":"

            These have both a CLI and a skill counterpart. Use the CLI for quick, deterministic checks; use the skill when you need the agent's judgment.

            CLI Skill When to prefer the skill ctx drift /ctx-drift Semantic analysis: catches meaning drift the CLI misses ctx status /ctx-status Interpreted summary with recommendations ctx add task /ctx-task-add Agent decomposes vague goals into concrete tasks ctx add decision /ctx-decision-add Agent drafts rationale and consequences from discussion ctx add learning /ctx-learning-add Agent extracts the lesson from a debugging session ctx add convention /ctx-convention-add Agent observes a repeated pattern and codifies it ctx task archive /ctx-archive Agent reviews which tasks are truly done ctx pad /ctx-pad Agent reads/writes scratchpad entries in conversation flow ctx journal /ctx-history Agent searches session history with semantic understanding ctx agent /ctx-agent Agent loads and acts on the context packet ctx loop /ctx-loop Agent tailors the loop script to your project ctx doctor /ctx-doctor Agent adds semantic analysis to structural checks ctx hook pause /ctx-pause Agent pauses hooks with session-aware reasoning ctx hook resume /ctx-resume Agent resumes hooks after a pause ctx remind /ctx-remind Agent manages reminders in conversation flow","path":["Home","Get Started","Common Workflows"],"tags":[]},{"location":"home/common-workflows/#ai-only-skills","level":3,"title":"AI-Only Skills","text":"

            These have no CLI equivalent. They require the agent's reasoning.

            Skill Purpose /ctx-remember Load context and present structured readback at session start /ctx-wrap-up End-of-session ceremony: persist learnings, decisions, tasks /ctx-next Suggest 1-3 concrete next actions from context /ctx-commit Commit with integrated context capture /ctx-reflect Pause and assess session progress /ctx-consolidate Merge overlapping learnings or decisions /ctx-prompt-audit Analyze prompting patterns for improvement /ctx-plan-import Import Claude Code plan files into project specs /ctx-implement Execute a plan step-by-step with verification /ctx-worktree Manage parallel agent worktrees /ctx-journal-enrich Add metadata, tags, and summaries to journal entries /ctx-journal-enrich-all Full journal pipeline: export if needed, then batch-enrich /ctx-blog Generate a blog post (zensical-flavored Markdown) /ctx-blog-changelog Generate themed blog post from commits between releases /ctx-architecture Build and maintain architecture maps (ARCHITECTURE.md, DETAILED_DESIGN.md)","path":["Home","Get Started","Common Workflows"],"tags":[]},{"location":"home/common-workflows/#cli-only-commands","level":3,"title":"CLI-Only Commands","text":"

            These are infrastructure: used in scripts, CI, or one-time setup.

            Command Purpose ctx init Initialize .context/ directory ctx load Output assembled context for piping ctx task complete Mark a task done by substring match ctx sync Reconcile context with codebase state ctx compact Consolidate and clean up context files ctx trace Show context behind git commits ctx trace hook Enable/disable commit context tracing hook ctx setup Generate AI tool integration config ctx watch Watch AI output and auto-apply context updates ctx serve Serve any zensical directory (default: journal) ctx permission snapshot Save settings as a golden image ctx permission restore Restore settings from golden image ctx journal site Generate browsable journal from exports ctx hook notify setup Configure webhook notifications ctx decision List and filter decisions ctx learning List and filter learnings ctx task List tasks, manage archival and snapshots ctx why Read the philosophy behind ctx ctx guide Quick-reference cheat sheet ctx site Site management commands ctx config Manage runtime configuration profiles ctx system System diagnostics and hook commands ctx backup Back up context and Claude data to tar.gz / SMB ctx completion Generate shell autocompletion scripts

            Rule of Thumb

            Quick check? Use the CLI.

            Need judgment? Use the skill.

            When in doubt, start with the CLI: It's free and instant.

            Escalate to the skill when heuristics aren't enough.

            Next Up: Context Files →: what each .context/ file does and how to use it

            See Also:

            • Recipes: targeted how-to guides for specific tasks
            • Knowledge Capture: patterns for recording decisions, learnings, and conventions
            • Context Health: keeping your .context/ accurate and drift-free
            • Session Archaeology: digging into past sessions
            • Task Management: tracking and completing work items
            ","path":["Home","Get Started","Common Workflows"],"tags":[]},{"location":"home/community/","level":1,"title":"#ctx","text":"

            Open source is better together.

            We are the builders who care about durable context, verifiable decisions, and human-AI workflows that compound over time.

            ","path":["Home","Community","#ctx"],"tags":[]},{"location":"home/community/#help-ctx-change-how-ai-remembers","level":2,"title":"Help ctx Change How AI Remembers","text":"

            If you like the idea, a star helps ctx reach engineers who run into context drift every day:

            Star ctx on GitHub ⭐

            ","path":["Home","Community","#ctx"],"tags":[]},{"location":"home/community/#ctx-you","level":2,"title":"ctx ♥️ You","text":"

            Join the community to ask questions, share feedback, and connect with other users:

            • Discord join the ctx Discord: Real-time discussion, field notes, and early ideas.
            • Read the ctx Source on GitHub: Issues, discussions, and contributions.
            ","path":["Home","Community","#ctx"],"tags":[]},{"location":"home/community/#want-to-contribute","level":2,"title":"Want to Contribute?","text":"

            Early adopters shape the conventions.

            ctx is free and open source software.

            Contributions are always welcome and appreciated.

            ","path":["Home","Community","#ctx"],"tags":[]},{"location":"home/community/#code-of-conduct","level":2,"title":"Code of Conduct","text":"

            Clear context requires respectful collaboration.

            ctx follows the Contributor Covenant.

            ","path":["Home","Community","#ctx"],"tags":[]},{"location":"home/configuration/","level":1,"title":"Configuration","text":"","path":["Home","Concepts","Configuration"],"tags":[]},{"location":"home/configuration/#configuration","level":2,"title":"Configuration","text":"

            ctx uses three layers of configuration. Each layer overrides the one below it:

            1. CLI flags: Per-invocation overrides (highest priority)
            2. Environment variables: Shell or CI/CD overrides
            3. The .ctxrc file: Project-level defaults (YAML)
            4. Built-in defaults: Hardcoded fallbacks (lowest priority)

            All settings are optional: If nothing is configured, ctx works out of the box with sensible defaults.

            ","path":["Home","Concepts","Configuration"],"tags":[]},{"location":"home/configuration/#the-ctxrc-file","level":2,"title":"The .ctxrc File","text":"

            The .ctxrc file is an optional YAML file placed in the project root (next to your .context/ directory). It lets you set project-level defaults that apply to every ctx command.

            ","path":["Home","Concepts","Configuration"],"tags":[]},{"location":"home/configuration/#location","level":3,"title":"Location","text":"
            my-project/\n├── .ctxrc              ← configuration file\n├── .context/\n│   ├── TASKS.md\n│   ├── DECISIONS.md\n│   └── ...\n└── src/\n

            ctx looks for .ctxrc in the current working directory when any command runs. There is no global or user-level config file: Configuration is always per-project.

            Contributors: Dev Configuration Profile

            The ctx repo ships two .ctxrc source profiles (.ctxrc.base and .ctxrc.dev). The working copy is gitignored and swapped between them via ctx config switch dev / ctx config switch base. See Contributing: Configuration Profiles.

            Using a Different .context Directory

            The default .context/ directory can be changed per-project via the context_dir key in .ctxrc, the CTX_DIR environment variable, or the --context-dir CLI flag.

            See Environment Variables and CLI Global Flags below for details.

            ","path":["Home","Concepts","Configuration"],"tags":[]},{"location":"home/configuration/#full-reference","level":3,"title":"Full Reference","text":"

            A commented .ctxrc showing all options and their defaults:

            # .ctxrc: ctx runtime configuration\n# https://ctx.ist/configuration/\n#\n# All settings are optional. Missing values use defaults.\n# Priority: CLI flags > environment variables > .ctxrc > defaults\n#\n# context_dir: .context\n# token_budget: 8000\n# auto_archive: true\n# archive_after_days: 7\n# scratchpad_encrypt: true\n# allow_outside_cwd: false\n# event_log: false\n# entry_count_learnings: 30\n# entry_count_decisions: 20\n# convention_line_count: 200\n# injection_token_warn: 15000\n# context_window: 200000      # auto-detected for Claude Code; override for other tools\n# billing_token_warn: 0       # one-shot warning at this token count (0 = disabled)\n#\n# stale_age_days: 30      # days before drift flags a context file as stale (0 = disabled)\n# key_rotation_days: 90\n# task_nudge_interval: 5   # Edit/Write calls between task completion nudges\n#\n# notify:               # requires: ctx hook notify setup\n#   events:             # required: no events sent unless listed\n#     - loop\n#     - nudge\n#     - relay\n#\n# tool: \"\"              # Active AI tool: claude, cursor, cline, kiro, codex\n#\n# steering:             # Steering layer configuration\n#   dir: .context/steering\n#   default_inclusion: manual\n#   default_tools: []\n#\n# hooks:                # Hook system configuration\n#   dir: .context/hooks\n#   timeout: 10\n#   enabled: true\n#\n# provenance_required:  # Relax provenance flags for ctx add\n#   session_id: true    # Require --session-id (default: true)\n#   branch: true        # Require --branch (default: true)\n#   commit: true        # Require --commit (default: true)\n#\n# priority_order:\n#   - CONSTITUTION.md\n#   - TASKS.md\n#   - CONVENTIONS.md\n#   - ARCHITECTURE.md\n#   - DECISIONS.md\n#   - LEARNINGS.md\n#   - GLOSSARY.md\n#   - AGENT_PLAYBOOK.md\n
            ","path":["Home","Concepts","Configuration"],"tags":[]},{"location":"home/configuration/#option-reference","level":3,"title":"Option Reference","text":"Option Type Default Description context_dir string .context Context directory name (relative to project root) token_budget int 8000 Default token budget for ctx agent and ctx load auto_archive bool true Auto-archive completed tasks during ctx compact archive_after_days int 7 Days before completed tasks are archived scratchpad_encrypt bool true Encrypt scratchpad with AES-256-GCM allow_outside_cwd bool false Allow context directory outside the current working directory event_log bool false Enable local hook event logging to .context/state/events.jsonl entry_count_learnings int 30 Drift warning when LEARNINGS.md exceeds this entry count (0 = disable) entry_count_decisions int 20 Drift warning when DECISIONS.md exceeds this entry count (0 = disable) convention_line_count int 200 Drift warning when CONVENTIONS.md exceeds this line count (0 = disable) injection_token_warn int 15000 Warn when auto-injected context exceeds this token count (0 = disable) context_window int 200000 Context window size in tokens. Auto-detected for Claude Code (200k/1M); override for other AI tools billing_token_warn int 0 (off) One-shot warning when session tokens exceed this threshold (0 = disabled). For plans where tokens beyond an included allowance cost extra stale_age_days int 30 Days before ctx drift flags a context file as stale (0 = disable) key_rotation_days int 90 Days before encryption key rotation nudge task_nudge_interval int 5 Edit/Write calls between task completion nudges notify.events []string (all) Event filter for webhook notifications (empty = all) priority_order []string (see below) Custom file loading priority for context assembly tool string (empty) Active AI tool identifier (claude, cursor, cline, kiro, codex). 
Used by steering sync and hook dispatch steering.dir string .context/steering Steering files directory steering.default_inclusion string manual Default inclusion mode for new steering files (always, auto, manual) steering.default_tools []string (all) Default tool filter for new steering files (empty = all tools) hooks.dir string .context/hooks Hook scripts directory hooks.timeout int 10 Per-hook execution timeout in seconds hooks.enabled bool true Whether hook execution is enabled provenance_required.session_id bool true Require --session-id on ctx add for tasks, decisions, learnings provenance_required.branch bool true Require --branch on ctx add for tasks, decisions, learnings provenance_required.commit bool true Require --commit on ctx add for tasks, decisions, learnings

            Default priority order (used when priority_order is not set):

            1. CONSTITUTION.md
            2. TASKS.md
            3. CONVENTIONS.md
            4. ARCHITECTURE.md
            5. DECISIONS.md
            6. LEARNINGS.md
            7. GLOSSARY.md
            8. AGENT_PLAYBOOK.md

            See Context Files for the rationale behind this ordering.

            ","path":["Home","Concepts","Configuration"],"tags":[]},{"location":"home/configuration/#environment-variables","level":2,"title":"Environment Variables","text":"

            Environment variables override .ctxrc values but are overridden by CLI flags.

            Variable Description Equivalent .ctxrc key CTX_DIR Override the context directory path context_dir CTX_TOKEN_BUDGET Override the default token budget token_budget","path":["Home","Concepts","Configuration"],"tags":[]},{"location":"home/configuration/#examples","level":3,"title":"Examples","text":"
            # Use a shared context directory\nCTX_DIR=/shared/team-context ctx status\n\n# Increase token budget for a single run\nCTX_TOKEN_BUDGET=16000 ctx agent\n
            ","path":["Home","Concepts","Configuration"],"tags":[]},{"location":"home/configuration/#cli-global-flags","level":2,"title":"CLI Global Flags","text":"

            CLI flags have the highest priority and override both environment variables and .ctxrc settings. These flags are available on every ctx command.

            Flag Description --context-dir <path> Override context directory (default: .context/) --allow-outside-cwd Allow context directory outside current working directory --tool <name> Override active AI tool identifier (e.g. kiro, cursor) --version Show version and exit --help Show command help and exit","path":["Home","Concepts","Configuration"],"tags":[]},{"location":"home/configuration/#examples_1","level":3,"title":"Examples","text":"
            # Point to a different context directory:\nctx status --context-dir /path/to/shared/.context\n\n# Allow external context directory (skips boundary check):\nctx status --context-dir /mnt/nas/project-context --allow-outside-cwd\n
            ","path":["Home","Concepts","Configuration"],"tags":[]},{"location":"home/configuration/#priority-order","level":2,"title":"Priority Order","text":"

            When the same setting is configured in multiple layers, the highest-priority layer wins:

            CLI flags  >  Environment variables  >  .ctxrc  >  Built-in defaults\n(highest)                                          (lowest)\n

            Example resolution for context_dir:

            Layer Value Wins? --context-dir /tmp/ctx Yes CTX_DIR /shared/context No .ctxrc .my-context No Default .context No

            The CLI flag /tmp/ctx is used because it has the highest priority.

            If the CLI flag were absent, CTX_DIR=/shared/context would win. If neither the flag nor the env var were set, the .ctxrc value .my-context would be used. With nothing configured, the default .context applies.

            ","path":["Home","Concepts","Configuration"],"tags":[]},{"location":"home/configuration/#examples_2","level":2,"title":"Examples","text":"","path":["Home","Concepts","Configuration"],"tags":[]},{"location":"home/configuration/#external-context-directory","level":3,"title":"External .context Directory","text":"

            Store context outside the project tree (useful for monorepos or shared context):

            # .ctxrc\ncontext_dir: /home/team/shared-context\nallow_outside_cwd: true\n
            ","path":["Home","Concepts","Configuration"],"tags":[]},{"location":"home/configuration/#custom-token-budget","level":3,"title":"Custom Token Budget","text":"

            Increase the token budget for projects with large context:

            # .ctxrc\ntoken_budget: 16000\n

            This affects the default budget for ctx agent and ctx load. You can still override per-invocation with ctx agent --budget 4000.

            ","path":["Home","Concepts","Configuration"],"tags":[]},{"location":"home/configuration/#disabled-scratchpad-encryption","level":3,"title":"Disabled Scratchpad Encryption","text":"

            Turn off encryption for the scratchpad (useful in ephemeral environments where key management is unnecessary):

            # .ctxrc\nscratchpad_encrypt: false\n

            Unencrypted Scratchpads Store Secrets in Plaintext

            Only disable encryption if you understand the security implications.

            The scratchpad may contain sensitive data such as API keys, database URLs, or deployment credentials.

            ","path":["Home","Concepts","Configuration"],"tags":[]},{"location":"home/configuration/#custom-priority-order","level":3,"title":"Custom Priority Order","text":"

            Reorder context files to prioritize architecture over conventions:

            # .ctxrc\npriority_order:\n  - CONSTITUTION.md\n  - TASKS.md\n  - ARCHITECTURE.md\n  - DECISIONS.md\n  - CONVENTIONS.md\n  - LEARNINGS.md\n  - GLOSSARY.md\n  - AGENT_PLAYBOOK.md\n

            Files not listed in priority_order receive the lowest priority (100). The order affects ctx agent, ctx load, and drift's file-priority calculations.

            ","path":["Home","Concepts","Configuration"],"tags":[]},{"location":"home/configuration/#billing-token-threshold","level":3,"title":"Billing Token Threshold","text":"

            Get a one-shot warning when your session crosses a token threshold where extra charges begin (e.g., Claude Pro includes 200k tokens; beyond that costs extra):

            # .ctxrc\nbilling_token_warn: 180000   # warn before hitting the 200k paid boundary\n

            The warning fires once per session the first time token usage exceeds the threshold. Set to 0 (or omit) to disable.

            ","path":["Home","Concepts","Configuration"],"tags":[]},{"location":"home/configuration/#adjusted-drift-thresholds","level":3,"title":"Adjusted Drift Thresholds","text":"

            Raise or lower the entry-count thresholds that trigger drift warnings:

            # .ctxrc\nentry_count_learnings: 50   # warn above 50 learnings (default: 30)\nentry_count_decisions: 10   # warn above 10 decisions (default: 20)\nconvention_line_count: 300  # warn above 300 lines (default: 200)\n

            Set any threshold to 0 to disable that specific check.

            ","path":["Home","Concepts","Configuration"],"tags":[]},{"location":"home/configuration/#webhook-notifications","level":3,"title":"Webhook Notifications","text":"

            Get notified when loops complete, hooks fire, or agents reach milestones:

            # Configure the webhook URL (encrypted, safe to commit)\nctx hook notify setup\n\n# Test delivery\nctx hook notify test\n

            Filter which events reach your webhook:

            # .ctxrc\nnotify:\n  events:\n    - loop      # loop completion/max-iteration\n    - nudge     # VERBATIM relay hooks fired\n    # - relay   # all hook output (verbose, for debugging)\n    # - heartbeat  # every-prompt session-alive signal\n

            Notifications are opt-in: No events are sent unless explicitly listed.

            See Webhook Notifications for a step-by-step recipe.

            ","path":["Home","Concepts","Configuration"],"tags":[]},{"location":"home/configuration/#hook-message-overrides","level":2,"title":"Hook Message Overrides","text":"

            Hook messages control what text hooks emit when they fire. Each message can be overridden per-project by placing a text file at the matching path under .context/:

            .context/hooks/messages/{hook}/{variant}.txt\n

            The override takes priority over the embedded default compiled into the ctx binary. An empty file silences the message while preserving the hook's logic (counting, state tracking, cooldowns).

            Use ctx hook message to discover and manage overrides:

            ctx hook message list                      # see all messages\nctx hook message show qa-reminder gate     # view the current template\nctx hook message edit qa-reminder gate     # copy default for editing\nctx hook message reset qa-reminder gate    # revert to default\n

            See Customizing Hook Messages for detailed examples including Python, JavaScript, and silence configurations.

            ","path":["Home","Concepts","Configuration"],"tags":[]},{"location":"home/configuration/#agent-bootstrapping","level":2,"title":"Agent Bootstrapping","text":"

            AI agents need to know the resolved context directory at session start. The ctx system bootstrap command prints the context path, file list, and operating rules in both text and JSON formats:

            ctx system bootstrap          # text output for agents\nctx system bootstrap -q       # just the context directory path\nctx system bootstrap --json   # structured output for automation\n

            The CLAUDE.md template instructs the agent to run this as its first action. Every nudge (context checkpoint, persistence reminder, etc.) also includes a Context: <dir> footer that re-anchors the agent to the correct directory throughout the session.

            This replaces the previous approach of hardcoding .context/ paths in agent instructions.

            See CLI Reference: bootstrap for full details.

            See also: CLI Reference | Context Files | Scratchpad

            ","path":["Home","Concepts","Configuration"],"tags":[]},{"location":"home/context-files/","level":1,"title":"Context Files","text":"","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#context","level":2,"title":".context/","text":"

            Each context file in .context/ serves a specific purpose.

            Files are designed to be human-readable, AI-parseable, and token-efficient.

            ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#file-overview","level":2,"title":"File Overview","text":"

            The core context files live directly under .context/. They are the substrate ctx reads in priority order when assembling the agent context packet:

            File Purpose Priority CONSTITUTION.md Hard rules that must NEVER be violated 1 (highest) TASKS.md Current and planned work 2 CONVENTIONS.md Project patterns and standards 3 ARCHITECTURE.md System overview and components 4 DECISIONS.md Architectural decisions with rationale 5 LEARNINGS.md Lessons learned, gotchas, tips 6 GLOSSARY.md Domain terms and abbreviations 7 AGENT_PLAYBOOK.md Instructions for AI tools 8 (lowest)

            Two subdirectories under .context/ are implementation details that are user-editable but not part of the priority read order:

            • .context/templates/: format templates for ctx add decision and ctx add learning. See templates below.
            • .context/steering/: behavioral rules with YAML frontmatter that get synced into each AI tool's native config. See steering below, and the full Steering files page for the design and workflow.
            ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#outside-context","level":3,"title":"Outside .context/","text":"

            Two other moving parts are often confused with context files but are not under .context/:

            • Skills live in .claude/skills/ (project-local) or are provided by the installed ctx plugin. A typical project doesn't see the plugin's skills at all; they ride with the plugin and are owned by its update cycle. See ctx skill and Skills reference.
            • Hooks: Claude Code PreToolUse/PostToolUse/UserPromptSubmit entries configured in .claude/settings.json or shipped by a plugin. The ctx plugin registers its own hooks automatically; a typical project does not author hooks by hand, and any local edits to plugin-owned hook files will be overridden on the next plugin update. If you need to customize behavior, edit your own project settings, not the plugin's files. See Hook sequence diagrams.
            ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#read-order-rationale","level":2,"title":"Read Order Rationale","text":"

            The priority order follows a logical progression for AI tools:

            1. CONSTITUTION.md: Inviolable rules first. The AI tool must know what it cannot do before attempting anything.
            2. TASKS.md: Current work items. What the AI tool should focus on.
            3. CONVENTIONS.md: How to write code. Patterns and standards to follow when implementing tasks.
            4. ARCHITECTURE.md: System structure. Understanding of components and boundaries before making changes.
            5. DECISIONS.md: Historical context. Why things are the way they are, to avoid re-debating settled decisions.
            6. LEARNINGS.md: Gotchas and tips. Lessons from past work that inform the current implementation.
            7. GLOSSARY.md: Reference material. Domain terms and abbreviations for lookup as needed.
            8. AGENT_PLAYBOOK.md: Meta instructions last. How to use this context system itself. Loaded last because the agent should understand the content (rules, tasks, patterns) before the operating manual.
            ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#constitutionmd","level":2,"title":"CONSTITUTION.md","text":"

            Purpose: Define hard invariants: Rules that must NEVER be violated, regardless of the task.

            AI tools read this first and should refuse tasks that violate these rules.

            ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#structure","level":3,"title":"Structure","text":"
            # Constitution\n\nThese rules are INVIOLABLE. If a task requires violating these, the task \nis wrong.\n\n## Security Invariants\n\n* [ ] Never commit secrets, tokens, API keys, or credentials\n* [ ] Never store customer/user data in context files\n* [ ] Never disable security linters without documented exception\n\n## Quality Invariants\n\n* [ ] All code must pass tests before commit\n* [ ] No `any` types in TypeScript without documented reason\n* [ ] No TODO comments in main branch (*move to `TASKS.md`*)\n\n## Process Invariants\n\n* [ ] All architectural changes require a decision record\n* [ ] Breaking changes require version bump\n* [ ] Generated files are never committed\n
            ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#guidelines","level":3,"title":"Guidelines","text":"
            • Keep rules minimal and absolute
            • Each rule should be enforceable (can verify compliance)
            • Use checkbox format for clarity
            • Never compromise on these rules
            ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#tasksmd","level":2,"title":"TASKS.md","text":"

            Purpose: Track current work, planned work, and blockers.

            ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#structure_1","level":3,"title":"Structure","text":"

            Tasks are organized by Phase: logical groupings that preserve order and enable replay.

            Tasks stay in their Phase permanently; status is tracked via checkboxes and inline tags.

            # Tasks\n\n## Phase 1: Initial Setup\n\n* [x] Set up project structure\n* [x] Configure linting and formatting\n* [ ] Add CI/CD pipeline `#in-progress`\n\n## Phase 2: Core Features\n\n* [ ] Implement user authentication `#priority:high`\n* [ ] Add API rate limiting `#priority:medium`\n  * Blocked by: Need to finalize auth first\n\n## Backlog\n\n* [ ] Performance optimization `#priority:low`\n* [ ] Add metrics dashboard `#priority:deferred`\n

            Key principles:

            • Tasks never move between sections: mark as [x] or [-] in place
            • Use #in-progress inline tag to indicate current work
            • Phase headers provide structure and replay order
            • Backlog section for unscheduled work
            ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#tags","level":3,"title":"Tags","text":"

            Use inline backtick-wrapped tags for metadata:

            Tag Values Purpose #priority high, medium, low Task urgency #area core, cli, docs, tests Codebase area #estimate 1h, 4h, 1d Time estimate (optional) #in-progress (none) Currently being worked on

            Lifecycle tags (for session correlation):

            Tag Format When to add #added YYYY-MM-DD-HHMMSS Auto-added by ctx add task #started YYYY-MM-DD-HHMMSS When beginning work on the task

            These timestamps help correlate tasks with session files and track which session started vs completed work.

            ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#status-markers","level":3,"title":"Status Markers","text":"Marker Meaning [ ] Pending [x] Completed [-] Skipped (include reason)","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#guidelines_1","level":3,"title":"Guidelines","text":"
            • Never delete tasks; mark as [x] completed or [-] skipped
            • Never move tasks between sections; use inline tags for status
            • Use ctx task archive periodically to move completed tasks to archive
            • Mark current work with #in-progress inline tag
            ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#decisionsmd","level":2,"title":"DECISIONS.md","text":"

            Purpose: Record architectural decisions with rationale so they don't get re-debated.

            ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#structure_2","level":3,"title":"Structure","text":"
            # Decisions\n\n## [YYYY-MM-DD] Decision Title\n\n**Status**: Accepted | Superseded | Deprecated\n\n**Context**: What situation prompted this decision?\n\n**Decision**: What was decided?\n\n**Rationale**: Why was this the right choice?\n\n**Consequence**: What are the implications?\n\n**Alternatives Considered**:\n* Alternative A: Why rejected\n* Alternative B: Why rejected\n
            ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#example","level":3,"title":"Example","text":"
            ## [2025-01-15] Use TypeScript Strict Mode\n\n**Status**: Accepted\n\n**Context**: Starting a new project, need to choose the type-checking level.\n\n**Decision**: Enable TypeScript strict mode with all strict flags.\n\n**Rationale**: Catches more bugs at compile time. Team has experience\nwith strict mode. Upfront cost pays off in reduced runtime errors.\n\n**Consequence**: More verbose type annotations required. Some\nthird-party libraries need type assertions.\n\n**Alternatives Considered**:\n- Basic TypeScript: Rejected because it misses null checks\n- JavaScript with JSDoc: Rejected because tooling support is weaker\n
            ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#status-values","level":3,"title":"Status Values","text":"Status Meaning Accepted Current, active decision Superseded Replaced by newer decision (link to it) Deprecated No longer relevant","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#learningsmd","level":2,"title":"LEARNINGS.md","text":"

            Purpose: Capture lessons learned, gotchas, and tips that shouldn't be forgotten.

            ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#structure_3","level":3,"title":"Structure","text":"
            # Learnings\n\n## Category Name\n\n### Learning Title\n\n**Discovered**: YYYY-MM-DD\n\n**Context**: When/how was this learned?\n\n**Lesson**: What's the takeaway?\n\n**Application**: How should this inform future work?\n
            ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#example_1","level":3,"title":"Example","text":"
            ## Testing\n\n### Vitest Mocks Must Be Hoisted\n\n**Discovered**: 2025-01-15\n\n**Context**: Tests were failing intermittently when mocking fs module.\n\n**Lesson**: Vitest requires `vi.mock()` calls to be hoisted to the\ntop of the file. Dynamic mocks need `vi.doMock()` instead.\n\n**Application**: Always use `vi.mock()` at file top. Use `vi.doMock()`\nonly when mock needs runtime values.\n
            ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#categories","level":3,"title":"Categories","text":"

            Organize learnings by topic:

            • Testing
            • Build & Deploy
            • Performance
            • Security
            • Third-Party Libraries
            • Git and Workflow
            ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#conventionsmd","level":2,"title":"CONVENTIONS.md","text":"

            Purpose: Document project patterns, naming conventions, and standards.

            ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#structure_4","level":3,"title":"Structure","text":"
            # Conventions\n\n## Naming\n\n* **Files**: kebab-case for all source files\n* **Components**: PascalCase for React components\n* **Functions**: camelCase, verb-first (getUser, parseConfig)\n* **Constants**: SCREAMING_SNAKE_CASE\n\n## Patterns\n\n### Pattern Name\n\n**When to use**: Situation description\n\n**Implementation**:\n// in triple backticks\n// Example code\n\n**Why**: Rationale for this pattern\n
            ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#guidelines_2","level":3,"title":"Guidelines","text":"
            • Include concrete examples
            • Explain the \"why\" not just the \"what\"
            • Keep patterns minimal: Only document what's non-obvious
            ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#architecturemd","level":2,"title":"ARCHITECTURE.md","text":"

            Purpose: Provide system overview and component relationships.

            ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#structure_5","level":3,"title":"Structure","text":"
            # Architecture\n\n## Overview\n\nBrief description of what the system does and how it's organized.\n\n## Components\n\n### Component Name\n\n**Responsibility**: What this component does\n\n**Dependencies**: What it depends on\n\n**Dependents**: What depends on it\n\n**Key Files**:\n* path/to/file.ts: Description\n\n## Data Flow\n\nDescription or diagram of how data moves through the system.\n\n## Boundaries\n\nWhat's in scope vs out of scope for this codebase.\n
            ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#guidelines_3","level":3,"title":"Guidelines","text":"
            • Keep diagrams simple (Mermaid works well)
            • Focus on boundaries and interfaces
            • Update when major structural changes occur
            ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#glossarymd","level":2,"title":"GLOSSARY.md","text":"

            Purpose: Define domain terms, abbreviations, and project vocabulary.

            ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#structure_6","level":3,"title":"Structure","text":"
            # Glossary\n\n## Domain Terms\n\n### Term Name\n\n**Definition**: What it means in this project's context\n\n**Not to be confused with**: Similar terms that mean different things\n\n**Example**: How it's used\n\n## Abbreviations\n\n| Abbrev | Expansion                     | Context                |\n|--------|-------------------------------|------------------------|\n| ADR    | Architectural Decision Record | Decision documentation |\n| SUT    | System Under Test             | Testing                |\n
            ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#guidelines_4","level":3,"title":"Guidelines","text":"
            • Define project-specific meanings
            • Clarify potentially ambiguous terms
            • Include abbreviations used in code or docs
            ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#agent_playbookmd","level":2,"title":"AGENT_PLAYBOOK.md","text":"

            Purpose: Explicit instructions for how AI tools should read, apply, and update context.

            ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#key-sections","level":3,"title":"Key Sections","text":"

            Read Order: Priority order for loading context files

            When to Update: Events that trigger context updates

            How to Avoid Hallucinating Memory: Critical rules:

            1. Never assume: If not in files, you don't know it
            2. Never invent history: Don't claim \"we discussed\" without evidence
            3. Verify before referencing: Search files before citing
            4. When uncertain, say so
            5. Trust files over intuition

            Context Update Commands: Format for automated updates via ctx watch:

            <context-update type=\"task\">Implement rate limiting</context-update>\n<context-update type=\"complete\">user auth</context-update>\n<context-update type=\"learning\"\n  context=\"Debugging hooks\"\n  lesson=\"Hooks receive JSON via stdin\"\n  application=\"Parse JSON stdin with the host language\"\n>Hook Input Format</context-update>\n<context-update type=\"decision\"\n  context=\"Need a caching layer\"\n  rationale=\"Redis is fast and team has experience\"\n  consequence=\"Must provision Redis infrastructure\"\n>Use Redis for caching</context-update>\n

            See Integrations for full documentation.

            ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#templates","level":2,"title":"templates/","text":"

            Location: .context/templates/. Status: implementation detail, user-editable.

            Purpose: Format templates for ctx add decision and ctx add learning. These control the structure of new entries appended to DECISIONS.md and LEARNINGS.md.

            ctx init deploys two starter templates:

            • decision.md: sections Context, Rationale, Consequence
            • learning.md: sections Context, Lesson, Application
            ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#customizing","level":3,"title":"Customizing","text":"

            Edit the templates directly. Changes take effect immediately on the next ctx add command. For example, to add a \"References\" section to all new decisions, edit .context/templates/decision.md.

            Templates are committed to git, so customizations are shared with the team.

            ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#steering","level":2,"title":"steering/","text":"

            Location: .context/steering/. Status: implementation detail, user-editable.

            Purpose: Behavioral rules with YAML frontmatter that tell an AI assistant how to behave when a specific kind of prompt arrives. Unlike the core context files (which describe what the project is), steering files describe what to do and ride alongside the prompt through the AI tool's native rule pipeline (Claude Code, Cursor, Kiro, Cline). ctx matches steering files to prompts and syncs them out to each tool's config.

            ctx init scaffolds four foundation files:

            • product.md: who this project serves and why
            • tech.md: the technology stack and its constraints
            • structure.md: how the code is organized
            • workflow.md: how work moves through the system

            Each file carries YAML frontmatter describing when it applies (always, matching prompts, or manually referenced) and what tool scope it covers. The foundation files use inclusion: always by default so every session picks them up.

            ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#customizing_1","level":3,"title":"Customizing","text":"

            Edit the files directly. Add your own steering files with ctx steering add, preview the match set with ctx steering preview, and run ctx steering sync to push them into each AI tool's config after changes. Steering files are committed to git, so they're shared with the team.

            For the design rationale, the full inclusion/priority model, and the end-to-end sync workflow, see the dedicated Steering files page.

            ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#parsing-rules","level":2,"title":"Parsing Rules","text":"

            All context files follow these conventions:

            1. Headers define structure: # for title, ## for sections, ### for items
            2. Bold keys for fields: **Key**: followed by value
            3. Code blocks are literal: Never parse code block content as structure
            4. Lists are ordered: Items appear in priority/chronological order
            5. Tags are inline: Backtick-wrapped tags like #priority:high
            ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#further-reading","level":2,"title":"Further Reading","text":"
            • Refactoring with Intent: how persistent context prevents drift during refactoring sessions
            ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#token-efficiency","level":2,"title":"Token Efficiency","text":"

            Keep context files concise:

            • Use abbreviations in tags, not prose;
            • Omit obvious words (\"The,\" \"This\");
            • Prefer bullet points over paragraphs;
            • Keep examples minimal but illustrative;
            • Archive old completed items periodically.

            Next Up: Prompting Guide →: effective prompts for AI sessions with ctx

            ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/contributing/","level":1,"title":"Contributing","text":"","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#development-setup","level":2,"title":"Development Setup","text":"","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#prerequisites","level":3,"title":"Prerequisites","text":"
            • Go (version defined in go.mod)
            • Claude Code
            • Git
            • GNU Make
            • Zensical
            ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#1-fork-or-clone-the-repository","level":3,"title":"1. Fork (or Clone) the Repository","text":"
            # Fork on GitHub, then:\ngit clone https://github.com/<you>/ctx.git\ncd ctx\n\n# Or, if you have push access:\ngit clone https://github.com/ActiveMemory/ctx.git\ncd ctx\n
            ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#2-build-and-install-the-binary","level":3,"title":"2. Build and Install the Binary","text":"
            make build\nsudo make install\n

            This compiles the ctx binary and places it in /usr/local/bin/.

            ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#3-install-the-plugin-from-your-local-clone","level":3,"title":"3. Install the Plugin from Your Local Clone","text":"

            The repository ships a Claude Code plugin under internal/assets/claude/. Point Claude Code at your local copy so that skills and hooks reflect your working tree: no reinstall needed after edits:

            1. Launch claude;
            2. Type /plugin and press Enter;
            3. Select Marketplaces → Add Marketplace;
            4. Enter the absolute path to the root of your clone, e.g. ~/WORKSPACE/ctx (this is where .claude-plugin/marketplace.json lives: it points Claude Code to the actual plugin in internal/assets/claude);
            5. Back in /plugin, select Install and choose ctx.

            Claude Code Caches Plugin Files

            Even though the marketplace points at a directory on disk, Claude Code caches skills and hooks. After editing files under internal/assets/claude/, clear the cache and restart:

            make plugin-reload   # then restart Claude Code\n

            See Skill or Hook Changes for details.

            ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#4-verify","level":3,"title":"4. Verify","text":"
            ctx --version       # binary is in PATH\nclaude /plugin list # plugin is installed\n

            You should see the ctx plugin listed, sourced from your local path.

            ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#project-layout","level":2,"title":"Project Layout","text":"
            ctx/\n├── cmd/ctx/            # CLI entry point\n├── internal/\n│   ├── assets/claude/  # ← Claude Code plugin (skills, hooks)\n│   ├── bootstrap/      # Project initialization templates\n│   ├── claude/         # Claude Code integration helpers\n│   ├── cli/            # Command implementations\n│   ├── config/         # Configuration loading\n│   ├── context/        # Core context logic\n│   ├── crypto/         # Scratchpad encryption\n│   ├── drift/          # Drift detection\n│   ├── index/          # Context file indexing\n│   ├── journal/        # Journal site generation\n│   ├── memory/         # Memory bridge (discover, mirror, import, publish)\n│   ├── notify/         # Webhook notifications\n│   ├── rc/             # .ctxrc parsing\n│   ├── session/        # Session history, parsers, and state\n│   ├── sysinfo/        # System resource monitoring\n│   ├── task/           # Task management\n│   └── validation/     # Input validation\n├── .claude/\n│   └── skills/         # Dev-only skills (not distributed)\n├── assets/             # Static assets (banners, logos)\n├── docs/               # Documentation site source\n├── editors/            # Editor extensions (VS Code)\n├── examples/           # Example configurations\n├── hack/               # Build scripts\n├── specs/              # Feature specifications\n└── .context/           # ctx's own context (dogfooding)\n
            ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#skills-two-directories-one-rule","level":3,"title":"Skills: Two Directories, One Rule","text":"Directory What lives here Distributed to users? internal/assets/claude/skills/ The 39 ctx-* skills that ship with the plugin Yes .claude/skills/ Dev-only skills (release, QA, backup, etc.) No

            internal/assets/claude/skills/ is the single source of truth for user-facing skills. If you are adding or modifying a ctx-* skill, edit it there.

            .claude/skills/ holds skills that only make sense inside this repository (release automation, QA checks, backup scripts). These are never distributed to users.

            ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#dev-only-skills-reference","level":4,"title":"Dev-Only Skills Reference","text":"Skill When to use /_ctx-absorb Merge deltas from a parallel worktree or separate checkout /_ctx-audit Detect code-level drift after YOLO sprints or before releases /_ctx-backup Backup context and Claude data to SMB share /_ctx-qa Run QA checks before committing /_ctx-release Run the full release process /_ctx-release-notes Generate release notes for dist/RELEASE_NOTES.md /_ctx-alignment-audit Audit doc claims against agent instructions /_ctx-update-docs Check docs/code consistency after changes /_ctx-command-audit Audit CLI surface after renames, moves, or deletions

            Five skills previously in this list have been promoted to bundled plugin skills and are now available to all ctx users: /ctx-brainstorm, /ctx-link-check, /ctx-permission-sanitize, /ctx-skill-create, /ctx-spec.

            ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#how-to-add-things","level":2,"title":"How to Add Things","text":"","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#adding-a-new-cli-command","level":3,"title":"Adding a New CLI Command","text":"
            1. Create a package under internal/cli/<name>/ with doc.go, cmd.go, and run.go;
            2. Implement Cmd() *cobra.Command as the entry point;
            3. Add Use* and DescKey* constants in internal/config/embed/cmd/<name>.go;
            4. Add command descriptions in internal/assets/commands/commands.yaml;
            5. Add examples in internal/assets/commands/examples.yaml;
            6. Add flag descriptions in internal/assets/commands/flags.yaml;
            7. Register the command in internal/bootstrap/group.go (add import + entry in the appropriate group function);
            8. Create an output package at internal/write/<name>/ for all user-facing output (see Package Taxonomy);
            9. Create error constructors at internal/err/<name>/ for domain-specific errors;
            10. Add tests in the same package (<name>_test.go);
            11. Add a doc page at docs/cli/<name>.md and update docs/cli/index.md;
            12. Add the page to zensical.toml nav.

            Pattern to follow: internal/cli/pad/pad.go (parent with subcommands) or internal/cli/drift/ (single command).

            ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#package-taxonomy","level":3,"title":"Package Taxonomy","text":"

            ctx separates concerns into a strict package taxonomy. Knowing where things go prevents code review friction and keeps the AST lint tests happy.

            ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#output-internalwrite","level":4,"title":"Output: internal/write/","text":"

            Every CLI command's user-facing output lives in its own sub-package under internal/write/<domain>/. Output functions accept *cobra.Command and call cmd.Println(...), never fmt.Print* directly. All text strings are loaded from YAML via desc.Text(text.DescKey*), never inline.

            internal/write/add/add.go       # output for ctx add\ninternal/write/stat/stat.go     # output for ctx usage\ninternal/write/resource/        # output for ctx sysinfo\n

            Exception: write/rc/ writes to os.Stderr because rc loads before cobra is initialized.

            ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#errors-internalerr","level":4,"title":"Errors: internal/err/","text":"

            Domain-specific error constructors live under internal/err/<domain>/. Each package mirrors the write structure. Functions return error (never custom error types) and load messages from YAML via desc.Text(text.DescKey*).

            internal/err/add/add.go         # errors for ctx add\ninternal/err/config/config.go   # errors for configuration\ninternal/err/cli/cli.go         # errors for CLI argument validation\n
            ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#config-constants-internalconfig","level":4,"title":"Config Constants: internal/config/","text":"

            Pure-constant leaf packages with zero internal dependencies (stdlib only). Over 60 sub-packages, organized by domain. See internal/config/README.md for the full decision tree.

            What you're adding Where it goes File names, extensions, paths config/file/, config/dir/ Regex patterns config/regex/ CLI flag names (--flag-name) config/flag/flag.go Flag description YAML keys config/embed/flag/<cmd>.go Command Use/DescKey strings config/embed/cmd/<cmd>.go User-facing text YAML keys config/embed/text/<domain>.go Time durations, thresholds config/<domain>/","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#the-assets-pipeline","level":4,"title":"The Assets Pipeline","text":"

            User-facing text flows through a three-level chain:

            1. Go constant (config/embed/text/) defines a string key: DescKeyWriteAddedTo = \"write.added-to\"
            2. Call site resolves it: desc.Text(text.DescKeyWriteAddedTo)
            3. YAML (internal/assets/commands/text/write.yaml) holds the actual text: write.added-to: { short: \"Added to %s\" }

            The same pattern applies to command descriptions (commands.yaml), flag descriptions (flags.yaml), and examples (examples.yaml). The TestDescKeyYAMLLinkage test verifies every constant resolves to a non-empty YAML value.

            ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#adding-a-new-session-parser","level":3,"title":"Adding a New Session Parser","text":"

            The journal system uses a SessionParser interface. To add support for a new AI tool (e.g. Aider, Cursor):

            1. Create internal/journal/parser/<tool>.go;
            2. Implement parsing logic that returns []*Session;
            3. Register the parser in FindSessions() / FindSessionsForCWD();
            4. Use config.Tool* constants for the tool identifier;
            5. Add test fixtures and parser tests.

            Pattern to follow: the Claude Code JSONL parser in internal/journal/parser/.

            Multilingual Session Headers

            The Markdown parser recognizes session header prefixes configured via session_prefixes in .ctxrc (default: Session:). To support a new language, users add a prefix to their .ctxrc - no code change needed. New parser implementations can use rc.SessionPrefixes() if they also need prefix-based header detection.

            ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#adding-a-bundled-skill","level":3,"title":"Adding a Bundled Skill","text":"
            1. Create internal/assets/claude/skills/<skill-name>/SKILL.md;
            2. Follow the skill format: trigger, negative triggers, steps, quality gate;
            3. Run make plugin-reload and restart Claude Code to test;
            4. Add a Skill entry to .claude-plugin/plugin.json if user-invocable;
            5. Document in docs/reference/skills.md.

            Pattern to follow: any skill in internal/assets/claude/skills/ctx-status/.

            ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#test-expectations","level":3,"title":"Test Expectations","text":"
            • Unit tests: colocated with source (foo.go → foo_test.go);
            • Test helpers: use t.Helper() so failures point to callers;
            • HOME isolation: use t.TempDir() + t.Setenv(\"HOME\", ...) for tests that touch ~/.claude/ or ~/.ctx/;
            • rc.Reset(): call after os.Chdir in tests that change working directory (rc caches on first access);
            • No network: all tests run offline, use fixtures.

            Run make test before submitting. Target: no failures, no skips.

            ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#day-to-day-workflow","level":2,"title":"Day-to-Day Workflow","text":"","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#go-code-changes","level":3,"title":"Go Code Changes","text":"

            After modifying Go source files, rebuild and reinstall:

            make build && sudo make install\n

            The ctx binary is statically compiled. There is no hot reload. You must rebuild for Go changes to take effect.

            ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#skill-or-hook-changes","level":3,"title":"Skill or Hook Changes","text":"

            Edit files under internal/assets/claude/skills/ or internal/assets/claude/hooks/.

            Claude Code caches plugin files, so edits aren't picked up automatically.

            Clear the cache and restart:

            make plugin-reload   # nukes ~/.claude/plugins/cache/activememory-ctx/\n# then restart Claude Code\n

            The plugin will be re-installed from your local marketplace on startup. No version bump is needed during development.

            Version Bumps Are for Releases, Not Iteration

            Only bump VERSION, plugin.json, and marketplace.json when cutting a release. During development, make plugin-reload is all you need.

            ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#configuration-profiles","level":3,"title":"Configuration Profiles","text":"

            The repo ships two .ctxrc source profiles. The working copy (.ctxrc) is gitignored and swapped between them:

            File Purpose .ctxrc.base Golden baseline: all defaults, no logging .ctxrc.dev Dev profile: notify events enabled, verbose logging .ctxrc Working copy (gitignored: copied from one of the above)

            Use ctx commands to switch:

            ctx config switch dev      # switch to dev profile\nctx config switch base     # switch to base profile\nctx config status          # show which profile is active\n

            After cloning, run ctx config switch dev to get started with full logging.

            See Configuration for the full .ctxrc option reference.

            ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#backups","level":3,"title":"Backups","text":"

            Back up project context and global Claude Code data with:

            ctx backup                    # both project + global (default)\nctx backup --scope project    # .context/, .claude/, ideas/ only\nctx backup --scope global     # ~/.claude/ only\n

            Archives are saved to /tmp/. When CTX_BACKUP_SMB_URL is configured, they are also copied to an SMB share. See CLI Reference: backup for details.

            ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#running-tests","level":3,"title":"Running Tests","text":"
            make test   # fast: all tests\nmake audit  # full: fmt + vet + lint + drift + docs + test\nmake smoke  # build + run basic commands end-to-end\n
            ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#running-the-docs-site-locally","level":3,"title":"Running the Docs Site Locally","text":"
            make site-setup  # one-time: install zensical via pipx\nmake site-serve  # serve at localhost\n
            ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#submitting-changes","level":2,"title":"Submitting Changes","text":"","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#before-you-start","level":3,"title":"Before You Start","text":"
            1. Check existing issues to avoid duplicating effort;
            2. For large changes, open an issue first to discuss the approach;
            3. Read the specs in specs/ for design context.
            ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#pull-request-process","level":3,"title":"Pull Request Process","text":"

            Respect the maintainers' time and energy: Keep your pull requests isolated and strive to minimize code changes.

            If your pull request solves more than one distinct issue, it's better to create separate pull requests instead of sending them in one large bundle.

            1. Create a feature branch: git checkout -b feature/my-feature;
            2. Make your changes;
            3. Run make audit to catch issues early;
            4. Commit with a clear message;
            5. Push and open a pull request.

            Audit Your Code Before Submitting

            Run make audit before submitting:

            make audit covers formatting, vetting, linting, drift checks, doc consistency, and tests in one pass.

            ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#commit-messages","level":3,"title":"Commit Messages","text":"

            Following conventional commits is recommended but not required:

            Types: feat, fix, docs, test, refactor, chore

            Examples:

            • feat(cli): add ctx export command
            • fix(drift): handle missing files gracefully
            • docs: update installation instructions
            ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#code-style","level":3,"title":"Code Style","text":"
            • Follow Go conventions (gofmt, go vet);
            • Keep functions focused and small;
            • Add tests for new functionality;
            • Handle errors explicitly; use descriptive names (readErr, writeErr) not repeated err;
            • No magic strings: all repeated literals go in internal/config/;
            • Output goes through internal/write/ packages, not fmt.Print*;
            • Errors go through internal/err/ constructors, not inline fmt.Errorf;
            • See Package Taxonomy and .context/CONVENTIONS.md for the full reference.
            ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#code-of-conduct","level":2,"title":"Code of Conduct","text":"

            A clear context requires respectful collaboration.

            ctx follows the Contributor Covenant.

            ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#boring-legal-stuff","level":2,"title":"Boring Legal Stuff","text":"","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#developer-certificate-of-origin-dco","level":3,"title":"Developer Certificate of Origin (DCO)","text":"

            By contributing, you agree to the Developer Certificate of Origin.

            All commits must be signed off:

            git commit -s -m \"feat: add new feature\"\n
            ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#license","level":3,"title":"License","text":"

            Contributions are licensed under the Apache 2.0 License.

            ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/faq/","level":1,"title":"FAQ","text":"","path":["Home","Introduction","FAQ"],"tags":[]},{"location":"home/faq/#why-markdown","level":2,"title":"Why Markdown?","text":"

            Markdown is human-readable, version-controllable, and tool-agnostic. Every AI model can parse it natively. Every developer can read it in a terminal, a browser, or a code review. There's no schema to learn, no binary format to decode, no vendor lock-in. You can inspect your context with cat, diff it with git diff, and review it in a PR.

            ","path":["Home","Introduction","FAQ"],"tags":[]},{"location":"home/faq/#does-ctx-work-offline","level":2,"title":"Does ctx Work Offline?","text":"

            Yes. ctx is completely local. It reads and writes files on disk, generates context packets from local state, and requires no network access. The only feature that touches the network is the optional webhook notifications hook, which you have to explicitly configure.

            ","path":["Home","Introduction","FAQ"],"tags":[]},{"location":"home/faq/#what-gets-committed-to-git","level":2,"title":"What Gets Committed to Git?","text":"

            The .context/ directory: yes, commit it. That's the whole point. Team members and AI agents read the same context files.

            What not to commit:

            • .ctx.key: your encryption key. Stored at ~/.ctx/.ctx.key, never in the repo. ctx init handles this automatically.
            • journal/ and logs/: generated data, potentially large. ctx init adds these to .gitignore.
            • scratchpad.enc: your choice. It's encrypted, so it's safe to commit if you want shared scratchpad state. See Scratchpad for details.
            ","path":["Home","Introduction","FAQ"],"tags":[]},{"location":"home/faq/#how-big-should-my-token-budget-be","level":2,"title":"How Big Should My Token Budget Be?","text":"

            The default is 8000 tokens, which works well for most projects. Configure it via .ctxrc or the CTX_TOKEN_BUDGET environment variable:

            # In .ctxrc\ntoken_budget = 12000\n\n# Or as an environment variable\nexport CTX_TOKEN_BUDGET=12000\n\n# Or per-invocation\nctx agent --budget 4000\n

            Higher budgets include more context but cost more tokens per request. Lower budgets force sharper prioritization: ctx drops lower-priority content first, so CONSTITUTION and TASKS always make the cut.

            See Configuration for all available settings.

            ","path":["Home","Introduction","FAQ"],"tags":[]},{"location":"home/faq/#why-not-a-database","level":2,"title":"Why Not a Database?","text":"

            Files are inspectable, diffable, and reviewable in pull requests. You can grep them, cat them, pipe them through jq or awk. They work with every version control system and every text editor.

            A database would add a dependency, require migrations, and make context opaque. The design bet is that context should be as visible and portable as the code it describes.

            ","path":["Home","Introduction","FAQ"],"tags":[]},{"location":"home/faq/#does-it-work-with-tools-other-than-claude-code","level":2,"title":"Does It Work with Tools Other than Claude Code?","text":"

            Yes. ctx agent outputs a context packet that any AI tool can consume: paste it into ChatGPT, Cursor, Copilot, Aider, or anything else that accepts text input.

            Claude Code gets first-class integration via the ctx plugin (hooks, skills, automatic context loading). VS Code Copilot Chat has a dedicated ctx extension. Other tools integrate via generated instruction files or manual pasting.

            See Integrations for tool-specific setup, including the multi-tool recipe.

            ","path":["Home","Introduction","FAQ"],"tags":[]},{"location":"home/faq/#can-i-use-ctx-on-an-existing-project","level":2,"title":"Can I Use ctx on an Existing Project?","text":"

            Yes. Run ctx init in any repo and it creates .context/ with template files. Start recording decisions, tasks, and conventions as you work. Context grows naturally; you don't need to backfill everything on day one.

            See Getting Started for the full setup flow, or Joining a ctx Project if someone else already initialized it.

            ","path":["Home","Introduction","FAQ"],"tags":[]},{"location":"home/faq/#what-happens-when-context-files-get-too-big","level":2,"title":"What Happens When Context Files Get Too Big?","text":"

            Token budgeting handles this automatically. ctx agent prioritizes content by file priority (CONSTITUTION first, GLOSSARY last) and trims lower-priority entries when the budget is tight.

            For manual maintenance, ctx compact archives completed tasks and old entries, keeping active context lean. You can also run ctx task archive to move completed tasks out of TASKS.md.

            The goal is to keep context files focused on current state. Historical entries belong in git history or the archive.

            ","path":["Home","Introduction","FAQ"],"tags":[]},{"location":"home/faq/#is-context-meant-to-be-shared","level":2,"title":"Is .context/ Meant to Be Shared?","text":"

            Yes. Commit it to your repo. Every team member and every AI agent reads the same files. That's the mechanism for shared memory: decisions made in one session are visible in the next, regardless of who (or what) starts it.

            The only per-user state is the encryption key (~/.ctx/.ctx.key) and the optional scratchpad. Everything else is team-shared by design.

            Related:

            • Getting Started - installation and first setup
            • Configuration - .ctxrc, environment variables, and defaults
            • Context Files - what each file does and how to use it
            ","path":["Home","Introduction","FAQ"],"tags":[]},{"location":"home/first-session/","level":1,"title":"Your First Session","text":"

            Here's what a complete first session looks like, from initialization to the moment your AI cites your project context back to you.

            ","path":["Home","Get Started","Your First Session"],"tags":[]},{"location":"home/first-session/#step-1-initialize-your-project","level":2,"title":"Step 1: Initialize Your Project","text":"

            Run ctx init in your project root:

            cd your-project\nctx init\n

            Sample output:

            Context initialized in .context/\n\n  ✓ CONSTITUTION.md\n  ✓ TASKS.md\n  ✓ DECISIONS.md\n  ✓ LEARNINGS.md\n  ✓ CONVENTIONS.md\n  ✓ ARCHITECTURE.md\n  ✓ GLOSSARY.md\n  ✓ AGENT_PLAYBOOK.md\n\nSetting up encryption key...\n  ✓ ~/.ctx/.ctx.key\n\nClaude Code plugin (hooks + skills):\n  Install: claude /plugin marketplace add ActiveMemory/ctx\n  Then:    claude /plugin install ctx@activememory-ctx\n\nNext steps:\n  1. Edit .context/TASKS.md to add your current tasks\n  2. Run 'ctx status' to see context summary\n  3. Run 'ctx agent' to get AI-ready context packet\n

            This created your .context/ directory with template files.

            For Claude Code, install the ctx plugin to get automatic hooks and skills.

            ","path":["Home","Get Started","Your First Session"],"tags":[]},{"location":"home/first-session/#step-2-populate-your-context","level":2,"title":"Step 2: Populate Your Context","text":"

            Add a task and a decision: These are the entries your AI will remember:

            ctx add task \"Implement user authentication\" \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n\n# Output: ✓ Added to TASKS.md\n\nctx add decision \"Use PostgreSQL for primary database\" \\\n  --context \"Need a reliable database for production\" \\\n  --rationale \"PostgreSQL offers ACID compliance and JSON support\" \\\n  --consequence \"Team needs PostgreSQL training\" \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n\n# Output: ✓ Added to DECISIONS.md\n

            These entries are what the AI will recall in future sessions. You don't need to populate everything now: Context grows naturally as you work.

            ","path":["Home","Get Started","Your First Session"],"tags":[]},{"location":"home/first-session/#step-3-check-your-context","level":2,"title":"Step 3: Check Your Context","text":"
            ctx status\n

            Sample output:

            Context Status\n====================\n\nContext Directory: .context/\nTotal Files: 8\nToken Estimate: 1,247 tokens\n\nFiles:\n  ✓ CONSTITUTION.md (loaded)\n  ✓ TASKS.md (1 items)\n  ✓ DECISIONS.md (1 items)\n  ○ LEARNINGS.md (empty)\n  ✓ CONVENTIONS.md (loaded)\n  ✓ ARCHITECTURE.md (loaded)\n  ✓ GLOSSARY.md (loaded)\n  ✓ AGENT_PLAYBOOK.md (loaded)\n\nRecent Activity:\n  - TASKS.md modified 2 minutes ago\n  - DECISIONS.md modified 1 minute ago\n

            Notice the token estimate: This is how much context your AI will load.

            The ○ next to LEARNINGS.md means it's still empty; it will fill in as you capture lessons during development.

            ","path":["Home","Get Started","Your First Session"],"tags":[]},{"location":"home/first-session/#step-4-start-an-ai-session","level":2,"title":"Step 4: Start an AI Session","text":"

            With Claude Code (and the ctx plugin), start every session with:

            /ctx-remember\n

            This loads your context and presents a structured readback so you can confirm the agent knows what is going on. Context also loads automatically via hooks, but the explicit ceremony gives you a readback to verify.

            Steering Files Fire Automatically

            If you edited the four foundation files scaffolded by ctx init (.context/steering/product.md, tech.md, structure.md, workflow.md), their inclusion: always rules are prepended to every tool call via the plugin's PreToolUse hook, with no /ctx-remember needed, no MCP call. Edit a file, save, and the next tool call in Claude Code picks it up. See Steering files for details on the inclusion modes.

            Using VS Code?

            With VS Code Copilot Chat (and the ctx extension), type @ctx /agent in chat to load your context packet, or @ctx /status to check your project context. Run ctx setup copilot --write once to generate .github/copilot-instructions.md for automatic context loading.

            If you are not using Claude Code, generate a context packet for your AI tool:

            ctx agent --budget 8000\n

            Sample output:

            # Context Packet\nGenerated: 2026-02-14T15:30:45Z | Budget: 8000 tokens | Used: ~2450\n\n## Read These Files (in order)\n1. .context/CONSTITUTION.md\n2. .context/TASKS.md\n3. .context/CONVENTIONS.md\n...\n\n## Current Tasks\n- [ ] Implement user authentication\n- [ ] Add rate limiting to API endpoints\n\n## Key Conventions\n- Use gofmt for formatting\n- Path construction uses filepath.Join\n\n## Recent Decisions\n## [2026-02-14-120000] Use PostgreSQL for the primary database\n\n**Context**: Evaluated PostgreSQL, MySQL, and SQLite...\n**Rationale**: PostgreSQL offers better JSON support...\n\n## Key Learnings\n## [2026-02-14-100000] Connection pool sizing matters\n\n**Context**: Hit connection limits under load...\n**Lesson**: Default pool size of 10 is too low for concurrent requests...\n\n## Also Noted\n- Use JWT for session management\n- Always validate input at API boundary\n

            Paste this output into your AI tool's system prompt or conversation start.

            ","path":["Home","Get Started","Your First Session"],"tags":[]},{"location":"home/first-session/#step-5-verify-it-works","level":2,"title":"Step 5: Verify It Works","text":"

            Ask your AI: \"What are our current tasks?\"

            A working setup produces a response like:

            Based on the project context, you have one active task:\n\n- **Implement user authentication** (pending)\n\nThere's also a recent architectural decision to **use PostgreSQL for\nthe primary database**, chosen for its ACID compliance and JSON support.\n\nWant me to start on the authentication task?\n

            That's the success moment:

            The AI is citing your exact context entries from Step 2, not hallucinating or asking you to re-explain.

            ","path":["Home","Get Started","Your First Session"],"tags":[]},{"location":"home/first-session/#what-gets-created","level":2,"title":"What Gets Created","text":"
            .context/\n├── CONSTITUTION.md     # Hard rules: NEVER violate these\n├── TASKS.md            # Current and planned work\n├── CONVENTIONS.md      # Project patterns and standards\n├── ARCHITECTURE.md     # System overview\n├── DECISIONS.md        # Architectural decisions with rationale\n├── LEARNINGS.md        # Lessons learned, gotchas, tips\n├── GLOSSARY.md         # Domain terms and abbreviations\n└── AGENT_PLAYBOOK.md   # How AI tools should use this\n

            Claude Code integration (hooks + skills) is provided by the ctx plugin: See Integrations/Claude Code.

            VS Code Copilot Chat integration is provided by the ctx extension: See Integrations/VS Code.

            See Context Files for detailed documentation of each file.

            ","path":["Home","Get Started","Your First Session"],"tags":[]},{"location":"home/first-session/#what-to-gitignore","level":2,"title":"What to .gitignore","text":"

            Rule of Thumb

            • If it's knowledge (decisions, tasks, learnings, conventions), commit it.
            • If it's generated output, raw session data, or a secret, .gitignore it.

            Commit your .context/ knowledge files: that's the whole point.

            You should .gitignore the generated and sensitive paths:

            # Journal data (large, potentially sensitive)\n.context/journal/\n.context/journal-site/\n.context/journal-obsidian/\n\n# Hook logs (machine-specific)\n.context/logs/\n\n# Legacy encryption key path (copy to ~/.ctx/.ctx.key if needed)\n.context/.ctx.key\n\n# Claude Code local settings (machine-specific)\n.claude/settings.local.json\n

            ctx init Patches Your .Gitignore for You

            ctx init automatically adds these entries to your .gitignore.

            Review the additions with cat .gitignore after init.

            See also:

            • Security Considerations
            • Scratchpad Encryption
            • Session Journal

            Next Up: Common Workflows →: day-to-day commands for tracking context, checking health, and browsing history.

            ","path":["Home","Get Started","Your First Session"],"tags":[]},{"location":"home/getting-started/","level":1,"title":"Getting Started","text":"","path":["Home","Get Started","Getting Started"],"tags":[]},{"location":"home/getting-started/#prerequisites","level":2,"title":"Prerequisites","text":"

            ctx does not require git, but using version control with your .context/ directory is strongly recommended:

            AI sessions occasionally modify or overwrite context files inadvertently. With git, the AI can check history and restore lost content: Without it, the data is gone.

            Several ctx features (journal changelog, blog generation) also use git history directly.

            ","path":["Home","Get Started","Getting Started"],"tags":[]},{"location":"home/getting-started/#installation","level":2,"title":"Installation","text":"

            Every setup starts with the ctx binary: the CLI tool itself.

            If you use Claude Code, you also install the ctx plugin, which adds hooks (context autoloading, persistence nudges) and 25+ /ctx-* skills. For other AI tools, ctx integrates via generated instruction files or manual context pasting: see Integrations for tool-specific setup.

            Pick one of the options below to install the binary. Claude Code users should also follow the plugin steps included in each option.

            ","path":["Home","Get Started","Getting Started"],"tags":[]},{"location":"home/getting-started/#option-1-build-from-source-recommended","level":3,"title":"Option 1: Build from Source (Recommended)","text":"

            Requires Go (version defined in go.mod) and Claude Code.

            git clone https://github.com/ActiveMemory/ctx.git\ncd ctx\nmake build\nsudo make install\n

            Install the Claude Code plugin from your local clone:

            1. Launch claude;
            2. Type /plugin and press Enter;
            3. Select Marketplaces → Add Marketplace
            4. Enter the path to the root of your clone, e.g. ~/WORKSPACE/ctx (this is where .claude-plugin/marketplace.json lives: It points Claude Code to the actual plugin in internal/assets/claude)
            5. Back in /plugin, select Install and choose ctx

            This points Claude Code at the plugin source on disk. Changes you make to hooks or skills take effect immediately: No reinstall is needed.

            Local Installs Need Manual Enablement

            Unlike marketplace installs, local plugin installs are not auto-enabled globally. The plugin will only work in projects that explicitly enable it. Run ctx init in each project (it auto-enables the plugin), or add the entry to ~/.claude/settings.json manually:

            { \"enabledPlugins\": { \"ctx@activememory-ctx\": true } }\n

            Verify:

            ctx --version       # binary is in PATH\nclaude /plugin list # plugin is installed\n

            Use the Source, Luke

            Building from source gives you the latest features and bug fixes.

            Since ctx is predominantly a developer tool, this is the recommended approach:

            You get the freshest code, can inspect what you are installing, and the plugin stays in sync with the binary.

            ","path":["Home","Get Started","Getting Started"],"tags":[]},{"location":"home/getting-started/#option-2-binary-download-marketplace","level":3,"title":"Option 2: Binary Download + Marketplace","text":"

            Pre-built binaries are available from the releases page.

            Linux (x86_64)Linux (ARM64)macOS (Apple Silicon)macOS (Intel)Windows
            curl -LO https://github.com/ActiveMemory/ctx/releases/download/v0.8.1/ctx-0.8.1-linux-amd64\nchmod +x ctx-0.8.1-linux-amd64\nsudo mv ctx-0.8.1-linux-amd64 /usr/local/bin/ctx\n
            curl -LO https://github.com/ActiveMemory/ctx/releases/download/v0.8.1/ctx-0.8.1-linux-arm64\nchmod +x ctx-0.8.1-linux-arm64\nsudo mv ctx-0.8.1-linux-arm64 /usr/local/bin/ctx\n
            curl -LO https://github.com/ActiveMemory/ctx/releases/download/v0.8.1/ctx-0.8.1-darwin-arm64\nchmod +x ctx-0.8.1-darwin-arm64\nsudo mv ctx-0.8.1-darwin-arm64 /usr/local/bin/ctx\n
            curl -LO https://github.com/ActiveMemory/ctx/releases/download/v0.8.1/ctx-0.8.1-darwin-amd64\nchmod +x ctx-0.8.1-darwin-amd64\nsudo mv ctx-0.8.1-darwin-amd64 /usr/local/bin/ctx\n

            Download ctx-0.8.1-windows-amd64.exe from the releases page and add it to your PATH.

            Claude Code users: install the plugin from the marketplace:

            1. Launch claude;
            2. Type /plugin and press Enter;
            3. Select Marketplaces → Add Marketplace;
            4. Enter ActiveMemory/ctx;
            5. Back in /plugin, select Install and choose ctx.

            Other tool users: see Integrations for tool-specific setup (Cursor, Copilot, Aider, Windsurf, etc.).

            Verify the Plugin Is Enabled

            After installing, confirm the plugin is enabled globally. Check ~/.claude/settings.json for an enabledPlugins entry. If missing, run ctx init in your project (it auto-enables the plugin), or add it manually:

            { \"enabledPlugins\": { \"ctx@activememory-ctx\": true } }\n

            Verify:

            ctx --version       # binary is in PATH\nclaude /plugin list # plugin is installed (Claude Code only)\n
            ","path":["Home","Get Started","Getting Started"],"tags":[]},{"location":"home/getting-started/#verifying-checksums","level":4,"title":"Verifying Checksums","text":"

            Each binary has a corresponding .sha256 checksum file. To verify your download:

            # Download the checksum file\ncurl -LO https://github.com/ActiveMemory/ctx/releases/download/v0.8.1/ctx-0.8.1-linux-amd64.sha256\n\n# Verify the binary\nsha256sum -c ctx-0.8.1-linux-amd64.sha256\n

            On macOS, use shasum -a 256 -c instead of sha256sum -c.

            Plugin Details

            After installation (either option) you get:

            • Context autoloading: ctx agent runs on every tool use (with cooldown)
            • Persistence nudges: reminders to capture learnings and decisions
            • Post-commit hooks: nudge context capture after git commit
            • Context size monitoring: alerts as sessions grow large
            • Project skills: /ctx-status, /ctx-task-add, /ctx-history, and more

            See Integrations for the full hook and skill reference.

            ","path":["Home","Get Started","Getting Started"],"tags":[]},{"location":"home/getting-started/#quick-start","level":2,"title":"Quick Start","text":"","path":["Home","Get Started","Getting Started"],"tags":[]},{"location":"home/getting-started/#1-initialize-context","level":3,"title":"1. Initialize Context","text":"
            cd your-project\nctx init\n

            This creates a .context/ directory with template files and an encryption key at ~/.ctx/ for the encrypted scratchpad. For Claude Code, install the ctx plugin for automatic hooks and skills.

            ctx init also scaffolds four foundation steering files in .context/steering/; these are behavioral-rule templates that tell your AI how to act on your project:

            File What it captures product.md Product context, goals, and target users tech.md Technology stack, constraints, key dependencies structure.md Project structure and directory conventions workflow.md Development workflow and process rules

            Each file starts with a self-documenting HTML comment explaining the three inclusion modes (always / auto / manual), priority, and tool scoping. The defaults are set to inclusion: always and priority: 10, so they fire on every AI tool call until you edit them.

            You should open each of these files and replace the placeholder content with your project's actual rules. Running ctx init again won't clobber your edits; existing files are left alone. To opt out entirely, use ctx init --no-steering-init.

            See Writing Steering Files for the full walkthrough, or ctx steering for the command reference.

            ","path":["Home","Get Started","Getting Started"],"tags":[]},{"location":"home/getting-started/#2-check-status","level":3,"title":"2. Check Status","text":"
            ctx status\n

            Shows context summary: files present, token estimate, and recent activity.

            ","path":["Home","Get Started","Getting Started"],"tags":[]},{"location":"home/getting-started/#3-start-using-with-ai","level":3,"title":"3. Start Using with AI","text":"

            With Claude Code (and the ctx plugin installed), context loads automatically via hooks.

            With VS Code Copilot Chat, install the ctx extension and use @ctx /status, @ctx /agent, and other slash commands directly in chat. Run ctx setup copilot --write to generate .github/copilot-instructions.md for automatic context loading.

            For other tools, paste the output of:

            ctx agent --budget 8000\n
            ","path":["Home","Get Started","Getting Started"],"tags":[]},{"location":"home/getting-started/#3b-set-up-for-your-ai-tool","level":3,"title":"3B. Set Up for Your AI Tool","text":"

            If you use an MCP-compatible tool, generate the integration config with ctx setup:

            KiroCursorCline
            ctx setup kiro --write\n# Creates .kiro/settings/mcp.json and syncs steering files\n
            ctx setup cursor --write\n# Creates .cursor/mcp.json and syncs steering files\n
            ctx setup cline --write\n# Creates .vscode/mcp.json and syncs steering files\n

            This registers the ctx MCP server and syncs any steering files into the tool's native format. Re-run after adding or changing steering files.

            ","path":["Home","Get Started","Getting Started"],"tags":[]},{"location":"home/getting-started/#4-verify-it-works","level":3,"title":"4. Verify It Works","text":"

            Ask your AI: \"Do you remember?\"

            It should cite specific context: current tasks, recent decisions, or previous session topics.

            ","path":["Home","Get Started","Getting Started"],"tags":[]},{"location":"home/getting-started/#5-set-up-companion-tools-highly-recommended","level":3,"title":"5. Set Up Companion Tools (Highly Recommended)","text":"

            ctx works on its own, but two companion MCP servers unlock significantly better agent behavior. The investment is small and the benefits compound over sessions:

            • Gemini Search: grounded web search with citations. Skills like /ctx-code-review and /ctx-explain use it for up-to-date documentation lookups instead of relying on training data.
            • GitNexus: code knowledge graph with symbol resolution, blast radius analysis, and domain clustering. Skills like /ctx-refactor and /ctx-code-review use it for impact analysis and dependency awareness.

            # Index your project for GitNexus (run once, then after major changes)\nnpx gitnexus analyze\n

            Both are optional MCP servers: if they are not connected, skills degrade gracefully to built-in capabilities. See Companion Tools for setup details and verification.

            Next Up:

            • Your First Session →: a step-by-step walkthrough from ctx init to verified recall
            • Common Workflows →: day-to-day commands for tracking context, checking health, and browsing history
            ","path":["Home","Get Started","Getting Started"],"tags":[]},{"location":"home/hub/","level":1,"title":"Hub","text":"","path":["Home","Concepts","Hub"],"tags":[]},{"location":"home/hub/#sharing-is-caring","level":2,"title":"Sharing Is Caring","text":"

            ctx projects are normally independent: each project has its own .context/ directory, its own decisions, its own learnings, its own journal. That's the right default, since most work is project-local, and mixing context across projects tends to dilute more than it helps.

            But sometimes a decision or a learning should cross project boundaries. A convention you codified in one project deserves to be visible in another. A gotcha you discovered debugging service A is the same gotcha waiting for you in service B. The ctx Hub is the feature that makes those specific entries travel, without replicating everything else.

            ","path":["Home","Concepts","Hub"],"tags":[]},{"location":"home/hub/#what-the-hub-actually-is","level":2,"title":"What the Hub Actually Is","text":"

            In one paragraph: the ctx Hub is a fan-out channel for four specific kinds of structured entries: decision, learning, convention, and task. You publish an entry with ctx add --share in one project, and it appears in .context/hub/ for every other project subscribed to that type. When you run ctx agent --include-hub, those shared entries become part of your next agent context packet.

            That is the entire feature. The Hub does not:

            • Share your session journal (.context/journal/). That stays local to each project.
            • Share your scratchpad (.context/pad). Encrypted notes never leave the machine that created them.
            • Share your TASKS.md, DECISIONS.md, LEARNINGS.md, or CONVENTIONS.md wholesale. Only entries you explicitly --share cross the boundary.
            • Provide user identity or attribution. The Hub identifies projects, not people.

            If you want \"my agent in project B sees everything my agent did in project A,\" that's not the Hub. Local session density stays local.

            ","path":["Home","Concepts","Hub"],"tags":[]},{"location":"home/hub/#who-its-for","level":2,"title":"Who It's For","text":"

            Two shapes, same mechanics, different trust models.

            ","path":["Home","Concepts","Hub"],"tags":[]},{"location":"home/hub/#personal-cross-project-brain","level":3,"title":"Personal Cross-Project Brain","text":"

            One developer, many projects. You want a learning from project A to show up when you open project B a week later. You want a convention you codified in your dotfiles project to be visible everywhere else on your workstation. Run a Hub on localhost, register each project, done.

            ","path":["Home","Concepts","Hub"],"tags":[]},{"location":"home/hub/#small-trusted-team","level":3,"title":"Small Trusted Team","text":"

            A few teammates on a LAN or a hub.ctx-like self-hosted server. You want team conventions to propagate without a wiki. You want lessons from one on-call engineer's 3 AM incident to reach everyone else's agent on the next session. Same mechanics as the personal case, plus TLS in front and a short security runbook.

            The Hub is not a multi-tenant public service. It assumes everyone holding a client token is friendly. Don't stand up hub.example.com for untrusted participants.

            ","path":["Home","Concepts","Hub"],"tags":[]},{"location":"home/hub/#going-further","level":2,"title":"Going Further","text":"
            • First-time setup: Hub: Getting Started, a five-minute walkthrough on localhost.
            • Mental model and user stories: Hub Overview, what flows, what doesn't, and when not to use it.
            • Team / LAN deployment: Multi-machine setup.
            • Redundancy: HA cluster.
            • Operating a Hub: Hub Operations and Hub Failure Modes.
            • Security posture: Hub Security Model.
            • Command reference: ctx serve, ctx connect, ctx hub.
            ","path":["Home","Concepts","Hub"],"tags":[]},{"location":"home/is-ctx-right/","level":1,"title":"Is It Right for Me?","text":"","path":["Home","Introduction","Is It Right for Me?"],"tags":[]},{"location":"home/is-ctx-right/#good-fit","level":2,"title":"Good Fit","text":"

            ctx shines when context matters more than code.

            If any of these sound like your project, it's worth trying:

            • Multi-session AI work: You use AI across many sessions on the same codebase, and re-explaining is slowing you down.
            • Architectural decisions that matter: Your project has non-obvious choices (database, auth strategy, API design) that the AI keeps second-guessing.
            • \"Why\" matters as much as \"what\": you need the AI to understand rationale, not just current code
            • Team handoffs: Multiple people (or multiple AI tools) work on the same project and need shared context.
            • AI-assisted development across tools: You switch between Claude Code, Cursor, Copilot, or other tools and want context to follow the project, not the tool.
            • Long-lived projects: Anything you'll work on for weeks or months, where accumulated knowledge has compounding value.
            ","path":["Home","Introduction","Is It Right for Me?"],"tags":[]},{"location":"home/is-ctx-right/#may-not-be-the-right-fit","level":2,"title":"May Not Be the Right Fit","text":"

            ctx adds overhead that isn't worth it for every project. Be honest about when to skip it:

            • One-off scripts: If the project is a single file you'll finish today, there's nothing to remember.
            • RAG-only workflows: If retrieval from an external knowledge base already gives the agent everything it needs for each session, adding ctx may be unnecessary. RAG retrieves information; ctx defines the project's working memory: They are complementary.
            • No AI involvement: ctx is designed for human-AI workflows; without an AI consumer, the files are just documentation.
            • Enterprise-managed context platforms: If your organization provides centralized context services, ctx may duplicate that layer.

            For a deeper technical comparison with RAG, prompt management tools, and agent frameworks, see ctx and Similar Tools.

            ","path":["Home","Introduction","Is It Right for Me?"],"tags":[]},{"location":"home/is-ctx-right/#project-size-guide","level":2,"title":"Project Size Guide","text":"","path":["Home","Introduction","Is It Right for Me?"],"tags":[]},{"location":"home/is-ctx-right/#solo-developer-single-repo","level":3,"title":"Solo Developer, Single Repo","text":"

            This is ctx's sweet spot.

            You get the most value here: one person, one project, decisions, and learnings accumulating over time. Setup takes 5 minutes, the .context/ directory stays small, and every session gets faster.

            ","path":["Home","Introduction","Is It Right for Me?"],"tags":[]},{"location":"home/is-ctx-right/#small-team-one-or-two-repos","level":3,"title":"Small Team, One or Two Repos","text":"

            Works well.

            Context files commit to git, so the whole team shares the same decisions and conventions. Each person's AI starts with the team's decisions already loaded. Merge conflicts on .context/ files are rare and easy to resolve (they are just Markdown).

            ","path":["Home","Introduction","Is It Right for Me?"],"tags":[]},{"location":"home/is-ctx-right/#multiple-repos-or-larger-teams","level":3,"title":"Multiple Repos or Larger Teams","text":"

            ctx operates per repository.

            Each repo has its own .context/ directory with its own decisions, tasks, and learnings. This matches the way code, ownership, and history already work in git.

            There is no built-in cross-repo context layer.

            For organizations that need centralized, organization-wide knowledge, ctx complements a platform solution by providing durable, project-local working memory for AI sessions.

            ","path":["Home","Introduction","Is It Right for Me?"],"tags":[]},{"location":"home/is-ctx-right/#5-minute-trial","level":2,"title":"5-Minute Trial","text":"

            Zero commitment. Try it, and delete .context/ if it's not for you.

            Using Claude Code?

            Install the ctx plugin from the Marketplace for Claude-native hooks, skills, and automatic context loading:

            1. Type /plugin and press Enter
            2. Select Marketplaces → Add Marketplace
            3. Enter ActiveMemory/ctx
            4. Back in /plugin, select Install and choose ctx

            You'll still need the ctx binary for the CLI: See Getting Started for install options.

            # 1. Initialize\ncd your-project\nctx init\n\n# 2. Add one real decision from your project\nctx add decision \"Your actual architectural choice\" \\\n  --context \"What prompted this decision\" \\\n  --rationale \"Why you chose this approach\" \\\n  --consequence \"What changes as a result\" \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n\n# 3. Check what the AI will see\nctx status\n\n# 4. Start an AI session and ask: \"Do you remember?\"\n

            If the AI cites your decision back to you, it's working.

            Want to remove it later? One command:

            rm -rf .context/\n

            No dependencies to uninstall. No configuration to revert. Just files.

            Ready to try it out?

            • Join the Community→: Open Source is better together.
            • Getting Started →: Full installation and setup.
            • ctx and Similar Tools →: Detailed comparison with other approaches.
            ","path":["Home","Introduction","Is It Right for Me?"],"tags":[]},{"location":"home/joining-a-project/","level":1,"title":"Joining a Project","text":"

            You've joined a team or inherited a project, and there's a .context/ directory in the repo. Good news: someone already set up persistent context. This page gets you oriented fast.

            ","path":["Home","Working with AI","Joining a Project"],"tags":[]},{"location":"home/joining-a-project/#what-to-read-first","level":2,"title":"What to Read First","text":"

            The files in .context/ have a deliberate priority order. Read them top-down:

            1. CONSTITUTION.md: Hard rules. Read this before you touch anything. These are inviolable constraints the team has agreed on.
            2. TASKS.md: Current and planned work. Shows what's in progress, what's pending, and what's blocked.
            3. CONVENTIONS.md: How the team writes code. Naming patterns, file organization, preferred idioms.
            4. ARCHITECTURE.md: System overview. Components, boundaries, data flow.
            5. DECISIONS.md: Why things are the way they are. Saves you from re-proposing something the team already evaluated and rejected.
            6. LEARNINGS.md: Gotchas, tips, and hard-won lessons. The stuff that doesn't fit anywhere else but will save you hours.

            See Context Files for detailed documentation of each file's structure and purpose.

            ","path":["Home","Working with AI","Joining a Project"],"tags":[]},{"location":"home/joining-a-project/#checking-context-health","level":2,"title":"Checking Context Health","text":"

            Before you start working, check whether the context is current:

            ctx status\n

            This shows file counts, token estimates, and recent activity. If files haven't been touched in weeks, the context may be stale.

            ctx drift\n

            This compares context files against recent code changes and flags potential drift: decisions that no longer match the codebase, conventions that have shifted, or tasks that look outdated.

            If things are stale, mention it to the team. Don't silently fix it yourself on day one.

            ","path":["Home","Working with AI","Joining a Project"],"tags":[]},{"location":"home/joining-a-project/#starting-your-first-session","level":2,"title":"Starting Your First Session","text":"

            Generate a context packet to prime your AI:

            ctx agent --budget 8000\n

            This outputs a token-budgeted summary of the project context, ordered by priority. With Claude Code and the ctx plugin, context loads automatically via hooks. You can also use the /ctx-remember skill to get a structured readback of what the AI knows.

            The readback is your verification step: if the AI can cite specific tasks and decisions, the context is working.

            ","path":["Home","Working with AI","Joining a Project"],"tags":[]},{"location":"home/joining-a-project/#adding-context","level":2,"title":"Adding Context","text":"

            As you work, you'll discover things worth recording. Use the CLI:

            # Record a decision you made or learned about\nctx add decision \"Use connection pooling for DB access\" \\\n  --rationale \"Reduces connection overhead under load\" \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n\n# Capture a gotcha you hit\nctx add learning \"Redis timeout defaults to 5s\" \\\n  --context \"Hit timeouts during bulk operations\" \\\n  --application \"Set explicit timeout for batch jobs\" \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n\n# Add a convention you noticed the team follows\nctx add convention \"All API handlers return structured errors\"\n

            You can also just tell the AI: \"Record this as a learning\" or \"Add this decision to context.\" With the ctx plugin, context-update commands handle the file writes.

            See the Knowledge Capture recipe for the full workflow.

            ","path":["Home","Working with AI","Joining a Project"],"tags":[]},{"location":"home/joining-a-project/#session-etiquette","level":2,"title":"Session Etiquette","text":"

            A few norms for working in a ctx-managed project:

            • Respect existing conventions. If CONVENTIONS.md says \"use filepath.Join,\" use filepath.Join. If you disagree, propose a change, don't silently diverge.
            • Don't restructure context files without asking. The file layout and section structure are shared state. Reorganizing them affects every team member and every AI session.
            • Mark tasks done when complete. Check the box ([x]) in place. Don't move tasks between sections or delete them.
            • Add context as you go. Decisions, learnings, and conventions you discover are valuable to the next person (or the next session).
            ","path":["Home","Working with AI","Joining a Project"],"tags":[]},{"location":"home/joining-a-project/#common-pitfalls","level":2,"title":"Common Pitfalls","text":"

            Ignoring CONSTITUTION.md. The constitution exists for a reason. If a task conflicts with a constitution rule, the task is wrong. Raise it with the team instead of working around the constraint.

            Deleting tasks. Never delete a task from TASKS.md. Mark it [x] (done) or [-] (skipped with a reason). The history matters for session replay and audit.

            Bypassing hooks. If the project uses ctx hooks (pre-commit nudges, context autoloading), don't disable them. They exist to keep context fresh. If a hook is noisy or broken, fix it or file a task.

            Over-contributing on day one. Read first, then contribute. Adding a dozen learnings before you understand the project's norms creates noise, not signal.

            Related:

            • Getting Started: installation and setup from scratch
            • Context Files: detailed file reference
            • Knowledge Capture: recording decisions, learnings, and conventions
            • Session Lifecycle: how a typical AI session flows with ctx
            ","path":["Home","Working with AI","Joining a Project"],"tags":[]},{"location":"home/keeping-ai-honest/","level":1,"title":"Keeping AI Honest","text":"","path":["Home","Working with AI","Keeping AI Honest"],"tags":[]},{"location":"home/keeping-ai-honest/#the-problem","level":2,"title":"The Problem","text":"

            AI agents confabulate. They invent history that never happened, claim familiarity with decisions that were never made, and sometimes declare a task complete when it is not. This is not malice - it is the default behavior of a system optimizing for plausible-sounding responses.

            When your AI says \"we decided to use Redis for caching last week,\" can you verify that? When it says \"the auth module is complete,\" can you confirm it? Without grounded, persistent context, the answer is no. You are trusting vibes.

            ctx replaces vibes with verifiable artifacts.

            ","path":["Home","Working with AI","Keeping AI Honest"],"tags":[]},{"location":"home/keeping-ai-honest/#grounded-memory","level":2,"title":"Grounded Memory","text":"

            Every entry in ctx context files has a timestamp and structured fields. When the AI cites a decision, you can check it.

            ## [2026-01-28-143022] Use Event Sourcing for Audit Trail\n\n**Status**: Accepted\n\n**Context**: Compliance requires full mutation history.\n\n**Decision**: Event sourcing for the audit subsystem only.\n\n**Rationale**: Append-only log meets compliance requirements\nwithout imposing event sourcing on the entire domain model.\n

            The timestamp 2026-01-28-143022 is not decoration. It is a verifiable anchor. If the AI references this decision, you can open DECISIONS.md, find the entry, and confirm it says what the AI claims. If the entry does not exist, the AI is hallucinating - and you know immediately.

            This is grounded memory: claims that trace back to artifacts you control and can audit.

            ","path":["Home","Working with AI","Keeping AI Honest"],"tags":[]},{"location":"home/keeping-ai-honest/#constitutionmd-hard-guardrails","level":2,"title":"CONSTITUTION.md: Hard Guardrails","text":"

            CONSTITUTION.md defines rules the AI must treat as inviolable. These are not suggestions or best practices - they are constraints that override task requirements.

            # Constitution\n\nThese rules are INVIOLABLE. If a task requires violating these,\nthe task is wrong.\n\n* [ ] Never commit secrets, tokens, API keys, or credentials\n* [ ] All public API changes require a decision record\n* [ ] Never delete context files without explicit user approval\n

            The AI reads these at session start, before anything else. A well-integrated agent will refuse a task that conflicts with a constitutional rule, citing the specific rule it would violate.

            ","path":["Home","Working with AI","Keeping AI Honest"],"tags":[]},{"location":"home/keeping-ai-honest/#the-agent-playbooks-anti-hallucination-rules","level":2,"title":"The Agent Playbook's Anti-Hallucination Rules","text":"

            The AGENT_PLAYBOOK.md file includes a section called \"How to Avoid Hallucinating Memory\" with five explicit rules:

            1. Never assume. If it is not in the context files, you do not know it.
            2. Never invent history. Do not claim \"we discussed\" something without a file reference.
            3. Verify before referencing. Search files before citing them.
            4. When uncertain, say so. \"I don't see a decision on this\" is always better than a fabricated one.
            5. Trust files over intuition. If the files say PostgreSQL but your training data suggests MySQL, the files win.

            These rules create a behavioral contract. The AI is not left to guess how confident it should be - it has explicit instructions to ground every claim in the context directory.

            ","path":["Home","Working with AI","Keeping AI Honest"],"tags":[]},{"location":"home/keeping-ai-honest/#drift-detection","level":2,"title":"Drift Detection","text":"

            Context files can go stale. You rename a package, delete a module, or finish a sprint, and suddenly ARCHITECTURE.md references paths that no longer exist. Stale context is almost as dangerous as no context: the AI treats outdated information as current truth.

            ctx drift detects this divergence:

            ctx drift\n

            It scans context files for references to files, paths, and symbols that no longer exist in the codebase. Stale references get flagged so you can update or remove them before they mislead the next session.

            Regular drift checks - weekly, or after major refactors - keep your context files honest the same way tests keep your code honest.

            ","path":["Home","Working with AI","Keeping AI Honest"],"tags":[]},{"location":"home/keeping-ai-honest/#the-verification-loop","level":2,"title":"The Verification Loop","text":"

            The /ctx-commit skill includes a built-in verification step: before staging, it maps claims to evidence and runs self-audit questions to surface gaps. This catches inconsistencies at the point where they matter most: right before code is committed.

            This closes the loop. You write context. The AI reads context. The verification step confirms that context still matches reality. When it does not, you fix it - and the next session starts from truth, not from drift.

            ","path":["Home","Working with AI","Keeping AI Honest"],"tags":[]},{"location":"home/keeping-ai-honest/#trust-through-structure","level":2,"title":"Trust through Structure","text":"

            The common thread across all of these mechanisms is structure over prose. Timestamps make claims verifiable. Constitutional rules make boundaries explicit. Drift detection makes staleness visible. The playbook makes behavioral expectations concrete.

            You do not need to trust the AI. You need to trust the system -- and verify when it matters.

            ","path":["Home","Working with AI","Keeping AI Honest"],"tags":[]},{"location":"home/keeping-ai-honest/#further-reading","level":2,"title":"Further Reading","text":"
            • Detecting and Fixing Drift: the full workflow for keeping context files accurate
            • Invariants: the properties that must hold for any valid ctx implementation
            • Agent Security: threat model and mitigations for AI agents operating with persistent context
            ","path":["Home","Working with AI","Keeping AI Honest"],"tags":[]},{"location":"home/prompting-guide/","level":1,"title":"Prompting Guide","text":"

            New to ctx?

            This guide references context files like TASKS.md, DECISIONS.md, and LEARNINGS.md:

            These are plain Markdown files that ctx maintains in your project's .context/ directory.

            If terms like \"context packet\" or \"session ceremony\" are unfamiliar,

            • start with the ctx Manifesto for the why,
            • About for the big picture,
            • then Getting Started to set up your first project.
            ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#literature-matters","level":2,"title":"Literature Matters","text":"

            This guide is about crafting effective prompts for working with AI assistants in ctx-enabled projects, but the guidelines given here apply to other AI systems, too.

            The right prompt triggers the right behavior.

            This guide documents prompts that reliably produce good results.

            ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#tldr","level":2,"title":"TL;DR","text":"Goal Prompt Load context \"Do you remember?\" Resume work \"What's the current state?\" What's next /ctx-next Debug \"Why doesn't X work?\" Validate \"Is this consistent with our decisions?\" Impact analysis \"What would break if we...\" Reflect /ctx-reflect Wrap up /ctx-wrap-up Persist \"Add this as a learning\" Explore \"How does X work in this codebase?\" Sanity check \"Is this the right approach?\" Completeness \"What am I missing?\" One more thing \"What's the single smartest addition?\" Set tone \"Push back if my assumptions are wrong.\" Constrain scope \"Only change files in X. Nothing else.\" Course correct \"Stop. That's not what I meant.\" Check health \"Run ctx drift\" Commit /ctx-commit","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#session-start","level":2,"title":"Session Start","text":"","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#do-you-remember","level":3,"title":"\"do you remember?\"","text":"

            Triggers the AI to silently read TASKS.md, DECISIONS.md, LEARNINGS.md, and check recent history via ctx journal before responding with a structured readback:

            1. Last session: most recent session topic and date
            2. Active work: pending or in-progress tasks
            3. Recent context: 1-2 recent decisions or learnings
            4. Next step: offer to continue or ask what to focus on

            Use this at the start of every important session.

            Do you remember what we were working on?\n

            This question implies prior context exists. The AI checks files rather than admitting ignorance. The expected response cites specific context (session names, task counts, decisions), not vague summaries.

            If the AI instead narrates its discovery process (\"Let me check if there are files...\"), it has not loaded CLAUDE.md or AGENT_PLAYBOOK.md properly.

            For a detailed case study on making agents actually follow this protocol (including the failure modes, the timing problem, and the hook design that solved it) see The Dog Ate My Homework.

            ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#whats-the-current-state","level":3,"title":"\"What's the Current State?\"","text":"

            Prompts reading of TASKS.md, recent sessions, and status overview.

            Use this when resuming work after a break.

            Variants:

            • \"Where did we leave off?\"
            • \"What's in progress?\"
            • \"Show me the open tasks.\"
            ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#during-work","level":2,"title":"During Work","text":"","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#why-doesnt-x-work","level":3,"title":"\"Why Doesn't X Work?\"","text":"

            This triggers root cause analysis rather than surface-level fixes.

            Use this when something fails unexpectedly.

            Framing as \"why\" encourages investigation before action. The AI will trace through code, check configurations, and identify the actual cause.

            Real Example

            \"Why can't I run /ctx-reflect?\" led to discovering missing permissions in settings.local.json bootstrapping.

            This was a fix that benefited all users of ctx.

            ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#is-this-consistent-with-our-decisions","level":3,"title":"\"Is This Consistent with Our Decisions?\"","text":"

            This prompts checking DECISIONS.md before implementing.

            Use this before making architectural choices.

            Variants:

            • \"Check if we've decided on this before\"
            • \"Does this align with our conventions?\"
            ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#what-would-break-if-we","level":3,"title":"\"What Would Break If We...\"","text":"

            This triggers defensive thinking and impact analysis.

            Use this before making significant changes.

            What would break if we change the Settings struct?\n
            ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#before-you-start-read-x","level":3,"title":"\"Before You Start, Read X\"","text":"

            This ensures specific context is loaded before work begins.

            Use this when you know the relevant context exists in a specific file.

            Before you start, check ctx journal source for the auth discussion session\n
            ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#scope-control","level":3,"title":"Scope Control","text":"

            Constrain the AI to prevent sprawl. These are some of the most useful prompts in day-to-day work.

            Only change files in internal/cli/add/. Nothing else.\n
            No new files. Modify the existing implementation.\n
            Keep the public API unchanged. Internal refactor only.\n

            Use these when the AI tends to \"helpfully\" modify adjacent code, add documentation you didn't ask for, or create new abstractions.

            ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#course-correction","level":3,"title":"Course Correction","text":"

            Steer the AI when it goes off-track: Don't wait for it to finish a wrong approach.

            Stop! That's not what I meant. Let me clarify.\n
            Let's step back. Explain what you're about to do before changing anything.\n
            Undo that last change and try a different approach.\n

            These work because they interrupt momentum.

            Without explicit course correction, the AI tends to commit harder to a wrong path rather than reconsidering.

            ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#failure-modes","level":3,"title":"Failure Modes","text":"

            When the AI misbehaves, match the symptom to the recovery prompt:

            Symptom Recovery prompt Hand-waves (\"should work now\") \"Show evidence: file/line refs, command output, or test name.\" Creates unnecessary files \"No new files. Modify the existing implementation.\" Expands scope unprompted \"Stop after the smallest working change. Ask before expanding scope.\" Narrates instead of acting \"Skip the explanation. Make the change and show the diff.\" Repeats a failed approach \"That didn't work last time. Try a different approach.\" Claims completion without proof \"Run the test. Show me the output.\"

            These are recovery handles, not rules to paste into CLAUDE.md.

            Use them in the moment when you see the behavior.

            ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#reflection-and-persistence","level":2,"title":"Reflection and Persistence","text":"","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#what-did-we-learn","level":3,"title":"\"What Did We Learn?\"","text":"

            This prompts reflection on the session and often triggers adding learnings to LEARNINGS.md.

            Use this after completing a task or debugging session.

            This is an explicit reflection prompt. The AI will summarize insights and often offer to persist them.

            ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#add-this-as-a-learningdecision","level":3,"title":"\"Add This as a Learning/decision\"","text":"

            This is an explicit persistence request.

            Use this when you have discovered something worth remembering.

            Add this as a learning: \"JSON marshal escapes angle brackets by default\"\n\n# or simply.\nAdd this as a learning.\n# and let the AI autonomously infer and summarize.\n
            ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#save-context-before-we-end","level":3,"title":"\"Save Context Before We End\"","text":"

            This triggers context persistence before the session closes.

            Use it at the end of the session or before switching topics.

            Variants:

            • \"Let's persist what we did\"
            • \"Update the context files\"
            • /ctx-wrap-up: the recommended end-of-session ceremony (see Session Ceremonies)
            • /ctx-reflect: mid-session reflection checkpoint
            ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#exploration-and-research","level":2,"title":"Exploration and Research","text":"","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#explore-the-codebase-for-x","level":3,"title":"\"Explore the Codebase for X\"","text":"

            This triggers thorough codebase search rather than guessing.

            Use this when you need to understand how something works.

            This works because \"Explore\" signals that investigation is needed, not immediate action.

            ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#how-does-x-work-in-this-codebase","level":3,"title":"\"How Does X Work in This Codebase?\"","text":"

            This prompts reading actual code rather than explaining general concepts.

            Use this to understand the existing implementation.

            How does session saving work in this codebase?\n
            ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#find-all-places-where-x","level":3,"title":"\"Find All Places Where X\"","text":"

            This triggers a comprehensive search across the codebase.

            Use this before refactoring or understanding the impact.

            ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#meta-and-process","level":2,"title":"Meta and Process","text":"","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#what-should-we-document-from-this","level":3,"title":"\"What Should We Document from This?\"","text":"

            This prompts identifying learnings, decisions, and conventions worth persisting.

            Use this after complex discussions or implementations.

            ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#is-this-the-right-approach","level":3,"title":"\"Is This the Right Approach?\"","text":"

            This invites the AI to challenge the current direction.

            Use this when you want a sanity check.

            This works because it allows AI to disagree.

            AIs often default to agreeing; this prompt signals you want an honest assessment.

            Stronger variant: \"Push back if my assumptions are wrong.\" This sets the tone for the entire session: The AI will flag questionable choices proactively instead of waiting to be asked.

            ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#what-am-i-missing","level":3,"title":"\"What Am I Missing?\"","text":"

            This prompts thinking about edge cases, overlooked requirements, or unconsidered approaches.

            Use this before finalizing a design or implementation.

            Forward-looking variant: \"What's the single smartest addition you could make to this at this point?\" Use this after you think you're done: It surfaces improvements you wouldn't have thought to ask for. The constraint to one thing prevents feature sprawl.

            ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#cli-commands-as-prompts","level":2,"title":"CLI Commands as Prompts","text":"

            Asking the AI to run ctx commands is itself a prompt. These load context or trigger specific behaviors:

            Command What it does \"Run ctx status\" Shows context summary, file presence, staleness \"Run ctx agent\" Loads token-budgeted context packet \"Run ctx drift\" Detects dead paths, stale files, missing context","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#ctx-skills","level":3,"title":"ctx Skills","text":"

            The SKILL.md Standard

            Skills are formalized prompts stored as SKILL.md files.

            The /slash-command syntax below is Claude Code specific.

            Other agents can use the same skill files, but invocation may differ.

            Use ctx skills by name:

            Skill When to use /ctx-status Quick context summary /ctx-agent Load full context packet /ctx-remember Recall project context and structured readback /ctx-wrap-up End-of-session context persistence /ctx-history Browse session history for past discussions /ctx-reflect Structured reflection checkpoint /ctx-next Suggest what to work on next /ctx-commit Commit with context persistence /ctx-drift Detect and fix context drift /ctx-implement Execute a plan step-by-step with verification /ctx-loop Generate autonomous loop script /ctx-pad Manage encrypted scratchpad /ctx-archive Archive completed tasks /check-links Audit docs for dead links

            Ceremony vs. Workflow Skills

            Most skills work conversationally: \"what should we work on?\" triggers /ctx-next, \"save that as a learning\" triggers /ctx-learning-add. Natural language is the recommended approach.

            Two skills are the exception: /ctx-remember and /ctx-wrap-up are ceremony skills for session boundaries: Invoke them as explicit slash commands: conversational triggers risk partial execution. See Session Ceremonies.

            Skills combine a prompt, tool permissions, and domain knowledge into a single invocation.

            Skills beyond Claude Code

            The /slash-command syntax above is Claude Code native, but the underlying SKILL.md files are a standard markdown format that any agent can consume. If you use a different coding agent, consult its documentation for how to load skill files as prompt templates.

            See Integrations for setup details.

            ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#anti-patterns","level":2,"title":"Anti-Patterns","text":"

            Based on our ctx development experience (i.e., \"sipping our own champagne\") so far, here are some prompts that tend to produce poor results:

            Prompt Problem Better Alternative \"Fix this\" Too vague, may patch symptoms \"Why is this failing?\" \"Make it work\" Encourages quick hacks \"What's the right way to solve this?\" \"Just do it\" Skips planning \"Plan this, then implement\" \"You should remember\" Confrontational \"Do you remember?\" \"Obviously...\" Discourages questions State the requirement directly \"Idiomatic X\" Triggers language priors \"Follow project conventions\" \"Implement everything\" No phasing, sprawl risk Break into tasks, implement one at a time \"You should know this\" Assumes context is loaded \"Before you start, read X\"","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#reliability-checklist","level":2,"title":"Reliability Checklist","text":"

            Before sending a non-trivial prompt, check these four elements. This is the guide's DNA in one screenful.

            1. Goal in one sentence: What does \"done\" look like?
            2. Files to read: What existing code or context should the AI review before acting?
            3. Verification command: How will you prove it worked? (test name, CLI command, expected output)
            4. Scope boundary: What should the AI not touch?

            A prompt that covers all four is almost always good enough.

            A prompt missing #3 is how you get \"should work now\" without evidence.

            ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#safety-invariants","level":2,"title":"Safety Invariants","text":"

            These Are Invariants: Not Suggestions

            A prompting guide earns its trust by being honest about risk.

            These four rules mentioned below don't change with model versions, agent frameworks, or project size.

            Build them into your workflow once and stop thinking about them.

            Tool-using agents can read files, run commands, and modify your codebase. That power makes them useful. It also creates a trust boundary you should be aware of.

            These invariants apply regardless of which agent or model you use.

            ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#treat-the-repository-text-as-untrusted-input","level":3,"title":"Treat the Repository Text as \"Untrusted Input\"","text":"

            Issue descriptions, PR comments, commit messages, documentation, and even code comments can contain text that looks like instructions. An agent that reads a GitHub issue and then runs a command found inside it is executing untrusted input.

            The rule: Before running any command the agent found in repo text (issues, docs, comments), restate the command explicitly and confirm it does what you expect. Don't let the agent copy-paste from untrusted sources into a shell.

            ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#ask-before-destructive-operations","level":3,"title":"Ask Before Destructive Operations","text":"

            git push --force, rm -rf, DROP TABLE, docker system prune: these are irreversible or hard to reverse. A good agent should pause before running them, but don't rely on that.

            The rule: For any operation that deletes data, overwrites history, or affects shared infrastructure, require explicit confirmation. If the agent runs something destructive without asking, that's a course-correction moment: \"Stop. Never run destructive commands without asking first.\"

            ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#scope-the-blast-radius","level":3,"title":"Scope the Blast Radius","text":"

            An agent told to \"fix the tests\" might modify test fixtures, change assertions, or delete tests that inconveniently fail. An agent told to \"deploy\" might push to production. Broad mandates create broad risk.

            The rule: Constrain scope before starting work. The Reliability Checklist's scope boundary (#4) is your primary safety lever. When in doubt, err on the side of a tighter boundary.

            ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#secrets-never-belong-in-context","level":3,"title":"Secrets Never Belong in Context","text":"

            LEARNINGS.md, DECISIONS.md, and session transcripts are plain-text files that may be committed to version control.

            Don't persist API keys, passwords, tokens, or credentials in context files.

            The rule: If the agent encounters a secret during work, it should use it transiently (environment variable, an alias to the secret instead of the actual secret, etc.) and never write it to a context file.

            Any Secret Seen IS Exposed

            If you see a secret in a context file, remove it immediately and rotate the credential.

            ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#explore-plan-implement","level":2,"title":"Explore → Plan → Implement","text":"

            For non-trivial work, name the phase you want:

            Explore src/auth and summarize the current flow.\nThen propose a plan. After I approve, implement with tests.\n

            This prevents the AI from jumping straight to code.

            The three phases map to different modes of thinking:

            • Explore: read, search, understand: no changes
            • Plan: propose approach, trade-offs, scope: no changes
            • Implement: write code, run tests, verify: changes

            Small fixes skip straight to implement. Complex or uncertain work benefits from all three.

            ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#prompts-by-task-type","level":2,"title":"Prompts by Task Type","text":"

            Different tasks need different prompt structures. The pattern: symptom + location + verification.

            ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#bugfix","level":3,"title":"Bugfix","text":"
            Users report search returns empty results for queries with hyphens.\nReproduce in src/search/. Write a failing test for \"foo-bar\",\nfix the root cause, run: go test ./internal/search/...\n
            ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#refactor","level":3,"title":"Refactor","text":"
            Inspect src/auth/ and list duplication hotspots.\nPropose a refactor plan scoped to one module.\nAfter approval, remove duplication without changing behavior.\nAdd a test if coverage is missing. Run: make audit\n
            ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#research","level":3,"title":"Research","text":"
            Explore the request flow around src/api/.\nSummarize likely bottlenecks with evidence.\nPropose 2-3 hypotheses. Do not implement yet.\n
            ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#docs","level":3,"title":"Docs","text":"
            Update docs/cli-reference.md to reflect the new --format flag.\nConfirm the flag exists in the code and the example works.\n

            Notice each prompt includes what to verify and how. Without that, you get a \"should work now\" instead of evidence.

            ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#writing-tasks-as-prompts","level":2,"title":"Writing Tasks as Prompts","text":"

            Tasks in TASKS.md are indirect prompts to the AI. How you write them shapes how the AI approaches the work.

            ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#state-the-motivation-not-just-the-goal","level":3,"title":"State the Motivation, Not Just the Goal","text":"

            Tell the AI why you are building something, not just what.

            Bad: \"Build a calendar view.\"

            Good: \"Build a calendar view. The motivation is that all notes and tasks we build later should be viewable here.\"

            The second version lets the AI anticipate downstream requirements:

            It will design the calendar's data model to be compatible with future features: Without you having to spell out every integration point. Motivation turns a one-off task into a directional task.

            ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#state-the-deliverable-not-just-steps","level":3,"title":"State the Deliverable, Not Just Steps","text":"

            Bad task (implementation-focused):

            - [ ] T1.1.0: Parser system\n  - [ ] Define data structures\n  - [ ] Implement line parser\n  - [ ] Implement session grouper\n

            The AI may complete all subtasks but miss the actual goal. What does \"Parser system\" deliver to the user?

            Good task (deliverable-focused):

            - [ ] T1.1.0: Parser CLI command\n  **Deliverable**: `ctx journal source` command that shows parsed sessions\n  - [ ] Define data structures\n  - [ ] Implement line parser\n  - [ ] Implement session grouper\n

            Now the AI knows the subtasks serve a specific user-facing deliverable.

            ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#use-acceptance-criteria","level":3,"title":"Use Acceptance Criteria","text":"

            For complex tasks, add explicit \"done when\" criteria:

            - [ ] T2.0: Authentication system\n  **Done when**:\n  - [ ] User can register with email\n  - [ ] User can log in and get a token\n  - [ ] Protected routes reject unauthenticated requests\n

            This prevents premature \"task complete\" when only the implementation details are done, but the feature doesn't actually work.

            ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#subtasks-parent-task","level":3,"title":"Subtasks ≠ Parent Task","text":"

            Completing all subtasks does not mean the parent task is complete.

            The parent task describes what the user gets.

            Subtasks describe how to build it.

            Always re-read the parent task description before marking it complete. Verify the stated deliverable exists and works.

            ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#why-do-these-approaches-work","level":2,"title":"Why Do These Approaches Work?","text":"

            The patterns in this guide aren't invented here: They are practitioner translations of well-established, peer-reviewed research, most of which predate the current AI (hype) wave.

            The underlying ideas come from decades of work in machine learning, cognitive science, and numerical optimization. For a concrete case study showing how these principles play out when an agent decides whether to follow instructions (attention competition, optimization toward least-resistance paths, and observable compliance as a design goal) see The Dog Ate My Homework.

            Phased work (\"Explore → Plan → Implement\") applies chain-of-thought reasoning: Decomposing a problem into sequential steps before acting. Forcing intermediate reasoning steps measurably improves output quality in language models, just as it does in human problem-solving. Wei et al., Chain-of-Thought Prompting Elicits Reasoning in Large Language Models (2022).

            Root-cause prompts (\"Why doesn't X work?\") use step-back abstraction: Retreating to a higher-level question before diving into specifics. This mirrors how experienced engineers debug: they ask \"what should happen?\" before asking \"what went wrong?\" Zheng et al., Take a Step Back: Evoking Reasoning via Abstraction in Large Language Models (2023).

            Exploring alternatives (\"Propose 2-3 approaches\") leverages self-consistency: Generating multiple independent reasoning paths and selecting the most coherent result. The idea traces back to ensemble methods in ML: A committee of diverse solutions outperforms any single one. Wang et al., Self-Consistency Improves Chain of Thought Reasoning in Language Models (2022).

            Impact analysis (\"What would break if we...\") is a form of tree-structured exploration: Branching into multiple consequence paths before committing. This is the same principle behind game-tree search (minimax, MCTS) that has powered decision-making systems since the 1950s. Yao et al., Tree of Thoughts: Deliberate Problem Solving with Large Language Models (2023).

            Motivation prompting (\"Build X because Y\") works through goal conditioning: Providing the objective function alongside the task. In optimization terms, you are giving the gradient direction, not just the loss. The model can make locally coherent decisions that serve the global objective because it knows what \"better\" means.

            Scope constraints (\"Only change files in X\") apply constrained optimization: Bounding the search space to prevent divergence. This is the same principle behind regularization in ML: Without boundaries, powerful optimizers find solutions that technically satisfy the objective but are practically useless.

            CLI commands as prompts (\"Run ctx status\") interleave reasoning with acting: The model thinks, acts on external tools, observes results, then thinks again. Grounding reasoning in real tool output reduces hallucination because the model can't ignore evidence it just retrieved. Yao et al., ReAct: Synergizing Reasoning and Acting in Language Models (2022).

            Task decomposition (\"Prompts by Task Type\") applies least-to-most prompting: Breaking a complex problem into subproblems and solving them sequentially, each building on the last. This is the research version of \"plan, then implement one slice.\" Zhou et al., Least-to-Most Prompting Enables Complex Reasoning in Large Language Models (2022).

            Explicit planning (\"Explore → Plan → Implement\") is directly supported by plan-and-solve prompting, which addresses missing-step failures in zero-shot reasoning by extracting a plan before executing. The phased structure prevents the model from jumping to code before understanding the problem. Wang et al., Plan-and-Solve Prompting: Improving Zero-Shot Chain-of-Thought Reasoning by Large Language Models (2023).

            Session reflection (\"What did we learn?\", /ctx-reflect) is a form of verbal reinforcement learning: Improving future performance by persisting linguistic feedback as memory rather than updating weights. This is exactly what LEARNINGS.md and DECISIONS.md provide: a durable feedback signal across sessions. Shinn et al., Reflexion: Language Agents with Verbal Reinforcement Learning (2023).

            These aren't prompting \"hacks\" that you will find in the \"1000 AI Prompts for the Curious\" listicles: They are applications of foundational principles:

            • Decomposition,
            • Abstraction,
            • Ensemble Reasoning,
            • Search,
            • and Constrained Optimization.

            They work because language models are, at their core, optimization systems navigating probabilistic landscapes.

            ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#further-reading","level":2,"title":"Further Reading","text":"
            • The Attention Budget: Why your AI forgets what you just told it, and how token budgets shape context strategy
            • The Dog Ate My Homework: A case study in making agents follow instructions: attention timing, delegation decay, and observable compliance as a design goal
            ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#contributing","level":2,"title":"Contributing","text":"

            Found a prompt that works well? Open an issue or PR with:

            1. The prompt text;
            2. What behavior it triggers;
            3. When to use it;
            4. Why it works (optional but helpful).

            Dive Deeper:

            • Recipes: targeted how-to guides for specific tasks
            • CLI Reference: all commands and flags
            • Integrations: setup for Claude Code, Cursor, Aider
            ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/repeated-mistakes/","level":1,"title":"My AI Keeps Making the Same Mistakes","text":"","path":["Home","Working with AI","My AI Keeps Making the Same Mistakes"],"tags":[]},{"location":"home/repeated-mistakes/#the-problem","level":2,"title":"The Problem","text":"

            You found a bug last Tuesday. You debugged it, understood the root cause, and moved on. Today, a new session hits the exact same bug. The AI rediscovers it from scratch, burning twenty minutes on something you already solved.

            Worse: you spent an hour last week evaluating two database migration strategies, picked one, documented why in a comment somewhere, and now the AI is cheerfully suggesting the approach you rejected. Again.

            This is not a model problem. It is a memory problem. Without persistent context, every session starts with amnesia.

            ","path":["Home","Working with AI","My AI Keeps Making the Same Mistakes"],"tags":[]},{"location":"home/repeated-mistakes/#how-ctx-stops-the-loop","level":2,"title":"How ctx Stops the Loop","text":"

            ctx gives your AI three files that directly prevent repeated mistakes, each targeting a different failure mode.

            ","path":["Home","Working with AI","My AI Keeps Making the Same Mistakes"],"tags":[]},{"location":"home/repeated-mistakes/#decisionsmd-stop-relitigating-settled-choices","level":3,"title":"DECISIONS.md: Stop Relitigating Settled Choices","text":"

            When you make an architectural decision, record it with rationale and rejected alternatives. The AI reads this at session start and treats it as settled.

            ## [2026-02-12] Use JWT for Authentication\n\n**Status**: Accepted\n\n**Context**: Need stateless auth for the API layer.\n\n**Decision**: JWT with short-lived access tokens and refresh rotation.\n\n**Rationale**: Stateless, scales horizontally, team has prior experience.\n\n**Alternatives Considered**:\n- Session-based auth: Rejected. Requires sticky sessions or shared store.\n- API keys only: Rejected. No user identity, no expiry rotation.\n

            Next session, when the AI considers auth, it reads this entry and builds on the decision instead of re-debating it. If someone asks \"why not sessions?\", the rationale is already there.

            ","path":["Home","Working with AI","My AI Keeps Making the Same Mistakes"],"tags":[]},{"location":"home/repeated-mistakes/#learningsmd-capture-gotchas-once","level":3,"title":"LEARNINGS.md: Capture Gotchas Once","text":"

            Learnings are the bugs, quirks, and non-obvious behaviors that cost you time the first time around. Write them down so they cost you zero time the second time.

            ## Build\n\n### CGO Required for SQLite on Alpine\n\n**Discovered**: 2026-01-20\n\n**Context**: Docker build failed silently with \"no such table\" at runtime.\n\n**Lesson**: The go-sqlite3 driver requires CGO_ENABLED=1 and gcc\ninstalled in the build stage. Alpine needs apk add build-base.\n\n**Application**: Always use the golang:alpine image with build-base\nfor SQLite builds. Never set CGO_ENABLED=0.\n

            Without this entry, the next session that touches the Dockerfile will hit the same wall. With it, the AI knows before it starts.

            ","path":["Home","Working with AI","My AI Keeps Making the Same Mistakes"],"tags":[]},{"location":"home/repeated-mistakes/#constitutionmd-draw-hard-lines","level":3,"title":"CONSTITUTION.md: Draw Hard Lines","text":"

            Some mistakes are not about forgetting - they are about boundaries the AI should never cross. CONSTITUTION.md sets inviolable rules.

            * [ ] Never commit secrets, tokens, API keys, or credentials\n* [ ] Never disable security linters without a documented exception\n* [ ] All database migrations must be reversible\n

            The AI reads these as absolute constraints. It does not weigh them against convenience. It refuses tasks that would violate them.

            ","path":["Home","Working with AI","My AI Keeps Making the Same Mistakes"],"tags":[]},{"location":"home/repeated-mistakes/#the-accumulation-effect","level":2,"title":"The Accumulation Effect","text":"

            Each of these files grows over time. Session one captures two decisions. Session five adds a tricky learning about timezone handling. Session twelve records a convention about error message formatting.

            By session twenty, your AI has a knowledge base that no single person carries in their head. New team members - human or AI - inherit it instantly.

            The key insight: you are not just coding. You are building a knowledge layer that makes every future session faster.

            ctx files version with your code in git. They survive branch switches, team changes, and model upgrades. The context outlives any single session.

            ","path":["Home","Working with AI","My AI Keeps Making the Same Mistakes"],"tags":[]},{"location":"home/repeated-mistakes/#getting-started","level":2,"title":"Getting Started","text":"

            Capture your first decision or learning right now:

            ctx add decision \"Use PostgreSQL\" \\\n  --context \"Need a relational database for the project\" \\\n  --rationale \"Team expertise, JSONB support, mature ecosystem\" \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n\nctx add learning \"Vitest mock hoisting\" \\\n  --context \"Tests failing intermittently\" \\\n  --lesson \"vi.mock() must be at file top level\" \\\n  --application \"Use vi.doMock() for dynamic mocks\" \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n
            ","path":["Home","Working with AI","My AI Keeps Making the Same Mistakes"],"tags":[]},{"location":"home/repeated-mistakes/#further-reading","level":2,"title":"Further Reading","text":"
            • Knowledge Capture: the full workflow for persisting decisions, learnings, and conventions
            • Context Files Reference: structure and format for every file in .context/
            • About ctx: the bigger picture - why persistent context changes how you work with AI
            ","path":["Home","Working with AI","My AI Keeps Making the Same Mistakes"],"tags":[]},{"location":"home/steering/","level":1,"title":"Steering Files","text":"","path":["Home","Customization","Steering Files"],"tags":[]},{"location":"home/steering/#steering-files","level":2,"title":"Steering Files","text":"

            ctx projects talk to AI assistants through several layers (context files, decisions, conventions, the agent context packet), but none of those can tell the assistant how to behave when a specific kind of prompt arrives. That's what steering files are for.

            A steering file is a small markdown document with YAML frontmatter that says: \"when the user asks about X, prepend these rules to the prompt.\" ctx manages those files in .context/steering/, decides which ones match each prompt, and syncs them out to each AI tool's native config (Claude Code, Cursor, Kiro, Cline) so the rules actually land in the prompt pipeline.

            ","path":["Home","Customization","Steering Files"],"tags":[]},{"location":"home/steering/#not-the-same-as-decisions-or-conventions","level":2,"title":"Not the Same as Decisions or Conventions","text":"

            The three look similar on disk but serve different purposes:

            Kind Purpose Decisions (DECISIONS.md) What was chosen and why Conventions (CONVENTIONS.md) How the codebase is written Steering (.context/steering/*.md) How the AI should behave on matching prompts

            If you find yourself writing \"the AI should always do X when asked about Y,\" that belongs in steering, not decisions.

            ","path":["Home","Customization","Steering Files"],"tags":[]},{"location":"home/steering/#your-first-steering-files","level":2,"title":"Your First Steering Files","text":"

            ctx init scaffolds four foundation steering files in .context/steering/ so you start with something to edit rather than an empty directory:

            File What to fill in product.md What the project is, who it's for, what's out of scope tech.md Languages, frameworks, runtime, hard constraints structure.md Directory layout, where new files go, naming rules workflow.md Branch strategy, commit conventions, pre-commit checks

            Each file starts with an inline HTML comment explaining the three inclusion modes, priority semantics, and tool scoping. The comment is invisible in rendered markdown but visible when you open the file to edit it; it's self-documenting scaffolding, not permanent guidance. Delete the comment once you've customized the file.

            Default settings for foundation files:

            • inclusion: always: fires on every AI tool call
            • priority: 10: injected near the top of the prompt
            • tools: []: applies to every configured AI tool

            You should open each of these files and replace the placeholder content with your project's actual rules. Re-running ctx init is safe: existing files are left alone, so your edits survive. Use ctx init --no-steering-init to opt out of the scaffold entirely.

            ","path":["Home","Customization","Steering Files"],"tags":[]},{"location":"home/steering/#inclusion-modes","level":2,"title":"Inclusion Modes","text":"

            Each steering file declares an inclusion mode in its frontmatter:

            Mode When the file is included always Every prompt, unconditionally auto When the prompt keywords match the file's description manual Only when the user explicitly names the file

            Which mode to pick depends on the AI tool you use, because the two tool families consume steering very differently.

            Claude Code and Codex: prefer inclusion: always for rules that must fire reliably. These tools have two delivery channels:

            1. The plugin's PreToolUse hook runs ctx agent with an empty prompt, so only always files match and get injected automatically on every tool call.
            2. The ctx_steering_get MCP tool, registered automatically when the ctx plugin is installed. Claude can call this tool mid-task to fetch auto or manual files matching a specific prompt. Verify with claude mcp list; look for ctx: ✓ Connected.

            Use always for invariants and anything that must fire every session. Use auto for situational rules where \"Claude fetches this when the prompt is relevant\" is the right behavior; those still land, just on Claude's judgment. Use manual for reference libraries you'll name explicitly.

            Cursor, Cline, Kiro: auto is the natural default. These tools read .cursor/rules/, .clinerules/, or .kiro/steering/ natively and resolve the description match on their own, so auto files fire when the prompt matches. manual files load on explicit invocation. always still works but consumes context budget on every turn.

            Mixed setups: if a rule must fire on Claude Code, pick always, even if it's overkill for your Cursor setup. The context budget cost is small; the alternative (silently not firing) is worse.

            ","path":["Home","Customization","Steering Files"],"tags":[]},{"location":"home/steering/#two-families-of-ai-tools-two-delivery-paths","level":2,"title":"Two Families of AI Tools, Two Delivery Paths","text":"

            Not every AI tool consumes steering the same way. ctx handles two tool families differently, and it's worth knowing which family your editor is in before you wonder why a rule isn't firing.

            Native-rules tools (Cursor, Cline, Kiro) have a built-in rules primitive. They read a specific directory (.cursor/rules/, .clinerules/, .kiro/steering/) and apply the rules they find there. ctx handles these via ctx steering sync, which exports your files into the tool-native format. Run sync whenever you edit a steering file.

            Hook + MCP tools (Claude Code, Codex) have no native rules primitive, so ctx steering sync is a no-op for them. Instead, ctx delivers steering through two non-sync channels:

            1. Automatic injection via a PreToolUse hook. The ctx setup claude-code plugin wires a hook that runs ctx agent --budget 8000 before each tool call. ctx agent loads your steering files, filters them by the active prompt, and includes matching bodies in the context packet it prints. Claude Code feeds that output back into its context. Every tool call, automatically.
            2. On-demand via the ctx_steering_get MCP tool. The ctx MCP server exposes a tool Claude can call mid-task to fetch matching steering files for a specific prompt. Claude decides when to call it; it's not automatic.

            Both channels activate when you run ctx setup claude-code --write. After that, steering just works for Claude Code.

            Practical takeaway:

            • Using Cursor/Cline/Kiro only? Run ctx steering sync after edits.
            • Using Claude Code or Codex only? Never run sync; the hook+MCP pipeline handles it.
            • Using both? Run sync for the native-rules tools; the hook+MCP pipeline covers Claude Code automatically.
            ","path":["Home","Customization","Steering Files"],"tags":[]},{"location":"home/steering/#two-shapes-of-automation-rules-and-scripts","level":2,"title":"Two Shapes of Automation: Rules and Scripts","text":"

            Steering is one of two hook-like layers ctx provides for customizing AI behavior. They're complementary:

            • Steering: persistent rules that get prepended to prompts. Declarative, text-only, scored by match.
            • Triggers: executable shell scripts that fire at lifecycle events. Imperative, runs arbitrary code, gated by exit codes.

            Pick steering when you want \"always remind the AI of X.\" Pick triggers when you want \"do Y when event Z happens.\" They can coexist; many projects use both.

            ","path":["Home","Customization","Steering Files"],"tags":[]},{"location":"home/steering/#where-to-go-next","level":2,"title":"Where to Go Next","text":"
            • Writing Steering Files: a six-step walkthrough: scaffold, write the rule, preview matches, list, get-rules-in-front-of-the-AI (two paths depending on tool family), verify.
            • ctx steering reference: full command, flag, and frontmatter reference; includes the per-tool delivery-mechanism table and a dedicated section on how Claude Code and Codex consume steering.
            • ctx setup: configure which AI tools receive steering. For Cursor/Cline/Kiro this is about sync targets; for Claude Code/Codex it installs the plugin that wires the PreToolUse hook and MCP server.
            • Lifecycle Triggers: the imperative companion to steering files.
            ","path":["Home","Customization","Steering Files"],"tags":[]},{"location":"home/triggers/","level":1,"title":"Lifecycle Triggers","text":"","path":["Home","Customization","Lifecycle Triggers"],"tags":[]},{"location":"home/triggers/#lifecycle-triggers","level":2,"title":"Lifecycle Triggers","text":"

            Some things can't be expressed as a rule you want the AI to follow. Sometimes you want something to happen: block a dangerous tool call, inject today's standup notes into the next session, log every file save to a journal. That's what triggers are for.

            A trigger is an executable shell script that ctx runs at a specific lifecycle event: the start of a session, before a tool call, when a file is saved, and so on. Triggers read a JSON payload from stdin, do whatever they need, and write a JSON response on stdout. They can allow, block, or inject context into the pipeline depending on the event type.

            ","path":["Home","Customization","Lifecycle Triggers"],"tags":[]},{"location":"home/triggers/#trigger-types","level":2,"title":"Trigger Types","text":"Type Fires when Use case session-start A new AI session begins Inject rotating context, standup notes session-end An AI session ends Persist summaries, send notifications pre-tool-use Before a tool call executes Block, gate, or audit post-tool-use After a tool call completes Log, react, post-process file-save A file is saved Lint on save, update indices context-add A new entry is added to .context/ Cross-link, notify, enrich","path":["Home","Customization","Lifecycle Triggers"],"tags":[]},{"location":"home/triggers/#triggers-are-arbitrary-code-treat-them-like-pre-commit-hooks","level":2,"title":"Triggers Are Arbitrary Code: Treat Them like Pre-Commit Hooks","text":"

            Only Enable Scripts You've Read and Understand

            A trigger is a shell script with the executable bit set. It runs with the same privileges as your AI tool and receives JSON input on stdin. A malicious or buggy trigger can block tool calls, corrupt context files, or exfiltrate data.

            ctx trigger add intentionally creates new scripts disabled (no executable bit). You must ctx trigger enable <name> after reviewing the contents. That's not a suggestion; it's the security model.

            ","path":["Home","Customization","Lifecycle Triggers"],"tags":[]},{"location":"home/triggers/#three-hook-like-layers-in-ctx","level":2,"title":"Three Hook-like Layers in ctx","text":"

            Triggers are one of three distinct hook-like concepts in ctx. The names are similar but the owners and use cases are not:

            Layer Owned by Where they live When to use ctx trigger You .context/hooks/<type>/*.sh Project-specific automation, any AI tool ctx system hooks ctx itself built-in, wired into tool configs Built-in nudges (you don't author these) Claude Code hooks Claude Code .claude/settings.local.json Claude-Code-only tool-specific integration

            This page is about the first category. The other two run automatically and are invisible to you.

            ","path":["Home","Customization","Lifecycle Triggers"],"tags":[]},{"location":"home/triggers/#triggers-vs-steering-same-problem-different-shape","level":2,"title":"Triggers vs Steering: Same Problem, Different Shape","text":"

            Triggers are the imperative counterpart to steering files. Steering expresses persistent rules the AI reads before each prompt; triggers express side effects that run on lifecycle events. They're complementary, not competing:

            • Want the AI to remember something? → Steering.
            • Want a script to run when something happens? → Trigger.

            Most projects use both.

            ","path":["Home","Customization","Lifecycle Triggers"],"tags":[]},{"location":"home/triggers/#where-to-go-next","level":2,"title":"Where to Go Next","text":"
            • Authoring Lifecycle Triggers: walkthrough with security guidance: scaffold, test, enable, iterate.
            • ctx trigger reference: command reference, trigger type table, input/output contract.
            • Steering files: the declarative counterpart to triggers.
            ","path":["Home","Customization","Lifecycle Triggers"],"tags":[]},{"location":"operations/","level":1,"title":"Operations","text":"

            Guides for installing, upgrading, integrating, and running ctx. Split into three groups by audience.

            ","path":["Operations"],"tags":[]},{"location":"operations/#day-to-day","level":2,"title":"Day-to-Day","text":"

            Everyday operation guides for anyone running ctx in a project or adopting it in a team.

            ","path":["Operations"],"tags":[]},{"location":"operations/#integration","level":3,"title":"Integration","text":"

            Adopt ctx in an existing project: initialize context files, migrate from other tools, and onboard team members.

            ","path":["Operations"],"tags":[]},{"location":"operations/#upgrade","level":3,"title":"Upgrade","text":"

            Upgrade between versions with step-by-step migration notes and breaking-change guidance.

            ","path":["Operations"],"tags":[]},{"location":"operations/#ai-tools","level":3,"title":"AI Tools","text":"

            Configure ctx with Claude Code, Cursor, Aider, Copilot, Windsurf, and other AI coding tools.

            ","path":["Operations"],"tags":[]},{"location":"operations/#autonomous-loops","level":3,"title":"Autonomous Loops","text":"

            Run an unattended AI agent that works through tasks overnight, with ctx providing persistent memory between iterations.

            ","path":["Operations"],"tags":[]},{"location":"operations/#hub","level":2,"title":"Hub","text":"

            Operator guides for running a ctx Hub, the gRPC server that fans out structured entries across projects. If you're a client connecting to a Hub someone else runs, see ctx connect and the Hub recipes instead.

            ","path":["Operations"],"tags":[]},{"location":"operations/#hub-operations","level":3,"title":"Hub Operations","text":"

            Data directory layout, daemon management, systemd unit, backup and restore, log rotation, monitoring, and upgrades.

            ","path":["Operations"],"tags":[]},{"location":"operations/#hub-failure-modes","level":3,"title":"Hub Failure Modes","text":"

            What can go wrong in network, storage, cluster, auth, and clock layers, and what you should do about each one. Includes the short-list table on-call engineers will want bookmarked.

            ","path":["Operations"],"tags":[]},{"location":"operations/#maintainers","level":2,"title":"Maintainers","text":"

            Runbooks for people shipping ctx itself.

            ","path":["Operations"],"tags":[]},{"location":"operations/#cutting-a-release","level":3,"title":"Cutting a Release","text":"

            Step-by-step runbook for maintainers: bump version, generate release notes, run the release script, and verify the result.

            ","path":["Operations"],"tags":[]},{"location":"operations/#runbooks","level":2,"title":"Runbooks","text":"

            Step-by-step procedures you run with your agent. Each runbook includes a prompt to paste into a Claude Code session and guidance on triaging the results.

            Runbook Purpose When to run Release checklist Full pre-release sequence Before every release Plugin release Plugin-specific release steps Plugin changes ship Breaking migration Guide users across breaking changes Releases with renames Hub deployment Set up a ctx Hub end-to-end First-time hub setup New contributor Onboarding: clone to first session New contributors Codebase audit AST audits, magic strings, dead code, doc alignment Before release, quarterly Docs semantic audit Narrative gaps, weak pages, structural problems Before release, after adding pages Sanitize permissions Clean .claude/settings.local.json of over-broad grants After heavy permission granting Architecture exploration Systematic architecture docs across repos New codebase onboarding, reviews

            Recommended cadence:

            • Before every release: release checklist (which includes codebase audit + docs semantic audit)
            • Monthly: sanitize permissions
            • Quarterly: full sweep of all audit runbooks
            ","path":["Operations"],"tags":[]},{"location":"operations/autonomous-loop/","level":1,"title":"Autonomous Loops","text":"","path":["Operations","Day-to-Day","Autonomous Loops"],"tags":[]},{"location":"operations/autonomous-loop/#autonomous-ai-development","level":2,"title":"Autonomous AI Development","text":"

            Iterate until done.

            An autonomous loop is an iterative AI development workflow where an agent works on tasks until completion, without constant human intervention.

            ctx provides the memory that makes this possible:

            • ctx provides the memory: persistent context that survives across iterations
            • The loop provides the automation: continuous execution until done

            Together, they enable fully autonomous AI development where the agent remembers everything across iterations.

            Origin

            This pattern is inspired by Geoffrey Huntley's Ralph Wiggum technique.

            We use generic terminology here so the concepts remain clear regardless of trends.

            ","path":["Operations","Day-to-Day","Autonomous Loops"],"tags":[]},{"location":"operations/autonomous-loop/#how-it-works","level":2,"title":"How It Works","text":"
            graph TD\n    A[Start Loop] --> B[Load .context/loop.md]\n    B --> C[AI reads .context/]\n    C --> D[AI picks task from TASKS.md]\n    D --> E[AI completes task]\n    E --> F[AI updates context files]\n    F --> G[AI commits changes]\n    G --> H{Check signals}\n    H -->|SYSTEM_CONVERGED| I[Done - all tasks complete]\n    H -->|SYSTEM_BLOCKED| J[Done - needs human input]\n    H -->|Continue| B
            1. Loop reads .context/loop.md and invokes AI
            2. AI loads context from .context/
            3. AI picks one task and completes it
            4. AI updates context files (mark task done, add learnings)
            5. AI commits changes
            6. Loop checks for completion signals
            7. Repeat until converged or blocked
            ","path":["Operations","Day-to-Day","Autonomous Loops"],"tags":[]},{"location":"operations/autonomous-loop/#quick-start-shell-while-loop-recommended","level":2,"title":"Quick Start: Shell While Loop (Recommended)","text":"

            The best way to run an autonomous loop is a plain shell script that invokes your AI tool in a fresh process on each iteration. This is \"pure ralph\":

            The only state that carries between iterations is what lives in .context/ and the git history. No context window bleed, no accumulated tokens, no hidden state.

            Create a loop.sh:

            #!/bin/bash\n# loop.sh: an autonomous iteration loop\n\nPROMPT_FILE=\"${1:-.context/loop.md}\"\nMAX_ITERATIONS=\"${2:-10}\"\nOUTPUT_FILE=\"/tmp/loop_output.txt\"\n\nfor i in $(seq 1 $MAX_ITERATIONS); do\n  echo \"=== Iteration $i ===\"\n\n  # Invoke AI with prompt\n  cat \"$PROMPT_FILE\" | claude --print > \"$OUTPUT_FILE\" 2>&1\n\n  # Display output\n  cat \"$OUTPUT_FILE\"\n\n  # Check for completion signals\n  if grep -q \"SYSTEM_CONVERGED\" \"$OUTPUT_FILE\"; then\n    echo \"Loop complete: All tasks done\"\n    break\n  fi\n\n  if grep -q \"SYSTEM_BLOCKED\" \"$OUTPUT_FILE\"; then\n    echo \"Loop blocked: Needs human input\"\n    break\n  fi\n\n  sleep 2\ndone\n

            Make it executable and run:

            chmod +x loop.sh\n./loop.sh\n

            You can also generate this script with ctx loop (see CLI Reference).

            ","path":["Operations","Day-to-Day","Autonomous Loops"],"tags":[]},{"location":"operations/autonomous-loop/#why-do-we-use-a-shell-loop","level":3,"title":"Why Do We Use a Shell Loop?","text":"

            Each iteration starts a fresh AI process with zero context window history. The agent knows only what it reads from .context/ files: exactly the information you chose to persist.

            This is the core loop principle: memory is explicit, not accidental.

            ","path":["Operations","Day-to-Day","Autonomous Loops"],"tags":[]},{"location":"operations/autonomous-loop/#alternative-claude-codes-built-in-loop","level":2,"title":"Alternative: Claude Code's Built-in Loop","text":"

            Claude Code has built-in loop support:

            # Start autonomous loop\n/loop\n\n# Cancel running loop\n/cancel-loop\n

            This is convenient for quick iterations, but be aware of important caveats:

            This Loop Is Not Pure

            Claude Code's /loop runs all iterations within the same session. This means:

            • State leaks between iterations: The context window accumulates output from every previous iteration. The agent \"remembers\" things it saw earlier (even if they were never persisted to .context/).
            • Token budget degrades: Each iteration adds to the context window, leaving less room for actual work in later iterations.
            • Not ergonomic for long runs: Users report that the built-in loop is less predictable for 10+ iteration runs compared to a shell loop.

            For short explorations (2-5 iterations) or interactive use, /loop works fine. For overnight unattended runs or anything where iteration independence matters, use the shell while loop instead.

            ","path":["Operations","Day-to-Day","Autonomous Loops"],"tags":[]},{"location":"operations/autonomous-loop/#the-contextloopmd-file","level":2,"title":"The .context/loop.md File","text":"

            The prompt file instructs the AI on how to work autonomously. Here's a template:

            # Autonomous Development Prompt\n\nYou are working on this project autonomously. Follow these steps:\n\n## 1. Load Context\n\nRead these files in order:\n\n1. `.context/CONSTITUTION.md`: NEVER violate these rules\n2. `.context/TASKS.md`: Find work to do\n3. `.context/CONVENTIONS.md`: Follow these patterns\n4. `.context/DECISIONS.md`: Understand past choices\n\n## 2. Pick One Task\n\nFrom `.context/TASKS.md`, select ONE task that is:\n\n- Not blocked\n- Highest priority available\n- Within your capabilities\n\n## 3. Complete the Task\n\n- Write code following conventions\n- Run tests if applicable\n- Keep changes focused and minimal\n\n## 4. Update Context\n\nAfter completing work:\n\n- Mark task complete in `TASKS.md`\n- Add any learnings to `LEARNINGS.md`\n- Add any decisions to `DECISIONS.md`\n\n## 5. Commit Changes\n\nCreate a focused commit with clear message.\n\n## 6. Signal Status\n\nEnd your response with exactly ONE of:\n\n- `SYSTEM_CONVERGED`: All tasks in TASKS.md are complete\n- `SYSTEM_BLOCKED`: Cannot proceed, need human input (explain why)\n- (no signal): More work remains, continue to next iteration\n\n## Rules\n\n- ONE task per iteration\n- NEVER skip tests\n- NEVER violate CONSTITUTION.md\n- Commit after each task\n
            ","path":["Operations","Day-to-Day","Autonomous Loops"],"tags":[]},{"location":"operations/autonomous-loop/#completion-signals","level":2,"title":"Completion Signals","text":"

            The loop watches for these signals in AI output:

            Signal Meaning When to Use SYSTEM_CONVERGED All tasks complete No pending tasks in TASKS.md SYSTEM_BLOCKED Cannot proceed Needs clarification, access, or decision BOOTSTRAP_COMPLETE Initial setup done Project scaffolding finished","path":["Operations","Day-to-Day","Autonomous Loops"],"tags":[]},{"location":"operations/autonomous-loop/#example-usage","level":3,"title":"Example Usage","text":"

            converged state

            I've completed all tasks in TASKS.md:\n- [x] Set up project structure\n- [x] Implement core API\n- [x] Add authentication\n- [x] Write tests\n\nNo pending tasks remain.\n\nSYSTEM_CONVERGED\n

            blocked state

            I cannot proceed with the \"Deploy to production\" task because:\n- Missing AWS credentials\n- Need confirmation on region selection\n\nPlease provide credentials and confirm deployment region.\n\nSYSTEM_BLOCKED\n
            ","path":["Operations","Day-to-Day","Autonomous Loops"],"tags":[]},{"location":"operations/autonomous-loop/#why-ctx-and-loops-work-well-together","level":2,"title":"Why ctx and Loops Work Well Together","text":"Without ctx With ctx Each iteration starts fresh Each iteration has full history Decisions get re-made Decisions persist in DECISIONS.md Learnings are lost Learnings accumulate in LEARNINGS.md Tasks can be forgotten Tasks tracked in TASKS.md","path":["Operations","Day-to-Day","Autonomous Loops"],"tags":[]},{"location":"operations/autonomous-loop/#automatic-context-updates","level":3,"title":"Automatic Context Updates","text":"

            During the loop, the AI should update context files:

            Mark task complete:

            ctx task complete \"implement user auth\"\n

            Or emit an update command (parsed by ctx watch):

            <context-update type=\"complete\">user auth</context-update>\n

            Add learning:

            ctx add learning \"Rate limiting requires Redis connection\" \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n

            Or via update command:

            <context-update type=\"learning\"\n  context=\"Implementing rate limiter\"\n  lesson=\"Rate limiting requires Redis connection\"\n  application=\"Ensure Redis is provisioned before enabling rate limits\"\n>Rate Limiting Redis Dependency</context-update>\n

            Record decision:

            ctx add decision \"Use JWT tokens for API authentication\" \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n

            ","path":["Operations","Day-to-Day","Autonomous Loops"],"tags":[]},{"location":"operations/autonomous-loop/#advanced-watch-mode","level":2,"title":"Advanced: Watch Mode","text":"

            Run ctx watch alongside the loop to automatically process context updates:

            # Terminal 1: Run the loop\n./loop.sh 2>&1 | tee /tmp/loop.log\n\n# Terminal 2: Watch for context updates\nctx watch --log /tmp/loop.log\n

            The watch command processes context updates from the loop output in real time.

            ","path":["Operations","Day-to-Day","Autonomous Loops"],"tags":[]},{"location":"operations/autonomous-loop/#project-setup","level":2,"title":"Project Setup","text":"

            Initialize a project for autonomous loop operation:

            ctx init\n

            The loop prompt template is deployed to .context/loop.md during initialization. It instructs the agent to:

            • Work autonomously without asking clarifying questions;
            • Follow one-task-per-iteration discipline;
            • Use SYSTEM_CONVERGED / SYSTEM_BLOCKED signals;
            ","path":["Operations","Day-to-Day","Autonomous Loops"],"tags":[]},{"location":"operations/autonomous-loop/#example-project-structure","level":2,"title":"Example Project Structure","text":"
            my-project/\n├── .context/\n│   ├── CONSTITUTION.md\n│   ├── TASKS.md          # Work items for the loop\n│   ├── DECISIONS.md\n│   ├── LEARNINGS.md\n│   ├── CONVENTIONS.md\n│   └── sessions/         # Loop iteration history\n├── loop.sh               # Loop script (if not using Claude Code)\n└── src/                  # Your code\n
            ","path":["Operations","Day-to-Day","Autonomous Loops"],"tags":[]},{"location":"operations/autonomous-loop/#sample-tasksmd-for-autonomous-loops","level":3,"title":"Sample TASKS.md for Autonomous Loops","text":"
            # Tasks\n\n## Phase 1: Setup\n\n- [x] Initialize project structure\n- [x] Set up testing framework\n\n## Phase 2: Core Features\n\n- [ ] Implement user registration `#priority:high`\n- [ ] Add email verification `#priority:high`\n- [ ] Create password reset flow `#priority:medium`\n\n## Phase 3: Polish\n\n- [ ] Add rate limiting `#priority:medium`\n- [ ] Improve error messages `#priority:low`\n

            The loop will work through these systematically, marking each complete.

            ","path":["Operations","Day-to-Day","Autonomous Loops"],"tags":[]},{"location":"operations/autonomous-loop/#troubleshooting","level":2,"title":"Troubleshooting","text":"","path":["Operations","Day-to-Day","Autonomous Loops"],"tags":[]},{"location":"operations/autonomous-loop/#loop-runs-forever","level":3,"title":"Loop Runs Forever","text":"

            Cause: AI not emitting completion signals

            Fix: Ensure .context/loop.md explicitly instructs signaling:

            End EVERY response with one of:\n- SYSTEM_CONVERGED (if all tasks done)\n- SYSTEM_BLOCKED (if stuck)\n

            ","path":["Operations","Day-to-Day","Autonomous Loops"],"tags":[]},{"location":"operations/autonomous-loop/#context-not-persisting","level":3,"title":"Context Not Persisting","text":"

            Cause: AI not updating context files

            Fix: Add explicit instructions to .context/loop.md:

            After completing a task, you MUST:\n1. Run: ctx task complete \"<task>\"\n2. Add learnings: ctx add learning \"...\" --session-id abc12345 --branch main --commit 68fbc00a\n

            ","path":["Operations","Day-to-Day","Autonomous Loops"],"tags":[]},{"location":"operations/autonomous-loop/#tasks-getting-repeated","level":3,"title":"Tasks Getting Repeated","text":"

            Cause: Task not marked complete before next iteration

            Fix: Ensure commit happens after context update:

            Order of operations:\n1. Complete coding work\n2. Update context files (*`ctx task complete`, `ctx add`*)\n3. Commit **ALL** changes including `.context/`\n4. Then signal status\n
            ","path":["Operations","Day-to-Day","Autonomous Loops"],"tags":[]},{"location":"operations/autonomous-loop/#ai-violating-constitution","level":3,"title":"AI Violating Constitution","text":"

            Cause: Constitution not read first

            Fix: Make constitution check explicit in .context/loop.md:

            BEFORE any work:\n1. Read .context/CONSTITUTION.md\n2. If task would violate ANY rule, emit SYSTEM_BLOCKED\n3. Explain which rule prevents the work\n
            ","path":["Operations","Day-to-Day","Autonomous Loops"],"tags":[]},{"location":"operations/autonomous-loop/#further-reading","level":2,"title":"Further Reading","text":"
            • Building ctx Using ctx: The dogfooding story: how autonomous loops built the tool that powers them
            ","path":["Operations","Day-to-Day","Autonomous Loops"],"tags":[]},{"location":"operations/autonomous-loop/#resources","level":2,"title":"Resources","text":"
            • Geoffrey Huntley's Ralph Wiggum Technique: The original inspiration
            • Context CLI: Command reference
            • Integrations: Tool-specific setup
            ","path":["Operations","Day-to-Day","Autonomous Loops"],"tags":[]},{"location":"operations/hub-failure-modes/","level":1,"title":"Hub Failure Modes","text":"","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub-failure-modes/#ctx-hub-failure-modes","level":1,"title":"ctx Hub: Failure Modes","text":"

            What can go wrong, what the system does about it, and what you should do. Complementary to ctx Hub Operations.

            Design Posture

            The hub is best-effort knowledge sharing, not a durable ledger. Local .context/ files are the source of truth for each project; the hub is a fan-out channel. This framing informs every failure-mode decision below.

            ","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub-failure-modes/#network","level":2,"title":"Network","text":"","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub-failure-modes/#client-loses-connection-mid-stream","level":3,"title":"Client Loses Connection Mid-Stream","text":"

            What happens: ctx connection listen detects the EOF, waits with exponential backoff, and reconnects. On reconnect it passes its last-seen sequence; the hub replays everything newer.

            What you should do: nothing. If reconnects are looping, check firewall state on the hub and ctx hub status output.

            ","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub-failure-modes/#partition-majority-side-reachable","level":3,"title":"Partition: Majority Side Reachable","text":"

            What happens: clients routed to the majority side continue to publish and listen. The minority nodes step down to followers that cannot accept writes (Raft quorum lost).

            What you should do: let it heal. When the partition closes, followers catch up via sequence-based sync automatically.

            ","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub-failure-modes/#partition-split-brain-no-quorum","level":3,"title":"Partition: Split Brain (No Quorum)","text":"

            What happens: no node holds a majority, so no leader is elected. All nodes become read-only. ctx connection publish and ctx add --share fail with a \"no leader\" error; local writes still succeed.

            What you should do: fix the network. If the partition is permanent (e.g., a data center is gone), bootstrap a new cluster from the survivors with ctx hub peer remove for the dead nodes.

            ","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub-failure-modes/#hub-unreachable-during-ctx-add-share","level":3,"title":"Hub Unreachable during ctx add --share","text":"

            What happens: the local write succeeds; the share step prints a warning and exits non-zero on the share leg only. --share is best-effort; it never blocks local context updates.

            What you should do: run ctx connection publish later to backfill, or rely on another --share for the same entry ID. The hub deduplicates by entry ID.

            ","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub-failure-modes/#storage","level":2,"title":"Storage","text":"","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub-failure-modes/#disk-full-on-the-leader","level":3,"title":"Disk Full on the Leader","text":"

            What happens: entries.jsonl append fails. The hub rejects writes with an error and stays up for read traffic. Clients retry; followers keep their in-sync status using whatever the leader already wrote.

            What you should do: free disk or grow the volume, then nothing else; the hub resumes accepting writes on the next append attempt.

            ","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub-failure-modes/#corrupt-entriesjsonl","level":3,"title":"Corrupt entries.jsonl","text":"

            What happens: if the last line is a partial JSON write from a crash, the hub truncates it on startup and logs a warning. If any earlier line is malformed, the hub refuses to start.

            What you should do: inspect with jq -c . <data-dir>/entries.jsonl > /dev/null to find the bad line. Move the bad region to a .quarantine file, then start. Nothing is ever silently dropped.

            ","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub-failure-modes/#metajson-entriesjsonl-sequence-mismatch","level":3,"title":"meta.json / entries.jsonl Sequence Mismatch","text":"

            What happens: the hub refuses to start. This usually means someone copied one file without the other.

            What you should do: restore both files from the same backup, or accept the higher sequence by regenerating meta.json from entries.jsonl (manual for now; file a bug).

            ","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub-failure-modes/#cluster","level":2,"title":"Cluster","text":"","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub-failure-modes/#leader-crash-clean-shutdown","level":3,"title":"Leader Crash, Clean Shutdown","text":"

            What happens: ctx hub stop triggers stepdown first, so a new leader is elected before the old one exits. In-flight writes drain. Clients reconnect to the new leader transparently.

            ","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub-failure-modes/#leader-crash-hard-fail-kill-9-power-loss","level":3,"title":"Leader Crash, Hard Fail (Kill -9, Power Loss)","text":"

            What happens: Raft detects the missing heartbeat and elects a new leader within a few seconds. Writes the old leader accepted but had not yet replicated can be lost. See the Raft-lite warning in the cluster recipe.

            What you should do: if you need stronger durability, run ctx connection listen on a dedicated \"collector\" project that persists entries locally as a write-ahead backup.

            ","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub-failure-modes/#split-brain-after-rejoin","level":3,"title":"Split-Brain After Rejoin","text":"

            What happens: Raft reconciles: the minority side's uncommitted writes are discarded, and the majority's log is authoritative.

            What you should do: nothing automatic. If you know the minority had important writes, grep for them in <data-dir>/entries.jsonl.rejected (written by the reconciliation pass) and replay them with ctx connection publish.

            ","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub-failure-modes/#auth-and-tokens","level":2,"title":"Auth and Tokens","text":"","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub-failure-modes/#lost-admin-token","level":3,"title":"Lost Admin Token","text":"

            What happens: you cannot register new projects.

            What you should do: retrieve it from <data-dir>/admin.token. If that file is also gone, stop the hub and regenerate. Note that all existing client tokens keep working; only new registrations need the admin token.

            ","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub-failure-modes/#compromised-admin-token","level":3,"title":"Compromised Admin Token","text":"

            What happens: anyone with the token can register new projects and publish. They cannot read existing entries without a client token for a project that subscribes.

            What you should do: rotate the admin token (regenerate <data-dir>/admin.token and restart), revoke suspicious client registrations via clients.json, and audit entries.jsonl for unexpected origins.

            ","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub-failure-modes/#compromised-client-token","level":3,"title":"Compromised Client Token","text":"

            What happens: the attacker can publish as that project and read anything that project is subscribed to. Because Origin is self-asserted on publish, the attacker can also publish entries tagged with any other project's name, so attribution in entries.jsonl cannot be trusted after a token compromise.

            What you should do: remove the client's entry from clients.json, restart the hub, and re-register the legitimate project with a fresh token. Audit entries.jsonl for entries published after the compromise timestamp and quarantine any that look suspicious; remember that Origin on those entries proves nothing.

            ","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub-failure-modes/#compromised-hub-host","level":3,"title":"Compromised Hub Host","text":"

            What happens: <data-dir>/clients.json stores client tokens verbatim (not hashed). Anyone with read access to that file has every client token in hand and can impersonate any registered project until each one is rotated.

            What you should do: treat it as a total hub compromise. Stop the hub, wipe <data-dir> (keep a forensic copy first), regenerate the admin token, and have every client re-register. See Security model for the mitigations that reduce the blast radius while the hashing follow-up is pending.

            ","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub-failure-modes/#clock-skew","level":2,"title":"Clock Skew","text":"

            Hub entries carry a timestamp assigned by the publishing client. The hub does not rewrite timestamps. Clients with significant clock skew will publish entries that look out of order in the shared feed.

            What you should do: run NTP on all client machines. If you see entries dated in the future or far past, the publisher's clock is the culprit.

            ","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub-failure-modes/#the-short-list","level":2,"title":"The Short List","text":"Symptom First thing to check Client can't reach hub Firewall, then ctx hub status \"No leader\" errors Cluster quorum; run ctx hub status on each peer Hub won't start after crash Last line of entries.jsonl Entries missing after restore Check meta.json sequence vs local .sync-state.json Duplicate entries in shared feed Client replayed after restore, safe (dedup by ID) Followers lagging Disk or network on the follower, not the leader","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub-failure-modes/#see-also","level":2,"title":"See Also","text":"
            • ctx Hub Operations
            • ctx Hub security model
            • HA cluster recipe
            ","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub/","level":1,"title":"Hub Operations","text":"","path":["Operations","Hub","Hub Operations"],"tags":[]},{"location":"operations/hub/#ctx-hub-operations","level":1,"title":"ctx Hub: Operations","text":"

            Running the ctx Hub in production. This page is for operators: people running a hub for themselves or a team, not people writing to a hub someone else is running.

            If you have not read it yet, start with the ctx Hub overview. It explains what the hub is, the two user stories it supports (personal cross-project brain vs small trusted team), and what it does not do. A client-side tour is in Getting Started.

            Operator Cheat Sheet

            • The hub fans out four entry types only: decision, learning, convention, task. Journals, scratchpad, and other local state are out of scope.
            • Identity is per-project, not per-user. Attribution is limited to Origin, which is self-asserted by the publishing client.
            • The data model is an append-only JSONL log plus two small JSON sidecar files. Nothing is rewritten in place.
            ","path":["Operations","Hub","Hub Operations"],"tags":[]},{"location":"operations/hub/#data-directory-layout","level":2,"title":"Data Directory Layout","text":"

            The hub stores everything under a single data directory (default ~/.ctx/hub-data/, override with --data-dir).

            <data-dir>/\n  admin.token        # Initial admin token (chmod 600)\n  clients.json       # Registered client tokens and project names\n  meta.json          # Sequence counter, version, cluster metadata\n  entries.jsonl      # Append-only log (single source of truth)\n  hub.pid            # Daemon PID file (daemon mode only)\n  raft/              # Raft state (cluster mode only)\n    log.db\n    stable.db\n    snapshots/\n

            Invariants:

            • entries.jsonl is append-only. Every line is a valid JSON object. Corrupt lines are fatal at startup: fix or truncate before restart.
            • meta.json is authoritative for the next sequence number. On restart, the hub reads the last valid line of entries.jsonl and refuses to start if the sequences disagree.
            • clients.json holds client tokens (stored verbatim for now; hashing is a pending follow-up, see the security model); losing it invalidates all client registrations.
            ","path":["Operations","Hub","Hub Operations"],"tags":[]},{"location":"operations/hub/#starting-and-stopping","level":2,"title":"Starting and Stopping","text":"ForegroundDaemon
            ctx hub start                    # Ctrl-C to stop\nctx hub start --port 8080        # Custom port\nctx hub start --data-dir /srv/ctx-hub\n
            ctx hub start --daemon           # Fork to background\nctx hub stop                      # Graceful shutdown\n

            ctx hub stop sends SIGTERM to the PID in hub.pid, waits for in-flight RPCs to drain, then exits. If the daemon is wedged, remove hub.pid and send SIGKILL manually. entries.jsonl is crash-safe, so you will not lose accepted writes.

            ","path":["Operations","Hub","Hub Operations"],"tags":[]},{"location":"operations/hub/#systemd-unit","level":2,"title":"Systemd Unit","text":"

            For production single-node deployments, run the hub as a systemd service instead of --daemon:

            # /etc/systemd/system/ctx-hub.service\n[Unit]\nDescription=ctx Hub\nAfter=network-online.target\nWants=network-online.target\n\n[Service]\nType=simple\nUser=ctx\nGroup=ctx\nExecStart=/usr/local/bin/ctx hub start --port 9900 \\\n    --data-dir /var/lib/ctx-hub\nRestart=on-failure\nRestartSec=5\nNoNewPrivileges=true\nProtectSystem=strict\nProtectHome=true\nReadWritePaths=/var/lib/ctx-hub\nPrivateTmp=true\n\n[Install]\nWantedBy=multi-user.target\n
            sudo systemctl enable --now ctx-hub\nsudo journalctl -u ctx-hub -f\n
            ","path":["Operations","Hub","Hub Operations"],"tags":[]},{"location":"operations/hub/#backup-and-restore","level":2,"title":"Backup and Restore","text":"

            Because entries.jsonl is append-only, backups are trivial:

            # Hot backup, safe while the hub is running.\ncp <data-dir>/entries.jsonl backups/entries-$(date +%F).jsonl\ncp <data-dir>/meta.json      backups/meta-$(date +%F).json\ncp <data-dir>/clients.json   backups/clients-$(date +%F).json\n

            For a consistent snapshot across all three files, stop the hub, copy, then start again, or use a filesystem-level snapshot (LVM, ZFS, Btrfs).

            Restore:

            ctx hub stop                           # Stop the hub\ncp backups/entries-2026-04-10.jsonl <data-dir>/entries.jsonl\ncp backups/meta-2026-04-10.json      <data-dir>/meta.json\ncp backups/clients-2026-04-10.json   <data-dir>/clients.json\nctx hub start --daemon\n

            Clients that pushed sequences above the restored watermark will re-publish on the next listen reconnect, because the hub now reports a lower sequence than what clients have on disk. This is safe; the store deduplicates by entry ID.

            ","path":["Operations","Hub","Hub Operations"],"tags":[]},{"location":"operations/hub/#log-rotation","level":2,"title":"Log Rotation","text":"

            entries.jsonl grows unbounded. For long-lived hubs, rotate it offline:

            ctx hub stop\nmv <data-dir>/entries.jsonl <data-dir>/entries-$(date +%F).jsonl.old\n# Replay the last N days into a fresh entries.jsonl if you want a\n# trimmed active log, or leave the old file in place as history.\nctx hub start --daemon\n

            Do not truncate entries.jsonl while the hub is running. The hub holds an open file handle; an in-place truncation confuses the sequence counter and loses writes.

            ","path":["Operations","Hub","Hub Operations"],"tags":[]},{"location":"operations/hub/#monitoring","level":2,"title":"Monitoring","text":"

            Liveness probe:

            ctx hub status --exit-code\n

            Exit code 0 means the node is healthy (leader or in-sync follower); non-zero means degraded. Wire this into your monitoring of choice.

            For cluster deployments, watch for:

            • Role flaps: the leader changing more than once per hour suggests network instability or disk contention.
            • Replication lag: ctx hub status shows per-peer sequence offsets. Sustained lag > 100 sequences on a follower is worth investigating.
            • entries.jsonl growth rate: sudden spikes often indicate a misbehaving ctx connection listen reconnect loop.
            ","path":["Operations","Hub","Hub Operations"],"tags":[]},{"location":"operations/hub/#upgrading","level":2,"title":"Upgrading","text":"

            The JSONL format is versioned in meta.json. ctx refuses to start against a newer store version than it understands; older store versions are upgraded in place at first start after an upgrade.

            Always back up <data-dir>/ before upgrading.

            ","path":["Operations","Hub","Hub Operations"],"tags":[]},{"location":"operations/hub/#see-also","level":2,"title":"See Also","text":"
            • ctx Hub failure modes
            • ctx Hub security model
            • ctx serve reference
            • ctx hub reference
            ","path":["Operations","Hub","Hub Operations"],"tags":[]},{"location":"operations/integrations/","level":1,"title":"AI Tools","text":"","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#ai-tools","level":2,"title":"AI Tools","text":"

            Context works with any AI tool that can read files. This guide covers setup for popular AI coding assistants.

            ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#claude-code-full-integration","level":2,"title":"Claude Code (Full Integration)","text":"

            Claude Code has the deepest integration via the ctx plugin.

            ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#setup","level":3,"title":"Setup","text":"

            First, install ctx and initialize your project:

            ctx init\n

            Then, install the ctx plugin in Claude Code:

            # From the ctx repository\nclaude /plugin install ./internal/assets/claude\n\n# Or from the marketplace\nclaude /plugin marketplace add ActiveMemory/ctx\nclaude /plugin install ctx@activememory-ctx\n

            Ensure the Plugin Is Enabled

            Installing a plugin registers it, but local installs may not auto-enable it globally. Verify ~/.claude/settings.json contains:

            { \"enabledPlugins\": { \"ctx@activememory-ctx\": true } }\n

            Without this, the plugin's hooks and skills won't appear in other projects. Running ctx init auto-enables the plugin; use --no-plugin-enable to skip this step.

            This gives you:

            Component Purpose .context/ All context files CLAUDE.md Bootstrap instructions Plugin hooks Lifecycle automation Plugin skills Agent Skills","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#how-it-works","level":3,"title":"How It Works","text":"
            graph TD\n    A[Session Start] --> B[Claude reads CLAUDE.md]\n    B --> C[PreToolUse hook runs]\n    C --> D[ctx agent loads context]\n    D --> E[Work happens]\n    E --> F[Session End]
            1. Session start: Claude reads CLAUDE.md, which tells it to check .context/
            2. First tool use: PreToolUse hook runs ctx agent and emits the context packet (subsequent invocations within the cooldown window are silent)
            3. Next session: Claude reads context files and continues with context
            ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#plugin-hooks","level":3,"title":"Plugin Hooks","text":"

            The ctx plugin provides lifecycle hooks implemented as Go subcommands (ctx system *):

            Hook Event Purpose ctx system context-load-gate PreToolUse (.*) Auto-inject context on first tool use ctx system block-non-path-ctx PreToolUse (Bash) Block ./ctx or go run: force $PATH install ctx system qa-reminder PreToolUse (Bash) Remind agent to lint/test before committing ctx system specs-nudge PreToolUse (EnterPlanMode) Nudge agent to use project specs when planning ctx system check-context-size UserPromptSubmit Nudge context assessment as sessions grow ctx system check-ceremonies UserPromptSubmit Nudge /ctx-remember and /ctx-wrap-up adoption ctx system check-persistence UserPromptSubmit Remind to persist learnings/decisions ctx system check-journal UserPromptSubmit Remind to export/enrich journal entries ctx system check-reminders UserPromptSubmit Relay pending reminders at session start ctx system check-version UserPromptSubmit Warn when binary/plugin versions diverge ctx system check-resources UserPromptSubmit Warn when memory/swap/disk/load hit DANGER level ctx system check-knowledge UserPromptSubmit Nudge when knowledge files grow large ctx system check-map-staleness UserPromptSubmit Nudge when ARCHITECTURE.md is stale ctx system heartbeat UserPromptSubmit Session-alive signal with prompt count metadata ctx system post-commit PostToolUse (Bash) Nudge context capture and QA after git commits

            A catch-all PreToolUse hook also runs ctx agent on every tool use (with cooldown) to autoload context.

            ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#hook-configuration","level":3,"title":"Hook Configuration","text":"

            The plugin's hooks.json wires everything automatically: no manual configuration in settings.local.json needed:

            {\n  \"hooks\": {\n    \"PreToolUse\": [\n      {\n        \"matcher\": \".*\",\n        \"hooks\": [\n          { \"type\": \"command\", \"command\": \"ctx system context-load-gate\" }\n        ]\n      },\n      {\n        \"matcher\": \"Bash\",\n        \"hooks\": [\n          { \"type\": \"command\", \"command\": \"ctx system block-non-path-ctx\" }\n        ]\n      },\n      {\n        \"matcher\": \"Bash\",\n        \"hooks\": [\n          { \"type\": \"command\", \"command\": \"ctx system qa-reminder\" }\n        ]\n      },\n      {\n        \"matcher\": \"EnterPlanMode\",\n        \"hooks\": [\n          { \"type\": \"command\", \"command\": \"ctx system specs-nudge\" }\n        ]\n      },\n      {\n        \"matcher\": \".*\",\n        \"hooks\": [\n          { \"type\": \"command\", \"command\": \"ctx agent --budget 4000 2>/dev/null || true\" }\n        ]\n      }\n    ],\n    \"PostToolUse\": [\n      {\n        \"matcher\": \"Bash\",\n        \"hooks\": [\n          { \"type\": \"command\", \"command\": \"ctx system post-commit\" }\n        ]\n      }\n    ],\n    \"UserPromptSubmit\": [\n      {\n        \"hooks\": [\n          { \"type\": \"command\", \"command\": \"ctx system check-context-size\" },\n          { \"type\": \"command\", \"command\": \"ctx system check-ceremonies\" },\n          { \"type\": \"command\", \"command\": \"ctx system check-persistence\" },\n          { \"type\": \"command\", \"command\": \"ctx system check-journal\" },\n          { \"type\": \"command\", \"command\": \"ctx system check-reminders\" },\n          { \"type\": \"command\", \"command\": \"ctx system check-version\" },\n          { \"type\": \"command\", \"command\": \"ctx system check-resources\" },\n          { \"type\": \"command\", \"command\": \"ctx system check-knowledge\" },\n          { \"type\": \"command\", \"command\": \"ctx system check-map-staleness\" },\n          { \"type\": \"command\", \"command\": \"ctx system heartbeat\" }\n        
]\n      }\n    ]\n  }\n}\n
            ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#customizing-token-budget-and-cooldown","level":3,"title":"Customizing Token Budget and Cooldown","text":"

            Edit the PreToolUse command to change the token budget or cooldown:

            \"command\": \"ctx agent --budget 8000 --session $PPID >/dev/null || true\"\n\"command\": \"ctx agent --budget 4000 --cooldown 5m --session $PPID >/dev/null || true\"\n

            The --session $PPID flag isolates the cooldown per session: $PPID resolves to the Claude Code process PID, so concurrent sessions don't interfere. The default cooldown is 10 minutes; use --cooldown 0 to disable it.

            ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#verifying-setup","level":3,"title":"Verifying Setup","text":"
            1. Start a new Claude Code session;
            2. Ask: \"Do you remember?\"
            3. Claude should cite specific context:
              • Current tasks from .context/TASKS.md;
              • Recent decisions or learnings;
              • Recent session history from ctx journal.
            ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#local-plugin-development","level":3,"title":"Local Plugin Development","text":"

            When developing ctx locally (adding skills, hooks, or changing plugin behavior), Claude Code caches the plugin by version. You must bump the version in both files and update the marketplace for changes to take effect:

            1. Bump version in both:
            2. internal/assets/claude/.claude-plugin/plugin.json (plugin manifest), .claude-plugin/marketplace.json (marketplace listing);

            3. Update the marketplace in Claude Code:

            4. Open the Plugins UI (/plugins or Esc menu),
            5. Go to Marketplaces tab,
            6. Select the activememory-ctx Marketplace,
            7. Choose Update marketplace;

            8. Start a new Claude Code session: skill changes aren't reflected in existing sessions.

            Both Version Files Must Match

            If you only bump plugin.json but not marketplace.json (or vice versa), Claude Code may not detect the update. Always bump both together.

            ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#troubleshooting","level":3,"title":"Troubleshooting","text":"Issue Solution Context not loading Check ctx is in PATH: which ctx Hook errors Verify plugin is installed: claude /plugin list New skill not visible Bump version in both plugin.json files, update marketplace","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#manual-context-load","level":3,"title":"Manual Context Load","text":"

            If hooks aren't working, manually load context:

            # Get context packet\nctx agent --budget 4000\n\n# Or paste into conversation\ncat .context/TASKS.md\n
            ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#agent-skills","level":3,"title":"Agent Skills","text":"

            The ctx plugin ships Agent Skills following the agentskills.io specification.

            These are invoked in Claude Code with /skill-name.

            ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#session-lifecycle-skills","level":4,"title":"Session Lifecycle Skills","text":"Skill Description /ctx-remember Recall project context at session start (ceremony) /ctx-wrap-up End-of-session context persistence (ceremony) /ctx-status Show context summary (tasks, decisions, learnings) /ctx-agent Get AI-optimized context packet /ctx-next Suggest 1-3 concrete next actions from context /ctx-commit Commit with integrated context capture /ctx-reflect Review session and suggest what to persist /ctx-remind Manage session-scoped reminders /ctx-pause Pause context hooks for this session /ctx-resume Resume context hooks after a pause","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#context-persistence-skills","level":4,"title":"Context Persistence Skills","text":"Skill Description /ctx-task-add Add a task to TASKS.md /ctx-learning-add Add a learning to LEARNINGS.md /ctx-decision-add Add a decision with context/rationale/consequence /ctx-convention-add Add a coding convention to CONVENTIONS.md /ctx-archive Archive completed tasks","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#scratchpad-skills","level":4,"title":"Scratchpad Skills","text":"Skill Description /ctx-pad Manage encrypted scratchpad entries","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#session-history-skills","level":4,"title":"Session History Skills","text":"Skill Description /ctx-history Browse AI session history /ctx-journal-enrich Enrich a journal entry with frontmatter/tags /ctx-journal-enrich-all Full journal pipeline: export if needed, then batch-enrich","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#blogging-skills","level":4,"title":"Blogging Skills","text":"

            Blogging Is a Better Way of Creating Release Notes

            The blogging workflow can also double as generating release notes:

            AI reads your git commit history and creates a \"narrative\", which is essentially what a release note is for.

            Skill Description /ctx-blog Generate blog post from recent activity /ctx-blog-changelog Generate blog post from commit range with theme","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#auditing-health-skills","level":4,"title":"Auditing & Health Skills","text":"Skill Description /ctx-doctor Troubleshoot ctx behavior with structural health checks /ctx-drift Detect and fix context drift (structural + semantic) /ctx-consolidate Merge redundant learnings or decisions into denser entries /ctx-alignment-audit Audit doc claims against playbook instructions /ctx-prompt-audit Analyze session logs for vague prompts /check-links Audit docs for dead internal and external links","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#planning-execution-skills","level":4,"title":"Planning & Execution Skills","text":"Skill Description /ctx-loop Generate a Ralph Loop iteration script /ctx-implement Execute a plan step-by-step with checks /ctx-plan-import Import Claude Code plan files into project specs /ctx-worktree Manage git worktrees for parallel agents /ctx-architecture Build and maintain architecture maps","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#usage-examples","level":4,"title":"Usage Examples","text":"
            /ctx-status\n/ctx-learning-add \"Token refresh requires explicit cache invalidation\"\n/ctx-journal-enrich twinkly-stirring-kettle\n

            Skills support partial matching where applicable (e.g., session slugs).

            ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#cursor-ide","level":2,"title":"Cursor IDE","text":"

            Cursor can use context files through its system prompt or by reading files directly.

            ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#setup_1","level":3,"title":"Setup","text":"
            # Generate Cursor configuration\nctx setup cursor\n\n# Initialize context\nctx init --minimal\n
            ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#configuration","level":3,"title":"Configuration","text":"

            Add to Cursor settings (.cursor/settings.json):

            // split to multiple lines for readability\n{\n  \"ai.systemPrompt\": \"Read .context/TASKS.md and \n  .context/CONVENTIONS.md before responding. \n  Follow rules in .context/CONSTITUTION.md.\"\n}\n
            ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#usage","level":3,"title":"Usage","text":"
            1. Open your project in Cursor
            2. Context files are available in the file tree
            3. Reference them in prompts: \"Check .context/DECISIONS.md for our approach to...\"
            ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#manual-context-injection","level":3,"title":"Manual Context Injection","text":"

            For more control, paste context directly:

            # Get AI-ready packet\nctx agent --budget 4000 | pbcopy  # macOS\nctx agent --budget 4000 | xclip  # Linux\n

            Paste into Cursor's chat.

            ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#aider","level":2,"title":"Aider","text":"

            Aider works well with context files through its --read flag.

            ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#setup_2","level":3,"title":"Setup","text":"
            # Generate Aider configuration\nctx setup aider\n\n# Initialize context\nctx init\n
            ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#configuration_1","level":3,"title":"Configuration","text":"

            Create .aider.conf.yml:

            read:\n  - .context/CONSTITUTION.md\n  - .context/TASKS.md\n  - .context/CONVENTIONS.md\n  - .context/DECISIONS.md\n
            ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#usage_1","level":3,"title":"Usage","text":"
            # Start Aider (reads context files automatically)\naider\n\n# Or specify files explicitly\naider --read .context/TASKS.md --read .context/CONVENTIONS.md\n
            ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#with-watch-mode","level":3,"title":"With Watch Mode","text":"

            Run ctx watch alongside Aider to capture context updates:

            # Terminal 1: Run Aider\naider 2>&1 | tee /tmp/aider.log\n\n# Terminal 2: Watch for context updates\nctx watch --log /tmp/aider.log\n
            ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#github-copilot","level":2,"title":"GitHub Copilot","text":"

            GitHub Copilot integrates with ctx at three levels: an automated instructions file, a VS Code Chat extension, and manual patterns.

            ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#setup_3","level":3,"title":"Setup","text":"
            # Initialize context\nctx init\n\n# Generate .github/copilot-instructions.md\nctx setup copilot --write\n

            The --write flag creates .github/copilot-instructions.md, which Copilot reads automatically at the start of every session. This file contains your project's constitution rules, current tasks, conventions, and architecture: giving Copilot persistent context without manual copy-paste.

            Re-run ctx setup copilot --write after updating your .context/ files to regenerate the instructions.

            ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#vs-code-chat-extension-ctx","level":3,"title":"VS Code Chat Extension (@ctx)","text":"

            The ctx VS Code extension adds a @ctx chat participant to GitHub Copilot Chat, giving you direct access to all context commands from within the editor.

            ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#installation","level":4,"title":"Installation","text":"
            1. Build from source (requires Node.js 18+):
            cd editors/vscode\nnpm install\nnpm run build\nnpx @vscode/vsce package\n
            1. Install the .vsix file:
            code --install-extension ctx-context-0.8.1.vsix\n
            1. Reload VS Code. Type @ctx in Copilot Chat to verify.
            ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#slash-commands","level":4,"title":"Slash Commands","text":"Command Description @ctx /init Initialize .context/ directory with template files @ctx /status Show context summary with token estimate @ctx /agent Print AI-ready context packet @ctx /drift Detect stale or invalid context @ctx /journal Browse and search AI session history @ctx /hook Generate AI tool integration configs @ctx /add Add a task, decision, or learning @ctx /load Output assembled context Markdown @ctx /compact Archive completed tasks and clean up @ctx /sync Reconcile context with codebase","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#usage-examples_1","level":4,"title":"Usage Examples","text":"
            @ctx /init\n@ctx /status\n@ctx /add task Implement user authentication\n@ctx /drift\n@ctx /hook copilot\n@ctx /journal\n

            Typing @ctx without a command shows help with all available commands. The extension also supports natural language: asking @ctx about \"status\" or \"drift\" routes to the correct command automatically.

            ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#configuration_2","level":4,"title":"Configuration","text":"Setting Default Description ctx.executablePath ctx Path to the ctx binary. Set this if ctx is not in your PATH.","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#follow-up-suggestions","level":4,"title":"Follow-Up Suggestions","text":"

            After each command, the extension suggests relevant next steps. For example, after /init it suggests /status and /hook; after /drift it suggests /sync.

            ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#session-persistence","level":3,"title":"Session Persistence","text":"

            ctx init creates a .context/sessions/ directory for storing session data from non-Claude tools. The Markdown session parser scans this directory during ctx journal, enabling session history for Copilot and other tools.

            ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#manual-patterns","level":3,"title":"Manual Patterns","text":"

            These patterns work without the extension, using Copilot's built-in file awareness:

            Pattern 1: Keep context files open

            Open .context/CONVENTIONS.md in a split pane. Copilot will reference it.

            Pattern 2: Reference in comments

            // See .context/CONVENTIONS.md for naming patterns\n// Following decision in .context/DECISIONS.md: Use PostgreSQL\n\nfunction getUserById(id: string) {\n  // Copilot now has context\n}\n

            Pattern 3: Paste context into Copilot Chat

            ctx agent --budget 2000\n

            Paste output into Copilot Chat for context-aware responses.

            ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#windsurf-ide","level":2,"title":"Windsurf IDE","text":"

            Windsurf supports custom instructions and file-based context.

            ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#setup_4","level":3,"title":"Setup","text":"
            # Generate Windsurf configuration\nctx setup windsurf\n\n# Initialize context\nctx init\n
            ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#configuration_3","level":3,"title":"Configuration","text":"

            Add to Windsurf settings:

            // Split to multiple lines for readability\n{\n  \"ai.customInstructions\": \"Always read .context/CONSTITUTION.md first. \n  Check .context/TASKS.md for current work. \n  Follow patterns in .context/CONVENTIONS.md.\"\n}\n
            ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#usage_2","level":3,"title":"Usage","text":"

            Context files appear in the file tree. Reference them when chatting:

            • \"What's in our task list?\" → AI reads .context/TASKS.md
            • \"What convention do we use for naming?\" → AI reads .context/CONVENTIONS.md
            ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#generic-integration","level":2,"title":"Generic Integration","text":"

            For any AI tool that can read files, use these patterns:

            ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#manual-context-loading","level":3,"title":"Manual Context Loading","text":"
            # Get full context\nctx load\n\n# Get AI-optimized packet\nctx agent --budget 8000\n\n# Get specific file\ncat .context/TASKS.md\n
            ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#system-prompt-template","level":3,"title":"System Prompt Template","text":"
            You are working on a project with persistent context in .context/\n\nBefore responding:\n1. Read .context/CONSTITUTION.md - NEVER violate these rules\n2. Check .context/TASKS.md for current work\n3. Follow .context/CONVENTIONS.md patterns\n4. Reference .context/DECISIONS.md for architectural choices\n\nWhen you learn something new, note it for .context/LEARNINGS.md\nWhen you make a decision, document it for .context/DECISIONS.md\n
            ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#automated-updates","level":3,"title":"Automated Updates","text":"

            If your AI tool outputs to a log, use ctx watch:

            # Watch log file for context-update commands\nyour-ai-tool 2>&1 | tee /tmp/ai.log &\nctx watch --log /tmp/ai.log\n

            The AI can emit updates like:

            <context-update type=\"complete\">implement caching</context-update>\n<context-update type=\"learning\"\n  context=\"Implementing caching layer\"\n  lesson=\"Important thing learned today\"\n  application=\"Apply this insight going forward\"\n>Caching Insight</context-update>\n
            ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#context-update-commands","level":2,"title":"Context Update Commands","text":"

            The ctx watch command parses update commands from AI output. Use this format:

            <context-update type=\"TYPE\" [attributes]>Content</context-update>\n
            ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#supported-types","level":3,"title":"Supported Types","text":"Type Target File Required Attributes task TASKS.md None decision DECISIONS.md context, rationale, consequence learning LEARNINGS.md context, lesson, application convention CONVENTIONS.md None complete TASKS.md None","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#simple-format-tasks-conventions-complete","level":3,"title":"Simple Format (Tasks, Conventions, Complete)","text":"
            <context-update type=\"task\">Implement rate limiting</context-update>\n<context-update type=\"convention\">Use kebab-case for files</context-update>\n<context-update type=\"complete\">rate limiting</context-update>\n
            ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#structured-format-learnings-decisions","level":3,"title":"Structured Format (Learnings, Decisions)","text":"

            Learnings and decisions support structured attributes for better documentation:

            Learning with full structure:

            <context-update type=\"learning\"\n  context=\"Debugging Claude Code hooks\"\n  lesson=\"Hooks receive JSON via stdin, not environment variables\"\n  application=\"Parse JSON stdin with the host language (Go, Python, etc.): no jq needed\"\n>Hook Input Format</context-update>\n

            Decision with full structure:

            <context-update type=\"decision\"\n  context=\"Need a caching layer for API responses\"\n  rationale=\"Redis is fast, well-supported, and team has experience\"\n  consequence=\"Must provision Redis infrastructure; team training on Redis patterns\"\n>Use Redis for caching</context-update>\n

            Learnings require: context, lesson, application attributes. Decisions require: context, rationale, consequence attributes. Updates missing required attributes are rejected with an error.

            ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#further-reading","level":2,"title":"Further Reading","text":"
            • Skills That Fight the Platform: Common pitfalls in skill design that work against the host tool
            • The Anatomy of a Skill That Works: What makes a skill reliable: the E/A/R framework and quality gates
            ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/migration/","level":1,"title":"Integration","text":"","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/migration/#adopting-ctx-in-existing-projects","level":2,"title":"Adopting ctx in Existing Projects","text":"

            Claude Code User?

            You probably want the plugin instead of this page.

            Install ctx from the marketplace: (/plugin → search \"ctx\" → Install) and you're done: hooks, skills, and updates are handled for you.

            See Getting Started for the full walkthrough.

            This guide covers adopting ctx in existing projects regardless of which tools your team uses.

            ","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/migration/#quick-paths","level":2,"title":"Quick Paths","text":"You have... Command What happens Nothing (greenfield) ctx init Creates .context/, CLAUDE.md, permissions Existing CLAUDE.md ctx init --merge Backs up your file, inserts ctx block after the H1 Existing CLAUDE.md + ctx markers ctx init --force Replaces the ctx block, leaves your content intact .cursorrules / .aider.conf.yml ctx init ctx ignores those files: they coexist cleanly Team repo, first adopter ctx init --merge && git add .context/ CLAUDE.md Initialize and commit for the team","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/migration/#existing-claudemd","level":2,"title":"Existing CLAUDE.md","text":"

            This is the most common scenario:

            You have a CLAUDE.md with project-specific instructions and don't want to lose them.

            You Own CLAUDE.md

            After initialization, CLAUDE.md is yours: edit it freely.

            Add project instructions, remove sections you don't need, reorganize as you see fit.

            The only part ctx manages is the block between the <!-- ctx:context --> and <!-- ctx:end --> markers; everything outside those markers is yours to change at any time.

            If you remove the markers, nothing breaks: ctx simply treats the file as having no ctx content and will offer to merge again on the next ctx init.

            ","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/migration/#what-ctx-init-does","level":3,"title":"What ctx init Does","text":"

            When ctx init detects an existing CLAUDE.md, it checks for ctx markers (<!-- ctx:context --> ... <!-- ctx:end -->):

            State Default behavior With --merge With --force No CLAUDE.md Creates from template Creates from template Creates from template Exists, no ctx markers Prompts to merge Auto-merges (no prompt) Auto-merges (no prompt) Exists, has ctx markers Skips (already set up) Skips Replaces the ctx block only","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/migration/#the-merge-flag","level":3,"title":"The --merge Flag","text":"

            --merge auto-merges without prompting. The merge process:

            1. Backs up your existing CLAUDE.md to CLAUDE.md.<timestamp>.bak;
            2. Finds the H1 heading (e.g., # My Project) in your file;
            3. Inserts the ctx block immediately after it;
            4. Preserves everything else untouched.

            Your content before and after the ctx block remains exactly as it was.

            ","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/migration/#before-after-example","level":3,"title":"Before / After Example","text":"

            Before: your existing CLAUDE.md:

            # My Project\n\n## Build Commands\n\n- `npm run build`: production build\n- `npm test`: run tests\n\n## Code Style\n\n- Use TypeScript strict mode\n- Prefer named exports\n

            After ctx init --merge:

            # My Project\n\n<!-- ctx:context -->\n<!-- DO NOT REMOVE: This marker indicates ctx-managed content -->\n\n## IMPORTANT: You Have Persistent Memory\n\nThis project uses Context (`ctx`) for context persistence across sessions.\n...\n\n<!-- ctx:end -->\n\n## Build Commands\n\n- `npm run build`: production build\n- `npm test`: run tests\n\n## Code Style\n\n- Use TypeScript strict mode\n- Prefer named exports\n

            Your build commands and code style sections are untouched. The ctx block sits between markers and can be updated independently.

            ","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/migration/#the-force-flag","level":3,"title":"The --force Flag","text":"

            If your CLAUDE.md already has ctx markers (from a previous ctx init), the default behavior is to skip it. Use --force to replace the ctx block with the latest template. This is useful after upgrading ctx:

            ctx init --force\n

            This only replaces content between <!-- ctx:context --> and <!-- ctx:end -->. Your own content outside the markers is preserved. A timestamped backup is created before any changes.

            ","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/migration/#undoing-a-merge","level":3,"title":"Undoing a Merge","text":"

            Every merge creates a backup:

            $ ls CLAUDE.md*.bak\nCLAUDE.md.1738000000.bak\n

            To restore:

            cp CLAUDE.md.1738000000.bak CLAUDE.md\n

            Or if you are using git, simply:

            git checkout CLAUDE.md\n
            ","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/migration/#existing-cursorrules-aider-copilot","level":2,"title":"Existing .cursorrules / Aider / Copilot","text":"

            ctx doesn't touch tool-specific config files. It creates its own files (.context/, CLAUDE.md) and coexists with whatever you already have.

            ","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/migration/#what-does-ctx-create","level":3,"title":"What Does ctx Create?","text":"ctx creates ctx does NOT touch .context/ directory .cursorrules CLAUDE.md (or merges into) .aider.conf.yml .claude/settings.local.json (seeded by ctx init; the plugin manages hooks and skills) .github/copilot-instructions.md .windsurfrules Any other tool-specific config

            Claude Code hooks and skills are provided by the ctx plugin, installed from the Claude Code marketplace (/plugin → search \"ctx\" → Install).

            ","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/migration/#running-ctx-alongside-other-tools","level":3,"title":"Running ctx Alongside Other Tools","text":"

            The .context/ directory is the source of truth. Tool-specific configs point to it:

            • Cursor: Reference .context/ files in your system prompt (see Cursor setup)
            • Aider: Add .context/ files to the read: list in .aider.conf.yml (see Aider setup)
            • Copilot: Keep .context/ files open or reference them in comments (see Copilot setup)

            You can generate a tool-specific configuration with:

            ctx setup cursor    # Generate Cursor config snippet\nctx setup aider     # Generate .aider.conf.yml\nctx setup copilot   # Generate Copilot tips\nctx setup windsurf  # Generate Windsurf config\n
            ","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/migration/#migrating-content-into-context","level":3,"title":"Migrating Content into .context/","text":"

            If you have project knowledge scattered across .cursorrules or custom prompt files, consider migrating it:

            1. Rules / invariants → .context/CONSTITUTION.md
            2. Code patterns → .context/CONVENTIONS.md
            3. Architecture notes → .context/ARCHITECTURE.md
            4. Known issues / tips → .context/LEARNINGS.md

            You don't need to delete the originals: ctx and tool-specific files can coexist. But centralizing in .context/ means every tool gets the same context.

            ","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/migration/#team-adoption","level":2,"title":"Team Adoption","text":"","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/migration/#context-is-designed-to-be-committed","level":3,"title":".context/ Is Designed to Be Committed","text":"

            The context files (tasks, decisions, learnings, conventions, architecture) are meant to live in version control. However, some subdirectories are personal or sensitive and should not be committed.

            ctx init automatically adds these .gitignore entries:

            # Journals contain full session transcripts: personal, potentially large\n.context/journal/\n.context/journal-site/\n.context/journal-obsidian/\n\n# Legacy encryption key path (copy to ~/.ctx/.ctx.key if needed)\n.context/.ctx.key\n\n# Runtime state and logs (ephemeral, machine-specific):\n.context/state/\n.context/logs/\n\n# Claude Code local settings (machine-specific)\n.claude/settings.local.json\n

            With those in place, committing is straightforward:

            # One person initializes\nctx init --merge\n\n# Commit context files (journals and keys are already gitignored)\ngit add .context/ CLAUDE.md\ngit commit -m \"Add ctx context management\"\ngit push\n

            Teammates pull and immediately have context. No per-developer setup needed.

            ","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/migration/#what-about-claude","level":3,"title":"What about .claude/?","text":"

            The .claude/ directory contains permissions that ctx init seeds. Hooks and skills are provided by the ctx plugin (not per-project files).

            File Commit? Why .claude/settings.local.json No Machine-specific, accumulates session permissions .claude/settings.golden.json Yes Curated permission snapshot (via ctx permission snapshot)","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/migration/#merge-conflicts-in-context-files","level":3,"title":"Merge Conflicts in Context Files","text":"

            Context files are plain Markdown. Resolve conflicts the same way you would for any other documentation file:

            # After a conflicting pull\ngit diff .context/TASKS.md    # See both sides\n# Edit to keep both sets of tasks, then:\ngit add .context/TASKS.md\ngit commit\n

            Common conflict scenarios:

            • TASKS.md: Two people added tasks: Keep both.
            • DECISIONS.md: Same decision recorded differently: Unify the entry.
            • LEARNINGS.md: Parallel discoveries: Keep both, remove duplicates.
            ","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/migration/#gradual-adoption","level":3,"title":"Gradual Adoption","text":"

            You don't need the whole team to switch at once:

            1. One person runs ctx init --merge and commits;
            2. CLAUDE.md instructions work immediately for Claude Code users;
            3. Other tool users can adopt at their own pace using ctx setup <tool>;
            4. Context files benefit everyone who reads them, even without tool integration.
            ","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/migration/#verifying-it-worked","level":2,"title":"Verifying It Worked","text":"","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/migration/#check-status","level":3,"title":"Check Status","text":"
            ctx status\n

            You should see your context files listed with token counts and no warnings.

            ","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/migration/#test-memory","level":3,"title":"Test Memory","text":"

            Start a new AI session and ask: \"Do you remember?\"

            The AI should cite specific context:

            • Current tasks from .context/TASKS.md;
            • Recent decisions or learnings;
            • Session history (if you've had prior sessions);

            If it responds with generic \"I don't have memory\", check that ctx is in your PATH (which ctx) and that hooks are configured (see Troubleshooting).

            ","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/migration/#verify-the-merge","level":3,"title":"Verify the Merge","text":"

            If you used --merge, check that your original content is intact:

            # Your original content should still be there\ncat CLAUDE.md\n\n# The ctx block should be between markers\ngrep -c \"ctx:context\" CLAUDE.md  # Should print 1\ngrep -c \"ctx:end\" CLAUDE.md      # Should print 1\n
            ","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/migration/#further-reading","level":2,"title":"Further Reading","text":"
            • Getting Started: Full setup walkthrough
            • Context Files: What each .context/ file does
            • Integrations: Per-tool setup (Claude Code, Cursor, Aider, Copilot)
            • CLI Reference: All ctx commands and flags
            ","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/release/","level":1,"title":"Cutting a Release","text":"

            Full Release Checklist

            This page covers the mechanics of cutting a release (bump, tag, push). For the complete pre-release ceremony (audits, tests, verification, and post-release steps), see the Release Checklist runbook.

            ","path":["Operations","Maintainers","Cutting a Release"],"tags":[]},{"location":"operations/release/#prerequisites","level":2,"title":"Prerequisites","text":"

            Before you can cut a release you need:

            • Push access to origin (GitHub)
            • GPG signing configured (make gpg-test)
            • Go installed (version in go.mod)
            • Zensical installed (make site-setup)
            • A clean working tree (git status shows nothing to commit)
            ","path":["Operations","Maintainers","Cutting a Release"],"tags":[]},{"location":"operations/release/#step-by-step","level":2,"title":"Step-by-Step","text":"","path":["Operations","Maintainers","Cutting a Release"],"tags":[]},{"location":"operations/release/#1-update-the-version-file","level":3,"title":"1. Update the VERSION File","text":"
            echo \"0.9.0\" > VERSION\ngit add VERSION\ngit commit -m \"chore: bump version to 0.9.0\"\n

            The VERSION file uses bare semver (0.9.0), no v prefix. The release script adds the v prefix for git tags.

            ","path":["Operations","Maintainers","Cutting a Release"],"tags":[]},{"location":"operations/release/#2-generate-release-notes","level":3,"title":"2. Generate Release Notes","text":"

            In Claude Code:

            /_ctx-release-notes\n

            This analyzes commits since the last tag and writes dist/RELEASE_NOTES.md. The release script refuses to proceed without this file.

            ","path":["Operations","Maintainers","Cutting a Release"],"tags":[]},{"location":"operations/release/#3-verify-docs-and-commit-any-remaining-changes","level":3,"title":"3. Verify Docs and Commit Any Remaining Changes","text":"
            /ctx-link-check    # audit docs for dead links\nmake audit          # full check: fmt, vet, lint, style, test\ngit status          # must be clean\n
            ","path":["Operations","Maintainers","Cutting a Release"],"tags":[]},{"location":"operations/release/#4-run-the-release","level":3,"title":"4. Run the Release","text":"
            make release\n

            Or, if you are in a Claude Code session:

            /_ctx-release\n

            The release script does everything in order:

            Step What happens 1 Reads VERSION, verifies release notes exist 2 Verifies working tree is clean 3 Updates version in 4 config files (plugin.json, marketplace.json, VS Code package.json + lock) 4 Updates download URLs in 3 doc files (index.md, getting-started.md, integrations.md) 5 Adds new row to versions.md 6 Rebuilds the documentation site (make site) 7 Commits all version and docs updates 8 Runs make test and make smoke 9 Builds binaries for all 6 platforms via hack/build-all.sh 10 Creates a signed git tag (v0.9.0) 11 Pushes the tag to origin 12 Updates and pushes the latest tag","path":["Operations","Maintainers","Cutting a Release"],"tags":[]},{"location":"operations/release/#5-github-ci-takes-over","level":3,"title":"5. GitHub CI Takes Over","text":"

            Pushing a v* tag triggers .github/workflows/release.yml:

            1. Checks out the tagged commit
            2. Runs the full test suite
            3. Builds binaries for all platforms
            4. Creates a GitHub Release with auto-generated notes
            5. Uploads binaries and SHA256 checksums
            ","path":["Operations","Maintainers","Cutting a Release"],"tags":[]},{"location":"operations/release/#6-verify","level":3,"title":"6. Verify","text":"
            • GitHub Releases shows the new version
            • All 6 binaries are attached (linux/darwin x amd64/arm64, windows x amd64)
            • SHA256 files are attached
            • Release notes look correct
            ","path":["Operations","Maintainers","Cutting a Release"],"tags":[]},{"location":"operations/release/#what-gets-updated-automatically","level":2,"title":"What Gets Updated Automatically","text":"

            The release script updates 8 files so you do not have to:

            File What changes internal/assets/claude/.claude-plugin/plugin.json Plugin version .claude-plugin/marketplace.json Marketplace version (2 fields) editors/vscode/package.json VS Code extension version editors/vscode/package-lock.json VS Code lock version (2 fields) docs/index.md Download URLs docs/home/getting-started.md Download URLs docs/operations/integrations.md VSIX filename version docs/reference/versions.md New version row + latest pointer

            The Go binary version is injected at build time via -ldflags from the VERSION file. No source file needs editing.

            ","path":["Operations","Maintainers","Cutting a Release"],"tags":[]},{"location":"operations/release/#build-targets-reference","level":2,"title":"Build Targets Reference","text":"Target What it does make release Full release (script + tag + push) make build Build binary for current platform make build-all Build all 6 platform binaries make test Unit tests make smoke Integration smoke tests make audit Full check (fmt + vet + lint + drift + docs + test) make site Rebuild documentation site","path":["Operations","Maintainers","Cutting a Release"],"tags":[]},{"location":"operations/release/#troubleshooting","level":2,"title":"Troubleshooting","text":"","path":["Operations","Maintainers","Cutting a Release"],"tags":[]},{"location":"operations/release/#release-notes-not-found","level":3,"title":"\"Release Notes Not Found\"","text":"
            ERROR: dist/RELEASE_NOTES.md not found.\n

            Run /_ctx-release-notes in Claude Code first, or write dist/RELEASE_NOTES.md manually.

            ","path":["Operations","Maintainers","Cutting a Release"],"tags":[]},{"location":"operations/release/#working-tree-is-not-clean","level":3,"title":"\"Working Tree Is Not Clean\"","text":"
            ERROR: Working tree is not clean.\n

            Commit or stash all changes before running make release.

            ","path":["Operations","Maintainers","Cutting a Release"],"tags":[]},{"location":"operations/release/#tag-already-exists","level":3,"title":"\"Tag Already Exists\"","text":"
            ERROR: Tag v0.9.0 already exists.\n

            You cannot release the same version twice. Either bump VERSION to a new version, or delete the old tag if the release was incomplete:

            git tag -d v0.9.0\ngit push origin :refs/tags/v0.9.0\n
            ","path":["Operations","Maintainers","Cutting a Release"],"tags":[]},{"location":"operations/release/#ci-build-fails-after-tag-push","level":3,"title":"CI Build Fails After Tag Push","text":"

            The tag is already published. Fix the issue, bump to a patch version (e.g. 0.9.1), and release again. Do not force-push tags that others may have already fetched.

            ","path":["Operations","Maintainers","Cutting a Release"],"tags":[]},{"location":"operations/upgrading/","level":1,"title":"Upgrade","text":"","path":["Operations","Day-to-Day","Upgrade"],"tags":[]},{"location":"operations/upgrading/#upgrade","level":2,"title":"Upgrade","text":"

            New versions of ctx may ship updated permissions, CLAUDE.md directives, or plugin hooks and skills.

            Claude Code User?

            The marketplace can update skills, hooks, and prompts independently: /plugin → select ctx → Update now (or enable auto-update).

            The ctx binary is separate: rebuild from source or download a new release when one is available, then run ctx init --force --merge. Knowledge files are preserved automatically.

            ","path":["Operations","Day-to-Day","Upgrade"],"tags":[]},{"location":"operations/upgrading/#tldr","level":2,"title":"TL:DR","text":"
            # Plugin users (Claude Code)\n# /plugin → select ctx → Update now\n# Then update the binary and reinitialize:\nctx init --force --merge\n\n# From-source / manual users\n# install new ctx binary, then:\nctx init --force --merge\n# /plugin → select ctx → Update now   (if using Claude Code)\n
            ","path":["Operations","Day-to-Day","Upgrade"],"tags":[]},{"location":"operations/upgrading/#what-changes-between-versions","level":2,"title":"What Changes between Versions","text":"

            ctx init generates two categories of files:

            Category Examples Changes between versions? Infrastructure .claude/settings.local.json (permissions), ctx-managed sections in CLAUDE.md, ctx plugin (hooks + skills) Yes Knowledge .context/TASKS.md, DECISIONS.md, LEARNINGS.md, CONVENTIONS.md, ARCHITECTURE.md, GLOSSARY.md, CONSTITUTION.md, AGENT_PLAYBOOK.md No: this is your data

            Infrastructure is regenerated by ctx init and plugin updates. Knowledge files are yours and should never be overwritten.

            ","path":["Operations","Day-to-Day","Upgrade"],"tags":[]},{"location":"operations/upgrading/#upgrade-steps","level":2,"title":"Upgrade Steps","text":"","path":["Operations","Day-to-Day","Upgrade"],"tags":[]},{"location":"operations/upgrading/#1-install-the-new-version","level":3,"title":"1. Install the New Version","text":"

            Build from source or download the binary:

            cd /path/to/ctx-source\ngit pull\nmake build\nsudo make install\nctx --version   # verify\n
            ","path":["Operations","Day-to-Day","Upgrade"],"tags":[]},{"location":"operations/upgrading/#2-reinitialize","level":3,"title":"2. Reinitialize","text":"
            ctx init --force --merge\n
            • --force regenerates infrastructure files (permissions, ctx-managed sections in CLAUDE.md).
            • --merge preserves your content outside ctx markers.

            Knowledge files (.context/TASKS.md, DECISIONS.md, etc.) are preserved automatically: ctx init only overwrites infrastructure, never your data.

            Encryption key: The encryption key lives at ~/.ctx/.ctx.key (outside the project). Reinit does not affect it. If you have a legacy key at .context/.ctx.key or ~/.local/ctx/keys/, copy it manually (see Syncing Scratchpad Notes).

            ","path":["Operations","Day-to-Day","Upgrade"],"tags":[]},{"location":"operations/upgrading/#3-update-the-ctx-plugin","level":3,"title":"3. Update the ctx Plugin","text":"

            If you use Claude Code, update the plugin to get new hooks and skills:

            1. Open /plugin in Claude Code.
            2. Select ctx.
            3. Click Update now.

            Or enable auto-update so the plugin stays current without manual steps.

            ","path":["Operations","Day-to-Day","Upgrade"],"tags":[]},{"location":"operations/upgrading/#4-review-custom-settings","level":3,"title":"4. Review Custom Settings","text":"

            If you added custom permissions to .claude/settings.local.json beyond what ctx init provides, diff and merge:

            diff .claude.bak/settings.local.json .claude/settings.local.json\n

            Manually add back any custom entries that the new init dropped.

            ","path":["Operations","Day-to-Day","Upgrade"],"tags":[]},{"location":"operations/upgrading/#5-verify","level":3,"title":"5. Verify","text":"
            ctx status          # context files intact\nctx drift           # no broken references\n
            ","path":["Operations","Day-to-Day","Upgrade"],"tags":[]},{"location":"operations/upgrading/#6-clean-up","level":3,"title":"6. Clean Up","text":"

            If you made manual backups, remove them once satisfied:

            rm -rf .context.bak .claude.bak CLAUDE.md.bak\n
            ","path":["Operations","Day-to-Day","Upgrade"],"tags":[]},{"location":"operations/upgrading/#what-if-i-skip-the-upgrade","level":2,"title":"What If I Skip the Upgrade?","text":"

            The old binary still works with your existing .context/ files. But you may miss:

            • New plugin hooks that enforce better practices or catch mistakes;
            • Updated skill prompts that produce better results;
            • New .gitignore entries for directories added in newer versions;
            • Bug fixes in the CLI itself.

            The plugin and the binary can be updated independently. You can update the plugin (for new hooks/skills) even if you stay on an older binary, and vice versa.

            Context files are plain Markdown: They never break between versions.

            The surrounding infrastructure is what evolves.

            ","path":["Operations","Day-to-Day","Upgrade"],"tags":[]},{"location":"operations/runbooks/architecture-exploration/","level":1,"title":"Architecture Exploration","text":"","path":["Operations","Runbooks","Architecture Exploration"],"tags":[]},{"location":"operations/runbooks/architecture-exploration/#architecture-exploration","level":1,"title":"Architecture Exploration","text":"

            Systematically build architecture documentation across one or more repositories using ctx skills. Each invocation does one unit of work; a simple loop drives the agent through all phases.

            When to use: When onboarding to a new codebase, performing architecture reviews, or building up .context/ documentation across a workspace of repos.

            Prerequisites: ctx installed, repos cloned under a shared workspace directory (e.g., ~/WORKSPACE/).

            Companion skills:

            • /ctx-architecture: structural baseline and principal analysis
            • /ctx-architecture-enrich: code intelligence enrichment via GitNexus
            • /ctx-architecture-failure-analysis: adversarial failure analysis
            ","path":["Operations","Runbooks","Architecture Exploration"],"tags":[]},{"location":"operations/runbooks/architecture-exploration/#overview","level":2,"title":"Overview","text":"

            The agent progresses through phases per repo, depth-first:

            Phase Skill What it does bootstrap ctx init + /ctx-architecture Initialize context and build structural baseline principal /ctx-architecture principal Deep analysis: vision, bottlenecks, alternatives enriched /ctx-architecture-enrich Quantify with code intelligence (blast radius, flows) frontier-N /ctx-architecture (re-run) Explore unexplored areas found in convergence report lens-* /ctx-architecture with lens Focused exploration through conceptual lenses

            Exploration stops when convergence >= 0.85, frontier runs plateau, or all lenses are exhausted.

            ","path":["Operations","Runbooks","Architecture Exploration"],"tags":[]},{"location":"operations/runbooks/architecture-exploration/#setup","level":2,"title":"Setup","text":"

            Create a tracking directory in your workspace root:

            cd ~/WORKSPACE\nmkdir -p .arch-explorer\n

            Create .arch-explorer/manifest.json listing your repos:

            {\n  \"repos\": [\"ctx\", \"portal\", \"infra\"],\n  \"current_repo_index\": 0,\n  \"progress\": {}\n}\n

            Create .arch-explorer/run-log.md (empty, the agent appends to it).

            ","path":["Operations","Runbooks","Architecture Exploration"],"tags":[]},{"location":"operations/runbooks/architecture-exploration/#prompt","level":2,"title":"Prompt","text":"

            Save this as .arch-explorer/PROMPT.md and invoke with your agent. The prompt is self-contained: the agent reads the manifest, picks the next unit of work, executes it, updates tracking, and stops.

            You are an autonomous architecture exploration agent. Your job is to\nsystematically build and evolve architecture documentation across all\nrepositories in this workspace using ctx skills.\n\n## Execution Protocol\n\n### Step 1: Read State\n\nRead `.arch-explorer/manifest.json`. This tells you:\n- Which repos exist and their order\n- What has been done per repo (`progress` object)\n- Which repo to work on next (`current_repo_index`)\n\n### Step 2: Pick the Next Unit of Work\n\n**Strategy: depth-first, sequential.**\n\nFind the current repo (by `current_repo_index`). Determine its next\nphase from the progression below. If all phases are exhausted for this\nrepo (convergence score >= 0.85 or 3+ frontier runs with no new\nfindings), advance `current_repo_index` and pick the next repo.\n\n### Phase Progression (per repo)\n\nEach repo progresses through these phases in order:\n\n| Phase | Skill | Prerequisite |\n|-------|-------|-------------|\n| `bootstrap` | `ctx init` + `/ctx-architecture` | None |\n| `principal` | `/ctx-architecture principal` | bootstrap done |\n| `enriched` | `/ctx-architecture-enrich` | principal done, GitNexus indexed |\n| `frontier-N` | `/ctx-architecture` (re-run) | enriched done |\n\n**`bootstrap` is a single composite unit:** `ctx init` followed by\nstructural analysis. This is the ONLY phase that combines two actions.\nNo other phase may chain actions.\n\n**Frontier runs** are numbered: `frontier-1`, `frontier-2`, etc.\nEach frontier run reads CONVERGENCE-REPORT.md and picks unexplored\nareas. 
The skill handles this automatically.\n\nAfter the third frontier run OR when convergence >= 0.85, apply\n**conceptual lenses** (one per run):\n\n| Lens | Focus Areas |\n|------|-------------|\n| `security` | Auth flows, input validation, secrets, attack surfaces, trust boundaries |\n| `performance` | Hot paths, caching, concurrency, resource lifecycle, allocation patterns |\n| `stability` | Error handling, retries, graceful degradation, circuit breakers, timeouts |\n| `observability` | Logging, metrics, tracing, alerting, debugging affordances |\n| `data-integrity` | Storage, serialization, migrations, consistency, backup, recovery |\n\nFor lens runs, prepend the lens context as an explicit instruction to\nthe skill invocation:\n\n> \"Focus exploration on security: auth flows, input validation, secrets,\n> attack surfaces, trust boundaries.\"\n\nDo NOT wait for the skill to ask what to explore. Provide the lens\nfocus as input upfront.\n\n### Step 3: Do the Work\n\n1. `cd` into the repo directory (`~/WORKSPACE/<repo-name>`)\n2. If phase is `bootstrap`:\n    - Run `ctx init`, confirm `.context/` exists\n    - Then run `/ctx-architecture` (structural baseline)\n3. If phase is `principal` or `frontier-*`:\n    - Run `/ctx-architecture` (add `principal` argument for principal phase)\n    - The skill will read existing artifacts and build on them\n4. If phase is `enriched`:\n    - Verify GitNexus is connected: call `mcp__gitnexus__list_repos`\n    - Success = non-empty list returned with no error\n    - If GitNexus unavailable, log as `enriched-skipped` and advance\n      to `frontier-1`\n    - Run `/ctx-architecture-enrich`\n5. 
If phase is a lens run (`lens-security`, etc.):\n    - Run `/ctx-architecture` with lens focus prepended as instruction\n      (see lens table above for exact wording)\n\n### Step 4: Extract Results\n\nAfter the skill completes, gather:\n\n- **Convergence score**: from `map-tracking.json`, computed as:\n  average of all module `confidence` values (0.0-1.0). If\n  `map-tracking.json` is missing or has no confidence values,\n  record `null` and log a warning.\n- **Frontier count**: from CONVERGENCE-REPORT.md, count the number\n  of listed unexplored areas. If CONVERGENCE-REPORT.md is missing,\n  record `frontier_count: null` and log a warning. Treat missing\n  as \"exploration should continue\" (do not stall).\n- **Key findings**: 2-3 bullet points of what was discovered or\n  changed in this run (new modules mapped, danger zones found, etc.)\n- **New artifacts**: list any new files created in `.context/`\n\n### Step 5: Update Tracking\n\nUpdate `.arch-explorer/manifest.json`:\n\n```json\n{\n  \"progress\": {\n    \"ctx\": {\n      \"phases_completed\": [\"bootstrap\", \"principal\"],\n      \"current_phase\": \"enriched\",\n      \"lenses_explored\": [],\n      \"last_run\": \"2026-04-07T14:00:00Z\",\n      \"convergence_score\": 0.72,\n      \"frontier_count\": 3,\n      \"total_runs\": 2,\n      \"findings_summary\": \"14 modules mapped, 3 danger zones, 2 extension points\"\n    }\n  }\n}\n```\n\nAppend to `.arch-explorer/run-log.md`:\n\n```markdown\n## 2026-04-07T14:00:00Z / ctx / principal\n\n**Phase:** principal\n**Convergence:** 0.45 -> 0.72\n**Frontiers remaining:** 3\n**Key findings:**\n- Identified CLI dispatch as primary bottleneck (fan-out to 12 subsystems)\n- Security: context files readable by any process (no access control)\n- Strategic recommendation: extract context engine into library package\n\n**Artifacts updated:** ARCHITECTURE-PRINCIPAL.md, DANGER-ZONES.md, map-tracking.json\n```\n\n### Step 6: Report and Stop\n\nPrint this exact format as the 
FINAL output of the invocation:\n\n```\n[arch-explorer] DONE\n  repo: ctx\n  phase: principal\n  convergence: 0.72\n  frontiers: 3\n  runs_on_repo: 3\n  next: ctx / enriched\n```\n\nThe `[arch-explorer] DONE` line is the terminal marker. After printing\nit, produce no further output. Execution is complete.\n\n## Rules\n\n1. **One unit per invocation.** The only composite unit is `bootstrap`\n   (init + structural). All other phases are exactly one skill run.\n2. **Additive only.** Never delete or overwrite existing artifacts.\n   The skills already handle incremental updates.\n3. **No duplicated work.** Read manifest before acting. If a phase is\n   already recorded as completed, skip it.\n4. **Log everything.** Every run gets a run-log entry, even failures\n   and skips.\n5. **Fail gracefully.** If a skill fails (missing GitNexus, broken repo,\n   etc.), log the failure with reason and advance to the next phase or\n   repo. Don't retry in the same invocation.\n6. **Respect ctx conventions.** Each repo gets its own `.context/`\n   directory. Never write architecture artifacts outside `.context/`.\n\n## Stopping Logic\n\nA repo is considered \"explored\" when ANY of these is true:\n- Convergence score >= 0.85 (from map-tracking.json)\n- 3+ frontier runs produced no new findings (frontier_count unchanged\n  across consecutive runs)\n- All 5 lenses have been applied\n- Convergence score is `null` after 3 attempts (artifacts aren't being\n  generated properly; log warning and move on)\n\nWhen a repo is explored, advance `current_repo_index` in the manifest.\n\n## When All Repos Are Done\n\nWhen every repo has reached its stopping condition, print:\n\n```\n[arch-explorer] ALL DONE\n  - ctx: 0.92 convergence, 8 runs, 5 lenses\n  - portal: 0.87 convergence, 6 runs, 3 lenses\n  ...\n```\n
            ","path":["Operations","Runbooks","Architecture Exploration"],"tags":[]},{"location":"operations/runbooks/architecture-exploration/#invocation","level":2,"title":"Invocation","text":"

            Single run (safest for quota):

            cd ~/WORKSPACE\nclaude --print \"Follow .arch-explorer/PROMPT.md\" --allowedTools '*'\n

            Batch of N runs:

            cd ~/WORKSPACE\nfor i in $(seq 1 5); do\n  claude --print \"Follow .arch-explorer/PROMPT.md\" --allowedTools '*'\n  echo \"--- Run $i complete ---\"\ndone\n

            Resume after interruption:

            Just run again. The manifest tracks state; the agent picks up where it left off.

            ","path":["Operations","Runbooks","Architecture Exploration"],"tags":[]},{"location":"operations/runbooks/architecture-exploration/#tips","level":2,"title":"Tips","text":"
            • Start small: list 1-2 repos in the manifest first. Add more once you're confident in the output quality.
            • GitNexus is optional: the enrichment phase is skipped gracefully if GitNexus isn't connected. You still get structural and principal analysis.
            • Review between batches: check the run-log and generated artifacts between batch runs. The agent is additive-only, but early course correction saves wasted runs.
            • Lens runs are the payoff: the first three phases build the map; lens runs find the interesting things (security gaps, performance cliffs, stability risks).
            ","path":["Operations","Runbooks","Architecture Exploration"],"tags":[]},{"location":"operations/runbooks/architecture-exploration/#history","level":2,"title":"History","text":"
            • 2026-04-07: Original prompt created as hack/agents/architecture-explorer.md.
            • 2026-04-16: Moved to docs as a runbook for discoverability.
            ","path":["Operations","Runbooks","Architecture Exploration"],"tags":[]},{"location":"operations/runbooks/breaking-migration/","level":1,"title":"Breaking Migration","text":"","path":["Operations","Runbooks","Breaking Migration"],"tags":[]},{"location":"operations/runbooks/breaking-migration/#breaking-migration-guide","level":1,"title":"Breaking Migration Guide","text":"

            Template for upgrading across breaking CLI renames or behavior changes. Use this as a starting point when writing migration notes for a specific release, or hand it to your agent as context for generating release-specific guidance.

            When to use: When a release includes breaking changes (command renames, removed flags, changed defaults) that require user action.

            Companion: Upgrade guide covers the general upgrade flow. This runbook covers the breaking-change specifics.

            ","path":["Operations","Runbooks","Breaking Migration"],"tags":[]},{"location":"operations/runbooks/breaking-migration/#step-1-identify-what-changed","level":2,"title":"Step 1: Identify What Changed","text":"

            Ask your agent to diff the CLI surface between the old and new version:

            Compare the CLI command surface between the previous release tag\nand HEAD. For each change, categorize as: renamed, removed,\nnew, or changed-behavior. Include old and new command signatures.\n

            Or use the /_ctx-command-audit skill after the rename.

            ","path":["Operations","Runbooks","Breaking Migration"],"tags":[]},{"location":"operations/runbooks/breaking-migration/#step-2-regenerate-infrastructure","level":2,"title":"Step 2: Regenerate Infrastructure","text":"
            # Install the new binary\nmake build && sudo make install\n\n# Regenerate CLAUDE.md and permissions\nctx init --force --merge\n

            --merge preserves your knowledge files (TASKS.md, DECISIONS.md, etc.) while regenerating infrastructure (permissions, CLAUDE.md managed sections).

            ","path":["Operations","Runbooks","Breaking Migration"],"tags":[]},{"location":"operations/runbooks/breaking-migration/#step-3-update-the-plugin","level":2,"title":"Step 3: Update the Plugin","text":"
            /plugin -> select ctx -> Update now\n

            Or, if using a local clone:

            make plugin-reload\n# restart Claude Code\n
            ","path":["Operations","Runbooks","Breaking Migration"],"tags":[]},{"location":"operations/runbooks/breaking-migration/#step-4-update-personal-scripts","level":2,"title":"Step 4: Update Personal Scripts","text":"

            Search your scripts and aliases for old command names:

            # Example: find references to old command names\ngrep -r \"ctx old-command\" ~/scripts/ ~/.zshrc ~/.bashrc\n

            Replace with the new names per the changelog.

            ","path":["Operations","Runbooks","Breaking Migration"],"tags":[]},{"location":"operations/runbooks/breaking-migration/#step-5-update-hook-configs","level":2,"title":"Step 5: Update Hook Configs","text":"

            If you have custom hooks in .claude/settings.local.json that reference ctx commands, update them:

            jq '.hooks' .claude/settings.local.json | grep \"ctx \"\n
            ","path":["Operations","Runbooks","Breaking Migration"],"tags":[]},{"location":"operations/runbooks/breaking-migration/#step-6-verify","level":2,"title":"Step 6: Verify","text":"
            ctx status          # context files intact\nctx drift           # no broken references\nmake test           # if you're a contributor\n
            ","path":["Operations","Runbooks","Breaking Migration"],"tags":[]},{"location":"operations/runbooks/breaking-migration/#writing-release-specific-migration-notes","level":2,"title":"Writing Release-Specific Migration Notes","text":"

            When preparing a release with breaking changes, create a section in the release notes using this template:

            ## Breaking Changes\n\n### `old-command` renamed to `new-command`\n\n**What changed**: `ctx old-command` is now `ctx new-command`.\nThe old name is removed (no deprecation alias).\n\n**Action required**:\n1. Run `ctx init --force --merge` to update CLAUDE.md\n2. Update any scripts referencing `ctx old-command`\n3. Update hook configs if applicable\n\n**Why**: [brief rationale for the rename]\n

            Repeat for each breaking change. Users should be able to follow the notes mechanically without needing to understand the codebase.

            ","path":["Operations","Runbooks","Breaking Migration"],"tags":[]},{"location":"operations/runbooks/codebase-audit/","level":1,"title":"Codebase Audit","text":"","path":["Operations","Runbooks","Codebase Audit"],"tags":[]},{"location":"operations/runbooks/codebase-audit/#codebase-audit","level":1,"title":"Codebase Audit","text":"

            A structured audit of the codebase: dead code, magic strings, documentation drift, security surface, and roadmap opportunities.

            When to run: Before a release, after a long YOLO sprint, quarterly, or when planning the next phase of work.

            Time: ~15-30 minutes with a team of agents.

            ","path":["Operations","Runbooks","Codebase Audit"],"tags":[]},{"location":"operations/runbooks/codebase-audit/#how-to-use-this-runbook","level":2,"title":"How to Use This Runbook","text":"

            Start a Claude Code session with a clean git state (git stash or commit first). Paste or adapt the prompt below. The agent does the analysis; you triage the findings.

            ","path":["Operations","Runbooks","Codebase Audit"],"tags":[]},{"location":"operations/runbooks/codebase-audit/#prompt","level":2,"title":"Prompt","text":"
            I want you to create an agent team to audit this codebase. Save each report as\na separate markdown file under `./ideas/` (or another directory if you prefer).\n\nUse read-only agents (subagent_type: Explore) for all analyses. No code changes.\n\nFor each report, use this structure:\n- Executive Summary (2-3 sentences + severity table)\n- Findings (grouped, with file:line references)\n- Ranked Recommendations (high/medium/low priority)\n- Methodology (what was examined, how)\n\nKeep reports actionable: every finding should suggest a concrete fix or next step.\n\n## Analyses to Run\n\n### 1. Extractable Patterns (session mining)\nSearch session JSONL files, journal entries, and task archives for repetitive\nmulti-step workflows. Count frequency of bash command sequences, slash command\nusage, and recurring user prompts. Identify patterns that could become skills\nor scripts. Cross-reference with existing skills to find coverage gaps.\nOutput: ranked list of automation opportunities with frequency data.\n\n### 2. Documentation Drift (godoc + inline)\nCompare every doc.go against its package's actual exports and behavior. Check\ninline godoc comments on exported functions against their implementations.\nScan for stale TODO/FIXME/HACK comments. Check package-level comments match\npackage names. Output: drift items ranked by severity with exact file:line refs.\n\n### 3. Maintainability\nLook for: functions >80 lines that have logical split points; switch blocks\nwith >5 cases that could be table-driven or extracted; inline comments that\nsay \"step 1\", \"step 2\" or similar (sign the block wants to be a function);\nfiles with >400 lines; packages with flat structure that could benefit from\nsub-packages; functions that seem misplaced in their file. Do NOT flag\nthings that are fine as-is just because they could theoretically be different.\nOutput: concrete refactoring suggestions, not style nitpicks.\n\n### 4. 
Security Review\nThis is a CLI app: focus on CLI-relevant attack surface, not web OWASP:\nfile path traversal (does user input flow into file paths unsanitized?),\ncommand injection (does user input flow into exec calls?), symlink following\n(does the tool follow symlinks when writing to .context/?), permission\nhandling (are file permissions set correctly?), sensitive data in outputs\n(do any commands leak secrets or session content?). Output: findings with\nseverity ratings and exploit scenarios.\n\n### 5. Blog Theme Discovery\nRead existing blog posts for style and narrative voice. Analyze git log,\nrecent session discussions, and DECISIONS.md for story arcs worth writing\nabout. Suggest 3-5 blog post themes with: title, angle, target audience,\nkey commits/sessions to reference, and a 2-sentence pitch. Prioritize\nthemes that build a coherent narrative across posts.\n\n### 6. Roadmap & Value Opportunities\nBased on current features, recent momentum, and gaps found in other analyses:\nwhat are the highest-value improvements? Consider: user-facing features,\ndeveloper experience, integration opportunities, and low-hanging fruit.\nOutput: prioritized list with effort/impact estimates (not time estimates).\n\n### 7. User-Facing Documentation\nEvaluate README, help text, and any user docs. Suggest improvements\nstructured as use-case pages: the problem, how ctx solves it, typical\nworkflow, gotchas. Identify gaps where a user would get stuck without\nreading source code. Output: list of documentation gaps and suggested\npage outlines.\n\n### 8. Agent Team Strategies\nBased on the codebase structure, suggest 2-3 agent team configurations for\nupcoming work sessions. For each: team composition (roles, agent types),\ntask distribution strategy, coordination approach, and which types of work\nit suits. Ground suggestions in actual project patterns, not generic advice.\n
            ","path":["Operations","Runbooks","Codebase Audit"],"tags":[]},{"location":"operations/runbooks/codebase-audit/#tips","level":2,"title":"Tips","text":"
            • Clean state matters: the prompt says \"no code changes\" but accidents happen. Start from a clean git state so you can git checkout . if needed.

            • Adjust scope: drop analyses you don't need. Analyses 1-4 are the most actionable. Analyses 5-8 are planning/creative and can be skipped if you just want a technical audit.

            • Reports feed TASKS.md: after the audit, read each report and create tasks in the appropriate Phase section. The reports are input, not output.

            • ideas/ is gitignored: reports saved there won't be committed. Move specific findings to TASKS.md, DECISIONS.md, or LEARNINGS.md to persist them.

            ","path":["Operations","Runbooks","Codebase Audit"],"tags":[]},{"location":"operations/runbooks/codebase-audit/#history","level":2,"title":"History","text":"
            • 2026-02-08: Original prompt created after a codebase audit sprint.
            • 2026-02-17: Improved with read-only agents, report structure template, CLI-scoped security review, and maintainability thresholds.
            • 2026-04-16: Moved from hack/runbooks/ to docs/operations/runbooks/.
            ","path":["Operations","Runbooks","Codebase Audit"],"tags":[]},{"location":"operations/runbooks/docs-semantic-audit/","level":1,"title":"Docs Semantic Audit","text":"","path":["Operations","Runbooks","Docs Semantic Audit"],"tags":[]},{"location":"operations/runbooks/docs-semantic-audit/#documentation-semantic-audit","level":1,"title":"Documentation Semantic Audit","text":"

            Find structural problems that linters and link checkers cannot: weak pages that should be merged, heavy pages that should be split, missing cross-links, and narrative arcs that don't land.

            When to run: Before a release, after adding several new pages, when the site feels sprawling, or when you suspect narrative gaps.

            Time: ~20-40 minutes with an agent session.

            ","path":["Operations","Runbooks","Docs Semantic Audit"],"tags":[]},{"location":"operations/runbooks/docs-semantic-audit/#why-this-is-a-runbook","level":2,"title":"Why This Is a Runbook","text":"

            These judgments are inherently subjective and context-dependent. A page is \"weak\" relative to its neighbors; a narrative arc only matters if the docs intend to tell a story. Deterministic tools (broken-link checkers, word counters) can't do this. An LLM reading the full doc set can.

            ","path":["Operations","Runbooks","Docs Semantic Audit"],"tags":[]},{"location":"operations/runbooks/docs-semantic-audit/#prompt","level":2,"title":"Prompt","text":"

            Paste or adapt the following into a Claude Code session. The agent needs read access to docs/ and the site nav structure.

            Read every file under docs/ (including docs/blog/ and docs/recipes/).\nFor each file, note: title, word count, outbound links, inbound links\n(how many other pages link to it), and a one-line summary of its purpose.\n\nThen produce a report with these sections:\n\n## 1. Weak Dangling Pages\n\nPages that are thin, isolated, or redundant. Signs:\n- Under ~300 words with no unique content (just restates what another page says)\n- Zero or one inbound links (orphaned in the nav)\n- Content that would be stronger merged into an adjacent page\n- \"Try it in 5 minutes\" sections that assume installation already happened\n- Pages whose title doesn't work as a nav entry (too long, too vague)\n\nFor each: identify the page, explain why it's weak, and recommend\nmerge target or deletion.\n\n## 2. Overly Heavy Pages\n\nPages doing too much. Signs:\n- Over ~1500 words with multiple distinct topics\n- More than 4 H2 sections that could stand alone\n- Reader has to scroll past irrelevant content to find what they need\n- Mixed audience (beginner setup + advanced config on same page)\n\nFor each: identify the page, list the distinct topics, and suggest\nsplit points.\n\n## 3. Missing Cross-Links\n\nPlaces where a reader would naturally want to jump to related content\nbut no link exists. Look for:\n- Concepts mentioned but not linked (e.g., \"scratchpad\" without linking\n  to the scratchpad page)\n- Blog posts that describe features without linking to the reference docs\n- Recipes that reference workflows without linking to the relevant\n  getting-started section\n- Pages that end without a \"Next Up\" or \"See Also\" pointer\n\nFor each: source page, anchor text, suggested link target.\n\n## 4. Narrative Gaps\n\nThe docs should tell a coherent story: problem -> install -> first session\n-> daily workflow -> advanced patterns -> contributing. 
Look for:\n- Gaps in the progression (e.g., no bridge from \"first session\" to\n  \"daily habits\")\n- Blog posts that introduce concepts the reference docs don't cover\n- Recipes that assume knowledge no other page teaches\n- Features documented in CLI reference but missing from workflows/recipes\n\nFor each: describe the gap and suggest what page or section would fill it.\n\n## 5. Blog Cross-Linking Opportunities\n\nBlog posts are often written in isolation. Look for:\n- Posts that cover the same theme but don't reference each other\n- Posts that describe the evolution of a feature (natural \"part 1 / part 2\")\n- Posts that would benefit from a \"Related posts\" footer\n- Thematic clusters that could be linked from a recipe or reference page\n\nFor each: list the posts, the shared theme, and the suggested links.\n\n## Output Format\n\nFor every finding, include:\n- File path (docs/whatever.md)\n- Severity: high (actively confusing), medium (missed opportunity),\n  low (nice to have)\n- Concrete recommendation (merge into X, split at H2 Y, add link to Z)\n\nEnd with a prioritized action list: what to fix first.\n
            ","path":["Operations","Runbooks","Docs Semantic Audit"],"tags":[]},{"location":"operations/runbooks/docs-semantic-audit/#after-the-audit","level":2,"title":"After the Audit","text":"
            1. Triage findings: not everything needs fixing. Focus on high severity.
            2. Merge weak pages first: fewer pages is almost always better.
            3. Add cross-links: cheapest improvement, highest reader impact.
            4. File split decisions in DECISIONS.md: page splits are architectural.
            5. Regenerate the site and spot-check nav after structural changes.
            ","path":["Operations","Runbooks","Docs Semantic Audit"],"tags":[]},{"location":"operations/runbooks/docs-semantic-audit/#history","level":2,"title":"History","text":"
            • 2026-02-17: Created after merging docs/re-explaining.md into docs/about.md, which surfaced the pattern of weak standalone pages that dilute rather than add.
            • 2026-04-16: Moved from hack/runbooks/ to docs/operations/runbooks/.
            ","path":["Operations","Runbooks","Docs Semantic Audit"],"tags":[]},{"location":"operations/runbooks/hub-deployment/","level":1,"title":"Hub Deployment","text":"","path":["Operations","Runbooks","Hub Deployment"],"tags":[]},{"location":"operations/runbooks/hub-deployment/#hub-deployment","level":1,"title":"Hub Deployment","text":"

            Linear runbook for setting up a ctx Hub for yourself or a team. Consolidates pieces currently scattered across hub recipes and operations docs.

            When to use: First-time hub setup, or when onboarding a new team onto an existing hub.

            Prerequisites: ctx binary installed, network connectivity between hub and clients.

            Companion docs:

            • Hub overview: what the hub is and is not
            • Hub operations: data directory, systemd, backup, monitoring
            • Hub failure modes: what can go wrong
            ","path":["Operations","Runbooks","Hub Deployment"],"tags":[]},{"location":"operations/runbooks/hub-deployment/#step-1-start-the-hub","level":2,"title":"Step 1: Start the Hub","text":"Quick Start (foreground)Production (systemd)
            ctx hub start\n

            See Hub Operations: Systemd Unit for the full unit file.

            sudo systemctl enable --now ctx-hub\n

            The hub creates admin.token on first start. Save this token; it is the only way to register clients.

            ","path":["Operations","Runbooks","Hub Deployment"],"tags":[]},{"location":"operations/runbooks/hub-deployment/#step-2-generate-the-admin-token","level":2,"title":"Step 2: Generate the Admin Token","text":"

            On first start, the hub writes admin.token to the data directory (default ~/.ctx/hub-data/):

            cat ~/.ctx/hub-data/admin.token\n

            This token has full admin privileges. Keep it secret.

            ","path":["Operations","Runbooks","Hub Deployment"],"tags":[]},{"location":"operations/runbooks/hub-deployment/#step-3-register-clients","level":2,"title":"Step 3: Register Clients","text":"

            For each client (person or machine) that will connect:

            # On the hub machine\nctx hub register --name \"volkan-laptop\" --admin-token <admin-token>\n

            This returns a client token. Distribute it securely to the client.

            ","path":["Operations","Runbooks","Hub Deployment"],"tags":[]},{"location":"operations/runbooks/hub-deployment/#step-4-connect-clients","level":2,"title":"Step 4: Connect Clients","text":"

            On each client machine:

            ctx connect <hub-address> --token <client-token>\n

            Verify the connection:

            ctx connection status\n
            ","path":["Operations","Runbooks","Hub Deployment"],"tags":[]},{"location":"operations/runbooks/hub-deployment/#step-5-verify-sync","level":2,"title":"Step 5: Verify Sync","text":"

            Push a test entry from one client and verify it arrives:

            # Client A\nctx add learning \"Hub sync test\" --context \"Verifying hub setup\"\n\n# Client B (after a moment)\nctx status   # should show the new learning\n
            ","path":["Operations","Runbooks","Hub Deployment"],"tags":[]},{"location":"operations/runbooks/hub-deployment/#step-6-configure-backup","level":2,"title":"Step 6: Configure Backup","text":"

            Set up regular backups of the hub data directory. See Hub Operations: Backup and Restore.

            Minimum:

            # Add to cron\n0 */6 * * * cp ~/.ctx/hub-data/entries.jsonl ~/backups/entries-$(date +\\%F).jsonl\n
            ","path":["Operations","Runbooks","Hub Deployment"],"tags":[]},{"location":"operations/runbooks/hub-deployment/#step-7-configure-tls-when-available","level":2,"title":"Step 7: Configure TLS (When Available)","text":"

            Coming Soon

            TLS support is planned (H-01/H-02). Until then, run the hub on a trusted network or behind a reverse proxy with TLS termination.

            ","path":["Operations","Runbooks","Hub Deployment"],"tags":[]},{"location":"operations/runbooks/hub-deployment/#team-onboarding-checklist","level":2,"title":"Team Onboarding Checklist","text":"

            When adding a new team member to an existing hub:

            • Generate a client token (ctx hub register --name \"<name>\")
            • Share the token and hub address securely
            • Have them run ctx connect <hub-address> --token <token>
            • Verify with ctx connection status
            • Point them to the Hub Getting Started recipe
            ","path":["Operations","Runbooks","Hub Deployment"],"tags":[]},{"location":"operations/runbooks/hub-deployment/#troubleshooting","level":2,"title":"Troubleshooting","text":"","path":["Operations","Runbooks","Hub Deployment"],"tags":[]},{"location":"operations/runbooks/hub-deployment/#connection-refused","level":3,"title":"\"Connection Refused\"","text":"

            The hub isn't running or the port is wrong. Check:

            ctx hub status          # on the hub machine\nss -tlnp | grep 9900   # default port\n
            ","path":["Operations","Runbooks","Hub Deployment"],"tags":[]},{"location":"operations/runbooks/hub-deployment/#authentication-failed","level":3,"title":"\"Authentication Failed\"","text":"

            The client token is wrong or was never registered. Re-register:

            ctx hub register --name \"<name>\" --admin-token <admin-token>\n
            ","path":["Operations","Runbooks","Hub Deployment"],"tags":[]},{"location":"operations/runbooks/hub-deployment/#entries-not-syncing","level":3,"title":"Entries Not Syncing","text":"

            Check that the client is listening:

            ctx connection status\n

            If connected but not syncing, check the hub logs for sequence mismatch errors. See Hub Failure Modes for details.

            ","path":["Operations","Runbooks","Hub Deployment"],"tags":[]},{"location":"operations/runbooks/new-contributor/","level":1,"title":"New Contributor","text":"","path":["Operations","Runbooks","New Contributor"],"tags":[]},{"location":"operations/runbooks/new-contributor/#new-contributor-onboarding","level":1,"title":"New Contributor Onboarding","text":"

            Step-by-step onboarding sequence for new contributors. Consolidates setup instructions currently scattered across the README, contributing guide, and setup docs.

            When to use: First-time contributor setup, or when verifying your development environment after a major upgrade.

            ","path":["Operations","Runbooks","New Contributor"],"tags":[]},{"location":"operations/runbooks/new-contributor/#step-1-clone-the-repository","level":2,"title":"Step 1: Clone the Repository","text":"
            git clone https://github.com/ActiveMemory/ctx.git\ncd ctx\n

            Or fork first on GitHub, then clone your fork.

            ","path":["Operations","Runbooks","New Contributor"],"tags":[]},{"location":"operations/runbooks/new-contributor/#step-2-initialize-context","level":2,"title":"Step 2: Initialize Context","text":"
            ctx init\n

            This creates the .context/ directory with knowledge files and the .claude/ directory with agent configuration. If ctx is not yet installed, proceed to Step 3 first, then come back.

            ","path":["Operations","Runbooks","New Contributor"],"tags":[]},{"location":"operations/runbooks/new-contributor/#step-3-build-and-install","level":2,"title":"Step 3: Build and Install","text":"
            make build\nsudo make install\n

            Verify:

            ctx --version\n
            ","path":["Operations","Runbooks","New Contributor"],"tags":[]},{"location":"operations/runbooks/new-contributor/#step-4-install-the-plugin-claude-code-users","level":2,"title":"Step 4: Install the Plugin (Claude Code Users)","text":"

            If you use Claude Code, install the plugin from your local clone so skills and hooks reflect your working tree:

            1. Launch claude
            2. Type /plugin and press Enter
            3. Select Marketplaces -> Add Marketplace
            4. Enter the absolute path to your clone (e.g., ~/WORKSPACE/ctx)
            5. Back in /plugin, select Install and choose ctx

            Verify:

            claude /plugin list   # should show ctx\n

            See Contributing: Install the Plugin for details on cache clearing.

            ","path":["Operations","Runbooks","New Contributor"],"tags":[]},{"location":"operations/runbooks/new-contributor/#step-5-switch-to-dev-profile","level":2,"title":"Step 5: Switch to Dev Profile","text":"
            ctx config switch dev\n

            This enables verbose logging and notify events (useful during development).

            ","path":["Operations","Runbooks","New Contributor"],"tags":[]},{"location":"operations/runbooks/new-contributor/#step-6-verify-hooks","level":2,"title":"Step 6: Verify Hooks","text":"

            Start a Claude Code session and check that hooks fire:

            claude\n

            You should see ctx session hooks (ceremonies reminder, context loading) on session start. If not, check that the plugin is installed correctly (Step 4).

            ","path":["Operations","Runbooks","New Contributor"],"tags":[]},{"location":"operations/runbooks/new-contributor/#step-7-run-your-first-session","level":2,"title":"Step 7: Run Your First Session","text":"

            In Claude Code:

            /ctx-status\n

            This should show context file health, active tasks, and recent decisions. If it works, your setup is complete.

            ","path":["Operations","Runbooks","New Contributor"],"tags":[]},{"location":"operations/runbooks/new-contributor/#step-8-verify-context-persistence","level":2,"title":"Step 8: Verify Context Persistence","text":"

            End the session and start a new one:

            /ctx-remember\n

            The agent should recall what happened in the previous session. This confirms that context persistence is working end-to-end.

            ","path":["Operations","Runbooks","New Contributor"],"tags":[]},{"location":"operations/runbooks/new-contributor/#step-9-run-tests","level":2,"title":"Step 9: Run Tests","text":"
            make test     # unit tests\nmake audit    # full check: fmt + vet + lint + drift + docs + test\n

            All tests should pass with a clean clone.

            ","path":["Operations","Runbooks","New Contributor"],"tags":[]},{"location":"operations/runbooks/new-contributor/#quick-reference","level":2,"title":"Quick Reference","text":"Task Command Build make build Install sudo make install Test make test Full audit make audit Rebuild docs site make site Serve docs locally make site-serve Clear plugin cache make plugin-reload Switch config profile ctx config switch dev","path":["Operations","Runbooks","New Contributor"],"tags":[]},{"location":"operations/runbooks/new-contributor/#next-steps","level":2,"title":"Next Steps","text":"
            • Read the contributing guide for project layout, code style, and PR process
            • Check TASKS.md for open work items
            • Ask /ctx-next for suggested work
            ","path":["Operations","Runbooks","New Contributor"],"tags":[]},{"location":"operations/runbooks/plugin-release/","level":1,"title":"Plugin Release","text":"","path":["Operations","Runbooks","Plugin Release"],"tags":[]},{"location":"operations/runbooks/plugin-release/#plugin-release","level":1,"title":"Plugin Release","text":"

            Plugin-specific release procedure. The general release checklist covers the full ctx release; this runbook covers the plugin-specific steps that are not part of that flow.

            When to use: When releasing plugin changes (new skills, hook updates, permission changes) independently of a ctx binary release, or as a sub-procedure within the full release.

            ","path":["Operations","Runbooks","Plugin Release"],"tags":[]},{"location":"operations/runbooks/plugin-release/#what-ships-in-the-plugin","level":2,"title":"What Ships in the Plugin","text":"

            The plugin lives at internal/assets/claude/ and includes:

            Component Path What it does Skills internal/assets/claude/skills/ User-facing /ctx-* slash commands Hooks internal/assets/claude/hooks/ Pre/post tool-use hooks Plugin manifest internal/assets/claude/.claude-plugin/plugin.json Declares skills, hooks, version Marketplace .claude-plugin/marketplace.json Points Claude Code to the plugin","path":["Operations","Runbooks","Plugin Release"],"tags":[]},{"location":"operations/runbooks/plugin-release/#step-1-update-hooksjson-if-hooks-changed","level":2,"title":"Step 1: Update hooks.json (If Hooks Changed)","text":"

            If you added, removed, or modified hooks:

            # Verify hook definitions match implementations\nmake audit\n

            Check that plugin.json lists all hooks correctly. Missing hooks silently fail to fire.

            ","path":["Operations","Runbooks","Plugin Release"],"tags":[]},{"location":"operations/runbooks/plugin-release/#step-2-bump-version","level":2,"title":"Step 2: Bump Version","text":"

            Update the version in three places:

            • internal/assets/claude/.claude-plugin/plugin.json
            • .claude-plugin/marketplace.json (two fields)
            • editors/vscode/package.json + package-lock.json (if VS Code extension is affected)

            The Release Script Does This

            If you're running make release, the script bumps these automatically from VERSION. Only bump manually if you're releasing the plugin independently.

            ","path":["Operations","Runbooks","Plugin Release"],"tags":[]},{"location":"operations/runbooks/plugin-release/#step-3-test-against-a-fresh-install","level":2,"title":"Step 3: Test Against a Fresh Install","text":"
            # Clear cached plugin\nmake plugin-reload\n\n# Restart Claude Code, then:\nclaude /plugin list    # verify version\n

            Test the critical paths:

            • /ctx-status works
            • Session hooks fire (ceremonies, context loading)
            • At least one user-facing skill works end-to-end
            • Pre-tool-use hooks block when they should
            ","path":["Operations","Runbooks","Plugin Release"],"tags":[]},{"location":"operations/runbooks/plugin-release/#step-4-test-against-a-clean-project","level":2,"title":"Step 4: Test Against a Clean Project","text":"

            Create a temporary project to verify the plugin works outside the ctx repo:

            mkdir /tmp/test-ctx-plugin && cd /tmp/test-ctx-plugin\ngit init\nctx init\nclaude   # start a session, verify hooks fire\n
            ","path":["Operations","Runbooks","Plugin Release"],"tags":[]},{"location":"operations/runbooks/plugin-release/#step-5-verify-skill-count","level":2,"title":"Step 5: Verify Skill Count","text":"

            The plugin manifest declares all user-invocable skills. Verify the count matches:

            # Count skills in plugin.json\njq '.skills | length' internal/assets/claude/.claude-plugin/plugin.json\n\n# Count skill directories\nls -d internal/assets/claude/skills/ctx-*/ | wc -l\n

            These counts may differ slightly: some skills are not user-invocable and will appear in only one of the two. Investigate any gap larger than the known non-invocable skills.

            ","path":["Operations","Runbooks","Plugin Release"],"tags":[]},{"location":"operations/runbooks/plugin-release/#step-6-commit-and-tag","level":2,"title":"Step 6: Commit and Tag","text":"

            If releasing independently of a binary release:

            git add internal/assets/claude/ .claude-plugin/\ngit commit -m \"chore: release plugin v0.X.Y\"\ngit tag plugin-v0.X.Y\ngit push origin main --tags\n

            If part of a full release, the release checklist handles this.

            ","path":["Operations","Runbooks","Plugin Release"],"tags":[]},{"location":"operations/runbooks/plugin-release/#troubleshooting","level":2,"title":"Troubleshooting","text":"","path":["Operations","Runbooks","Plugin Release"],"tags":[]},{"location":"operations/runbooks/plugin-release/#skills-dont-appear-after-update","level":3,"title":"Skills Don't Appear After Update","text":"

            Claude Code caches plugin files aggressively:

            make plugin-reload    # clears cache\n# restart Claude Code\n
            ","path":["Operations","Runbooks","Plugin Release"],"tags":[]},{"location":"operations/runbooks/plugin-release/#hooks-dont-fire","level":3,"title":"Hooks Don't Fire","text":"

            Check that the hook is registered in plugin.json and that the command it calls exists:

            jq '.hooks' internal/assets/claude/.claude-plugin/plugin.json\n
            ","path":["Operations","Runbooks","Plugin Release"],"tags":[]},{"location":"operations/runbooks/plugin-release/#version-mismatch","level":3,"title":"Version Mismatch","text":"

            If claude /plugin list shows an old version after updating:

            make plugin-reload\n# restart Claude Code\nclaude /plugin list   # should show new version\n
            ","path":["Operations","Runbooks","Plugin Release"],"tags":[]},{"location":"operations/runbooks/release-checklist/","level":1,"title":"Release Checklist","text":"","path":["Operations","Runbooks","Release Checklist"],"tags":[]},{"location":"operations/runbooks/release-checklist/#release-checklist","level":1,"title":"Release Checklist","text":"

            The canonical pre-release sequence. This runbook ties together the audits, tests, and release steps that are otherwise scattered across docs and the operator's head.

            When to run: Before every release. No exceptions.

            Companion: The /_ctx-release skill automates the tag-and-push portion; this checklist covers everything before and after that automation.

            ","path":["Operations","Runbooks","Release Checklist"],"tags":[]},{"location":"operations/runbooks/release-checklist/#pre-release","level":2,"title":"Pre-Release","text":"","path":["Operations","Runbooks","Release Checklist"],"tags":[]},{"location":"operations/runbooks/release-checklist/#1-run-the-codebase-audit","level":3,"title":"1. Run the Codebase Audit","text":"

            Use the codebase audit runbook prompt with your agent. Focus on analyses 1-4 (extractable patterns, documentation drift, maintainability, security). Triage findings into TASKS.md; anything blocking ships before the release.

            ","path":["Operations","Runbooks","Release Checklist"],"tags":[]},{"location":"operations/runbooks/release-checklist/#2-run-the-docs-semantic-audit","level":3,"title":"2. Run the Docs Semantic Audit","text":"

            Use the docs semantic audit runbook prompt. Fix high-severity findings (weak pages, broken narrative arcs). Medium-severity items can be deferred.

            ","path":["Operations","Runbooks","Release Checklist"],"tags":[]},{"location":"operations/runbooks/release-checklist/#3-sanitize-permissions","level":3,"title":"3. Sanitize Permissions","text":"

            Follow the sanitize permissions runbook. Clean up .claude/settings.local.json before it gets committed as part of the release.

            ","path":["Operations","Runbooks","Release Checklist"],"tags":[]},{"location":"operations/runbooks/release-checklist/#4-run-the-full-test-suite","level":3,"title":"4. Run the Full Test Suite","text":"
            make audit    # fmt + vet + lint + drift + docs + test\nmake smoke    # integration smoke tests\n

            All tests must pass. No exceptions.

            ","path":["Operations","Runbooks","Release Checklist"],"tags":[]},{"location":"operations/runbooks/release-checklist/#5-check-context-health","level":3,"title":"5. Check Context Health","text":"
            ctx drift          # broken references, stale patterns\nctx status         # context file health\n/ctx-link-check    # dead links in docs\n

            Fix anything flagged.

            ","path":["Operations","Runbooks","Release Checklist"],"tags":[]},{"location":"operations/runbooks/release-checklist/#6-review-tasksmd","level":3,"title":"6. Review TASKS.md","text":"

            Scan for incomplete tasks tagged as release-blocking. Either finish them or explicitly defer with a reason in the task note.

            ","path":["Operations","Runbooks","Release Checklist"],"tags":[]},{"location":"operations/runbooks/release-checklist/#release","level":2,"title":"Release","text":"","path":["Operations","Runbooks","Release Checklist"],"tags":[]},{"location":"operations/runbooks/release-checklist/#7-bump-version","level":3,"title":"7. Bump Version","text":"
            echo \"0.X.0\" > VERSION\ngit add VERSION\ngit commit -m \"chore: bump version to 0.X.0\"\n
            ","path":["Operations","Runbooks","Release Checklist"],"tags":[]},{"location":"operations/runbooks/release-checklist/#8-generate-release-notes","level":3,"title":"8. Generate Release Notes","text":"

            In Claude Code:

            /_ctx-release-notes\n

            Review dist/RELEASE_NOTES.md. Ensure it captures all user-visible changes.

            ","path":["Operations","Runbooks","Release Checklist"],"tags":[]},{"location":"operations/runbooks/release-checklist/#9-cut-the-release","level":3,"title":"9. Cut the Release","text":"
            make release\n

            Or in Claude Code: /_ctx-release. See Cutting a Release for the full step-by-step.

            ","path":["Operations","Runbooks","Release Checklist"],"tags":[]},{"location":"operations/runbooks/release-checklist/#post-release","level":2,"title":"Post-Release","text":"","path":["Operations","Runbooks","Release Checklist"],"tags":[]},{"location":"operations/runbooks/release-checklist/#10-verify-the-github-release","level":3,"title":"10. Verify the GitHub Release","text":"
            • GitHub Releases shows the new version
            • All 6 binaries are attached
            • SHA256 checksums are attached
            • Release notes render correctly
            ","path":["Operations","Runbooks","Release Checklist"],"tags":[]},{"location":"operations/runbooks/release-checklist/#11-update-the-plugin-marketplace","level":3,"title":"11. Update the Plugin Marketplace","text":"

            If the plugin version changed, verify the marketplace entry:

            claude /plugin list   # shows updated version\n
            ","path":["Operations","Runbooks","Release Checklist"],"tags":[]},{"location":"operations/runbooks/release-checklist/#12-announce","level":3,"title":"12. Announce","text":"

            Post in the project's communication channels. Reference the release notes.

            ","path":["Operations","Runbooks","Release Checklist"],"tags":[]},{"location":"operations/runbooks/release-checklist/#13-clean-up","level":3,"title":"13. Clean Up","text":"
            rm dist/RELEASE_NOTES.md   # consumed by the release script\ngit stash pop              # if you stashed earlier\n
            ","path":["Operations","Runbooks","Release Checklist"],"tags":[]},{"location":"operations/runbooks/sanitize-permissions/","level":1,"title":"Sanitize Permissions","text":"","path":["Operations","Runbooks","Sanitize Permissions"],"tags":[]},{"location":"operations/runbooks/sanitize-permissions/#sanitize-permissions","level":1,"title":"Sanitize Permissions","text":"

            Manual procedure for cleaning up .claude/settings.local.json. The agent may analyze and recommend, but you make every edit.

            ","path":["Operations","Runbooks","Sanitize Permissions"],"tags":[]},{"location":"operations/runbooks/sanitize-permissions/#why-manual-not-automated","level":2,"title":"Why Manual, Not Automated","text":"

            settings.local.json controls what the agent can do without asking. An agent that can edit its own permission file is a self-escalation vector, especially if the skill is auto-accepted. Keep this manual.

            When to run: After busy sessions where you clicked \"Allow\" many times, weekly hygiene (pair with ctx drift), or before committing .claude/settings.local.json.

            ","path":["Operations","Runbooks","Sanitize Permissions"],"tags":[]},{"location":"operations/runbooks/sanitize-permissions/#step-1-snapshot","level":2,"title":"Step 1: Snapshot","text":"
            cp .claude/settings.local.json /tmp/settings-backup-$(date +%Y%m%d).json\n
            ","path":["Operations","Runbooks","Sanitize Permissions"],"tags":[]},{"location":"operations/runbooks/sanitize-permissions/#step-2-extract-the-allow-list","level":2,"title":"Step 2: Extract the Allow List","text":"
            jq '.permissions.allow[]' .claude/settings.local.json | sort\n

            Eyeball it. You're looking for four categories:

            ","path":["Operations","Runbooks","Sanitize Permissions"],"tags":[]},{"location":"operations/runbooks/sanitize-permissions/#step-3-identify-problems","level":2,"title":"Step 3: Identify Problems","text":"","path":["Operations","Runbooks","Sanitize Permissions"],"tags":[]},{"location":"operations/runbooks/sanitize-permissions/#a-garbage-nonsense","level":3,"title":"A. Garbage / Nonsense","text":"

            Entries that are clearly broken or meaningless:

            Bash(done)\nBash(__NEW_LINE_aa838494a90279c4__ echo \"\")\n

            Action: Delete.

            ","path":["Operations","Runbooks","Sanitize Permissions"],"tags":[]},{"location":"operations/runbooks/sanitize-permissions/#b-one-off-commands-session-debris","level":3,"title":"B. One-Off Commands (Session Debris)","text":"

            Entries with hardcoded paths, literal arguments, or exact commands that were accepted during a specific debugging session:

            Bash(git -C /home/jose/WORKSPACE/ctx log --oneline --all -20)\nBash(/home/jose/WORKSPACE/ctx/ctx add decision \"Use PostgreSQL\" --context ...)\n

            Signs of a one-off:

            • Full absolute paths to specific files
            • Literal string arguments (not wildcards)
            • Very specific flag combinations
            • Commands that look like they came from a single task

            Action: Delete unless you want to promote to a wildcard pattern.

            ","path":["Operations","Runbooks","Sanitize Permissions"],"tags":[]},{"location":"operations/runbooks/sanitize-permissions/#c-subsumed-entries-redundant","level":3,"title":"C. Subsumed Entries (Redundant)","text":"

            A narrow entry that's already covered by a broader one:

            # Narrow (redundant):\nBash(ctx journal source)\nBash(git -C /home/jose/WORKSPACE/ctx log --oneline -5)\n\n# Broad (already covers the above):\nBash(ctx journal source:*)\nBash(git -C:*)\n

            To find these, look for entries where removing the specific args would match an existing wildcard entry.

            Action: Delete the narrow entry.

            ","path":["Operations","Runbooks","Sanitize Permissions"],"tags":[]},{"location":"operations/runbooks/sanitize-permissions/#d-duplicate-intent-different-spelling","level":3,"title":"D. Duplicate Intent, Different Spelling","text":"

            Same command with env vars in different order, or slight variations:

            Bash(CGO_ENABLED=0 CTX_SKIP_PATH_CHECK=1 go test:*)\nBash(CTX_SKIP_PATH_CHECK=1 CGO_ENABLED=0 go test:*)\n

            Action: Keep one, delete the other.

            ","path":["Operations","Runbooks","Sanitize Permissions"],"tags":[]},{"location":"operations/runbooks/sanitize-permissions/#step-4-check-for-security-concerns","level":2,"title":"Step 4: Check for Security Concerns","text":"

            While you're in here, also flag:

            Pattern Risk Bash(git push:*) Bypasses block-git-push.sh hook Bash(rm -rf:*) Recursive delete, no confirmation Bash(sudo:*) Privilege escalation Bash(echo:*), Bash(cat:*) Can compose into writes to sensitive files Bash(curl:*), Bash(wget:*) Arbitrary network access Any write to .claude/ paths Agent self-modification

            See the /ctx-permission-sanitize skill for the full threat matrix.

            ","path":["Operations","Runbooks","Sanitize Permissions"],"tags":[]},{"location":"operations/runbooks/sanitize-permissions/#step-5-edit","level":2,"title":"Step 5: Edit","text":"

            Edit .claude/settings.local.json directly in your editor. Remove flagged entries. Keep the JSON valid.

            # Validate JSON after editing\njq . .claude/settings.local.json > /dev/null && echo \"valid\" || echo \"BROKEN\"\n
            ","path":["Operations","Runbooks","Sanitize Permissions"],"tags":[]},{"location":"operations/runbooks/sanitize-permissions/#step-6-verify","level":2,"title":"Step 6: Verify","text":"
            # Compare before/after\ndiff /tmp/settings-backup-$(date +%Y%m%d).json .claude/settings.local.json\n
            ","path":["Operations","Runbooks","Sanitize Permissions"],"tags":[]},{"location":"operations/runbooks/sanitize-permissions/#step-7-optionally-commit","level":2,"title":"Step 7: Optionally Commit","text":"
            git add .claude/settings.local.json\ngit commit -m \"chore: sanitize agent permissions\"\n
            ","path":["Operations","Runbooks","Sanitize Permissions"],"tags":[]},{"location":"operations/runbooks/sanitize-permissions/#asking-the-agent-for-help","level":2,"title":"Asking the Agent for Help","text":"

            You can safely ask the agent to analyze the file:

            \"Look at my settings.local.json and tell me which permissions look like one-offs or are redundant.\"

            The agent can read and report. You do the edits.

            Do not add these to your allow list:

            • Skill(ctx-permission-sanitize)
            • Edit(.claude/settings.local.json)
            • Any Bash(...) pattern that writes to .claude/
            ","path":["Operations","Runbooks","Sanitize Permissions"],"tags":[]},{"location":"operations/runbooks/sanitize-permissions/#history","level":2,"title":"History","text":"
            • 2026-02-15: Created as manual-only procedure after deciding against a self-modifying skill.
            • 2026-04-16: Moved from hack/runbooks/ to docs/operations/runbooks/.
            ","path":["Operations","Runbooks","Sanitize Permissions"],"tags":[]},{"location":"recipes/","level":1,"title":"Recipes","text":"

            Workflow recipes combining ctx commands and skills to solve specific problems.

            ","path":["Recipes"],"tags":[]},{"location":"recipes/#getting-started","level":2,"title":"Getting Started","text":"","path":["Recipes"],"tags":[]},{"location":"recipes/#guide-your-agent","level":3,"title":"Guide Your Agent","text":"

            How commands, skills, and conversational patterns work together. Train your agent to be proactive through ask, guide, reinforce.

            ","path":["Recipes"],"tags":[]},{"location":"recipes/#setup-across-ai-tools","level":3,"title":"Setup across AI Tools","text":"

            Initialize ctx and configure hooks for Claude Code, Cursor, Aider, Copilot, or Windsurf. Includes shell completion, watch mode for non-native tools, and verification.

            Uses: ctx init, ctx setup, ctx agent, ctx completion, ctx watch

            ","path":["Recipes"],"tags":[]},{"location":"recipes/#multilingual-session-parsing","level":3,"title":"Multilingual Session Parsing","text":"

            Parse session journal entries written in other languages. Configure recognized session-header prefixes so the journal pipeline works for Turkish, Japanese, and any other locale.

            Uses: ctx journal source, ctx journal import, session_prefixes in .ctxrc

            ","path":["Recipes"],"tags":[]},{"location":"recipes/#keeping-context-in-a-separate-repo","level":3,"title":"Keeping Context in a Separate Repo","text":"

            Store context files outside the project tree: in a private repo, shared directory, or anywhere else. Useful for open source projects with private context or multi-repo setups.

            Uses: ctx init, --context-dir, --allow-outside-cwd, .ctxrc, /ctx-status

            ","path":["Recipes"],"tags":[]},{"location":"recipes/#sessions","level":2,"title":"Sessions","text":"","path":["Recipes"],"tags":[]},{"location":"recipes/#the-complete-session","level":3,"title":"The Complete Session","text":"

            Walk through a full ctx session from start to finish:

            • Loading context,
            • Picking what to work on,
            • Committing with context,
            • Capturing, reflecting, and saving a snapshot.

            Uses: ctx status, ctx agent, /ctx-remember, /ctx-next, /ctx-commit, /ctx-reflect

            ","path":["Recipes"],"tags":[]},{"location":"recipes/#session-ceremonies","level":3,"title":"Session Ceremonies","text":"

            The two bookend rituals for every session: /ctx-remember at the start to load and confirm context, /ctx-wrap-up at the end to review the session and persist learnings, decisions, and tasks.

            Uses: /ctx-remember, /ctx-wrap-up, /ctx-commit, ctx agent, ctx add

            ","path":["Recipes"],"tags":[]},{"location":"recipes/#browsing-and-enriching-past-sessions","level":3,"title":"Browsing and Enriching Past Sessions","text":"

            Export your AI session history to a browsable journal site. Enrich entries with metadata and search across months of work.

            Uses: ctx journal source/import, ctx journal site, ctx journal obsidian, ctx serve, /ctx-history, /ctx-journal-enrich, /ctx-journal-enrich-all

            ","path":["Recipes"],"tags":[]},{"location":"recipes/#session-reminders","level":3,"title":"Session Reminders","text":"

            Leave a message for your next session. Reminders surface automatically at session start and repeat until dismissed. Date-gate reminders to surface only after a specific date.

            Uses: ctx remind, ctx remind list, ctx remind dismiss, ctx system check-reminders

            ","path":["Recipes"],"tags":[]},{"location":"recipes/#reviewing-session-changes","level":3,"title":"Reviewing Session Changes","text":"

            See what moved since your last session: context file edits, code commits, directories touched. Auto-detects session boundaries from state markers.

            Uses: ctx change, ctx agent, ctx status

            ","path":["Recipes"],"tags":[]},{"location":"recipes/#pausing-context-hooks","level":3,"title":"Pausing Context Hooks","text":"

            Silence all nudge hooks for a quick task that doesn't need ceremony overhead. Session-scoped: other sessions are unaffected. Security hooks still fire.

            Uses: ctx hook pause, ctx hook resume, /ctx-pause, /ctx-resume

            ","path":["Recipes"],"tags":[]},{"location":"recipes/#knowledge-and-tasks","level":2,"title":"Knowledge and Tasks","text":"","path":["Recipes"],"tags":[]},{"location":"recipes/#persisting-decisions-learnings-and-conventions","level":3,"title":"Persisting Decisions, Learnings, and Conventions","text":"

            Record architectural decisions with rationale, capture gotchas and lessons learned, and codify conventions so they survive across sessions and team members.

            Uses: ctx add decision, ctx add learning, ctx add convention, ctx decision reindex, ctx learning reindex, /ctx-decision-add, /ctx-learning-add, /ctx-convention-add, /ctx-reflect

            ","path":["Recipes"],"tags":[]},{"location":"recipes/#tracking-work-across-sessions","level":3,"title":"Tracking Work across Sessions","text":"

            Add, prioritize, complete, snapshot, and archive tasks. Keep TASKS.md focused as your project evolves across dozens of sessions.

            Uses: ctx add task, ctx task complete, ctx task archive, ctx task snapshot, /ctx-task-add, /ctx-archive, /ctx-next

            ","path":["Recipes"],"tags":[]},{"location":"recipes/#using-the-scratchpad","level":3,"title":"Using the Scratchpad","text":"

            Use the encrypted scratchpad for quick notes, working memory, and sensitive values during AI sessions. Natural language in, encrypted storage out.

            Uses: ctx pad, /ctx-pad, ctx pad show, ctx pad edit

            ","path":["Recipes"],"tags":[]},{"location":"recipes/#syncing-scratchpad-notes-across-machines","level":3,"title":"Syncing Scratchpad Notes across Machines","text":"

            Distribute your scratchpad encryption key, push and pull encrypted notes via git, and resolve merge conflicts when two machines edit simultaneously.

            Uses: ctx init, ctx pad, ctx pad resolve, scp

            ","path":["Recipes"],"tags":[]},{"location":"recipes/#bridging-claude-code-auto-memory","level":3,"title":"Bridging Claude Code Auto Memory","text":"

            Mirror Claude Code's auto memory (MEMORY.md) into .context/ for version control, portability, and drift detection. Import entries into structured context files with heuristic classification.

            Uses: ctx memory sync, ctx memory status, ctx memory diff, ctx memory import, ctx memory publish, ctx system check-memory-drift

            ","path":["Recipes"],"tags":[]},{"location":"recipes/#hooks-and-notifications","level":2,"title":"Hooks and Notifications","text":"","path":["Recipes"],"tags":[]},{"location":"recipes/#hook-output-patterns","level":3,"title":"Hook Output Patterns","text":"

            Choose the right output pattern for your Claude Code hooks: VERBATIM relay for user-facing reminders, hard gates for invariants, agent directives for nudges, and five more patterns across the spectrum.

            Uses: ctx plugin hooks, settings.local.json

            ","path":["Recipes"],"tags":[]},{"location":"recipes/#customizing-hook-messages","level":3,"title":"Customizing Hook Messages","text":"

            Customize what hooks say without changing what they do. Override the QA gate for Python (pytest instead of make lint), silence noisy ceremony nudges, or tailor post-commit instructions for your stack.

            Uses: ctx hook message list, ctx hook message show, ctx hook message edit, ctx hook message reset

            ","path":["Recipes"],"tags":[]},{"location":"recipes/#hook-sequence-diagrams","level":3,"title":"Hook Sequence Diagrams","text":"

            Mermaid sequence diagrams for every system hook: entry conditions, state reads, output, throttling, and exit points. Includes throttling summary table and state file reference.

            Uses: All ctx system hooks

            ","path":["Recipes"],"tags":[]},{"location":"recipes/#auditing-system-hooks","level":3,"title":"Auditing System Hooks","text":"

            The 12 system hooks that run invisibly during every session: what each one does, why it exists, and how to verify they're actually firing. Covers webhook-based audit trails, log inspection, and detecting silent hook failures.

            Uses: ctx system, ctx hook notify, .context/logs/, .ctxrc notify.events

            ","path":["Recipes"],"tags":[]},{"location":"recipes/#webhook-notifications","level":3,"title":"Webhook Notifications","text":"

            Get push notifications when loops complete, hooks fire, or agents hit milestones. Webhook URL is encrypted: never stored in plaintext. Works with IFTTT, Slack, Discord, ntfy.sh, or any HTTP endpoint.

            Uses: ctx hook notify setup, ctx hook notify test, ctx hook notify --event, .ctxrc notify.events

            ","path":["Recipes"],"tags":[]},{"location":"recipes/#configuration-profiles","level":3,"title":"Configuration Profiles","text":"

            Switch between dev and base runtime configurations without editing .ctxrc by hand. Verbose logging and webhooks for debugging, clean defaults for normal sessions.

            Uses: ctx config switch, ctx config status, /ctx-config

            ","path":["Recipes"],"tags":[]},{"location":"recipes/#maintenance","level":2,"title":"Maintenance","text":"","path":["Recipes"],"tags":[]},{"location":"recipes/#detecting-and-fixing-drift","level":3,"title":"Detecting and Fixing Drift","text":"

            Keep context files accurate by detecting structural drift (stale paths, missing files, stale file ages) and task staleness.

            Uses: ctx drift, ctx sync, ctx compact, ctx status, /ctx-drift, /ctx-status, /ctx-prompt-audit

            ","path":["Recipes"],"tags":[]},{"location":"recipes/#state-directory-maintenance","level":3,"title":"State Directory Maintenance","text":"

            Clean up session tombstones from .context/state/. Prune old per-session files, identify stale global markers, and keep the state directory lean.

            Uses: ctx prune

            ","path":["Recipes"],"tags":[]},{"location":"recipes/#troubleshooting","level":3,"title":"Troubleshooting","text":"

            Diagnose hook failures, noisy nudges, stale context, and configuration issues. Start with ctx doctor for a structural health check, then use /ctx-doctor for agent-driven analysis of event patterns.

            Uses: ctx doctor, ctx hook event, /ctx-doctor

            ","path":["Recipes"],"tags":[]},{"location":"recipes/#claude-code-permission-hygiene","level":3,"title":"Claude Code Permission Hygiene","text":"

            Keep .claude/settings.local.json clean: recommended safe defaults, what to never pre-approve, and a maintenance workflow for cleaning up session debris.

            Uses: ctx init, /ctx-drift, /ctx-permission-sanitize, ctx permission snapshot, ctx permission restore

            ","path":["Recipes"],"tags":[]},{"location":"recipes/#permission-snapshots","level":3,"title":"Permission Snapshots","text":"

            Capture a known-good permission baseline as a golden image, then restore at session start to automatically drop session-accumulated permissions.

            Uses: ctx permission snapshot, ctx permission restore, /ctx-permission-sanitize

            ","path":["Recipes"],"tags":[]},{"location":"recipes/#turning-activity-into-content","level":3,"title":"Turning Activity into Content","text":"

            Generate blog posts from project activity, write changelog posts from commit ranges, and publish a browsable journal site from your session history.

            The output is generic Markdown, but the skills are tuned for the ctx-style blog artifacts you see on this website.

            Uses: ctx journal site, ctx journal obsidian, ctx serve, ctx journal import, /ctx-blog, /ctx-blog-changelog, /ctx-journal-enrich

            ","path":["Recipes"],"tags":[]},{"location":"recipes/#importing-claude-code-plans","level":3,"title":"Importing Claude Code Plans","text":"

            Import Claude Code plan files (~/.claude/plans/*.md) into specs/ as permanent project specs. Filter by date, select interactively, and optionally create tasks referencing each imported spec.

            Uses: /ctx-plan-import, /ctx-task-add

            ","path":["Recipes"],"tags":[]},{"location":"recipes/#design-before-coding","level":3,"title":"Design Before Coding","text":"

            Front-load design with a four-skill chain: brainstorm the approach, spec the design, task the work, implement step-by-step. Each step produces an artifact that feeds the next.

            Uses: /ctx-brainstorm, /ctx-spec, /ctx-task-add, /ctx-implement, /ctx-decision-add

            ","path":["Recipes"],"tags":[]},{"location":"recipes/#agents-and-automation","level":2,"title":"Agents and Automation","text":"","path":["Recipes"],"tags":[]},{"location":"recipes/#building-project-skills","level":3,"title":"Building Project Skills","text":"

            Encode repeating workflows into reusable skills the agent loads automatically. Covers the full cycle: identify a pattern, create the skill, test with realistic prompts, and iterate until it triggers correctly.

            Uses: /ctx-skill-create, ctx init

            ","path":["Recipes"],"tags":[]},{"location":"recipes/#running-an-unattended-ai-agent","level":3,"title":"Running an Unattended AI Agent","text":"

            Set up a loop where an AI agent works through tasks overnight without you at the keyboard, using ctx for persistent memory between iterations.

            This recipe shows how ctx supports long-running agent loops without losing context or intent.

            Uses: ctx init, ctx loop, ctx watch, ctx load, /ctx-loop, /ctx-implement

            ","path":["Recipes"],"tags":[]},{"location":"recipes/#when-to-use-a-team-of-agents","level":3,"title":"When to Use a Team of Agents","text":"

            Decision framework for choosing between a single agent, parallel worktrees, and a full agent team.

            This recipe covers the file overlap test, when teams make things worse, and what ctx provides at each level.

            Uses: /ctx-worktree, /ctx-next, ctx status

            ","path":["Recipes"],"tags":[]},{"location":"recipes/#parallel-agent-development-with-git-worktrees","level":3,"title":"Parallel Agent Development with Git Worktrees","text":"

            Split a large backlog across 3-4 agents using git worktrees, each on its own branch and working directory. Group tasks by file overlap, work in parallel, merge back.

            Uses: /ctx-worktree, /ctx-next, git worktree, git merge

            ","path":["Recipes"],"tags":[]},{"location":"recipes/#architecture-deep-dive","level":3,"title":"Architecture Deep Dive","text":"

            Three-pass pipeline for understanding a codebase: map what exists, enrich with code intelligence, then hunt for where it will silently fail. Produces architecture docs, quantified dependency data, and ranked failure hypotheses.

            Uses: /ctx-architecture, /ctx-architecture-enrich, /ctx-architecture-failure-analysis

            ","path":["Recipes"],"tags":[]},{"location":"recipes/#writing-steering-files","level":3,"title":"Writing Steering Files","text":"

            Tell your AI assistant how to behave with rule-based prompt injection that fires automatically when prompts match a description. Walks through scaffolding a steering file, previewing matches, and syncing to each AI tool's native format.

            Uses: ctx steering add, ctx steering preview, ctx steering list, ctx steering sync

            ","path":["Recipes"],"tags":[]},{"location":"recipes/#authoring-lifecycle-triggers","level":3,"title":"Authoring Lifecycle Triggers","text":"

            Run executable shell scripts at session-start, pre-tool-use, file-save, and other lifecycle events. Script-based automation (complementary to steering's rule-based prompts), with a security-first workflow: scaffold disabled, test with mock input, enable only after review.

            Uses: ctx trigger add, ctx trigger test, ctx trigger enable, ctx trigger disable, ctx trigger list

            ","path":["Recipes"],"tags":[]},{"location":"recipes/#hub","level":2,"title":"Hub","text":"","path":["Recipes"],"tags":[]},{"location":"recipes/#hub-overview","level":3,"title":"Hub Overview","text":"

            Mental model and three user stories for the ctx Hub. What flows, what doesn't, and when not to use it. Read this before any of the other Hub recipes.

            Uses: ctx hub, ctx connection, ctx add --share

            ","path":["Recipes"],"tags":[]},{"location":"recipes/#ctx-hub-getting-started","level":3,"title":"ctx Hub: Getting Started","text":"

            Stand up a single-node hub on localhost, register two projects, publish a decision from one, and watch it appear in the other. End-to-end in under five minutes.

            Uses: ctx hub start, ctx connection register, ctx connection subscribe, ctx connection sync, ctx connection listen, ctx add --share, ctx agent --include-hub

            ","path":["Recipes"],"tags":[]},{"location":"recipes/#personal-cross-project-brain","level":3,"title":"Personal Cross-Project Brain","text":"

            Story 1 day-to-day workflow: one developer, many projects, one hub on localhost. Records a learning in project A, watches it show up automatically in project B. Walks through a realistic day of using the hub as passive infrastructure (no manual sync, no git push, no ceremony).

            Uses: ctx add --share, ctx connection subscribe, ctx agent --include-hub

            ","path":["Recipes"],"tags":[]},{"location":"recipes/#team-knowledge-bus","level":3,"title":"Team Knowledge Bus","text":"

            Story 2 day-to-day workflow: a small trusted team sharing decisions, learnings, and conventions via a hub on an internal server. Covers the team's publishing culture, what belongs on the hub vs. local, token management, and the social rules that make a shared knowledge stream stay signal-rich.

            Uses: ctx add --share, ctx connection status, ctx connection subscribe, ctx hub status

            ","path":["Recipes"],"tags":[]},{"location":"recipes/#ctx-hub-multi-machine","level":3,"title":"ctx Hub: Multi-Machine","text":"

            Run the hub on a LAN host as a daemon and connect from project directories on other workstations. Firewall guidance, TLS via a reverse proxy, and safe daemon restart semantics.

            Uses: ctx hub start --daemon, ctx hub stop, ctx connection register, ctx connection status

            ","path":["Recipes"],"tags":[]},{"location":"recipes/#ctx-hub-ha-cluster","level":3,"title":"ctx Hub: HA Cluster","text":"

            Raft-based leader election across three or more nodes for redundancy. Covers bootstrap, runtime peer management, graceful stepdown, and the Raft-lite durability caveat.

            Uses: ctx hub start --peers, ctx hub status, ctx hub peer add/remove, ctx hub stepdown

            ","path":["Recipes"],"tags":[]},{"location":"recipes/architecture-deep-dive/","level":1,"title":"Architecture Deep Dive","text":"","path":["Architecture Deep Dive"],"tags":[]},{"location":"recipes/architecture-deep-dive/#the-problem","level":2,"title":"The Problem","text":"

            Understanding a codebase at the surface level is easy. Understanding where it will break under real-world conditions takes three passes: mapping what exists, quantifying how it connects, and hunting for where it silently fails. Most teams stop at the first pass.

            ","path":["Architecture Deep Dive"],"tags":[]},{"location":"recipes/architecture-deep-dive/#tldr","level":2,"title":"TL;DR","text":"
            # Pass 1: Map the system\n/ctx-architecture\n\n# Pass 2: Enrich with code intelligence\n/ctx-architecture-enrich\n\n# Pass 3: Hunt for failure modes\n/ctx-architecture-failure-analysis\n

            Each pass builds on the previous one. Run them in order. The output accumulates in .context/; each pass reads the prior artifacts and extends them.

            ","path":["Architecture Deep Dive"],"tags":[]},{"location":"recipes/architecture-deep-dive/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Tool Type Purpose /ctx-architecture Skill Map modules, dependencies, data flow, patterns /ctx-architecture-enrich Skill Verify blast radius and flows with code intel /ctx-architecture-failure-analysis Skill Generate falsifiable incident hypotheses ctx drift CLI Detect stale paths and broken references ctx status CLI Quick structural overview","path":["Architecture Deep Dive"],"tags":[]},{"location":"recipes/architecture-deep-dive/#the-workflow","level":2,"title":"The Workflow","text":"","path":["Architecture Deep Dive"],"tags":[]},{"location":"recipes/architecture-deep-dive/#pass-1-map-what-exists","level":3,"title":"Pass 1: Map What Exists","text":"
            /ctx-architecture\n

            Produces:

            • ARCHITECTURE.md: succinct project map (< 4000 tokens), loaded at every session start
            • DETAILED_DESIGN*.md: deep per-module reference with exported API, data flow, danger zones, extension points
            • CHEAT-SHEETS.md: lifecycle flow diagrams
            • map-tracking.json: coverage state with confidence scores

            This pass forces deep code reading. No shortcuts, no code intelligence tools; the agent reads every module it analyzes. That forced reading is what makes the subsequent passes useful.

            When to run: First time on a codebase, or after significant structural changes (new packages, moved files, changed dependencies).

            Principal mode: Add principal to get strategic analysis (ARCHITECTURE-PRINCIPAL.md, DANGER-ZONES.md from P4):

            /ctx-architecture principal\n
            ","path":["Architecture Deep Dive"],"tags":[]},{"location":"recipes/architecture-deep-dive/#pass-2-enrich-with-code-intelligence","level":3,"title":"Pass 2: Enrich with Code Intelligence","text":"
            /ctx-architecture-enrich\n

            Takes the Pass 1 artifacts as baseline and layers on verified, graph-backed data from GitNexus:

            • Blast radius numbers for key functions
            • Execution flow traces through hot paths
            • Domain clustering validation
            • Registration site discovery

            This pass does not replace reading; it quantifies what reading found. If Pass 1 says \"module X depends on module Y,\" Pass 2 says \"module X has 47 callers in module Y, and changing function Z would affect 12 downstream consumers.\"

            When to run: After Pass 1, when you need quantified confidence for refactoring decisions or risk assessment.

            Requires: GitNexus MCP server connected.

            ","path":["Architecture Deep Dive"],"tags":[]},{"location":"recipes/architecture-deep-dive/#pass-3-hunt-for-failure-modes","level":3,"title":"Pass 3: Hunt for Failure Modes","text":"
            /ctx-architecture-failure-analysis\n

            The adversarial pass. Reads all prior artifacts, then systematically hunts for correctness bugs across 9 failure categories:

            1. Concurrency (races, deadlocks, goroutine leaks)
            2. Ordering assumptions (init, registration, shutdown)
            3. Cache staleness (TTL-less, read-your-writes, cross-process)
            4. Fan-out amplification (N+1, retry storms)
            5. Ownership and lifecycle (orphans, double-close)
            6. Error handling (silent swallowing, partial failure)
            7. Scaling cliffs (quadratic, unbounded, global locks)
            8. Idempotency failures (duplicate processing, retry mutations)
            9. State machine drift (illegal states, unvalidated transitions)

            Every finding must meet an evidence standard: code path, trigger, failure path, silence reason, and code evidence. A mandatory challenge phase attempts to disprove each finding before it is accepted. Findings carry a confidence level (High/Medium/Low) and explicit risk score.

            Produces DANGER-ZONES.md, a ranked inventory of findings split into Critical and Elevated tiers.

            When to run: Before releases, after major refactors, when investigating incident categories, or when onboarding.

            ","path":["Architecture Deep Dive"],"tags":[]},{"location":"recipes/architecture-deep-dive/#what-you-get","level":2,"title":"What You Get","text":"

            After all three passes, .context/ contains:

            File From Purpose ARCHITECTURE.md Pass 1 System map (session-start context) DETAILED_DESIGN*.md Pass 1 Module-level deep reference CHEAT-SHEETS.md Pass 1 Lifecycle flow diagrams map-tracking.json Pass 1 Coverage and confidence data CONVERGENCE-REPORT.md Pass 1 What's covered, what's not DANGER-ZONES.md Pass 3 Ranked failure hypotheses

            Pass 2 enriches Pass 1 artifacts in-place rather than creating new files.

            ","path":["Architecture Deep Dive"],"tags":[]},{"location":"recipes/architecture-deep-dive/#tips","level":2,"title":"Tips","text":"
            • Run Pass 1 with focus areas if the codebase is large. The skill asks what to go deep on, so name the modules you're about to change.
            • You don't need all three passes every time. Pass 1 is the foundation. Pass 2 and 3 are for when you need quantified confidence or adversarial rigor.
            • Re-run Pass 1 incrementally. It tracks coverage in map-tracking.json and only re-analyzes stale modules.
            • Pass 3 is most valuable before releases. The ranked DANGER-ZONES.md is a pre-release checklist.
            • The trilogy maps to a question progression: How does it work? How well does it connect? Where will it break?
            ","path":["Architecture Deep Dive"],"tags":[]},{"location":"recipes/architecture-deep-dive/#see-also","level":2,"title":"See Also","text":"

            See also: Detecting and Fixing Context Drift to keep architecture artifacts fresh between deep-dive sessions.

            See also: Detecting and Fixing Context Drift for structural checks that complement architecture analysis.

            ","path":["Architecture Deep Dive"],"tags":[]},{"location":"recipes/autonomous-loops/","level":1,"title":"Running an Unattended AI Agent","text":"","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#the-problem","level":2,"title":"The Problem","text":"

            You have a project with a clear list of tasks, and you want an AI agent to work through them autonomously: overnight, unattended, without you sitting at the keyboard.

            Each iteration needs to remember what the previous one did, mark tasks as completed, and know when to stop.

            Without persistent memory, every iteration starts fresh and the loop collapses. With ctx, each iteration can pick up where the last one left off, but only if the agent persists its context as part of the work.

            Unattended operation works because the agent treats context persistence as a first-class deliverable, not an afterthought.

            ","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#tldr","level":2,"title":"TL;DR","text":"
            ctx init                                    # 1. init context\n# Edit TASKS.md with phased work items\nctx loop --tool claude --max-iterations 10  # 2. generate loop.sh\n./loop.sh 2>&1 | tee /tmp/loop.log &        # 3. run the loop\nctx watch --log /tmp/loop.log               # 4. process context updates\n# Next morning:\nctx status && ctx load                      # 5. review the results\n

            Read on for permissions, isolation, and completion signals.

            ","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Tool Type Purpose ctx init Command Initialize project context and prompt templates ctx loop Command Generate the loop shell script ctx watch Command Monitor AI output and persist context updates ctx load Command Display assembled context (for debugging) /ctx-loop Skill Generate loop script from inside Claude Code /ctx-implement Skill Execute a plan step-by-step with verification","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#the-workflow","level":2,"title":"The Workflow","text":"","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#step-1-initialize-for-unattended-operation","level":3,"title":"Step 1: Initialize for Unattended Operation","text":"

            Start by creating a .context/ directory configured so the agent can work without human input.

            ctx init\n

            This creates .context/ with the template files (including a loop prompt at .context/loop.md), and seeds Claude Code permissions in .claude/settings.local.json. Install the ctx plugin for hooks and skills.

            ","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#step-2-populate-tasksmd-with-phased-work","level":3,"title":"Step 2: Populate TASKS.md with Phased Work","text":"

            Open .context/TASKS.md and organize your work into phases. The agent works through these systematically, top to bottom, using priority tags to break ties.

            # Tasks\n\n## Phase 1: Foundation\n\n- [ ] Set up project structure and build system `#priority:high`\n- [ ] Configure testing framework `#priority:high`\n- [ ] Create CI pipeline `#priority:medium`\n\n## Phase 2: Core Features\n\n- [ ] Implement user registration `#priority:high`\n- [ ] Add email verification `#priority:high`\n- [ ] Create password reset flow `#priority:medium`\n\n## Phase 3: Hardening\n\n- [ ] Add rate limiting to API endpoints `#priority:medium`\n- [ ] Improve error messages `#priority:low`\n- [ ] Write integration tests `#priority:medium`\n

            Phased organization matters because it gives the agent natural boundaries. Phase 1 tasks should be completable without Phase 2 code existing yet.

            ","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#step-3-configure-the-loop-prompt","level":3,"title":"Step 3: Configure the Loop Prompt","text":"

            The loop prompt at .context/loop.md instructs the agent to operate autonomously:

            1. Read .context/CONSTITUTION.md first (hard rules, never violated)
            2. Load context from .context/ files
            3. Pick one task per iteration
            4. Complete the task and update context files
            5. Commit changes (including .context/)
            6. Signal status with a completion signal

            You can customize .context/loop.md for your project. The critical parts are the one-task-per-iteration discipline, proactive context persistence, and completion signals at the end:

            ## Signal Status\n\nEnd your response with exactly ONE of:\n\n* `SYSTEM_CONVERGED`: All tasks in `TASKS.md` are complete (*this is the\n  signal the loop script detects by default*)\n* `SYSTEM_BLOCKED`: Cannot proceed, need human input (explain why)\n* (*no signal*): More work remains, continue to the next iteration\n\nNote: the loop script only checks for `SYSTEM_CONVERGED` by default.\n`SYSTEM_BLOCKED` is a convention for the human reviewing the log.\n
            ","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#step-4-configure-permissions","level":3,"title":"Step 4: Configure Permissions","text":"

            An unattended agent needs permission to use tools without prompting. By default, Claude Code asks for confirmation on file writes, bash commands, and other operations, which stops the loop and waits for a human who is not there.

            There are two approaches.

            ","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#option-a-explicit-allowlist-recommended","level":4,"title":"Option A: Explicit Allowlist (Recommended)","text":"

            Grant only the permissions the agent needs. In .claude/settings.local.json:

            {\n  \"permissions\": {\n    \"allow\": [\n      \"Bash(make:*)\",\n      \"Bash(go:*)\",\n      \"Bash(git:*)\",\n      \"Bash(ctx:*)\",\n      \"Read\",\n      \"Write\",\n      \"Edit\"\n    ]\n  }\n}\n

            Adjust the Bash patterns for your project's toolchain. The agent can run make, go, git, and ctx commands but cannot run arbitrary shell commands.

            This is recommended even in sandboxed environments because it limits blast radius.

            ","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#option-b-skip-all-permission-checks","level":4,"title":"Option B: Skip All Permission Checks","text":"

            Claude Code supports a --dangerously-skip-permissions flag that disables all permission prompts:

            claude --dangerously-skip-permissions -p \"$(cat .context/loop.md)\"\n

            This Flag Means What It Says

            With --dangerously-skip-permissions, the agent can execute any shell command, write to any file, and make network requests without confirmation.

            Only use this on a sandboxed machine: ideally a virtual machine with no access to host credentials, no SSH keys, and no access to production systems.

            If you would not give an untrusted intern sudo on this machine, do not use this flag.

            ","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#enforce-isolation-at-the-os-level","level":4,"title":"Enforce Isolation at the OS Level","text":"

            The only controls an agent cannot override are the ones enforced by the operating system, the container runtime, or the hypervisor.

            Do Not Skip This Section

            This is not optional hardening:

            An unattended agent with unrestricted OS access is an unattended shell with unrestricted OS access.

            The allowlist above is a strong first layer, but do not rely on a single runtime boundary.

            For unattended runs, enforce isolation at the infrastructure level:

            Layer What to enforce User account Run the agent as a dedicated unprivileged user with no sudo access and no membership in privileged groups (docker, wheel, adm). Filesystem Restrict the project directory via POSIX permissions or ACLs. The agent should have no access to other users' files or system directories. Container Run inside a Docker/Podman sandbox. Mount only the project directory. Drop capabilities (--cap-drop=ALL). Disable network if not needed (--network=none). Never mount the Docker socket and do not run privileged containers. Prefer rootless containers. Virtual machine Prefer a dedicated VM with no shared folders, no host passthrough, and no keys to other machines. Network If the agent does not need the internet, disable outbound access entirely. If it does, restrict to specific domains via firewall rules. Resource limits Apply CPU, memory, and disk limits (cgroups/container limits). A runaway loop should not fill disk or consume all RAM. Self-modification Make instruction files read-only. CLAUDE.md, .claude/settings.local.json, and .context/CONSTITUTION.md should not be writable by the agent user. If using project-local hooks, protect those too.

            A minimal Docker setup for overnight runs:

            docker run --rm \\\n  --network=none \\\n  --cap-drop=ALL \\\n  --memory=4g \\\n  --cpus=2 \\\n  -v /path/to/project:/workspace \\\n  -w /workspace \\\n  your-dev-image \\\n  ./loop.sh 2>&1 | tee /tmp/loop.log\n

            Defense in Depth

            Use multiple layers together: OS-level isolation (the boundary the agent cannot cross), a permission allowlist (what Claude Code will do within that boundary), and CONSTITUTION.md (a soft nudge for the common case).

            ","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#step-5-generate-the-loop-script","level":3,"title":"Step 5: Generate the Loop Script","text":"

            Use ctx loop to generate a loop.sh tailored to your AI tool:

            # Generate for Claude Code with a 10-iteration cap\nctx loop --tool claude --max-iterations 10\n\n# Generate for Aider\nctx loop --tool aider --max-iterations 10\n\n# Custom prompt file and output filename\nctx loop --tool claude --prompt my-prompt.md --output my-loop.sh\n

            The generated script reads .context/loop.md, runs the tool, checks for completion signals, and loops until done or the cap is reached.

            You can also use the /ctx-loop skill from inside Claude Code.

            A Shell Loop Is the Best Practice

            The shell loop approach spawns a fresh AI process each iteration, so the only state that carries between iterations is what lives in .context/ and git.

            Claude Code's built-in /loop runs iterations within the same session, which can allow context window state to leak between iterations. This can be convenient for short runs, but it is less reliable for unattended loops.

            See Shell Loop vs Built-in Loop for details.

            ","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#step-6-run-with-watch-mode","level":3,"title":"Step 6: Run with Watch Mode","text":"

            Open two terminals. In the first, run the loop. In the second, run ctx watch to process context updates from the AI output.

            # Terminal 1: Run the loop\n./loop.sh 2>&1 | tee /tmp/loop.log\n\n# Terminal 2: Watch for context updates\nctx watch --log /tmp/loop.log\n

            The watch command parses XML context-update commands from the AI output and applies them:

            <context-update type=\"complete\">user registration</context-update>\n<context-update type=\"learning\"\n  context=\"Setting up user registration\"\n  lesson=\"Email verification needs SMTP configured\"\n  application=\"Add SMTP setup to deployment checklist\"\n>SMTP Requirement</context-update>\n
            ","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#step-7-completion-signals-end-the-loop","level":3,"title":"Step 7: Completion Signals End the Loop","text":"

            The generated script checks for one completion signal per run. By default this is SYSTEM_CONVERGED. You can change it with the --completion flag:

            ctx loop --tool claude --completion BOOTSTRAP_COMPLETE --max-iterations 5\n

            The following signals are conventions used in .context/loop.md:

            Signal Convention How the script handles it SYSTEM_CONVERGED All tasks in TASKS.md are done Detected by default (--completion default value) SYSTEM_BLOCKED Agent cannot proceed Only detected if you set --completion to this BOOTSTRAP_COMPLETE Initial scaffolding done Only detected if you set --completion to this

            The script uses grep -q on the agent's output, so any string works as a signal. If you need to detect multiple signals in one run, edit the generated loop.sh to add additional grep checks.

            When you return in the morning, check the log and the context files:

            tail -100 /tmp/loop.log\nctx status\nctx load\n
            ","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#step-8-use-ctx-implement-for-plan-execution","level":3,"title":"Step 8: Use /ctx-implement for Plan Execution","text":"

            Within each iteration, the agent can use /ctx-implement to execute multi-step plans with verification between steps. This is useful for complex tasks that touch multiple files.

            The skill breaks a plan into atomic, verifiable steps:

            Step 1/6: Create user model .................. OK\nStep 2/6: Add database migration ............. OK\nStep 3/6: Implement registration handler ..... OK\nStep 4/6: Write unit tests ................... OK\nStep 5/6: Run test suite ..................... FAIL\n  -> Fixed: missing test dependency\n  -> Re-verify ............................... OK\nStep 6/6: Update TASKS.md .................... OK\n

            Each step is verified (build, test, syntax check) before moving to the next.

            ","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#putting-it-all-together","level":2,"title":"Putting It All Together","text":"

            A typical overnight run:

            ctx init\n# Edit TASKS.md and .context/loop.md\n\nctx loop --tool claude --max-iterations 20\n\n./loop.sh 2>&1 | tee /tmp/loop.log &\nctx watch --log /tmp/loop.log\n\n# Next morning:\nctx status\nctx load\n
            ","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#why-autonomous-loops-work-proactive-context-persistence","level":2,"title":"Why Autonomous Loops Work: Proactive Context Persistence","text":"

            The autonomous loop pattern works because the agent persists context as part of the job.

            Without proactive persistence, the loop degrades into disconnected iterations that repeat work, contradict decisions, and lose track of progress. The agent cannot rely on a human to prompt it. It must treat context updates as part of every task, not as a separate step.

            ","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#the-agent-playbook-self-check-drives-autonomy","level":3,"title":"The Agent Playbook Self-Check Drives Autonomy","text":"

            The Agent Playbook defines a self-check prompt the agent applies continuously:

            \"If this session ended right now, would the next session know what happened?\"\n

            In interactive sessions, a human might remind the agent to save context. When the agent runs unattended, nobody is there to remind it.

            The self-check becomes the trigger: after meaningful work, the agent evaluates whether the context files reflect reality and updates them immediately if they do not.

            ","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#what-the-agent-does-proactively-between-iterations","level":3,"title":"What the Agent Does Proactively between Iterations","text":"

            At milestones within an iteration, the agent persists without waiting for instructions:

            Milestone What the agent does proactively Completes a task Marks it [x] in TASKS.md Discovers a gotcha Adds it to LEARNINGS.md Makes a design choice Records it in DECISIONS.md with rationale Identifies follow-up work Creates new tasks in TASKS.md with #added timestamp Hits an unexpected failure Documents the root cause before moving on","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#example-what-proactive-persistence-looks-like","level":3,"title":"Example: What Proactive Persistence Looks Like","text":"

            Here is what the agent does during a single iteration beyond writing code:

            Iteration 4:\n  1. Read TASKS.md -> pick \"Add email verification\" (#priority:high)\n  2. Add #started:2026-01-25-030012 to the task\n  3. Implement the feature (code, tests, docs if needed)\n  4. Tests pass -> mark task [x]\n  5. Add learning: \"SMTP config must be set before verification handler registers. Order matters in init().\"\n  6. Add decision: \"Use token-based verification links (not codes) because links work better in automated tests.\"\n  7. Create follow-up task: \"Add rate limiting to verification endpoint\" #added:...\n  8. Commit all changes including `.context/`\n  9. No signal emitted -> loop continues to iteration 5\n

            Steps 2, 4, 5, 6, and 7 are proactive context persistence:

            The agent was not asked to do any of them.

            ","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#context-persistence-at-milestones","level":3,"title":"Context Persistence at Milestones","text":"

            For long autonomous runs, the agent persists context at natural boundaries, often at phase transitions or after completing a cluster of related tasks. It updates TASKS.md, DECISIONS.md, and LEARNINGS.md as it goes.

            If the loop crashes at 4 AM, the context files tell you exactly where to resume. You can also use ctx journal source to review the session transcripts.

            ","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#the-persistence-contract","level":3,"title":"The Persistence Contract","text":"

            The autonomous loop has an implicit contract:

            1. Every iteration reads context: TASKS.md, DECISIONS.md, LEARNINGS.md
            2. Every iteration writes context: task updates, new learnings, decisions
            3. Every commit includes .context/ so the next iteration sees changes
            4. Context stays current: if the loop stopped right now, nothing important is lost

            Break any part of this contract and the loop degrades.

            ","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#tips","level":2,"title":"Tips","text":"

            Markdown Is Not Enforcement

            Your real guardrails are permissions and isolation, not Markdown. CONSTITUTION.md can nudge the agent, but it is probabilistic.

            The permission allowlist and OS isolation are deterministic:

            For unattended runs, trust the sandbox and the allowlist, not the prose.

            • Start with a small iteration cap. Use --max-iterations 5 on your first run.
            • Keep tasks atomic. Each task should be completable in a single iteration.
            • Check signal discipline. If the loop runs forever, the agent is not emitting SYSTEM_CONVERGED or SYSTEM_BLOCKED. Make the signal requirement explicit in .context/loop.md.
            • Commit after context updates. Finish code, update .context/, commit including .context/, then signal.
            • Set up webhook notifications to get notified when the loop completes, hits max iterations, or when hooks fire nudges. The generated loop script includes ctx hook notify calls automatically.
            ","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#next-up","level":2,"title":"Next Up","text":"

            When to Use a Team of Agents →: Decision framework for choosing between a single agent, parallel worktrees, and a full agent team.

            ","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#see-also","level":2,"title":"See Also","text":"
            • Autonomous Loops: loop pattern, prompt templates, troubleshooting
            • CLI Reference: ctx loop: flags and options
            • CLI Reference: ctx watch: watch mode details
            • CLI Reference: ctx init: init flags
            • The Complete Session: interactive workflow
            • Tracking Work Across Sessions: structuring TASKS.md
            ","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/building-skills/","level":1,"title":"Building Project Skills","text":"","path":["Recipes","Agents and Automation","Building Project Skills"],"tags":[]},{"location":"recipes/building-skills/#the-problem","level":2,"title":"The Problem","text":"

            You have workflows your agent needs to repeat across sessions: a deploy checklist, a review protocol, a release process. Each time, you re-explain the steps. The agent gets it mostly right but forgets edge cases you corrected last time.

            Skills solve this by encoding domain knowledge into a reusable document the agent loads automatically when triggered. A skill is not code - it is a structured prompt that captures what took you sessions to learn.

            ","path":["Recipes","Agents and Automation","Building Project Skills"],"tags":[]},{"location":"recipes/building-skills/#tldr","level":2,"title":"TL;DR","text":"
            /ctx-skill-create\n

            The skill-creator walks you through: identify a repeating workflow, draft a skill, test with realistic prompts, iterate until it triggers correctly and produces good output.

            ","path":["Recipes","Agents and Automation","Building Project Skills"],"tags":[]},{"location":"recipes/building-skills/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Tool Type Purpose /ctx-skill-create Skill Interactive skill creation and improvement workflow ctx init Command Deploys template skills to .claude/skills/ on first setup","path":["Recipes","Agents and Automation","Building Project Skills"],"tags":[]},{"location":"recipes/building-skills/#the-workflow","level":2,"title":"The Workflow","text":"","path":["Recipes","Agents and Automation","Building Project Skills"],"tags":[]},{"location":"recipes/building-skills/#step-1-identify-a-repeating-pattern","level":3,"title":"Step 1: Identify a Repeating Pattern","text":"

            Good skill candidates:

            • Checklists you repeat: deploy steps, release prep, code review
            • Decisions the agent gets wrong: if you keep correcting the same behavior, encode the correction
            • Multi-step workflows: anything with a sequence of commands and conditional branches
            • Domain knowledge: project-specific terminology, architecture constraints, or conventions the agent cannot infer from code alone

            Not good candidates: one-off instructions, things the platform already handles (file editing, git operations), or tasks too narrow to reuse.

            ","path":["Recipes","Agents and Automation","Building Project Skills"],"tags":[]},{"location":"recipes/building-skills/#step-2-create-the-skill","level":3,"title":"Step 2: Create the Skill","text":"

            Invoke the skill-creator:

            You: \"I want a skill for our deploy process\"\n\nAgent: [Asks about the workflow: what steps, what tools,\n        what edge cases, what the output should look like]\n

            Or capture a workflow you just did:

            You: \"Turn what we just did into a skill\"\n\nAgent: [Extracts the steps from conversation history,\n        confirms understanding, drafts the skill]\n

            The skill-creator produces a SKILL.md file in .claude/skills/your-skill/.

            ","path":["Recipes","Agents and Automation","Building Project Skills"],"tags":[]},{"location":"recipes/building-skills/#step-3-test-with-realistic-prompts","level":3,"title":"Step 3: Test with Realistic Prompts","text":"

            The skill-creator proposes 2-3 test prompts - the kind of thing a real user would say. It runs each one and shows the result alongside a baseline (same prompt without the skill) so you can compare.

            Agent: \"Here are test prompts I'd try:\n        1. 'Deploy to staging'\n        2. 'Ship the hotfix'\n        3. 'Run the release checklist'\n        Want to adjust these?\"\n
            ","path":["Recipes","Agents and Automation","Building Project Skills"],"tags":[]},{"location":"recipes/building-skills/#step-4-iterate-on-the-description","level":3,"title":"Step 4: Iterate on the Description","text":"

            The description field in frontmatter determines when a skill triggers. Claude tends to undertrigger - descriptions need to be specific and slightly \"pushy\":

            # Weak - too vague, will undertrigger\ndescription: \"Use for deployments\"\n\n# Strong - covers situations and synonyms\ndescription: >-\n  Use when deploying to staging or production, running the release\n  checklist, or when the user says 'ship it', 'deploy this', or\n  'push to prod'. Also use after merging to main when a deploy\n  is expected.\n

            The skill-creator helps you tune this iteratively.

            ","path":["Recipes","Agents and Automation","Building Project Skills"],"tags":[]},{"location":"recipes/building-skills/#step-5-deploy-as-template-optional","level":3,"title":"Step 5: Deploy as Template (Optional)","text":"

            If the skill should be available to all projects (not just this one), place it in internal/assets/claude/skills/ so ctx init deploys it to new projects automatically.

            Most project-specific skills stay in .claude/skills/ and travel with the repo.

            ","path":["Recipes","Agents and Automation","Building Project Skills"],"tags":[]},{"location":"recipes/building-skills/#skill-anatomy","level":2,"title":"Skill Anatomy","text":"
            my-skill/\n  SKILL.md         # Required: frontmatter + instructions (<500 lines)\n  scripts/         # Optional: deterministic code the skill can execute\n  references/      # Optional: detail loaded on demand (not always)\n  assets/          # Optional: output templates, not loaded into context\n

            Key sections in SKILL.md:

            Section Purpose Required? Frontmatter Name, description (trigger) Yes When to Use Positive triggers Yes When NOT to Use Prevents false activations Yes Process Steps and commands Yes Examples Good/bad output pairs Recommended Quality Checklist Verify before reporting completion For complex skills","path":["Recipes","Agents and Automation","Building Project Skills"],"tags":[]},{"location":"recipes/building-skills/#tips","level":2,"title":"Tips","text":"
            • Description is everything. A great skill with a vague description never fires. Spend time on trigger coverage - synonyms, concrete situations, edge cases.
            • Stay under 500 lines. If your skill is growing past this, move detail into references/ files and point to them from SKILL.md.
            • Do not duplicate the platform. If the agent already knows how to do something (edit files, run git commands), do not restate it. Tag paragraphs as Expert/Activation/Redundant and delete Redundant ones.
            • Explain why, not just what. \"Sort by date because users want recent results first\" beats \"ALWAYS sort by date.\" The agent generalizes from reasoning better than from rigid rules.
            • Test negative triggers. Make sure the skill does not fire on unrelated prompts. A skill that activates too broadly becomes noise.
            ","path":["Recipes","Agents and Automation","Building Project Skills"],"tags":[]},{"location":"recipes/building-skills/#next-up","level":2,"title":"Next Up","text":"

            Parallel Agent Development with Git Worktrees →: Split work across multiple agents using git worktrees.

            ","path":["Recipes","Agents and Automation","Building Project Skills"],"tags":[]},{"location":"recipes/building-skills/#see-also","level":2,"title":"See Also","text":"
            • Skills Reference: full listing of all bundled and project-local skills
            • Guide Your Agent: how commands, skills, and conversational patterns work together
            • Design Before Coding: the four-skill chain for front-loading design work
            ","path":["Recipes","Agents and Automation","Building Project Skills"],"tags":[]},{"location":"recipes/claude-code-permissions/","level":1,"title":"Claude Code Permission Hygiene","text":"","path":["Recipes","Maintenance","Claude Code Permission Hygiene"],"tags":[]},{"location":"recipes/claude-code-permissions/#the-problem","level":2,"title":"The Problem","text":"

            Claude Code's .claude/settings.local.json controls what the agent can do without asking. Over time, this file accumulates one-off permissions from individual sessions: Exact commands with hardcoded paths, duplicate entries, and stale skill references.

            A noisy \"allowlist\" makes it harder to spot dangerous permissions and increases the surface area for unintended behavior.

            Since settings.local.json is .gitignored, it drifts independently of your codebase. There is no PR review, no CI check: just whatever you clicked \"Allow\" on.

            This recipe shows what a well-maintained permission file looks like and how to keep it clean.

            ","path":["Recipes","Maintenance","Claude Code Permission Hygiene"],"tags":[]},{"location":"recipes/claude-code-permissions/#tldr","level":2,"title":"TL;DR","text":"
            ctx init                            # seeds safe defaults\n/ctx-drift                          # detects missing/stale permissions\n/ctx-permission-sanitize            # audits for dangerous patterns\n

            See Recommended Defaults for the full list.

            ","path":["Recipes","Maintenance","Claude Code Permission Hygiene"],"tags":[]},{"location":"recipes/claude-code-permissions/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Command/Skill Role in this workflow ctx init Populates default ctx permissions /ctx-drift Detects missing or stale permission entries /ctx-permission-sanitize Audits for dangerous patterns (security-focused)","path":["Recipes","Maintenance","Claude Code Permission Hygiene"],"tags":[]},{"location":"recipes/claude-code-permissions/#recommended-defaults","level":2,"title":"Recommended Defaults","text":"

            After running ctx init, your settings.local.json will have the ctx defaults pre-populated. Here is an opinionated safe starting point for a Go project using ctx:

            {\n  \"permissions\": {\n    \"allow\": [\n      \"Bash(/tmp/ctx-*:*)\",\n      \"Bash(CGO_ENABLED=0 go build:*)\",\n      \"Bash(CGO_ENABLED=0 go test:*)\",\n      \"Bash(ctx:*)\",\n      \"Bash(git add:*)\",\n      \"Bash(git branch:*)\",\n      \"Bash(git check-ignore:*)\",\n      \"Bash(git checkout:*)\",\n      \"Bash(git commit:*)\",\n      \"Bash(git diff:*)\",\n      \"Bash(git log:*)\",\n      \"Bash(git remote:*)\",\n      \"Bash(git restore:*)\",\n      \"Bash(git show:*)\",\n      \"Bash(git stash:*)\",\n      \"Bash(git status:*)\",\n      \"Bash(git tag:*)\",\n      \"Bash(go build:*)\",\n      \"Bash(go fmt:*)\",\n      \"Bash(go test:*)\",\n      \"Bash(go vet:*)\",\n      \"Bash(golangci-lint run:*)\",\n      \"Bash(grep:*)\",\n      \"Bash(ls:*)\",\n      \"Bash(make:*)\",\n      \"Skill(ctx-convention-add)\",\n      \"Skill(ctx-decision-add)\",\n      \"Skill(ctx-learning-add)\",\n      \"Skill(ctx-task-add)\",\n      \"Skill(ctx-agent)\",\n      \"Skill(ctx-archive)\",\n      \"Skill(ctx-blog)\",\n      \"Skill(ctx-blog-changelog)\",\n      \"Skill(absorb)\",\n      \"Skill(ctx-commit)\",\n      \"Skill(ctx-drift)\",\n      \"Skill(ctx-implement)\",\n      \"Skill(ctx-journal-enrich)\",\n      \"Skill(ctx-journal-enrich-all)\",\n      \"Skill(ctx-loop)\",\n      \"Skill(ctx-next)\",\n      \"Skill(ctx-pad)\",\n      \"Skill(ctx-prompt-audit)\",\n      \"Skill(ctx-history)\",\n      \"Skill(ctx-reflect)\",\n      \"Skill(ctx-remember)\",\n      \"Skill(ctx-status)\",\n      \"Skill(ctx-worktree)\",\n      \"WebSearch\"\n    ],\n    \"deny\": [\n      \"Bash(sudo *)\",\n      \"Bash(git push *)\",\n      \"Bash(git push)\",\n      \"Bash(rm -rf /*)\",\n      \"Bash(rm -rf ~*)\",\n      \"Bash(curl *)\",\n      \"Bash(wget *)\",\n      \"Bash(chmod 777 *)\",\n      \"Read(**/.env)\",\n      \"Read(**/.env.*)\",\n      \"Read(**/*credentials*)\",\n      \"Read(**/*secret*)\",\n      \"Read(**/*.pem)\",\n      \"Read(**/*.key)\",\n      
\"Edit(**/.env)\",\n      \"Edit(**/.env.*)\"\n    ]\n  }\n}\n

            This Is a Starting Point, Not a Mandate

            Your project may need more or fewer entries.

            The goal is intentional permissions: Every entry should be there because you decided it belongs, not because you clicked \"Allow\" once during debugging.

            ","path":["Recipes","Maintenance","Claude Code Permission Hygiene"],"tags":[]},{"location":"recipes/claude-code-permissions/#design-principles","level":3,"title":"Design Principles","text":"

            Use wildcards for trusted binaries: If you trust the binary (your own project's CLI, make, go), a single wildcard like Bash(ctx:*) beats twenty subcommand entries. It reduces noise and means new subcommands work without re-prompting.

            Keep git commands granular: Unlike ctx or make, git has both safe commands (git log, git status) and destructive ones (git reset --hard, git clean -f). Listing safe commands individually prevents accidentally pre-approving dangerous ones.

            Pre-approve all ctx- skills: Skills shipped with ctx (Skill(ctx-*)) are safe to pre-approve. They are part of your project and you control their content. This prevents the agent from prompting on every skill invocation.

            ","path":["Recipes","Maintenance","Claude Code Permission Hygiene"],"tags":[]},{"location":"recipes/claude-code-permissions/#default-deny-rules","level":3,"title":"Default Deny Rules","text":"

            ctx init automatically populates permissions.deny with rules that block dangerous operations. Deny rules are evaluated before allow rules: A denied pattern always prompts the user, even if it also matches an allow entry.

            The defaults block:

            Pattern Why Bash(sudo *) Cannot enter password; will hang Bash(git push *) Must be explicit user action Bash(rm -rf /*) etc. Recursive delete of system/home directories Bash(curl *) / wget Arbitrary network requests Bash(chmod 777 *) World-writable permissions Read/Edit(**/.env*) Secrets and credentials Read(**/*.pem, *.key) Private keys

            Read/Edit Deny Rules

            Read() and Edit() deny rules have known upstream enforcement issues (claude-code#6631,#24846).

            They are included as defense-in-depth and intent documentation.

            Blocked by default deny rules: no action needed, ctx init handles these:

            Pattern Risk Bash(git push:*) Must be explicit user action Bash(sudo:*) Privilege escalation Bash(rm -rf:*) Recursive delete with no confirmation Bash(curl:*) / Bash(wget:*) Arbitrary network requests

            Requires manual discipline: Never add these to allow:

            Pattern Risk Bash(git reset:*) Can discard uncommitted work Bash(git clean:*) Deletes untracked files Skill(ctx-permission-sanitize) Edits this file: self-modification vector Skill(release) Runs the release pipeline: high impact","path":["Recipes","Maintenance","Claude Code Permission Hygiene"],"tags":[]},{"location":"recipes/claude-code-permissions/#hooks-regex-safety-net","level":2,"title":"Hooks: Regex Safety Net","text":"

            Deny rules handle prefix-based blocking natively. Hooks complement them by catching patterns that require regex matching: Things deny rules can't express.

            The ctx plugin ships these blocking hooks:

            Hook What it blocks ctx system block-non-path-ctx Running ctx from wrong path

            Project-local hooks (not part of the plugin) catch regex edge cases:

            Hook What it blocks block-dangerous-commands.sh Mid-command sudo/git push (after &&), copies to bin dirs, absolute-path ctx

            Pre-Approved + Hook-Blocked = Silent Block

            If you pre-approve a command that a hook blocks, the user never sees the confirmation dialog. The agent gets a block response and must handle it, which is confusing.

            It's better not to pre-approve commands that hooks are designed to intercept.

            ","path":["Recipes","Maintenance","Claude Code Permission Hygiene"],"tags":[]},{"location":"recipes/claude-code-permissions/#the-maintenance-workflow","level":2,"title":"The Maintenance Workflow","text":"","path":["Recipes","Maintenance","Claude Code Permission Hygiene"],"tags":[]},{"location":"recipes/claude-code-permissions/#after-busy-sessions","level":3,"title":"After Busy Sessions","text":"

            Permissions accumulate fastest during debugging and exploration sessions. After a session where you clicked \"Allow\" many times:

            1. Open .claude/settings.local.json in your editor;
            2. Look for entries at the bottom of the allowlist (new entries append there);
            3. Delete anything that looks session-specific:
              • Exact commands with hardcoded paths,
              • Commands with literal string arguments,
              • Entries that duplicate an existing wildcard.

            See the Sanitize Permissions runbook for a step-by-step procedure.

            ","path":["Recipes","Maintenance","Claude Code Permission Hygiene"],"tags":[]},{"location":"recipes/claude-code-permissions/#periodically","level":3,"title":"Periodically","text":"

            Run /ctx-drift to catch permission drift:

            • Missing Bash(ctx:*) wildcard;
            • Missing Skill(ctx-*) entries for installed skills;
            • Stale Skill(ctx-*) entries for removed skills;
            • Granular Bash(ctx <subcommand>:*) entries that should be consolidated.

            Run /ctx-permission-sanitize to catch security issues:

            • Hook bypass patterns
            • Destructive commands
            • Overly broad permissions
            • Injection vectors
            ","path":["Recipes","Maintenance","Claude Code Permission Hygiene"],"tags":[]},{"location":"recipes/claude-code-permissions/#when-adding-new-skills","level":3,"title":"When Adding New Skills","text":"

            If you create a custom ctx-* skill, add its Skill() entry to the allowlist manually.

            ctx init only populates the default permissions: It won't pick up custom skills.

            ","path":["Recipes","Maintenance","Claude Code Permission Hygiene"],"tags":[]},{"location":"recipes/claude-code-permissions/#golden-image-snapshots","level":3,"title":"Golden Image Snapshots","text":"

            If manual cleanup is too tedious, use a golden image to automate it:

            Snapshot a curated permission set, then restore at session start to automatically drop session-accumulated permissions. See the Permission Snapshots recipe for the full workflow.

            ","path":["Recipes","Maintenance","Claude Code Permission Hygiene"],"tags":[]},{"location":"recipes/claude-code-permissions/#adapting-for-other-languages","level":2,"title":"Adapting for Other Languages","text":"

            The recommended defaults above are Go-specific. For other stacks, swap the build/test tooling:

            Node.js / TypeScript:

            \"Bash(npm run:*)\",\n\"Bash(npm test:*)\",\n\"Bash(npx:*)\",\n\"Bash(node:*)\"\n

            Python:

            \"Bash(pytest:*)\",\n\"Bash(python:*)\",\n\"Bash(pip show:*)\",\n\"Bash(ruff:*)\"\n

            Rust:

            \"Bash(cargo build:*)\",\n\"Bash(cargo test:*)\",\n\"Bash(cargo clippy:*)\",\n\"Bash(cargo fmt:*)\"\n

            The ctx, git, and skill entries remain the same across all stacks.

            ","path":["Recipes","Maintenance","Claude Code Permission Hygiene"],"tags":[]},{"location":"recipes/claude-code-permissions/#next-up","level":2,"title":"Next Up","text":"

            Permission Snapshots →: Save and restore permission baselines for reproducible setups.

            ","path":["Recipes","Maintenance","Claude Code Permission Hygiene"],"tags":[]},{"location":"recipes/claude-code-permissions/#see-also","level":2,"title":"See Also","text":"
            • Setting Up ctx Across AI Tools: full setup recipe including settings.local.json creation
            • Context Health: keeping .context/ files accurate
            • Sanitize Permissions runbook: manual cleanup procedure
            ","path":["Recipes","Maintenance","Claude Code Permission Hygiene"],"tags":[]},{"location":"recipes/configuration-profiles/","level":1,"title":"Configuration Profiles","text":"","path":["Recipes","Maintenance","Configuration Profiles"],"tags":[]},{"location":"recipes/configuration-profiles/#configuration-profiles","level":1,"title":"Configuration Profiles","text":"

            Switch between dev and base runtime configurations without editing .ctxrc by hand. Useful when you want verbose logging and webhook notifications during development, then clean defaults for normal sessions.

            Uses: ctx config switch, ctx config status, /ctx-config

            ","path":["Recipes","Maintenance","Configuration Profiles"],"tags":[]},{"location":"recipes/configuration-profiles/#how-it-works","level":2,"title":"How It Works","text":"

            The ctx repo ships two source profiles committed to git:

            File Profile Description .ctxrc.base base All defaults, notifications off .ctxrc.dev dev Verbose logging, webhook notifications on

            The working copy (.ctxrc) is gitignored. Switching profiles copies the source file over .ctxrc, so your runtime configuration is always a clean snapshot of one of the two sources.

            ","path":["Recipes","Maintenance","Configuration Profiles"],"tags":[]},{"location":"recipes/configuration-profiles/#switching-profiles","level":2,"title":"Switching Profiles","text":"
            # Switch to dev (verbose logging, notifications)\nctx config switch dev\n\n# Switch to base (defaults)\nctx config switch base\n\n# Toggle to the opposite profile\nctx config switch\n\n# \"prod\" is an alias for \"base\"\nctx config switch prod\n

            The detection heuristic checks for an uncommented notify: line in .ctxrc: present means dev, absent means base.

            ","path":["Recipes","Maintenance","Configuration Profiles"],"tags":[]},{"location":"recipes/configuration-profiles/#checking-the-active-profile","level":2,"title":"Checking the Active Profile","text":"
            ctx config status\n

            Output examples:

            active: dev (verbose logging enabled)\nactive: base (defaults)\nactive: none (.ctxrc does not exist)\n
            ","path":["Recipes","Maintenance","Configuration Profiles"],"tags":[]},{"location":"recipes/configuration-profiles/#typical-workflow","level":2,"title":"Typical Workflow","text":"
            1. Start of a debugging session: switch to dev for verbose logging and webhook notifications so you can trace hook activity and get push alerts.
            ctx config switch dev\n
            2. Work through the issue: hooks log verbosely, webhooks fire on key events (commits, ceremony nudges, drift warnings).

            3. Done debugging: switch back to base to silence the noise.

            ctx config switch base\n
            ","path":["Recipes","Maintenance","Configuration Profiles"],"tags":[]},{"location":"recipes/configuration-profiles/#customizing-profiles","level":2,"title":"Customizing Profiles","text":"

            Edit the source files directly:

            • .ctxrc.dev: add any .ctxrc keys you want active during development (e.g., log_level: debug, notify.events, notify.webhook_url).
            • .ctxrc.base: keep this minimal. It represents your \"production\" defaults.

            After editing a source file, re-run ctx config switch <profile> to apply the changes to the working copy.

            Commit Your Profiles

            Both .ctxrc.base and .ctxrc.dev should be committed to git so team members share the same profile definitions. The working copy .ctxrc stays gitignored.

            ","path":["Recipes","Maintenance","Configuration Profiles"],"tags":[]},{"location":"recipes/configuration-profiles/#using-the-skill","level":2,"title":"Using the Skill","text":"

            In a Claude Code session, say any of:

            • \"switch to dev mode\"
            • \"switch to base\"
            • \"what profile am I on?\"
            • \"toggle verbose logging\"

            The /ctx-config skill handles the rest.

            See also: ctx config reference, Configuration

            ","path":["Recipes","Maintenance","Configuration Profiles"],"tags":[]},{"location":"recipes/context-health/","level":1,"title":"Detecting and Fixing Drift","text":"","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/context-health/#the-problem","level":2,"title":"The Problem","text":"

            ctx files drift: you rename a package, delete a module, or finish a sprint, and suddenly ARCHITECTURE.md references paths that no longer exist, TASKS.md is 80 percent completed checkboxes, and CONVENTIONS.md describes patterns you stopped using two months ago.

            Stale context is worse than no context:

            An AI tool that trusts outdated references will hallucinate confidently.

            This recipe shows how to detect drift, fix it, and keep your .context/ directory lean and accurate.

            ","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/context-health/#tldr","level":2,"title":"TL;DR","text":"
            ctx drift                      # detect problems\nctx drift --fix                # auto-fix the easy ones\nctx sync --dry-run && ctx sync # reconcile after refactors\nctx compact --archive          # archive old completed tasks\nctx fmt                        # normalize line widths\nctx status                     # verify\n

            Or just ask your agent: \"Is our context clean?\"

            ","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/context-health/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Tool Type Purpose ctx drift Command Detect stale paths, missing files, violations ctx drift --fix Command Auto-fix simple issues ctx sync Command Reconcile context with codebase structure ctx compact Command Archive completed tasks, clean up empty sections ctx fmt Command Normalize context files to 80-char line width ctx status Command Quick health overview /ctx-drift Skill Structural plus semantic drift detection /ctx-architecture Skill Refresh ARCHITECTURE.md from actual codebase /ctx-status Skill In-session context summary /ctx-prompt-audit Skill Audit prompt quality and token efficiency","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/context-health/#the-workflow","level":2,"title":"The Workflow","text":"

            The best way to maintain context health is conversational: Ask your agent, guide it, and let it detect problems, explain them, and fix them with your approval. CLI commands exist for CI pipelines, scripting, and fine-grained control.

            For day-to-day maintenance, talk to your agent.

            Your Questions Reinforce the Pattern

            Asking \"is our context clean?\" does two things:

            • It triggers a drift check right now
            • It reinforces the habit

            This is reinforcement, not enforcement.

            Do not wait for the agent to be proactive on its own:

            Guide your agent, especially in early sessions.

            Over time, you will ask less and the agent will start offering more.

            ","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/context-health/#step-1-ask-your-agent","level":3,"title":"Step 1: Ask Your Agent","text":"

            The simplest way to check context health:

            Is our context clean?\nAnything stale?\nHow healthy are our context files?\n

            Or invoke the skill directly:

            /ctx-drift\n

            The agent performs two layers of analysis:

            Layer 1, structural checks (via ctx drift): Dead paths, missing files, completed task counts, constitution violations. Fast and programmatic.

            Layer 2, semantic analysis (agent-driven): Does CONVENTIONS.md describe patterns the code no longer follows? Does DECISIONS.md contain entries whose rationale no longer applies? Are there learnings about bugs that are now fixed? This is where the agent adds value the CLI cannot: It reads both context files and source code and compares them.

            The agent reports both layers together, explains each finding in plain language, and offers to fix what it can.

            ","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/context-health/#step-2-maintenance-at-session-start","level":3,"title":"Step 2: Maintenance at Session Start","text":"

            You do not need to ask explicitly.

            Using Claude Code

            ctx ships with Claude Code hooks that remind the agent at the right time to take initiative.

            Checking context health at the session start, offering to persist learnings before you quit, and flagging drift when it matters. The agent stays proactive without you having to prompt it:

            Agent: Good morning. I've loaded the context files. A few things\n       before we start:\n\n       - ARCHITECTURE.md references `pkg/auth/` which is now empty\n       - DECISIONS.md hasn't been updated in 40 days\n       - There are 18 completed tasks ready for archival\n\n       Want me to run a quick maintenance pass, or should we jump\n       straight into today's work?\n

            ☝️ This is what persistent, initiative-driven sessions feel like when context is treated as a system instead of a prompt.

            If the agent does not offer this on its own, a gentle nudge is enough:

            Anything stale before we start?\nHow's the context looking?\n

            This turns maintenance from a scheduled chore into a conversation that happens when it matters.

            ","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/context-health/#step-3-real-time-detection-during-work","level":3,"title":"Step 3: Real-Time Detection during Work","text":"

            Agents can notice drift while working: When a mismatch is directly in the path of their current task. If an agent reads ARCHITECTURE.md to find where to add a handler and internal/handlers/ doesn't exist, it will notice because the stale reference blocks its work:

            Agent: ARCHITECTURE.md references `internal/handlers/` but that directory\n       doesn't exist. I'll look at the actual source tree to find where\n       handlers live now.\n

            This happens reliably when the drift intersects the task. What is less reliable is the agent generalizing from one mismatch to \"there might be more stale references; let me run drift detection.\" That leap requires the agent to know /ctx-drift exists and to decide the current task should pause for maintenance.

            If you want that behavior, reinforce it:

            Good catch. Yes, run /ctx-drift and clean up any other stale references.\n

            Over time, agents that have seen this pattern will start offering proactively. But do not expect it from a cold start.

            ","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/context-health/#step-4-archival-and-cleanup","level":3,"title":"Step 4: Archival and Cleanup","text":"

            ctx drift detects when TASKS.md has more than 10 completed items and flags it as a staleness warning. Running ctx drift --fix archives completed tasks automatically.

            You can also run /ctx-archive to compact on demand.

            ","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/context-health/#knowledge-health-flow","level":3,"title":"Knowledge Health Flow","text":"

            Over time, LEARNINGS.md and DECISIONS.md accumulate entries that overlap or partially repeat each other. The check-persistence hook detects when entry counts exceed a configurable threshold and surfaces a nudge:

            \"LEARNINGS.md has 25+ entries. Consider running /ctx-consolidate to merge overlapping items.\"

            The consolidation workflow:

            1. Review: /ctx-consolidate groups entries by keyword similarity and presents candidate merges for your approval.
            2. Merge: Approved groups are combined into single entries that preserve the key information from each original.
            3. Archive: Originals move to .context/archive/, not deleted -- the full history is preserved in git and the archive directory.
            4. Verify: Run ctx drift after consolidation to confirm no cross-references were broken by the merge.

            This replaces ad-hoc cleanup with a repeatable, nudge-driven cycle: detect accumulation, review candidates, merge with approval, archive originals.

            See also: Knowledge Capture for the recording workflow that feeds into this maintenance cycle.

            ","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/context-health/#ctx-doctor-the-superset-check","level":2,"title":"ctx doctor: The Superset Check","text":"

            ctx doctor combines drift detection with hook auditing, configuration checks, event logging status, and token size reporting in a single command. If you want one command that covers structural health, hooks, and state:

            ctx doctor          # everything in one pass\nctx doctor --json   # machine-readable for scripting\n

            Use /ctx-doctor Too

            For agent-driven diagnosis that adds semantic analysis on top of the structural checks, use /ctx-doctor.

            See the Troubleshooting recipe for the full workflow.

            ","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/context-health/#cli-reference","level":2,"title":"CLI Reference","text":"

            The conversational approach above uses CLI commands under the hood. When you need direct control, use the commands directly.

            ","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/context-health/#ctx-drift","level":3,"title":"ctx drift","text":"

            Scan context files for structural problems:

            ctx drift\n

            Sample output:

            Drift Report\n============\n\nWarnings (3):\n  ARCHITECTURE.md:14  path \"internal/api/router.go\" does not exist\n  ARCHITECTURE.md:28  path \"pkg/auth/\" directory is empty\n  CONVENTIONS.md:9    path \"internal/handlers/\" not found\n\nViolations (1):\n  TASKS.md            31 completed tasks (recommend archival)\n\nStaleness:\n  DECISIONS.md        last modified 45 days ago\n  LEARNINGS.md        last modified 32 days ago\n\nExit code: 1 (warnings found)\n
            Level Meaning Action Warning Stale path references, missing files Fix or remove Violation Constitution rule heuristic failures, heavy clutter Fix soon Staleness Files not updated recently Review content

            Exit codes: 0 equals clean, 1 equals warnings, 3 equals violations.

            For CI integration:

            ctx drift --json | jq '.warnings | length'\n
            ","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/context-health/#ctx-drift-fix","level":3,"title":"ctx drift --fix","text":"

            Auto-fix mechanical issues:

            ctx drift --fix\n

            This handles removing dead path references, updating unambiguous renames, clearing empty sections. Issues requiring judgment are flagged but left for you.

            Run ctx drift again afterward to confirm what remains.

            ","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/context-health/#ctx-sync","level":3,"title":"ctx sync","text":"

            After a refactor, reconcile context with the actual codebase structure:

            ctx sync --dry-run   # preview first\nctx sync             # apply\n

            ctx sync scans for structural changes, compares with ARCHITECTURE.md, checks for new dependencies worth documenting, and identifies context referring to code that no longer exists.

            ","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/context-health/#ctx-compact","level":3,"title":"ctx compact","text":"

            Consolidate completed tasks and clean up empty sections:

            ctx compact            # move completed tasks to Completed section,\n                       # remove empty sections\nctx compact --archive  # also archive old tasks to .context/archive/\n
            • Tasks: moves completed items (with all subtasks done) into the Completed section of TASKS.md
            • All files: removes empty sections left behind
            • With --archive: writes tasks older than 7 days to .context/archive/tasks-YYYY-MM-DD.md

            Without --archive, nothing is deleted: Tasks are reorganized in place.

            ","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/context-health/#ctx-fmt","level":3,"title":"ctx fmt","text":"

            Normalize context file line widths:

            ctx fmt              # wrap long lines to 80 chars\nctx fmt --check      # CI: exit 1 if files need formatting\n

            Long task descriptions, decision rationale, and learning entries accumulate as single-line entries. ctx fmt wraps them at word boundaries with 2-space continuation indent for list items. Headings, tables, and comments are preserved.

            Idempotent: safe to run repeatedly.

            ","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/context-health/#ctx-status","level":3,"title":"ctx status","text":"

            Quick health overview:

            ctx status --verbose\n

            Shows file counts, token estimates, modification times, and drift warnings in a single glance.

            ","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/context-health/#ctx-prompt-audit","level":3,"title":"/ctx-prompt-audit","text":"

            Checks whether your context files are readable, compact, and token-efficient for the model.

            /ctx-prompt-audit\n
            ","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/context-health/#putting-it-all-together","level":2,"title":"Putting It All Together","text":"

            Conversational approach (recommended):

            Is our context clean?  -> agent runs structural plus semantic checks\nFix what you can       -> agent auto-fixes and proposes edits\nArchive the done tasks -> agent runs ctx compact --archive\nHow's token usage?     -> agent checks ctx status\n

            CLI approach (for CI, scripts, or direct control):

            ctx drift                      # 1. Detect problems\nctx drift --fix                # 2. Auto-fix the easy ones\nctx sync --dry-run && ctx sync # 3. Reconcile after refactors\nctx compact --archive          # 4. Archive old completed tasks\nctx fmt                        # 5. Normalize line widths\nctx status                     # 6. Verify\n
            ","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/context-health/#tips","level":2,"title":"Tips","text":"

            Agents cross-reference context files with source code during normal work. When drift intersects their current task, they will notice: a renamed package, a deleted directory, a path that doesn't resolve. But they rarely generalize from one mismatch to a full audit on their own. Reinforce the pattern: when an agent mentions a stale reference, ask it to run /ctx-drift. Over time, it starts offering.

            When an agent says \"this reference looks stale,\" it is usually right.

            Semantic drift is more damaging than structural drift: ctx drift catches dead paths. But CONVENTIONS.md describing a pattern your code stopped following three weeks ago is worse. When you ask \"is our context clean?\", the agent can do both checks.

            Use ctx status as a quick check: It shows file counts, token estimates, and drift warnings in a single glance. Good for a fast \"is everything ok?\" before diving into work.

            Drift detection in CI: add ctx drift --json to your CI pipeline and fail on exit code 3 (violations). This catches constitution-level problems before they reach upstream.

            Do not over-compact: Completed tasks have historical value. The --archive flag preserves them in .context/archive/ so you can search past work without cluttering active context.

            Sync is cautious by default: Use --dry-run after large refactors, then apply.

            ","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/context-health/#next-up","level":2,"title":"Next Up","text":"

            Claude Code Permission Hygiene →: Recommended permission defaults and maintenance workflow for Claude Code.

            ","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/context-health/#see-also","level":2,"title":"See Also","text":"
            • Troubleshooting: full diagnostic workflow using ctx doctor, event logs, and /ctx-doctor
            • Tracking Work Across Sessions: task lifecycle and archival
            • Persisting Decisions, Learnings, and Conventions: keeping knowledge files current
            • The Complete Session: where maintenance fits in the daily workflow
            • CLI Reference: full flag documentation for all commands
            • Context Files: structure and purpose of each .context/ file
            ","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/customizing-hook-messages/","level":1,"title":"Customizing Hook Messages","text":"","path":["Recipes","Hooks and Notifications","Customizing Hook Messages"],"tags":[]},{"location":"recipes/customizing-hook-messages/#the-problem","level":2,"title":"The Problem","text":"

            ctx hooks speak ctx's language, not your project's. The QA gate says \"lint the ENTIRE project\" and \"make build,\" but your Python project uses pytest and ruff. The post-commit nudge suggests running lints, but your project uses npm test. You could remove the hook entirely, but then you lose the logic (counting, state tracking, adaptive frequency) just to change the words.

            How do you customize what hooks say without removing what they do?

            ","path":["Recipes","Hooks and Notifications","Customizing Hook Messages"],"tags":[]},{"location":"recipes/customizing-hook-messages/#tldr","level":2,"title":"TL;DR","text":"
            ctx hook message list                     # see all hooks and their messages\nctx hook message show qa-reminder gate    # view the current template\nctx hook message edit qa-reminder gate    # copy default to .context/ for editing\nctx hook message reset qa-reminder gate   # revert to embedded default\n
            ","path":["Recipes","Hooks and Notifications","Customizing Hook Messages"],"tags":[]},{"location":"recipes/customizing-hook-messages/#commands-used","level":2,"title":"Commands Used","text":"Tool Type Purpose ctx hook message list CLI command Show all hook messages with category and override status ctx hook message show CLI command Print the effective message template ctx hook message edit CLI command Copy embedded default to .context/ for editing ctx hook message reset CLI command Delete user override, revert to default","path":["Recipes","Hooks and Notifications","Customizing Hook Messages"],"tags":[]},{"location":"recipes/customizing-hook-messages/#how-it-works","level":2,"title":"How It Works","text":"

            Hook messages use a 3-tier fallback:

            1. User override: .context/hooks/messages/{hook}/{variant}.txt
            2. Embedded default: compiled into the ctx binary
            3. Hardcoded fallback: belt-and-suspenders safety net

            The hook logic (when to fire, counting, state tracking, cooldowns) is unchanged. Only the content (what text gets emitted) comes from the template. You customize what the hook says without touching how it decides to speak.

            ","path":["Recipes","Hooks and Notifications","Customizing Hook Messages"],"tags":[]},{"location":"recipes/customizing-hook-messages/#finding-the-original-templates","level":3,"title":"Finding the Original Templates","text":"

            The default templates live in the ctx source tree at:

            internal/assets/hooks/messages/{hook}/{variant}.txt\n

            You can also browse them on GitHub: internal/assets/hooks/messages/

            Or use ctx hook message show to print any template without digging through source code:

            ctx hook message show qa-reminder gate        # QA gate instructions\nctx hook message show check-persistence nudge  # persistence nudge\nctx hook message show post-commit nudge        # post-commit reminder\n

            The show output includes the template source and available variables -- everything you need to write a replacement.

            ","path":["Recipes","Hooks and Notifications","Customizing Hook Messages"],"tags":[]},{"location":"recipes/customizing-hook-messages/#template-variables","level":3,"title":"Template Variables","text":"

            Some messages use Go text/template variables for dynamic content:

            No context files updated in {{.PromptsSinceNudge}}+ prompts.\nHave you discovered learnings, made decisions,\nestablished conventions, or completed tasks\nworth persisting?\n

            The show and edit commands list available variables for each message. When writing a replacement, keep the same {{.VariableName}} placeholders to preserve dynamic content. Referencing a variable the hook doesn't provide renders as <no value>: no error, but the output may look odd.

            ","path":["Recipes","Hooks and Notifications","Customizing Hook Messages"],"tags":[]},{"location":"recipes/customizing-hook-messages/#intentional-silence","level":3,"title":"Intentional Silence","text":"

            An empty template file (0 bytes or whitespace-only) means \"don't emit a message\". The hook still runs its logic but produces no output. This lets you silence specific messages without removing the hook from hooks.json.

            ","path":["Recipes","Hooks and Notifications","Customizing Hook Messages"],"tags":[]},{"location":"recipes/customizing-hook-messages/#example-python-project-qa-gate","level":2,"title":"Example: Python Project QA Gate","text":"

            The default QA gate says \"lint the ENTIRE project\" and references make lint. For a Python project, you want pytest and ruff:

            # See the current default\nctx hook message show qa-reminder gate\n\n# Copy it to .context/ for editing\nctx hook message edit qa-reminder gate\n\n# Edit the override\n

            Replace the content in .context/hooks/messages/qa-reminder/gate.txt:

            HARD GATE! DO NOT COMMIT without completing ALL of these steps first:\n(1) Run the full test suite: pytest -x\n(2) Run the linter: ruff check .\n(3) Verify a clean working tree\nRun tests and linter BEFORE every git commit, no exceptions.\n

            The hook still fires on every Edit call. The logic is identical. Only the instructions changed.

            ","path":["Recipes","Hooks and Notifications","Customizing Hook Messages"],"tags":[]},{"location":"recipes/customizing-hook-messages/#example-silencing-ceremony-nudges","level":2,"title":"Example: Silencing Ceremony Nudges","text":"

            The ceremony check nudges you to use /ctx-remember and /ctx-wrap-up. If your team has a different workflow and finds these noisy:

            ctx hook message edit check-ceremonies both\nctx hook message edit check-ceremonies remember\nctx hook message edit check-ceremonies wrapup\n

            Then empty each file:

            echo -n \"\" > .context/hooks/messages/check-ceremonies/both.txt\necho -n \"\" > .context/hooks/messages/check-ceremonies/remember.txt\necho -n \"\" > .context/hooks/messages/check-ceremonies/wrapup.txt\n

            The hooks still track ceremony usage internally, but they no longer emit any visible output.

            ","path":["Recipes","Hooks and Notifications","Customizing Hook Messages"],"tags":[]},{"location":"recipes/customizing-hook-messages/#example-javascript-project-post-commit","level":2,"title":"Example: JavaScript Project Post-Commit","text":"

            The default post-commit nudge mentions generic \"lints and tests.\" For a JavaScript project:

            ctx hook message edit post-commit nudge\n

            Replace with:

            Commit succeeded. 1. Offer context capture to the user: Decision (design\nchoice?), Learning (gotcha?), or Neither. 2. Ask the user: \"Want me to\nrun npm test and eslint before you push?\" Do NOT push. The user pushes\nmanually.\n
            ","path":["Recipes","Hooks and Notifications","Customizing Hook Messages"],"tags":[]},{"location":"recipes/customizing-hook-messages/#the-two-categories","level":2,"title":"The Two Categories","text":"

            Not all messages are equal. The list command shows each message's category:

            ","path":["Recipes","Hooks and Notifications","Customizing Hook Messages"],"tags":[]},{"location":"recipes/customizing-hook-messages/#customizable-17-messages","level":3,"title":"Customizable (17 Messages)","text":"

            Messages that are opinions: project-specific wording that benefits from customization. These are the primary targets for override.

            Hook Variant Description check-backup-age warning Backup staleness warning check-freshness stale Technology constant freshness warning check-ceremonies both Both ceremonies missing check-ceremonies remember Start-of-session ceremony check-ceremonies wrapup End-of-session ceremony check-context-size checkpoint Context capacity warning check-context-size oversize Injection oversize nudge check-context-size window Context window usage warning (>80%) check-journal both Unimported sessions + unenriched entries check-journal unenriched Unenriched journal entries check-journal unimported Unimported sessions check-knowledge warning Knowledge file growth check-map-staleness stale Architecture map staleness check-persistence nudge Context persistence nudge post-commit nudge Post-commit context capture qa-reminder gate Pre-commit QA gate","path":["Recipes","Hooks and Notifications","Customizing Hook Messages"],"tags":[]},{"location":"recipes/customizing-hook-messages/#ctx-specific-10-messages","level":3,"title":"ctx-Specific (10 Messages)","text":"

            Messages specific to ctx's own development workflow. You can customize them, but edit will warn you first.

            Hook Variant Description block-dangerous-commands cp-to-bin Block copy to bin dirs block-dangerous-commands install-to-local-bin Block copy to ~/.local/bin block-dangerous-commands mid-git-push Block git push block-dangerous-commands mid-sudo Block sudo block-non-path-ctx absolute-path Block absolute path invocation block-non-path-ctx dot-slash Block ./ctx invocation block-non-path-ctx go-run Block go run invocation check-reminders reminders Pending reminders relay check-resources alert Resource pressure alert check-version key-rotation Key rotation nudge check-version mismatch Version mismatch","path":["Recipes","Hooks and Notifications","Customizing Hook Messages"],"tags":[]},{"location":"recipes/customizing-hook-messages/#template-variables-reference","level":2,"title":"Template Variables Reference","text":"Hook Variant Variables check-backup-age warning {{.Warnings}} check-freshness stale {{.StaleFiles}} check-context-size checkpoint (none) check-context-size oversize {{.TokenCount}} check-context-size window {{.TokenCount}}, {{.Percentage}} check-ceremonies both, remember, wrapup (none) check-journal both {{.UnimportedCount}}, {{.UnenrichedCount}} check-journal unenriched {{.UnenrichedCount}} check-journal unimported {{.UnimportedCount}} check-knowledge warning {{.FileWarnings}} check-map-staleness stale {{.LastRefreshDate}}, {{.ModuleCount}} check-persistence nudge {{.PromptsSinceNudge}} check-reminders reminders {{.ReminderList}} check-resources alert {{.AlertMessages}} check-version key-rotation {{.KeyAgeDays}} check-version mismatch {{.BinaryVersion}}, {{.PluginVersion}} post-commit nudge (none) qa-reminder gate (none) block-dangerous-commands all variants (none) block-non-path-ctx all variants (none)

            Templates that reference undefined variables render <no value>: no error, graceful degradation.

            ","path":["Recipes","Hooks and Notifications","Customizing Hook Messages"],"tags":[]},{"location":"recipes/customizing-hook-messages/#tips","level":2,"title":"Tips","text":"
            • Override files are version-controlled: they live in .context/ alongside your other context files. Team members get the same customized messages.
            • Start with show: always check the current default before editing. The embedded template is the baseline your override replaces.
            • Use reset to undo: if a customization causes confusion, reset reverts to the embedded default instantly.
            • Empty file = silence: you don't need to delete the hook. An empty override file silences the message while preserving the hook's logic.
            • JSON output for scripting: ctx hook message list --json returns structured data for automation.
            ","path":["Recipes","Hooks and Notifications","Customizing Hook Messages"],"tags":[]},{"location":"recipes/customizing-hook-messages/#see-also","level":2,"title":"See Also","text":"
            • Hook Output Patterns: understanding VERBATIM relays, agent directives, and hard gates
            • Auditing System Hooks: verifying hooks are running and auditing their output
            • Configuration: project-level settings via .ctxrc
            ","path":["Recipes","Hooks and Notifications","Customizing Hook Messages"],"tags":[]},{"location":"recipes/design-before-coding/","level":1,"title":"Design Before Coding","text":"","path":["Recipes","Knowledge and Tasks","Design Before Coding"],"tags":[]},{"location":"recipes/design-before-coding/#the-problem","level":2,"title":"The Problem","text":"

            You start coding a feature. Halfway through, you realize the approach doesn't handle a key edge case. You refactor. Then you discover the CLI interface doesn't fit the existing patterns. More refactoring.

            The design work happened during implementation, mixed in with debugging and trial-and-error. The result works, but the spec was never written down, the trade-offs were never recorded, and the next session has no idea why things are shaped this way.

            How do you front-load design so the implementation is straightforward?

            ","path":["Recipes","Knowledge and Tasks","Design Before Coding"],"tags":[]},{"location":"recipes/design-before-coding/#tldr","level":2,"title":"TL;DR","text":"
            /ctx-brainstorm          # explore the design space\n/ctx-spec                # write the spec document\n/ctx-task-add            # break it into tasks\n/ctx-implement           # execute step-by-step\n

            Four skills, used in sequence. Each produces an artifact that feeds the next.

            ","path":["Recipes","Knowledge and Tasks","Design Before Coding"],"tags":[]},{"location":"recipes/design-before-coding/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Tool Type Purpose /ctx-brainstorm Skill Structured design dialogue: explore approaches /ctx-spec Skill Scaffold and fill out a spec from the template /ctx-task-add Skill Add implementation tasks to TASKS.md /ctx-implement Skill Execute a plan step-by-step with verification /ctx-decision-add Skill Record design choices made during brainstorming","path":["Recipes","Knowledge and Tasks","Design Before Coding"],"tags":[]},{"location":"recipes/design-before-coding/#the-workflow","level":2,"title":"The Workflow","text":"","path":["Recipes","Knowledge and Tasks","Design Before Coding"],"tags":[]},{"location":"recipes/design-before-coding/#step-1-brainstorm-the-design","level":3,"title":"Step 1: Brainstorm the Design","text":"

            Start with /ctx-brainstorm when you have a rough idea but haven't committed to an approach. The skill follows a gated process:

            1. Understand context: what exists today, what's the gap
            2. Clarify the idea: one question at a time, not a wall of questions
            3. Surface non-functional requirements: performance, security, compatibility
            4. Lock understanding: confirm before exploring solutions
            5. Explore 2-3 approaches: with trade-offs for each
            6. Stress-test: poke holes in the chosen approach
            7. Present the design: detailed enough to implement from
            You: \"I want to add RSS feed generation to the blog.\"\n\nAgent: \"Before we design, let me understand: is this for the journal\n  site, the docs site, or both?\"\n\nYou: \"Journal site only.\"\n\nAgent: [asks about update frequency, content scope, existing infra...]\n       [presents 3 approaches with trade-offs]\n\nYou: \"Option 2 - generate at build time.\"\n\nAgent: \"Let me stress-test that. What happens when...\"\n       [probes edge cases]\n       \"Design looks solid. Want me to record the approach as a\n       decision and write a spec?\"\n

            The brainstorm produces a validated design in conversation. It doesn't produce a file - that's what the spec is for.

            ","path":["Recipes","Knowledge and Tasks","Design Before Coding"],"tags":[]},{"location":"recipes/design-before-coding/#step-2-write-the-spec","level":3,"title":"Step 2: Write the Spec","text":"

            Once the design is clear, use /ctx-spec to produce the written artifact. The skill reads the project's spec template (specs/tpl/spec-template.md) and walks through each section:

            You: /ctx-spec (rss feed generation)\n\nAgent: \"Creating specs/rss-feed.md from template.\"\n\n       \"Problem: What user-visible problem does this solve? Why now?\"\n\nYou: \"Users can't subscribe to blog updates. They have to check\n      the site manually.\"\n\nAgent: [writes the Problem section, moves to Approach]\n\n       \"Approach: High-level, how does this work?\"\n\nYou: \"Generate feed.xml during ctx journal site. Atom format.\"\n\nAgent: [writes Approach, moves to Happy Path, then Edge Cases...]\n

            The skill spends extra time on Edge Cases and Error Handling: these are where specs earn their value. The skill pushes for at least three edge cases and won't accept \"none\" without a challenge.

            Sections that don't apply can be skipped. The result is a complete spec at specs/{feature-name}.md.

            ","path":["Recipes","Knowledge and Tasks","Design Before Coding"],"tags":[]},{"location":"recipes/design-before-coding/#step-3-break-into-tasks","level":3,"title":"Step 3: Break into Tasks","text":"

            After the spec is written, the skill offers to create tasks:

            Agent: \"Spec written to specs/rss-feed.md. Want me to break this\n  into tasks in TASKS.md?\"\n\nYou: \"Yes.\"\n\nAgent: [creates a Phase section referencing the spec, adds tasks]\n       \"Added Phase 0.8: RSS/Atom Feed Generation with 4 tasks:\n         - [ ] Implement feed.xml generation in ctx journal site\n         - [ ] Add --feed flag and .ctxrc feed config\n         - [ ] Write tests for feed generation\n         - [ ] Add recipe and update docs\"\n

            Each task is independently completable and references the spec via Spec: specs/rss-feed.md in the Phase header.

            You can also invoke /ctx-task-add directly at any point to add individual tasks.

            ","path":["Recipes","Knowledge and Tasks","Design Before Coding"],"tags":[]},{"location":"recipes/design-before-coding/#step-4-implement-step-by-step","level":3,"title":"Step 4: Implement Step-by-Step","text":"

            Pick up tasks with /ctx-implement. The skill loads the spec, breaks work into atomic steps, and checkpoints after every 3-5 steps with a build and test verification:

            You: /ctx-implement (specs/rss-feed.md)\n\nAgent: \"Loaded spec. Breaking into steps:\n         1. Add FeedEntry type to journal package\n         2. Implement feed.xml template\n         3. Wire into site generation pipeline\n         [builds and tests after step 3]\n         4. Add --feed flag to ctx journal site\n         5. Add .ctxrc feed configuration\n         [builds and tests after step 5]\n         ...\"\n

            If a build or test fails, the agent stops, diagnoses, and fixes before continuing.

            ","path":["Recipes","Knowledge and Tasks","Design Before Coding"],"tags":[]},{"location":"recipes/design-before-coding/#when-to-skip-steps","level":2,"title":"When to Skip Steps","text":"

            Not every feature needs all four steps. Use your judgment:

            Situation Start at Vague idea, multiple valid approaches Step 1: Brainstorm Clear approach, need to document it Step 2: Spec Spec already exists, need to plan work Step 3: Tasks Tasks exist, ready to code Step 4: Implement

            A brainstorm without a spec is fine for small decisions. A spec without a brainstorm is fine when the design is obvious. The full chain is for features complex enough to warrant front-loaded design.

            ","path":["Recipes","Knowledge and Tasks","Design Before Coding"],"tags":[]},{"location":"recipes/design-before-coding/#conversational-approach","level":2,"title":"Conversational Approach","text":"

            You don't need skill names. Natural language works:

            You say What happens \"Let's think through this feature\" /ctx-brainstorm \"Spec this out\" /ctx-spec \"Write a design doc for...\" /ctx-spec \"Break this into tasks\" /ctx-task-add \"Implement the spec\" /ctx-implement \"Let's design before we build\" Starts at brainstorm","path":["Recipes","Knowledge and Tasks","Design Before Coding"],"tags":[]},{"location":"recipes/design-before-coding/#tips","level":2,"title":"Tips","text":"
            • Brainstorm first when uncertain. If you can articulate the approach in two sentences, skip to spec. If you can't, brainstorm.
            • Specs prevent scope creep. The Non-Goals section is as important as the approach. Writing down what you won't do keeps implementation focused.
            • Edge cases are the point. A spec that only describes the happy path isn't a spec - it's a wish. The /ctx-spec skill pushes for at least 3 edge cases because that's where designs break.
            • Record decisions during brainstorming. When you choose between approaches, the agent offers to persist the trade-off via /ctx-decision-add. Accept - future sessions need to know why, not just what.
            • Specs are living documents. Update them when implementation reveals new constraints. A spec that diverges from reality is worse than no spec.
            • The spec template is customizable. Edit specs/tpl/spec-template.md to match your project's needs. The /ctx-spec skill reads whatever template it finds there.
            ","path":["Recipes","Knowledge and Tasks","Design Before Coding"],"tags":[]},{"location":"recipes/design-before-coding/#see-also","level":2,"title":"See Also","text":"
            • Skills Reference: /ctx-brainstorm: structured design dialogue
            • Skills Reference: /ctx-spec: spec scaffolding from template
            • Skills Reference: /ctx-implement: step-by-step execution with verification
            • Tracking Work Across Sessions: task lifecycle and archival
            • Importing Claude Code Plans: turning ephemeral plans into permanent specs
            • Persisting Decisions, Learnings, and Conventions: capturing design trade-offs
            ","path":["Recipes","Knowledge and Tasks","Design Before Coding"],"tags":[]},{"location":"recipes/external-context/","level":1,"title":"Keeping Context in a Separate Repo","text":"","path":["Recipes","Getting Started","Keeping Context in a Separate Repo"],"tags":[]},{"location":"recipes/external-context/#the-problem","level":2,"title":"The Problem","text":"

            ctx files contain project-specific decisions, learnings, conventions, and tasks. By default, they live in .context/ inside the project tree, and that works well when the context can be public.

            But sometimes you need the context outside the project:

            • Open-source projects with private context: Your architectural notes, internal task lists, and scratchpad entries shouldn't ship with the public repo.
            • Compliance or IP concerns: Context files reference sensitive design rationale that belongs in a separate access-controlled repository.
            • Personal preference: You want a single context repo that covers multiple projects, or you just prefer keeping notes separate from code.

            ctx supports this through three configuration methods. This recipe shows how to set them up and how to tell your AI assistant where to find the context.

            ","path":["Recipes","Getting Started","Keeping Context in a Separate Repo"],"tags":[]},{"location":"recipes/external-context/#tldr","level":2,"title":"TL;DR","text":"

            First, create the context repo and initialize ctx with --allow-outside-cwd from your project:

            mkdir ~/repos/myproject-context && cd ~/repos/myproject-context && git init\ncd ~/repos/myproject\nctx --context-dir ~/repos/myproject-context --allow-outside-cwd init\n

            Then, create a .ctxrc in your project root to specify the new .context folder location:

            context_dir: ~/repos/myproject-context\nallow_outside_cwd: true\n

            All ctx commands now use the external directory automatically.

            ","path":["Recipes","Getting Started","Keeping Context in a Separate Repo"],"tags":[]},{"location":"recipes/external-context/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Tool Type Purpose ctx init CLI command Initialize context directory --context-dir Global flag Point ctx at a non-default directory --allow-outside-cwd Global flag Permit context outside the project root .ctxrc Config file Persist the context directory setting CTX_DIR Env variable Override context directory per-session /ctx-status Skill Verify context is loading correctly","path":["Recipes","Getting Started","Keeping Context in a Separate Repo"],"tags":[]},{"location":"recipes/external-context/#the-workflow","level":2,"title":"The Workflow","text":"","path":["Recipes","Getting Started","Keeping Context in a Separate Repo"],"tags":[]},{"location":"recipes/external-context/#step-1-create-the-private-context-repo","level":3,"title":"Step 1: Create the Private Context Repo","text":"

            Create a separate repository for your context files. This can live anywhere: a private GitHub repo, a shared drive, a sibling directory:

            # Create the context repo\nmkdir ~/repos/myproject-context\ncd ~/repos/myproject-context\ngit init\n
            ","path":["Recipes","Getting Started","Keeping Context in a Separate Repo"],"tags":[]},{"location":"recipes/external-context/#step-2-initialize-ctx-pointing-at-it","level":3,"title":"Step 2: Initialize ctx Pointing at It","text":"

            From your project root, initialize ctx with --context-dir pointing to the external location. Because the directory is outside your project tree, you also need --allow-outside-cwd:

            cd ~/repos/myproject\nctx --context-dir ~/repos/myproject-context \\\n    --allow-outside-cwd \\\n    init\n

            This creates the full .context/-style file set inside ~/repos/myproject-context/ instead of ~/repos/myproject/.context/.

            Boundary Validation

            ctx validates that the .context directory is within the current working directory.

            If your external directory is truly outside the project root:

            • Either every ctx command needs --allow-outside-cwd,
            • or you can persist the setting in .ctxrc (next step).
            ","path":["Recipes","Getting Started","Keeping Context in a Separate Repo"],"tags":[]},{"location":"recipes/external-context/#step-3-make-it-stick","level":3,"title":"Step 3: Make It Stick","text":"

            Typing --context-dir and --allow-outside-cwd on every command is tedious. Pick one of these methods to make the configuration permanent.

            ","path":["Recipes","Getting Started","Keeping Context in a Separate Repo"],"tags":[]},{"location":"recipes/external-context/#option-a-ctxrc-recommended","level":4,"title":"Option A: .ctxrc (Recommended)","text":"

            Create a .ctxrc file in your project root:

            # .ctxrc: committed to the project repo\ncontext_dir: ~/repos/myproject-context\nallow_outside_cwd: true\n

            ctx reads .ctxrc automatically. Every command now uses the external directory without extra flags:

            ctx status          # reads from ~/repos/myproject-context\nctx add learning \"Redis MULTI doesn't roll back on error\" \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n

            Commit .ctxrc

            .ctxrc belongs in the project repo. It contains no secrets: it's just a path and a boundary override.

            .ctxrc lets teammates share the same configuration.

            ","path":["Recipes","Getting Started","Keeping Context in a Separate Repo"],"tags":[]},{"location":"recipes/external-context/#option-b-ctx_dir-environment-variable","level":4,"title":"Option B: CTX_DIR Environment Variable","text":"

            Good for CI pipelines, temporary overrides, or when you don't want to commit a .ctxrc:

            # In your shell profile (~/.bashrc, ~/.zshrc)\nexport CTX_DIR=~/repos/myproject-context\n

            Or for a single session:

            CTX_DIR=~/repos/myproject-context ctx status\n
            ","path":["Recipes","Getting Started","Keeping Context in a Separate Repo"],"tags":[]},{"location":"recipes/external-context/#option-c-shell-alias","level":4,"title":"Option C: Shell Alias","text":"

            If you prefer a shell alias over .ctxrc:

            # ~/.bashrc or ~/.zshrc\nalias ctx='ctx --context-dir ~/repos/myproject-context --allow-outside-cwd'\n
            ","path":["Recipes","Getting Started","Keeping Context in a Separate Repo"],"tags":[]},{"location":"recipes/external-context/#priority-order","level":4,"title":"Priority Order","text":"

            When multiple methods are set, ctx resolves the context directory in this order (highest priority first):

            1. --context-dir flag
            2. CTX_DIR environment variable
            3. context_dir in .ctxrc
            4. Default: .context/
            ","path":["Recipes","Getting Started","Keeping Context in a Separate Repo"],"tags":[]},{"location":"recipes/external-context/#step-4-agent-auto-discovery-via-bootstrap","level":3,"title":"Step 4: Agent Auto-Discovery via Bootstrap","text":"

            When context lives outside the project tree, your AI assistant needs to know where to find it. The ctx system bootstrap command resolves the configured context directory and communicates it to the agent automatically:

            $ ctx system bootstrap\nctx system bootstrap\n====================\n\ncontext_dir: /home/user/repos/myproject-context\n\nFiles:\n  CONSTITUTION.md, TASKS.md, DECISIONS.md, ...\n

            The CLAUDE.md template generated by ctx init already instructs the agent to run ctx system bootstrap at session start. Because .ctxrc is in the project root, your agent inherits the external path automatically when it runs ctx system bootstrap.

            Here is the relevant section from CLAUDE.md for reference:

            <!-- CLAUDE.md -->\n1. **Run `ctx system bootstrap`**: CRITICAL, not optional.\n   This tells you where the context directory is. If it fails or returns\n   no context_dir, STOP and warn the user.\n

            Moreover, every nudge (context checkpoint, persistence reminder, etc.) also includes a Context: /home/user/repos/myproject-context footer, so the agent remains anchored to the correct directory even in long sessions.

            If you use CTX_DIR instead of .ctxrc, export it in your shell profile so the hook process inherits it:

            export CTX_DIR=~/repos/myproject-context\n
            ","path":["Recipes","Getting Started","Keeping Context in a Separate Repo"],"tags":[]},{"location":"recipes/external-context/#step-5-share-with-teammates","level":3,"title":"Step 5: Share with Teammates","text":"

            Teammates clone both repos and set up .ctxrc:

            # Clone the project\ngit clone git@github.com:org/myproject.git\ncd myproject\n\n# Clone the private context repo\ngit clone git@github.com:org/myproject-context.git ~/repos/myproject-context\n

            If .ctxrc is already committed to the project, they're done: ctx commands will find the external context automatically.

            If teammates use different paths, each developer sets their own CTX_DIR:

            export CTX_DIR=~/my-own-path/myproject-context\n

            For encryption key distribution across the team, see the Syncing Scratchpad Notes recipe.

            ","path":["Recipes","Getting Started","Keeping Context in a Separate Repo"],"tags":[]},{"location":"recipes/external-context/#step-6-day-to-day-sync","level":3,"title":"Step 6: Day-to-Day Sync","text":"

            The external context repo has its own git history. Treat it like any other repo: commit and push after sessions:

            cd ~/repos/myproject-context\n\n# After a session\ngit add -A\ngit commit -m \"Session: refactored auth module, added rate-limit learning\"\ngit push\n

            Your AI assistant can do this too. When ending a session:

            You: \"Save what we learned and push the context repo.\"\n\nAgent: [runs ctx add learning, then commits and pushes the context repo]\n

            You can also set up a post-session habit: project code gets committed to the project repo, context gets committed to the context repo.

            ","path":["Recipes","Getting Started","Keeping Context in a Separate Repo"],"tags":[]},{"location":"recipes/external-context/#conversational-approach","level":2,"title":"Conversational Approach","text":"

            You don't need to remember the flags; simply ask your assistant:

            ","path":["Recipes","Getting Started","Keeping Context in a Separate Repo"],"tags":[]},{"location":"recipes/external-context/#set-up-your-system-using-natural-language","level":3,"title":"Set Up Your System Using Natural Language","text":"
            You: \"Set up ctx to use ~/repos/myproject-context as the context directory.\"\n\nAgent: \"I'll create a .ctxrc in the project root pointing to that path.\n       I'll also update CLAUDE.md so future sessions know where to find\n       context. Want me to initialize the context files there too?\"\n
            ","path":["Recipes","Getting Started","Keeping Context in a Separate Repo"],"tags":[]},{"location":"recipes/external-context/#configure-separate-repo-for-context-folder-using-natural-language","level":3,"title":"Configure Separate Repo for .context Folder Using Natural Language","text":"
            You: \"My context is in a separate repo. Can you load it?\"\n\nAgent: [reads .ctxrc, finds the path, loads context from the external dir]\n       \"Loaded. You have 3 pending tasks, last session was about the auth\n       refactor.\"\n
            ","path":["Recipes","Getting Started","Keeping Context in a Separate Repo"],"tags":[]},{"location":"recipes/external-context/#tips","level":2,"title":"Tips","text":"
            • Start simple. If you don't need external context yet, don't set it up. The default .context/ in-tree is the easiest path. Move to an external repo when you have a concrete reason.
            • One context repo per project. Sharing a single context directory across multiple projects creates confusion. Keep the mapping 1:1.
            • Use .ctxrc over env vars when the path is stable. It's committed, documented, and works for the whole team without per-developer shell setup.
            • Don't forget the boundary flag. The most common error is Error: context directory is outside the project root. Set allow_outside_cwd: true in .ctxrc or pass --allow-outside-cwd.
            • Commit both repos at session boundaries. Context without code history (or code without context history) loses half the value.
            ","path":["Recipes","Getting Started","Keeping Context in a Separate Repo"],"tags":[]},{"location":"recipes/external-context/#next-up","level":2,"title":"Next Up","text":"

            The Complete Session →: Walk through a full ctx session from start to finish.

            ","path":["Recipes","Getting Started","Keeping Context in a Separate Repo"],"tags":[]},{"location":"recipes/external-context/#see-also","level":2,"title":"See Also","text":"
            • Setting Up ctx Across AI Tools: initial setup recipe
            • Syncing Scratchpad Notes Across Machines: distribute encryption keys when context is shared
            • CLI Reference: all global flags including --context-dir and --allow-outside-cwd
            ","path":["Recipes","Getting Started","Keeping Context in a Separate Repo"],"tags":[]},{"location":"recipes/guide-your-agent/","level":1,"title":"Guide Your Agent","text":"

            Commands vs. Skills

            Commands (ctx status, ctx add task) run in your terminal.

            Skills (/ctx-reflect, /ctx-next) run inside your AI coding assistant.

            Recipes combine both.

            Think of commands as structure and skills as behavior.

            ","path":["Recipes","Getting Started","Guide Your Agent"],"tags":[]},{"location":"recipes/guide-your-agent/#proactive-behavior","level":2,"title":"Proactive Behavior","text":"

            These recipes show explicit commands and skills, but agents trained on the ctx playbook are proactive: They offer to save learnings after debugging, record decisions after trade-offs, create follow-up tasks after completing work, and suggest what to work on next.

            Your questions train the agent. Asking \"what have we learned?\" or \"is our context clean?\" does two things:

            • It triggers the workflow right now,
            • and it reinforces the pattern.

            The more you guide, the more the agent habituates the behavior and begins offering on its own.

            Each recipe includes a Conversational Approach section showing these natural-language patterns.

            Tip

            Don't wait passively for proactive behavior: especially in early sessions.

            Ask, guide, reinforce. Over time, you ask less and the agent offers more.

            ","path":["Recipes","Getting Started","Guide Your Agent"],"tags":[]},{"location":"recipes/guide-your-agent/#next-up","level":2,"title":"Next Up","text":"

            Setup Across AI Tools →: Initialize ctx and configure hooks for Claude Code, Cursor, Aider, Copilot, or Windsurf.

            ","path":["Recipes","Getting Started","Guide Your Agent"],"tags":[]},{"location":"recipes/guide-your-agent/#see-also","level":2,"title":"See Also","text":"
            • The Complete Session: full session lifecycle from start to finish
            • Prompting Guide: general tips for working effectively with AI coding assistants
            ","path":["Recipes","Getting Started","Guide Your Agent"],"tags":[]},{"location":"recipes/hook-output-patterns/","level":1,"title":"Hook Output Patterns","text":"","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-output-patterns/#the-problem","level":2,"title":"The Problem","text":"

            Claude Code hooks can output text, JSON, or nothing at all. But the format of that output determines who sees it and who acts on it.

            Choose the wrong pattern, and your carefully crafted warning gets silently absorbed by the agent, or your agent-directed nudge gets dumped on the user as noise.

            This recipe catalogs the known hook output patterns and explains when to use each one.

            ","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-output-patterns/#tldr","level":2,"title":"TL;DR","text":"

            Eight patterns from full control to full invisibility:

            • hard gate (exit 2),
            • VERBATIM relay (agent MUST show),
            • agent directive (context injection),
            • and silent side-effect (background work).

            Most hooks belong in the middle.

            ","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-output-patterns/#the-spectrum","level":2,"title":"The Spectrum","text":"

            These patterns form a spectrum based on who decides what the user sees:

            Pattern Who decides? Hard gate Hook decides (agent can't proceed) VERBATIM relay Hook decides (agent must show) Escalating severity Hook suggests, agent judges urgency Conditional relay Hook sets criteria, agent evaluates Suggested action Hook proposes, agent + user decide Agent directive Agent decides entirely Silent injection Nobody: invisible background context Silent side-effect Nobody: invisible background work

            The spectrum runs from full hook control (hard gate) to full invisibility (silent side-effect).

            Most hooks belong somewhere in the middle.

            ","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-output-patterns/#pattern-1-hard-gate","level":2,"title":"Pattern 1: Hard Gate","text":"

            Block the tool call entirely. The agent cannot proceed: it must find another approach or tell the user.

            echo '{\"decision\": \"block\", \"reason\": \"Use ctx from PATH, not ./ctx\"}'\n

            When to use: Enforcing invariants that must never be violated: Constitution rules, security boundaries, destructive command prevention.

            Hook type: PreToolUse only (Claude Code first-class mechanism).

            Examples in ctx:

            • ctx system block-non-path-ctx: Enforces the PATH invocation rule
            • block-git-push.sh: Requires explicit user approval for pushes (project-local)
            • block-dangerous-commands.sh: Prevents sudo, copies to ~/.local/bin (project-local)

            Trade-off: The agent gets a block response with a reason. Good reasons help the agent recover (\"use X instead\"); bad reasons leave it stuck.

            ","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-output-patterns/#pattern-2-verbatim-relay","level":2,"title":"Pattern 2: VERBATIM Relay","text":"

            Force the agent to show this to the user as-is. The explicit instruction overcomes the agent's tendency to silently absorb context.

            echo \"IMPORTANT: Relay this warning to the user VERBATIM before answering their question.\"\necho \"\"\necho \"┌─ Journal Reminder ─────────────────────────────\"\necho \"│ You have 12 sessions not yet exported.\"\necho \"└────────────────────────────────────────────────\"\n

            When to use: Actionable reminders the user needs to see regardless of what they asked: Stale backups, unimported sessions, resource warnings.

            Hook type: UserPromptSubmit (runs before the agent sees the prompt).

            Examples in ctx:

            • ctx system check-journal: Unexported sessions and unenriched entries
            • ctx system check-context-size: Context capacity warning
            • ctx system check-resources: Resource pressure (memory, swap, disk, load): DANGER only
            • ctx system check-freshness: Technology constant staleness warning
            • check-backup-age.sh: Stale backup warning (project-local)

            Trade-off: Noisy if overused. Every VERBATIM relay adds a preamble before the agent's actual answer. Throttle with once-per-day markers or adaptive frequency.

            Key detail: The phrase IMPORTANT: Relay this ... VERBATIM is what makes this work. Without it, agents tend to process the information internally and never surface it. The explicit instruction is the pattern: the box-drawing is just fancy formatting.

            ","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-output-patterns/#pattern-3-agent-directive","level":2,"title":"Pattern 3: Agent Directive","text":"

            Tell the agent to do something, not the user. The agent decides whether and how to involve the user.

            echo \"┌─ Persistence Checkpoint (prompt #25) ───────────\"\necho \"│ No context files updated in 15+ prompts.\"\necho \"│ Have you discovered learnings, decisions,\"\necho \"│ or completed tasks worth persisting?\"\necho \"└──────────────────────────────────────────────────\"\n

            When to use: Behavioral nudges. The hook detects a condition and asks the agent to consider an action. The user may never need to know.

            Hook type: UserPromptSubmit.

            Examples in ctx:

            • ctx system check-persistence: Nudges the agent to persist context

            Trade-off: No guarantee the agent acts. The nudge is one signal among many in the context window. Strong phrasing helps (\"Have you...?\" is better than \"Consider...\"), but ultimately the agent decides.

            ","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-output-patterns/#pattern-4-silent-context-injection","level":2,"title":"Pattern 4: Silent Context Injection","text":"

            Load context with no visible output. The agent gets enriched without either party noticing.

            ctx agent --budget 4000 >/dev/null || true\n

            When to use: Background context loading that should be invisible. The agent benefits from the information, but neither it nor the user needs to know it happened.

            Hook type: PreToolUse with .* matcher (runs on every tool call).

            Examples in ctx:

            • The ctx agent PreToolUse hook: injects project context silently

            Trade-off: Adds latency to every tool call. Keep the injected content small and fast to generate.

            ","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-output-patterns/#pattern-5-silent-side-effect","level":2,"title":"Pattern 5: Silent Side-Effect","text":"

            Do work, produce no output: Housekeeping that needs no acknowledgment.

            find \"$CTX_TMPDIR\" -type f -mtime +15 -delete\n

            When to use: Cleanup, log rotation, temp file management. Anything where the action is the point and nobody needs to know it happened.

            Hook type: Any hook where output is irrelevant.

            Examples in ctx:

            • Log rotation, marker file cleanup, state directory maintenance

            Trade-off: None, if the action is truly invisible. If it can fail in a way that matters, consider logging.

            ","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-output-patterns/#pattern-6-conditional-relay","level":3,"title":"Pattern 6: Conditional Relay","text":"

            Tell the agent to relay only if a condition holds in context.

            echo \"If the user's question involves modifying .context/ files,\"\necho \"relay this warning VERBATIM:\"\necho \"\"\necho \"┌─ Context Integrity ─────────────────────────────\"\necho \"│ CONSTITUTION.md has not been verified in 7 days.\"\necho \"└────────────────────────────────────────────────\"\necho \"\"\necho \"Otherwise, proceed normally.\"\n

            When to use: Warnings that only matter in certain contexts. Avoids noise when the user is doing unrelated work.

            Trade-off: Depends on the agent's judgment about when the condition holds. More fragile than VERBATIM relay, but less noisy.

            ","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-output-patterns/#pattern-7-suggested-action","level":3,"title":"Pattern 7: Suggested Action","text":"

            Give the agent a specific command to propose to the user.

            echo \"┌─ Stale Dependencies ──────────────────────────\"\necho \"│ go.sum is 30+ days newer than go.mod.\"\necho \"│ Suggested: run \\`go mod tidy\\`\"\necho \"│ Ask the user before proceeding.\"\necho \"└───────────────────────────────────────────────\"\n

            When to use: The hook detects a fixable condition and knows the fix. Goes beyond a nudge: Gives the agent a concrete next step. The agent still asks for permission but knows exactly what to propose.

            Trade-off: The suggestion might be wrong or outdated. The \"ask the user before proceeding\" part is critical.

            ","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-output-patterns/#pattern-8-escalating-severity","level":3,"title":"Pattern 8: Escalating Severity","text":"

            Different urgency tiers with different relay expectations.

            # INFO: agent processes silently, mentions if relevant\necho \"INFO: Last test run was 3 days ago.\"\n\n# WARN: agent should mention to user at next natural pause\necho \"WARN: 12 uncommitted changes across 3 branches.\"\n\n# CRITICAL: agent must relay immediately, before any other work\necho \"CRITICAL: Relay VERBATIM before answering. Disk usage at 95%.\"\n

            When to use: When you have multiple hooks producing output and need to avoid overwhelming the user. INFO gets absorbed, WARN gets mentioned, CRITICAL interrupts.

            Examples in ctx:

            • ctx system check-resources: Uses two tiers (WARNING/DANGER) internally but only fires the VERBATIM relay at DANGER level: WARNING is silent. See ctx system for the user-facing command that shows both tiers.

            Trade-off: Requires agent training or convention to recognize the tiers. Without a shared protocol, the prefixes are just text.

            ","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-output-patterns/#choosing-a-pattern","level":2,"title":"Choosing a Pattern","text":"
            Is the agent about to do something forbidden?\n  └─ Yes → Hard gate\n\nDoes the user need to see this regardless of what they asked?\n  └─ Yes → VERBATIM relay\n  └─ Sometimes → Conditional relay\n\nShould the agent consider an action?\n  └─ Yes, with a specific fix → Suggested action\n  └─ Yes, open-ended → Agent directive\n\nIs this background context the agent should have?\n  └─ Yes → Silent injection\n\nIs this housekeeping?\n  └─ Yes → Silent side-effect\n
            ","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-output-patterns/#design-tips","level":2,"title":"Design Tips","text":"

            Throttle aggressively: VERBATIM relays that fire every prompt will be ignored or resented. Use once-per-day markers (touch $REMINDED), adaptive frequency (every Nth prompt), or staleness checks (only fire if condition persists).

            Include actionable commands: \"You have 12 unimported sessions\" is less useful than \"You have 12 unimported sessions. Run: ctx journal import --all.\" Give the user (or agent) the exact next step.

            Use box-drawing for visual structure: The ┌─ ─┐ │ └─ ─┘ pattern makes hook output visually distinct from agent prose. It also signals \"this is machine-generated, not agent opinion.\"

            Test the silence path: Most hook runs should produce no output (the condition isn't met). Make sure the common case is fast and silent.

            ","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-output-patterns/#common-pitfalls","level":2,"title":"Common Pitfalls","text":"

            Lessons from 19 days of hook debugging in ctx. Every one of these was encountered, debugged, and fixed in production.

            ","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-output-patterns/#silent-misfire-wrong-key-name","level":3,"title":"Silent Misfire: Wrong Key Name","text":"
            { \"PreToolUseHooks\": [ ... ] }\n

            The key is PreToolUse, not PreToolUseHooks. Claude Code validates silently: A misspelled key means the hook is ignored with no error. Always test with a debug echo first to confirm the hook fires before adding real logic.

            ","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-output-patterns/#json-escaping-breaks-shell-commands","level":3,"title":"JSON Escaping Breaks Shell Commands","text":"

            Go's json.Marshal escapes >, <, and & as Unicode sequences (\\u003e) by default. This breaks shell commands in generated config:

            \"command\": \"ctx agent 2\\u003e/dev/null\"\n

            Fix: use json.Encoder with SetEscapeHTML(false) when generating hook configuration.

            ","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-output-patterns/#stdin-not-environment-variables","level":3,"title":"stdin, Not Environment Variables","text":"

            Hook input arrives as JSON via stdin, not environment variables:

            # Wrong:\nCOMMAND=\"$CLAUDE_TOOL_INPUT\"\n\n# Right:\nHOOK_INPUT=$(cat)\nCOMMAND=$(echo \"$HOOK_INPUT\" | jq -r '.tool_input.command // empty')\n
            ","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-output-patterns/#regex-overfitting","level":3,"title":"Regex Overfitting","text":"

            A regex meant to catch ctx as a binary will also match ctx as a directory component:

            # Too broad: blocks: git -C /home/jose/WORKSPACE/ctx status\n(/home/|/tmp/|/var/)[^ ]*ctx[^ ]*\n\n# Narrow to binary only:\n(/home/|/tmp/|/var/)[^ ]*/ctx( |$)\n

            Test hook regexes against paths that contain the target string as a substring, not just as the final component.

            ","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-output-patterns/#repetition-fatigue","level":3,"title":"Repetition Fatigue","text":"

            Injecting context on every tool call sounds safe. In practice, after seeing the same context injection fifteen times, the agent treats it as background noise: Conventions stated in the injected context get violated because salience has been destroyed by repetition.

            Fix: cooldowns. ctx agent --session $PPID --cooldown 10m injects at most once per ten minutes per session using a tombstone file in /tmp/. This is not an optimization; it is a correction for a design flaw. Every injection consumes attention budget: 50 tool calls at 4,000 tokens each means 200,000 tokens of repeated context, most of it wasted.

            ","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-output-patterns/#hardcoded-paths","level":3,"title":"Hardcoded Paths","text":"

            A username rename (from parallels to jose) broke every hook at once. Use $CLAUDE_PROJECT_DIR instead of absolute paths:

            \"command\": \"\\\"$CLAUDE_PROJECT_DIR\\\"/.claude/hooks/block-git-push.sh\"\n

            If the platform provides a runtime variable for paths, always use it.

            ","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-output-patterns/#next-up","level":2,"title":"Next Up","text":"

            Webhook Notifications →: Get push notifications when loops complete, hooks fire, or agents hit milestones.

            ","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-output-patterns/#see-also","level":2,"title":"See Also","text":"
            • Customizing Hook Messages: override what hooks say without changing what they do
            • Claude Code Permission Hygiene: how permissions and hooks work together
            • Defense in Depth: why hooks matter for agent security
            ","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/","level":1,"title":"Hook Sequence Diagrams","text":"","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#hook-lifecycle","level":2,"title":"Hook Lifecycle","text":"

            This page documents the ctx system hooks: the built-in ctx system * subcommands that Claude Code invokes via .claude/hooks.json at lifecycle events. These are owned by ctx itself, not authored by users.

            Not to Be Confused with ctx trigger

            ctx has three distinct hook-like layers:

            • ctx system hooks (this page): built-in, owned by ctx, wired into Claude Code via internal/assets/claude/hooks/hooks.json.
            • ctx trigger: user-authored shell scripts in .context/hooks/<type>/*.sh. See ctx trigger reference and the trigger authoring recipe.
            • Claude Code hooks configured directly in .claude/settings.local.json, tool-specific, not portable across AI tools.

            This page is only about the first category.

            Every ctx system hook is a Go binary invoked by Claude Code at one of three lifecycle events: PreToolUse (before a tool runs, can block), PostToolUse (after a tool completes), or UserPromptSubmit (on every user prompt, before any tools run). Hooks receive JSON on stdin and emit JSON or plain text on stdout.

            ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#pretooluse-hooks","level":2,"title":"PreToolUse Hooks","text":"

            These fire before a tool executes. They can block, gate, or inject context.

            ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#context-load-gate","level":3,"title":"Context-Load-Gate","text":"

            Matcher: .* (all tools)

            Injects the full context packet on first tool use of a session. One-shot per session.

            sequenceDiagram\n    participant CC as Claude Code\n    participant Hook as context-load-gate\n    participant State as .context/state/\n    participant Ctx as .context/ files\n    participant Git as git log\n\n    CC->>Hook: stdin {command, session_id}\n    Hook->>Hook: Check initialized\n    alt not initialized\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Hook: Check paused\n    alt paused\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>State: Check ctx-loaded-{session} marker\n    alt marker exists\n        Hook-->>CC: (silent exit, already fired)\n    end\n    Hook->>State: Create marker (one-shot guard)\n    Hook->>State: Prune stale session files\n    loop Each file in ReadOrder\n        alt GLOSSARY or TASK\n            Note over Hook: Skip (Task mentioned in footer only)\n        else DECISION or LEARNING\n            Hook->>Ctx: Extract index table only\n        else other files\n            Hook->>Ctx: Read full content\n        end\n        Hook->>Hook: Estimate tokens per file\n    end\n    Hook->>Git: Detect changes since last session\n    Hook->>Hook: Build injection (files + changes + token counts)\n    Hook-->>CC: JSON {additionalContext: injection}\n    Hook->>Hook: Send webhook (metadata only)\n    Hook->>State: Write oversize flag if tokens > threshold
            ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#block-non-path-ctx","level":3,"title":"Block-Non-Path-ctx","text":"

            Matcher: Bash

            Blocks ./ctx, go run ./cmd/ctx, or absolute-path ctx invocations. Constitutionally enforced.

            sequenceDiagram\n    participant CC as Claude Code\n    participant Hook as block-non-path-ctx\n    participant Tpl as Message Template\n\n    CC->>Hook: stdin {command, session_id}\n    Hook->>Hook: Extract command\n    alt command empty\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Hook: Test regex: relative-path, go-run, absolute-path\n    alt no match\n        Hook-->>CC: (silent exit)\n    end\n    alt absolute-path + test exception\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Tpl: LoadMessage(hook, variant, fallback)\n    Hook-->>CC: JSON {decision: BLOCK, reason + constitution suffix}\n    Hook->>Hook: NudgeAndRelay(message)
            ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#qa-reminder","level":3,"title":"Qa-Reminder","text":"

            Matcher: Bash

            Gate nudge before any git command. Reminds agent to lint/test.

            sequenceDiagram\n    participant CC as Claude Code\n    participant Hook as qa-reminder\n    participant Tpl as Message Template\n\n    CC->>Hook: stdin {command, session_id}\n    Hook->>Hook: Check initialized + HookPreamble\n    alt not initialized or paused\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Hook: Check command contains \"git\"\n    alt no git command\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Tpl: LoadMessage(hook, gate, fallback)\n    Hook->>Hook: AppendDir(message)\n    Hook-->>CC: JSON {additionalContext: QA gate}\n    Hook->>Hook: Relay(message)
            ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#specs-nudge","level":3,"title":"Specs-Nudge","text":"

            Matcher: EnterPlanMode

            Nudges agent to save plans/specs when new implementation detected.

            sequenceDiagram\n    participant CC as Claude Code\n    participant Hook as specs-nudge\n    participant Tpl as Message Template\n\n    CC->>Hook: stdin {command, session_id}\n    Hook->>Hook: Check initialized + HookPreamble\n    alt not initialized or paused\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Tpl: LoadMessage(hook, nudge, fallback)\n    Hook->>Hook: AppendDir(message)\n    Hook-->>CC: JSON {additionalContext: specs nudge}\n    Hook->>Hook: Relay(message)
            ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#posttooluse-hooks","level":2,"title":"PostToolUse Hooks","text":"

            These fire after a tool completes. They observe, nudge, and track state.

            ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#post-commit","level":3,"title":"Post-Commit","text":"

            Matcher: Bash

            Fires after git commit (not amend). Nudges for context capture and checks version drift.

            sequenceDiagram\n    participant CC as Claude Code\n    participant Hook as post-commit\n    participant Tpl as Message Template\n\n    CC->>Hook: stdin {command, session_id}\n    Hook->>Hook: Check initialized + HookPreamble\n    alt not initialized or paused\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Hook: Regex: command contains \"git commit\"?\n    alt not a git commit\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Hook: Regex: command contains \"--amend\"?\n    alt is amend\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Tpl: LoadMessage(hook, nudge, fallback)\n    Hook->>Hook: AppendDir(message)\n    Hook-->>CC: JSON {additionalContext: post-commit nudge}\n    Hook->>Hook: Relay(message)\n    Hook->>Hook: CheckVersionDrift()
            ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#check-task-completion","level":3,"title":"Check-Task-Completion","text":"

            Matcher: Edit, Write

            Configurable-interval nudge after edits. Per-session counter resets after firing.

            sequenceDiagram\n    participant CC as Claude Code\n    participant Hook as check-task-completion\n    participant State as .context/state/\n    participant RC as .ctxrc\n    participant Tpl as Message Template\n\n    CC->>Hook: stdin {session_id}\n    Hook->>Hook: Check initialized + HookPreamble\n    alt not initialized or paused\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>RC: Read task nudge interval\n    alt interval <= 0 (disabled)\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>State: Read per-session counter\n    Hook->>Hook: Increment counter\n    alt counter < interval\n        Hook->>State: Write counter\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>State: Reset counter to 0\n    Hook->>Tpl: LoadMessage(hook, nudge, fallback)\n    Hook-->>CC: JSON {additionalContext: task nudge}\n    Hook->>Hook: Relay(message)
            ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#userpromptsubmit-hooks","level":2,"title":"UserPromptSubmit Hooks","text":"

            These fire on every user prompt, before any tools run. They perform health checks, track state, and nudge for housekeeping.

            ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#check-context-size","level":3,"title":"Check-Context-Size","text":"

            Adaptive context window monitoring. Fires checkpoints, window warnings, and billing alerts based on prompt count and token usage.

            sequenceDiagram\n    participant CC as Claude Code\n    participant Hook as check-context-size\n    participant State as .context/state/\n    participant Session as Session JSONL\n    participant Tpl as Message Template\n\n    CC->>Hook: stdin {session_id}\n    Hook->>Hook: Check initialized\n    Hook->>Hook: Read input, resolve session ID\n    Hook->>Hook: Check paused\n    alt paused\n        Hook-->>CC: Pause acknowledgment message\n    end\n    Hook->>State: Increment session prompt counter\n    Hook->>Session: Read token info (tokens, model, window)\n\n    rect rgb(255, 240, 240)\n        Note over Hook: Billing check (independent, never suppressed)\n        alt tokens >= billing threshold (one-shot)\n            Hook->>Tpl: LoadMessage(hook, billing, vars)\n            Hook-->>CC: Billing warning nudge box\n            Hook->>Hook: NudgeAndRelay(billing message)\n        end\n    end\n\n    Hook->>State: Check wrap-up marker\n    alt wrapped up recently (< 2h)\n        Hook->>State: Write stats (event: suppressed)\n        Hook-->>CC: (silent exit)\n    end\n\n    rect rgb(240, 248, 255)\n        Note over Hook: Adaptive frequency check\n        alt count > 30 and count % 3 == 0\n            Note over Hook: High frequency trigger\n        else count > 15 and count % 5 == 0\n            Note over Hook: Medium frequency trigger\n        else\n            Hook->>State: Write stats (event: silent)\n            Hook-->>CC: (silent exit)\n        end\n    end\n\n    alt context window >= 80%\n        Hook->>Tpl: LoadMessage(hook, window, vars)\n        Hook-->>CC: Window warning nudge box\n        Hook->>Hook: NudgeAndRelay(window message)\n    else checkpoint trigger\n        Hook->>Tpl: LoadMessage(hook, checkpoint)\n        Hook-->>CC: Checkpoint nudge box\n        Hook->>Hook: NudgeAndRelay(checkpoint message)\n    end\n    Hook->>State: Write session stats
            ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#check-ceremonies","level":3,"title":"Check-Ceremonies","text":"

            Daily check for /ctx-remember and /ctx-wrap-up usage in recent journal entries.

            sequenceDiagram\n    participant CC as Claude Code\n    participant Hook as check-ceremonies\n    participant State as .context/state/\n    participant Journal as Journal files\n    participant Tpl as Message Template\n\n    CC->>Hook: stdin {session_id}\n    Hook->>Hook: Check initialized + HookPreamble\n    alt not initialized or paused\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>State: Check daily throttle marker\n    alt throttled\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Journal: Read recent files (lookback window)\n    alt no journal files\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Journal: Scan for /ctx-remember and /ctx-wrap-up\n    alt both ceremonies present\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Tpl: LoadMessage(hook, variant, fallback)\n    Note over Hook: variant: both | remember | wrapup\n    Hook-->>CC: Nudge box (missing ceremonies)\n    Hook->>Hook: NudgeAndRelay(message)\n    Hook->>State: Touch throttle marker
            ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#check-freshness","level":3,"title":"Check-Freshness","text":"

            Daily check for technology-dependent constants that may need review.

            sequenceDiagram\n    participant CC as Claude Code\n    participant Hook as check-freshness\n    participant State as .context/state/\n    participant FS as Filesystem\n    participant Tpl as Message Template\n\n    CC->>Hook: stdin {session_id}\n    Hook->>Hook: Check initialized + HookPreamble\n    alt not initialized or paused\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>State: Check daily throttle marker\n    alt throttled\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>FS: Stat tracked files (5 source files)\n    alt all files modified within 6 months\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Tpl: LoadMessage(hook, stale, {StaleFiles})\n    Hook-->>CC: Nudge box (stale file list + review URL)\n    Hook->>Hook: NudgeAndRelay(message)\n    Hook->>State: Touch throttle marker
            ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#check-journal","level":3,"title":"Check-Journal","text":"

            Daily check for unimported sessions and unenriched journal entries.

            sequenceDiagram\n    participant CC as Claude Code\n    participant Hook as check-journal\n    participant State as .context/state/\n    participant Journal as Journal dir\n    participant Claude as Claude projects dir\n    participant Tpl as Message Template\n\n    CC->>Hook: stdin {session_id}\n    Hook->>Hook: Check initialized + HookPreamble\n    alt not initialized or paused\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>State: Check daily throttle marker\n    alt throttled\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Journal: Check dir exists\n    Hook->>Claude: Check dir exists\n    alt either dir missing\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Journal: Get newest entry mtime\n    Hook->>Claude: Count .jsonl files newer than journal\n    Hook->>Journal: Count unenriched entries\n    alt unimported == 0 and unenriched == 0\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Tpl: LoadMessage(hook, variant, {counts})\n    Note over Hook: variant: both | unimported | unenriched\n    Hook-->>CC: Nudge box (counts)\n    Hook->>Hook: NudgeAndRelay(message)\n    Hook->>State: Touch throttle marker
            ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#check-knowledge","level":3,"title":"Check-Knowledge","text":"

            Daily check for knowledge file entry/line counts exceeding configured thresholds.

            sequenceDiagram\n    participant CC as Claude Code\n    participant Hook as check-knowledge\n    participant State as .context/state/\n    participant Ctx as .context/ files\n    participant RC as .ctxrc\n    participant Tpl as Message Template\n\n    CC->>Hook: stdin {session_id}\n    Hook->>Hook: Check initialized + HookPreamble\n    alt not initialized or paused\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>State: Check daily throttle marker\n    alt throttled\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>RC: Read thresholds (decisions, learnings, conventions)\n    alt all thresholds disabled (0)\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Ctx: Parse DECISIONS.md entry count\n    Hook->>Ctx: Parse LEARNINGS.md entry count\n    Hook->>Ctx: Count CONVENTIONS.md lines\n    Hook->>Hook: Compare against thresholds\n    alt all within limits\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Tpl: LoadMessage(hook, warning, {FileWarnings})\n    Hook-->>CC: Nudge box (file warnings)\n    Hook->>Hook: NudgeAndRelay(message)\n    Hook->>State: Touch throttle marker
            ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#check-map-staleness","level":3,"title":"Check-Map-Staleness","text":"

            Daily check for architecture map age and relevant code changes.

            sequenceDiagram\n    participant CC as Claude Code\n    participant Hook as check-map-staleness\n    participant State as .context/state/\n    participant Tracking as map-tracking.json\n    participant Git as git log\n    participant Tpl as Message Template\n\n    CC->>Hook: stdin {session_id}\n    Hook->>Hook: Check initialized + HookPreamble\n    alt not initialized or paused\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>State: Check daily throttle marker\n    alt throttled\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Tracking: Read map-tracking.json\n    alt missing, invalid, or opted out\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Hook: Parse LastRun date\n    alt map not stale (< N days)\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Git: Count commits touching internal/ since LastRun\n    alt no relevant commits\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Tpl: LoadMessage(hook, stale, {date, count})\n    Hook-->>CC: Nudge box (last refresh + commit count)\n    Hook->>Hook: NudgeAndRelay(message)\n    Hook->>State: Touch throttle marker
            ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#check-memory-drift","level":3,"title":"Check-Memory-Drift","text":"

            Per-session check for MEMORY.md changes since last sync.

            sequenceDiagram\n    participant CC as Claude Code\n    participant Hook as check-memory-drift\n    participant State as .context/state/\n    participant Mem as memory.Discover\n    participant Tpl as Message Template\n\n    CC->>Hook: stdin {session_id}\n    Hook->>Hook: Check initialized + HookPreamble\n    alt not initialized or paused\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>State: Check session tombstone\n    alt already nudged this session\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Mem: DiscoverMemoryPath(projectRoot)\n    alt auto memory not active\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Mem: HasDrift(contextDir, sourcePath)\n    alt no drift\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Tpl: LoadMessage(hook, nudge, fallback)\n    Hook-->>CC: Nudge box (drift reminder)\n    Hook->>Hook: NudgeAndRelay(message)\n    Hook->>State: Touch session tombstone
            ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#check-persistence","level":3,"title":"Check-Persistence","text":"

            Tracks context file modification and nudges when edits happen without persisting context. Adaptive threshold based on prompt count.

            sequenceDiagram\n    participant CC as Claude Code\n    participant Hook as check-persistence\n    participant State as .context/state/\n    participant Ctx as .context/ files\n    participant Tpl as Message Template\n\n    CC->>Hook: stdin {session_id}\n    Hook->>Hook: Check initialized + HookPreamble\n    alt not initialized or paused\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>State: Read persistence state {Count, LastNudge, LastMtime}\n    alt first prompt (no state)\n        Hook->>State: Initialize state {Count:1, LastNudge:0, LastMtime:now}\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Hook: Increment Count\n    Hook->>Ctx: Get current context mtime\n    alt context modified since LastMtime\n        Hook->>State: Reset LastNudge = Count, update LastMtime\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Hook: sinceNudge = Count - LastNudge\n    Hook->>Hook: PersistenceNudgeNeeded(Count, sinceNudge)?\n    alt threshold not reached\n        Hook->>State: Write state\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Tpl: LoadMessage(hook, nudge, vars)\n    Hook-->>CC: Nudge box (prompt count, time since last persist)\n    Hook->>Hook: NudgeAndRelay(message)\n    Hook->>State: Update LastNudge = Count, write state
            ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#check-reminders","level":3,"title":"Check-Reminders","text":"

            Per-prompt check for due reminders. No throttle.

            sequenceDiagram\n    participant CC as Claude Code\n    participant Hook as check-reminders\n    participant Store as Reminders store\n    participant Tpl as Message Template\n\n    CC->>Hook: stdin {session_id}\n    Hook->>Hook: Check initialized + HookPreamble\n    alt not initialized or paused\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Store: ReadReminders()\n    alt load error\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Hook: Filter by due date (After <= today)\n    alt no due reminders\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Tpl: LoadMessage(hook, reminders, {list})\n    Hook-->>CC: Nudge box (reminder list + dismiss hints)\n    Hook->>Hook: NudgeAndRelay(message)
            ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#check-resources","level":3,"title":"Check-Resources","text":"

            Checks system resources (memory, swap, disk, load). Fires on every prompt. No initialization required.

            sequenceDiagram\n    participant CC as Claude Code\n    participant Hook as check-resources\n    participant Sys as sysinfo\n    participant Tpl as Message Template\n\n    CC->>Hook: stdin {command, session_id}\n    Hook->>Hook: HookPreamble (parse input, check pause)\n    alt paused\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Sys: Collect snapshot (memory, swap, disk, load)\n    Hook->>Sys: Evaluate thresholds per metric\n    alt max severity < Danger\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Hook: Filter alerts to Danger level only\n    Hook->>Hook: Build alertMessages from danger alerts\n    Hook->>Tpl: LoadMessage(hook, alert, {alertMessages}, fallback)\n    Hook-->>CC: Nudge box (danger alerts)\n    Hook->>Hook: NudgeAndRelay(message)
            ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#check-version","level":3,"title":"Check-Version","text":"

            Daily binary-vs-plugin version comparison with piggybacked key rotation check.

            sequenceDiagram\n    participant CC as Claude Code\n    participant Hook as check-version\n    participant State as .context/state/\n    participant Config as Binary + Plugin version\n    participant Tpl as Message Template\n\n    CC->>Hook: stdin {session_id}\n    Hook->>Hook: Check initialized + HookPreamble\n    alt not initialized or paused\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>State: Check daily throttle marker\n    alt throttled\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Config: Read binary version\n    alt dev build\n        Hook->>State: Touch throttle\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Config: Read plugin version\n    alt plugin version not found or parse error\n        Hook->>State: Touch throttle\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Hook: Compare major.minor\n    alt versions match\n        Hook->>State: Touch throttle\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Tpl: LoadMessage(hook, mismatch, {versions})\n    Hook-->>CC: Nudge box (version mismatch)\n    Hook->>Hook: NudgeAndRelay(message)\n    Hook->>State: Touch throttle\n    Hook->>Hook: CheckKeyAge() (piggybacked)
            ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#heartbeat","level":3,"title":"Heartbeat","text":"

            Silent per-prompt pulse. Tracks prompt count, context modification, and token usage. The agent never sees this hook's output.

            sequenceDiagram\n    participant CC as Claude Code\n    participant Hook as heartbeat\n    participant State as .context/state/\n    participant Ctx as .context/ files\n    participant Notify as Webhook + EventLog\n\n    CC->>Hook: stdin {session_id}\n    Hook->>Hook: Check initialized + HookPreamble\n    alt not initialized or paused\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>State: Increment heartbeat counter\n    Hook->>Ctx: Get latest context file mtime\n    Hook->>State: Compare with last recorded mtime\n    Hook->>State: Update mtime record\n    Hook->>State: Read session token info\n    Hook->>Notify: Send heartbeat notification\n    Hook->>Notify: Append to event log\n    Hook->>State: Write heartbeat log entry\n    Note over Hook: No stdout - agent never sees this
            ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#project-local-hooks","level":2,"title":"Project-Local Hooks","text":"

            These hooks are configured in settings.local.json and are not shipped with ctx. They are specific to individual developer setups.

            ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#block-dangerous-commands","level":3,"title":"Block-Dangerous-Commands","text":"

            Lifecycle: PreToolUse. Matcher: Bash

            Blocks dangerous shell patterns (sudo, git push, cp to bin). No initialization or pause checks: always active.

            sequenceDiagram\n    participant CC as Claude Code\n    participant Hook as block-dangerous-commands\n    participant Tpl as Message Template\n\n    CC->>Hook: stdin {command, session_id}\n    Hook->>Hook: Extract command\n    alt command empty\n        Hook-->>CC: (silent exit)\n    end\n    Note over Hook: Cascade: first matching regex wins\n    Hook->>Hook: Test MidSudo regex\n    alt match\n        Hook->>Hook: variant = sudo\n    end\n    Hook->>Hook: Test MidGitPush regex (if no variant)\n    alt match\n        Hook->>Hook: variant = git-push\n    end\n    Hook->>Hook: Test CpMvToBin regex (if no variant)\n    alt match\n        Hook->>Hook: variant = cp-to-bin\n    end\n    Hook->>Hook: Test InstallToLocalBin regex (if no variant)\n    alt match\n        Hook->>Hook: variant = install-to-bin\n    end\n    alt no variant matched\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Tpl: LoadMessage(hook, variant, fallback)\n    Hook-->>CC: JSON {decision: BLOCK, reason}\n    Hook->>Hook: NudgeAndRelay(message)
            ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#check-backup-age","level":3,"title":"Check-Backup-Age","text":"

            Lifecycle: UserPromptSubmit.

            Daily check for SMB mount and backup freshness.

            sequenceDiagram\n    participant CC as Claude Code\n    participant Hook as check-backup-age\n    participant State as .context/state/\n    participant FS as Filesystem\n    participant Tpl as Message Template\n\n    CC->>Hook: stdin {session_id}\n    Hook->>Hook: Check initialized + HookPreamble\n    alt not initialized or paused\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>State: Check daily throttle marker\n    alt throttled\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>FS: Check SMB mount (if env var set)\n    Hook->>FS: Check backup marker file age\n    alt no warnings\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Tpl: LoadMessage(hook, warning, {Warnings})\n    Hook-->>CC: Nudge box (warnings)\n    Hook->>Hook: NudgeAndRelay(message)\n    Hook->>State: Touch throttle marker
            ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#throttling-summary","level":2,"title":"Throttling Summary","text":"Hook Lifecycle Throttle Type Scope context-load-gate PreToolUse One-shot marker Per session block-non-path-ctx PreToolUse None Every match qa-reminder PreToolUse None Every git command specs-nudge PreToolUse None Every prompt post-commit PostToolUse None Every git commit check-task-completion PostToolUse Configurable interval Per session check-context-size UserPromptSubmit Adaptive counter Per session check-ceremonies UserPromptSubmit Daily marker Once per day check-freshness UserPromptSubmit Daily marker Once per day check-journal UserPromptSubmit Daily marker Once per day check-knowledge UserPromptSubmit Daily marker Once per day check-map-staleness UserPromptSubmit Daily marker Once per day check-memory-drift UserPromptSubmit Session tombstone Once per session check-persistence UserPromptSubmit Adaptive counter Per session check-reminders UserPromptSubmit None Every prompt check-resources UserPromptSubmit None Every prompt check-version UserPromptSubmit Daily marker Once per day heartbeat UserPromptSubmit None Every prompt block-dangerous-commands PreToolUse * None Every match check-backup-age UserPromptSubmit * Daily marker Once per day

            * Project-local hook (settings.local.json), not shipped with ctx.

            ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#state-file-reference","level":2,"title":"State File Reference","text":"

            All state files live in .context/state/.

            File Pattern Hook Purpose ctx-loaded-{session} context-load-gate One-shot injection marker ctx-paused-{session} (all) Session pause marker ctx-wrapped-up check-context-size Suppress nudges after wrap-up (2h expiry) freshness-checked check-freshness Daily throttle backup-reminded check-backup-age Daily throttle ceremony-reminded check-ceremonies Daily throttle journal-reminded check-journal Daily throttle knowledge-reminded check-knowledge Daily throttle map-staleness-reminded check-map-staleness Daily throttle version-checked check-version Daily throttle memory-drift-nudged-{session} check-memory-drift Per-session tombstone ctx-context-count-{session} check-context-size Prompt counter stats-{session}.jsonl check-context-size Session stats log persist-{session} check-persistence Counter + mtime state ctx-task-count-{session} check-task-completion Prompt counter heartbeat-count-{session} heartbeat Prompt counter heartbeat-mtime-{session} heartbeat Last context mtime","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hub-cluster/","level":1,"title":"HA Cluster","text":"","path":["Recipes","Hub","HA Cluster"],"tags":[]},{"location":"recipes/hub-cluster/#ctx-hub-high-availability-cluster","level":1,"title":"ctx Hub: High-Availability Cluster","text":"

            Run multiple hub nodes with Raft-based leader election for redundancy. Any follower can take over if the leader dies.

            This recipe assumes you've read the ctx Hub overview and the Multi-machine setup. HA only makes sense in the \"small trusted team\" story; a personal cross-project brain on one workstation does not need three Raft peers.

            Raft-Lite

            ctx uses Raft only for leader election, not for data consensus. Entry replication happens via sequence-based gRPC sync on the append-only JSONL store. This is simpler than full Raft log replication and is possible because the store is append-only and clients are idempotent. The implication: a write accepted by the leader is durable on the leader immediately; followers catch up asynchronously. If the leader crashes between accepting a write and replicating it, that write can be lost. Do not use the hub as a bank ledger.

            ","path":["Recipes","Hub","HA Cluster"],"tags":[]},{"location":"recipes/hub-cluster/#topology","level":2,"title":"Topology","text":"

            A minimum HA cluster is three nodes. Two is worse than one: it doubles failure probability without providing quorum.

                     +-------------+\n         |  client(s)  |\n         +------+------+\n                |\n    +-----------+-----------+\n    |           |           |\n+---v---+   +---v---+   +---v---+\n| hub A |   | hub B |   | hub C |\n| :9900 |   | :9900 |   | :9900 |\n+-------+   +-------+   +-------+\n    ^           ^           ^\n    +-----------+-----------+\n        Raft (leader election)\n        gRPC (data sync)\n
            ","path":["Recipes","Hub","HA Cluster"],"tags":[]},{"location":"recipes/hub-cluster/#step-1-bootstrap-the-first-node","level":2,"title":"Step 1: Bootstrap the First Node","text":"
            ctx hub start --daemon \\\n  --port 9900 \\\n  --peers hub-b.lan:9900,hub-c.lan:9900\n

            The node starts a Raft election as soon as it sees its peers.

            ","path":["Recipes","Hub","HA Cluster"],"tags":[]},{"location":"recipes/hub-cluster/#step-2-start-the-other-nodes","level":2,"title":"Step 2: Start the Other Nodes","text":"

            On hub-b.lan:

            ctx hub start --daemon \\\n  --port 9900 \\\n  --peers hub-a.lan:9900,hub-c.lan:9900\n

            On hub-c.lan:

            ctx hub start --daemon \\\n  --port 9900 \\\n  --peers hub-a.lan:9900,hub-b.lan:9900\n

            After a few seconds, one node wins the election and becomes the leader. The other two are followers.

            ","path":["Recipes","Hub","HA Cluster"],"tags":[]},{"location":"recipes/hub-cluster/#step-3-verify-cluster-state","level":2,"title":"Step 3: Verify Cluster State","text":"

            From any node:

            ctx hub status\n

            Expected output:

            role:       leader\npeers:      hub-a.lan:9900 (leader)\n            hub-b.lan:9900 (follower, in-sync)\n            hub-c.lan:9900 (follower, in-sync)\nentries:    1248\nuptime:     3h42m\n
            ","path":["Recipes","Hub","HA Cluster"],"tags":[]},{"location":"recipes/hub-cluster/#step-4-register-clients-with-failover-peers","level":2,"title":"Step 4: Register Clients with Failover Peers","text":"

            When registering a client, give it the full peer list:

            ctx connection register hub-a.lan:9900 \\\n  --token ctx_adm_... \\\n  --peers hub-b.lan:9900,hub-c.lan:9900\n

            If the leader becomes unreachable, the client reconnects to the next peer. Followers redirect to the current leader, so writes always land on the right node.

            ","path":["Recipes","Hub","HA Cluster"],"tags":[]},{"location":"recipes/hub-cluster/#runtime-membership-changes","level":2,"title":"Runtime Membership Changes","text":"

            Add a new peer without downtime:

            ctx hub peer add hub-d.lan:9900\n

            Remove a decommissioned peer:

            ctx hub peer remove hub-c.lan:9900\n
            ","path":["Recipes","Hub","HA Cluster"],"tags":[]},{"location":"recipes/hub-cluster/#planned-maintenance","level":2,"title":"Planned Maintenance","text":"

            Before taking a leader offline, hand off leadership:

            ssh hub-a.lan 'ctx hub stepdown'\n

            stepdown triggers a new election among the remaining followers before the leader goes offline. In-flight clients briefly pause, then reconnect to the new leader.

            ","path":["Recipes","Hub","HA Cluster"],"tags":[]},{"location":"recipes/hub-cluster/#failure-modes-at-a-glance","level":2,"title":"Failure Modes at a Glance","text":"Event What happens Leader crashes New election; clients reconnect to new leader Follower crashes No write impact; catches up on restart Network partition (majority) Majority side keeps serving; minority read-only Network partition (split) No quorum; all nodes read-only Disk full on leader Writes rejected; read traffic continues

            For the full list, see Hub failure modes.

            ","path":["Recipes","Hub","HA Cluster"],"tags":[]},{"location":"recipes/hub-cluster/#see-also","level":2,"title":"See Also","text":"
            • Multi-machine recipe: single-node deployment
            • Hub operations: backup and maintenance
            • Hub security model: TLS, tokens
            ","path":["Recipes","Hub","HA Cluster"],"tags":[]},{"location":"recipes/hub-getting-started/","level":1,"title":"Getting Started","text":"","path":["Recipes","Hub","Getting Started"],"tags":[]},{"location":"recipes/hub-getting-started/#ctx-hub-getting-started","level":1,"title":"ctx Hub: Getting Started","text":"

            Stand up a single-node ctx Hub on localhost, register two projects, publish a decision from one, and see it appear in the other, all in under five minutes.

            Read This First

            If you haven't already, skim the ctx Hub overview. It explains the mental model, names the two user stories (personal vs small team), and (importantly) lists what the hub does not do. This recipe assumes you already know you want the feature.

            ","path":["Recipes","Hub","Getting Started"],"tags":[]},{"location":"recipes/hub-getting-started/#what-youll-get-out-of-this-recipe","level":2,"title":"What You'll Get out of This Recipe","text":"

            By the end, you will have:

            1. A local hub process running on port 9900.
            2. Two project directories both registered with the ctx Hub.
            3. A decision published from project alpha that appears automatically in project beta's .context/hub/ and in ctx agent --include-hub output.

            Concretely, the payoff this unlocks: a lesson you record in one project becomes visible to your agent the next time you open another project, without touching local files in the second project or opening another editor window.

            ","path":["Recipes","Hub","Getting Started"],"tags":[]},{"location":"recipes/hub-getting-started/#what-this-recipe-does-not-cover","level":2,"title":"What This Recipe Does Not Cover","text":"
            • Sharing .context/journal/, .context/pad, or any other local state. The hub only fans out decision, learning, convention, and task entries. Everything else stays local.
            • Multi-user attribution. The hub identifies projects, not people.
            • Running over a LAN; see Multi-machine setup.
            • Redundancy; see HA cluster.
            ","path":["Recipes","Hub","Getting Started"],"tags":[]},{"location":"recipes/hub-getting-started/#prerequisites","level":2,"title":"Prerequisites","text":"
            • ctx installed and on PATH
            • Two project directories, each already initialized with ctx init
            ","path":["Recipes","Hub","Getting Started"],"tags":[]},{"location":"recipes/hub-getting-started/#step-1-start-the-hub","level":2,"title":"Step 1: Start the Hub","text":"

            In a dedicated terminal:

            ctx hub start\n

            On first run, the hub generates an admin token and prints it to stdout. Copy it; you'll need it for each project registration:

            ctx hub listening on :9900\nadmin token: ctx_adm_7f3a1c2d...\ndata dir: ~/.ctx/hub-data/\n

            The admin token is written to ~/.ctx/hub-data/admin.token so you can recover it later. Treat it like a password.

            ","path":["Recipes","Hub","Getting Started"],"tags":[]},{"location":"recipes/hub-getting-started/#step-2-register-the-first-project","level":2,"title":"Step 2: Register the First Project","text":"
            cd ~/projects/alpha\nctx connection register localhost:9900 --token ctx_adm_7f3a1c2d...\n

            This stores an encrypted connection config in .context/.connect.enc. The admin token is exchanged for a per-project client token; the admin token itself is never persisted in the project.

            ","path":["Recipes","Hub","Getting Started"],"tags":[]},{"location":"recipes/hub-getting-started/#step-3-choose-what-to-receive","level":2,"title":"Step 3: Choose What to Receive","text":"
            ctx connection subscribe decision learning convention\n

            Only the entry types you subscribe to will be delivered by sync and listen.

            ","path":["Recipes","Hub","Getting Started"],"tags":[]},{"location":"recipes/hub-getting-started/#step-4-publish-a-decision","level":2,"title":"Step 4: Publish a Decision","text":"

            Either use ctx add --share to write locally and push to the ctx Hub:

            ctx add decision \"Use UTC timestamps everywhere\" --share \\\n  --context \"We had timezone drift between the API and journal\" \\\n  --rationale \"Single source of truth avoids conversion bugs\" \\\n  --consequence \"The UI does conversion at render time\"\n

            Or publish an existing entry directly:

            ctx connection publish decision \"Use UTC timestamps everywhere\"\n
            ","path":["Recipes","Hub","Getting Started"],"tags":[]},{"location":"recipes/hub-getting-started/#step-5-register-a-second-project-and-sync","level":2,"title":"Step 5: Register a Second Project and Sync","text":"
            cd ~/projects/beta\nctx connection register localhost:9900 --token ctx_adm_7f3a1c2d...\nctx connection subscribe decision learning convention\nctx connection sync\n

            The decision from alpha now appears in ~/projects/beta/.context/hub/decisions.md with an origin tag and timestamp.

            ","path":["Recipes","Hub","Getting Started"],"tags":[]},{"location":"recipes/hub-getting-started/#step-6-watch-entries-arrive-live","level":2,"title":"Step 6: Watch Entries Arrive Live","text":"

            Instead of re-running sync, stream new entries as they land:

            ctx connection listen\n

            Leave this running in a terminal; every --share publish from any registered project will appear in .context/hub/ immediately.

            ","path":["Recipes","Hub","Getting Started"],"tags":[]},{"location":"recipes/hub-getting-started/#step-7-feed-shared-knowledge-into-the-agent","level":2,"title":"Step 7: Feed Shared Knowledge into the Agent","text":"

            Once entries exist in .context/hub/, include them in the agent context packet:

            ctx agent --include-hub\n

            Shared entries are added as a dedicated tier in the budget-aware assembly, scored by recency and type relevance.

            ","path":["Recipes","Hub","Getting Started"],"tags":[]},{"location":"recipes/hub-getting-started/#auto-sync-on-session-start","level":2,"title":"Auto-Sync on Session Start","text":"

            After register, the check-hub-sync hook pulls new entries at the start of each session (daily throttled). Most users never need to call ctx connection sync manually.

            ","path":["Recipes","Hub","Getting Started"],"tags":[]},{"location":"recipes/hub-getting-started/#where-to-go-next","level":2,"title":"Where to Go Next","text":"
            • Multi-machine hub: run the hub on a LAN host and connect from other workstations.
            • HA cluster: Raft-based leader election for high availability.
            • Hub operations: daemon mode, backup, log rotation, JSONL store layout.
            • Hub security model: token lifecycle, encryption at rest, threat model.
            • ctx connect reference and ctx hub start reference.
            ","path":["Recipes","Hub","Getting Started"],"tags":[]},{"location":"recipes/hub-multi-machine/","level":1,"title":"Multi-Machine","text":"","path":["Recipes","Hub","Multi-Machine"],"tags":[]},{"location":"recipes/hub-multi-machine/#ctx-hub-multi-machine","level":1,"title":"ctx Hub: Multi-Machine","text":"

            Run the hub on a LAN host and connect from project directories on other workstations. This recipe is the Story 2 (\"small trusted team\") shape described in the ctx Hub overview; read that first if you haven't, especially the trust-model warnings.

            This recipe assumes you've already walked through Getting Started and understand what flows through the hub (decisions, learnings, conventions, tasks, not journals, scratchpad, or raw context files).

            ","path":["Recipes","Hub","Multi-Machine"],"tags":[]},{"location":"recipes/hub-multi-machine/#topology","level":2,"title":"Topology","text":"
            +------------------+        +------------------+\n| workstation A    |        | workstation B    |\n|  ~/projects/x    |        |  ~/projects/y    |\n|  ctx connection  |        |  ctx connection  |\n+---------+--------+        +---------+--------+\n          |                           |\n          +-----------+   +-----------+\n                      v   v\n              +-------------------+\n              | LAN host \"nexus\"  |\n              | ctx hub start     |\n              | --daemon          |\n              | :9900             |\n              +-------------------+\n
            ","path":["Recipes","Hub","Multi-Machine"],"tags":[]},{"location":"recipes/hub-multi-machine/#step-1-start-the-daemon-on-the-lan-host","level":2,"title":"Step 1: Start the Daemon on the LAN Host","text":"

            On the machine that will hold the hub (call it nexus):

            ctx hub start --daemon --port 9900\n

            The daemon writes a PID file to ~/.ctx/hub-data/hub.pid. Stop it later with:

            ctx hub stop\n
            ","path":["Recipes","Hub","Multi-Machine"],"tags":[]},{"location":"recipes/hub-multi-machine/#step-2-firewall-and-port","level":2,"title":"Step 2: Firewall and Port","text":"

            Open port 9900/tcp on nexus to the LAN only. Never expose the hub to the public internet without a reverse proxy and TLS in front of it (see Hub security model).

            Typical LAN allowlist rules:

            firewalldufwnftables
            sudo firewall-cmd --zone=internal \\\n  --add-port=9900/tcp --permanent\nsudo firewall-cmd --reload\n
            sudo ufw allow from 192.168.1.0/24 to any port 9900 proto tcp\n
            sudo nft add rule inet filter input ip saddr 192.168.1.0/24 \\\n  tcp dport 9900 accept\n
            ","path":["Recipes","Hub","Multi-Machine"],"tags":[]},{"location":"recipes/hub-multi-machine/#step-3-retrieve-the-admin-token","level":2,"title":"Step 3: Retrieve the Admin Token","text":"

            The daemon prints the admin token to stdout on first run. When running as a daemon, that output goes to the log instead:

            cat ~/.ctx/hub-data/admin.token\n

            Copy the token over a trusted channel (SSH, password manager, or an encrypted note). Do not email it or put it in chat.

            ","path":["Recipes","Hub","Multi-Machine"],"tags":[]},{"location":"recipes/hub-multi-machine/#step-4-register-projects-from-each-workstation","level":2,"title":"Step 4: Register Projects from Each Workstation","text":"

            On workstation A:

            cd ~/projects/x\nctx connection register nexus.local:9900 --token ctx_adm_...\nctx connection subscribe decision learning convention\n

            On workstation B:

            cd ~/projects/y\nctx connection register nexus.local:9900 --token ctx_adm_...\nctx connection subscribe decision learning convention\n

            Each registration exchanges the admin token for a per-project client token. Only the client token is persisted in .context/.connect.enc, encrypted with the same AES-256-GCM scheme ctx uses for notification credentials.

            ","path":["Recipes","Hub","Multi-Machine"],"tags":[]},{"location":"recipes/hub-multi-machine/#step-5-verify","level":2,"title":"Step 5: Verify","text":"

            From either workstation:

            ctx connection status\n

            You should see the ctx Hub address, role (leader for single-node), subscription filters, and the sequence number you're synced to.

            ","path":["Recipes","Hub","Multi-Machine"],"tags":[]},{"location":"recipes/hub-multi-machine/#tls-recommended","level":2,"title":"TLS (Recommended)","text":"

            For anything beyond a trusted home LAN, terminate TLS in front of the hub. The hub speaks gRPC, so the reverse proxy must speak HTTP/2:

            server {\n    listen 443 ssl http2;\n    server_name nexus.example.com;\n\n    ssl_certificate     /etc/letsencrypt/live/nexus.example.com/fullchain.pem;\n    ssl_certificate_key /etc/letsencrypt/live/nexus.example.com/privkey.pem;\n\n    location / {\n        grpc_pass grpc://127.0.0.1:9900;\n    }\n}\n

            Point ctx connection register at the public hostname and port 443.

            ","path":["Recipes","Hub","Multi-Machine"],"tags":[]},{"location":"recipes/hub-multi-machine/#handling-daemon-restarts","level":2,"title":"Handling Daemon Restarts","text":"

            The hub is append-only JSONL, so restarts are safe. Clients keep their last-seen sequence in .context/hub/.sync-state.json and pick up exactly where they left off on the next sync or listen reconnect.

            ","path":["Recipes","Hub","Multi-Machine"],"tags":[]},{"location":"recipes/hub-multi-machine/#see-also","level":2,"title":"See Also","text":"
            • HA cluster recipe: for redundancy
            • Hub operations: backup, rotation
            • Hub failure modes
            • Hub security model
            ","path":["Recipes","Hub","Multi-Machine"],"tags":[]},{"location":"recipes/hub-overview/","level":1,"title":"Overview","text":"","path":["Recipes","Hub","Overview"],"tags":[]},{"location":"recipes/hub-overview/#ctx-hub-overview","level":1,"title":"ctx Hub: Overview","text":"

            Start here before the other hub recipes. This page answers what the hub is, who it's for, why you'd run one, and, equally important, what it is not.

            ","path":["Recipes","Hub","Overview"],"tags":[]},{"location":"recipes/hub-overview/#mental-model-in-one-paragraph","level":2,"title":"Mental Model in One Paragraph","text":"

            The hub is a fan-out channel for structured knowledge entries across projects. When you publish a decision, learning, convention, or task with --share, the hub stores it in an append-only log and delivers it to every other project subscribed to that type. The next time your agent loads context in any of those projects, shared entries can be included in the context packet alongside local ones.

            That's the whole feature. It is a project-to-project knowledge bus for a small, curated set of entry types. It is not a shared memory, a shared journal, or a multi-user database.

            ","path":["Recipes","Hub","Overview"],"tags":[]},{"location":"recipes/hub-overview/#what-flows-through-the-hub","level":2,"title":"What Flows through the Hub","text":"

            Only four entry types:

            Type What it is decision Architectural decisions with rationale learning Gotchas, lessons, surprising behaviors convention Coding patterns and standards task Work items worth sharing across projects

            Each entry is an immutable record with a content blob, the publishing project's name as Origin, a timestamp, and a hub-assigned sequence number. Once published, entries are never rewritten.

            ","path":["Recipes","Hub","Overview"],"tags":[]},{"location":"recipes/hub-overview/#what-does-not-flow-through-the-hub","level":2,"title":"What Does Not Flow through the Hub","text":"

            This is the part new users get wrong most often:

            • Session journals (~/.claude/ logs, .context/journal/) stay local. The hub does not sync your AI session history.
            • Scratchpad (.context/pad) stays local. Encrypted notes never leave the machine they were written on.
            • Local context files as a whole (TASKS.md, DECISIONS.md, LEARNINGS.md, CONVENTIONS.md) are not mirrored wholesale. Only entries you explicitly --share, or publish later with ctx connection publish, cross the boundary.
            • Anything under .context/ that isn't one of the four entry types above. Configuration, state, logs, memory, journal metadata: all local.

            If you were expecting \"now my agent in project B can see everything my agent did in project A,\" that's not this feature. Local session density still lives on the local machine.

            ","path":["Recipes","Hub","Overview"],"tags":[]},{"location":"recipes/hub-overview/#two-user-stories","level":2,"title":"Two User Stories","text":"

            The hub makes sense in two different shapes. Pick the one that matches your situation; the mechanics are identical but the trust model and threat surface are very different.

            ","path":["Recipes","Hub","Overview"],"tags":[]},{"location":"recipes/hub-overview/#story-1-personal-cross-project-brain","level":3,"title":"Story 1: Personal Cross-Project Brain","text":"

            One developer, many projects, one hub, usually on localhost.

            You're working across several projects on the same machine (or a handful of machines you own). You want a lesson learned debugging project A to show up when you open project B a week later, without re-discovering it. You want a convention you codified in one project to be visible as-you-type in another.

            Concrete payoff:

            • ctx add learning --share \"...\" in project A → ctx agent --include-hub in project B shows that learning in the next context packet.
            • A decision recorded in your personal \"dotfiles\" project is instantly visible to every other project on your workstation.
            • Cross-project conventions (e.g., \"use UTC timestamps everywhere\") live in one place and propagate.

            Trust model: high, because every participant is you. Run the hub on localhost or on your own LAN, use the default single-node setup, don't worry about TLS.

            Start here: Getting Started for the one-time setup, then Personal cross-project brain for the day-to-day workflow.

            ","path":["Recipes","Hub","Overview"],"tags":[]},{"location":"recipes/hub-overview/#story-2-small-trusted-team","level":3,"title":"Story 2: Small Trusted Team","text":"

            A few teammates, projects they each own, one hub on a LAN host they all trust.

            Your team has a handful of services and you want a shared \"things we've learned the hard way\" stream. Someone on the platform team records a convention about timestamp handling; everyone else's agents see it the next session. An on-call engineer records a learning from a 3 AM incident; the rest of the team inherits the lesson without needing to read the postmortem.

            Concrete payoff:

            • Team conventions propagate without needing a wiki or chat.
            • Lessons from one team member become available to everyone else's agent context packets automatically.
            • Cross-project decisions (shared libraries, deployment patterns, naming rules) live in a single log the whole team reads.

            Trust model: the hub assumes everyone holding a client token is friendly. There is no per-user attribution you can rely on, Origin is self-asserted by the publishing client, and there is no read ACL beyond the subscription filter. Treat the hub like a team wiki: useful because everyone can write to it, not because it can prove who wrote what.

            Operational shape: run the hub on a LAN host (or a three-node HA cluster for redundancy), put TLS in front of it for anything beyond a home LAN, distribute client tokens over a trusted channel.

            Start here: Multi-machine setup for the deployment, Team knowledge bus for the day-to-day team workflow, then HA cluster if you need redundancy.

            ","path":["Recipes","Hub","Overview"],"tags":[]},{"location":"recipes/hub-overview/#identity-projects-not-users","level":2,"title":"Identity: Projects, Not Users","text":"

            The hub has no concept of users. Its unit of identity is the project. ctx connection register binds a hub token to a project directory, not to a person. Two developers working on the same project share either:

            • The same .connect.enc, copied between machines over a trusted channel, or
            • Different project names (alpha@laptop-a, alpha@laptop-b), because the hub rejects duplicate registrations of the same project name.

            Either works; neither gives you per-human attribution. If you need \"who wrote this,\" the hub is the wrong tool.

            ","path":["Recipes","Hub","Overview"],"tags":[]},{"location":"recipes/hub-overview/#when-not-to-use-it","level":2,"title":"When Not to Use It","text":"
            • Solo, single-project work. Local .context/ files are enough. The hub adds operational surface for no payoff.
            • Untrusted participants. The hub assumes everyone with a client token is friendly. It is not hardened against hostile insiders or compromised tokens.
            • Compliance-sensitive environments. There is no audit trail that can prove who published what, only which project published what, and Origin is self-asserted.
            • Secrets or PII. Entry content is stored plaintext on the hub and fanned out to every subscribed client. Don't publish anything you wouldn't paste in a team chat.
            • Wholesale journal sharing. See \"what does not flow\" above. If that's what you want, this feature won't provide it. Talk to us in the issue tracker about what would.
            ","path":["Recipes","Hub","Overview"],"tags":[]},{"location":"recipes/hub-overview/#how-entries-reach-your-agent","level":2,"title":"How Entries Reach Your Agent","text":"

            Once a project is registered and subscribed, entries arrive by three mechanisms:

            1. ctx connection sync: an on-demand pull, replays everything new since the last sequence you saw.
            2. ctx connection listen: a long-lived gRPC stream that writes new entries to .context/hub/ as they arrive.
            3. check-hub-sync hook: runs at session start, daily throttled, so most users never call sync manually.

            Once entries exist in .context/hub/, ctx agent --include-hub adds a dedicated tier to the budget-aware context packet, scored by recency and type relevance. That's the end of the pipeline.

            ","path":["Recipes","Hub","Overview"],"tags":[]},{"location":"recipes/hub-overview/#where-to-go-next","level":2,"title":"Where to Go Next","text":"If you're… Read Trying it for yourself on one machine Getting Started A solo developer using the hub day-to-day Personal cross-project brain Setting up for a small team on a LAN Multi-machine setup A small team using the hub day-to-day Team knowledge bus Running redundant nodes HA cluster Operating a hub in production Operations Assessing the security posture Security model Debugging a hub in trouble Failure modes Just reading the commands ctx connect, ctx serve, ctx hub","path":["Recipes","Hub","Overview"],"tags":[]},{"location":"recipes/hub-personal/","level":1,"title":"Personal Cross-Project Brain","text":"","path":["Recipes","Hub","Personal Cross-Project Brain"],"tags":[]},{"location":"recipes/hub-personal/#personal-cross-project-brain","level":1,"title":"Personal Cross-Project Brain","text":"

            This recipe shows how one developer uses a ctx Hub across their own projects day-to-day, the \"Story 1\" shape from the Hub overview. You're not setting up infrastructure for a team; you're making a lesson you learned last Tuesday in project A automatically surface when you open project B next Thursday.

            Prerequisites: a working ctx Hub on localhost (see Getting Started for the roughly five-minute setup). This recipe assumes the hub is already running and you've registered at least two projects.

            ","path":["Recipes","Hub","Personal Cross-Project Brain"],"tags":[]},{"location":"recipes/hub-personal/#the-core-loop","level":2,"title":"The Core Loop","text":"

            Every day, the same three verbs matter:

            1. Record: notice a decision, learning, or convention and capture it with ctx add --share.
            2. Subscribe: every project you care about is subscribed to the types you want delivered (set once with ctx connection subscribe).
            3. Load: your agent picks up shared entries on next session start via the auto-sync hook, or explicitly via ctx agent --include-hub.

            That's the whole workflow. The rest of this recipe fills in the concrete moments where each verb matters.

            ","path":["Recipes","Hub","Personal Cross-Project Brain"],"tags":[]},{"location":"recipes/hub-personal/#a-realistic-day","level":2,"title":"A Realistic Day","text":"

            You have three projects on your workstation:

            • ~/projects/api, a Go service you're actively developing
            • ~/projects/cli, a companion CLI that consumes the API
            • ~/projects/dotfiles, your personal conventions and cross-project learnings

            All three are registered with a single hub running on localhost:9900 (started once at boot, or via a systemd user unit; see Hub operations). All three subscribe to decision, learning, and convention.

            ","path":["Recipes","Hub","Personal Cross-Project Brain"],"tags":[]},{"location":"recipes/hub-personal/#0900-start-work-on-api","level":3,"title":"09:00 - Start Work on api","text":"

            You cd ~/projects/api and start a Claude Code session. Behind the scenes, the plugin's PreToolUse hook calls ctx agent --budget 8000 --include-hub before the first tool call. The agent loads:

            • Local .context/ (TASKS, DECISIONS, LEARNINGS, etc.)
            • Foundation steering files (always-inclusion)
            • Everything you've shared from the other two projects

            So the \"use UTC timestamps everywhere\" decision you recorded in dotfiles last week is already in Claude's context for this session, without any manual sync.

            ","path":["Recipes","Hub","Personal Cross-Project Brain"],"tags":[]},{"location":"recipes/hub-personal/#1030-you-discover-a-gotcha","level":3,"title":"10:30 - You Discover a Gotcha","text":"

            While debugging, you find that the API's retry loop silently drops the last error when the transport times out. This is the kind of thing you'd normally add to LEARNINGS.md in api/. But it's useful across every Go service you'll ever write, not just this one. So:

            ctx add learning --share \\\n  --context \"Go http.Client retries mask the final error\" \\\n  --lesson  \"Transport timeouts don't surface as errors when the retry loop re-assigns err without wrapping. Check for context.DeadlineExceeded on the request context instead.\" \\\n  --application \"Any retry loop over http.Client.Do that uses a per-attempt timeout\"\n

            The --share flag does two things:

            1. Writes the learning to api/.context/LEARNINGS.md locally (as a normal ctx add learning would).
            2. Publishes the same entry to the ctx Hub, which stores it in the append-only JSONL and fans it out to every subscribed client.

            Within seconds, cli/.context/hub/learnings.md and dotfiles/.context/hub/learnings.md both contain a copy of this learning (the ctx connection listen daemon picks it up from the ctx Hub's Listen stream).

            ","path":["Recipes","Hub","Personal Cross-Project Brain"],"tags":[]},{"location":"recipes/hub-personal/#1200-you-switch-to-cli","level":3,"title":"12:00 - You Switch to cli","text":"

            cd ~/projects/cli, open a new session. The agent packet for cli now includes the learning you just recorded in api, because cli is subscribed to learning and the entry has already been synced into cli/.context/hub/learnings.md.

            You don't have to re-explain the retry-loop gotcha. Claude already sees it.

            ","path":["Recipes","Hub","Personal Cross-Project Brain"],"tags":[]},{"location":"recipes/hub-personal/#1400-you-codify-a-convention","level":3,"title":"14:00 - You Codify a Convention","text":"

            You've been writing error messages in api and decided you want a consistent pattern: lowercase start, no trailing period, single-sentence. This is a convention, not a decision; it applies to every Go project you touch. Record it in dotfiles (since that's your \"personal standards\" project), and share it:

            cd ~/projects/dotfiles\nctx add convention --share \\\n  \"Error messages: lowercase start, no trailing period, single sentence (follows Go's stdlib style)\"\n

            The convention lands in dotfiles/.context/CONVENTIONS.md locally and fans out to api and cli via the hub. The next Claude Code session in either project gets the convention injected into the steering-adjacent slot of the agent packet.

            ","path":["Recipes","Hub","Personal Cross-Project Brain"],"tags":[]},{"location":"recipes/hub-personal/#1630-end-of-day","level":3,"title":"16:30 - End of Day","text":"

            You didn't run ctx connection sync once. You didn't git push anything between projects. You didn't remember to tell your agent about the retry-loop gotcha in the new project. The hub did all of it for you.

            ","path":["Recipes","Hub","Personal Cross-Project Brain"],"tags":[]},{"location":"recipes/hub-personal/#what-the-workflow-actually-looks-like","level":2,"title":"What the Workflow Actually Looks Like","text":"

            Stripped of prose, the day's commands were:

            # Morning: nothing. Agent loads --include-hub automatically.\n\n# Mid-morning: record a learning that should cross projects\nctx add learning --share \\\n  --context \"...\" --lesson \"...\" --application \"...\"\n\n# Afternoon: codify a convention in the \"standards\" project\nctx add convention --share \"...\"\n\n# Evening: nothing. Everything's already propagated.\n

            The hub is passive infrastructure. You never talk to it directly; you talk through it by using --share on commands you were already running.

            ","path":["Recipes","Hub","Personal Cross-Project Brain"],"tags":[]},{"location":"recipes/hub-personal/#tips-for-solo-use","level":2,"title":"Tips for Solo Use","text":"

            Pick a \"standards\" project. One of your projects should play the role of \"canonical source for rules you want everywhere.\" Your dotfiles, a personal scratch repo, or a dedicated ctx-standards project all work. Record cross-cutting conventions there and let the hub propagate them to everything else.

            Subscribe to task only if you want cross-project todos. The four subscribable types are decision, learning, convention, task. Tasks are usually project-local; subscribing makes every hub-shared task from every project show up in every other project's agent packet. That's probably not what you want. Skip task in ctx connection subscribe unless you have a specific reason.

            Run the hub as a user-level daemon so you don't have to remember to start it. On Linux with systemd:

            # ~/.config/systemd/user/ctx-hub.service\n[Unit]\nDescription=ctx Hub (personal)\n\n[Service]\nType=simple\nExecStart=/usr/local/bin/ctx hub start\nRestart=on-failure\n\n[Install]\nWantedBy=default.target\n
            systemctl --user enable --now ctx-hub.service\n

            Don't overthink subscription filters. For personal use, subscribe every project to all four types at first (or three, if you skip task). Tune later if the context packets get noisy.

            Local storage is fine; no TLS needed. The hub runs on localhost. No one else is on the network. Skip the TLS setup from the Multi-machine recipe; it's relevant when the hub is on a LAN host serving multiple workstations, not when it's a personal daemon.

            ","path":["Recipes","Hub","Personal Cross-Project Brain"],"tags":[]},{"location":"recipes/hub-personal/#what-this-recipe-is-not","level":2,"title":"What This Recipe Is Not","text":"

            Not a setup guide. For the one-time hub install and project registration, use Getting Started.

            Not a team guide. If you're sharing across humans, not just across your own projects, read Team knowledge bus instead; the trust model and operational concerns are different.

            Not production operations. For backup, log rotation, failure recovery, and HA, see Hub operations and Hub failure modes.

            ","path":["Recipes","Hub","Personal Cross-Project Brain"],"tags":[]},{"location":"recipes/hub-personal/#see-also","level":2,"title":"See Also","text":"
            • Hub overview: when to use the Hub and when not to.
            • Team knowledge bus: the multi-human companion recipe.
            • ctx connect: the client-side commands used above (subscribe, publish, sync, listen, status).
            • ctx add: the --share flag reference.
            • ctx hub: operator commands for starting, stopping, and inspecting the hub.
            ","path":["Recipes","Hub","Personal Cross-Project Brain"],"tags":[]},{"location":"recipes/hub-team/","level":1,"title":"Team Knowledge Bus","text":"","path":["Recipes","Hub","Team Knowledge Bus"],"tags":[]},{"location":"recipes/hub-team/#team-knowledge-bus","level":1,"title":"Team Knowledge Bus","text":"

            This recipe shows how a small trusted team uses a ctx Hub as a shared knowledge bus, the \"Story 2\" shape from the Hub overview. You're not building a wiki, you're not replacing your issue tracker, and you're not running a multi-tenant service. You're connecting 3-10 developers who trust each other so that lessons, decisions, and conventions flow between them without ceremony.

            Prerequisites:

            • A running ctx Hub on a LAN host or internal server everyone on the team can reach. See Multi-machine setup for the deployment guide.
            • Each team member has ctx installed and has ctx connection register-ed their working projects with the hub.
            ","path":["Recipes","Hub","Team Knowledge Bus"],"tags":[]},{"location":"recipes/hub-team/#trust-model-read-this-first","level":2,"title":"Trust Model: Read This First","text":"

            The hub assumes everyone holding a client token is friendly. There's no per-user attribution you can rely on, no read ACL beyond subscription filters, and Origin is self-asserted by the publishing client. Treat the hub like a team wiki: useful because everyone can write to it, not because it can prove who wrote what.

            If your team is:

            • ✅ 3-10 engineers, all known to each other, all trusted with production access
            • ✅ On a single internal network or behind a VPN
            • ✅ Comfortable with \"the hub assumes friendly participants\"

            …this recipe fits. If your team is:

            • ❌ Larger than ~15, with turnover
            • ❌ Includes contractors, untrusted agents, or compromised-workstation concerns
            • ❌ Needs audit trails that prove who published what
            • ❌ Requires per-team-member isolation

            …you're in \"Story 3\" territory, which the hub does not support today. Use a wiki or a dedicated knowledge platform instead.

            ","path":["Recipes","Hub","Team Knowledge Bus"],"tags":[]},{"location":"recipes/hub-team/#the-teams-three-verbs","level":2,"title":"The Team's Three Verbs","text":"

            Everyone on the team does three things, same as in the personal recipe, but with different social expectations:

            1. Record: when you learn something that would save a teammate time, capture it with ctx add --share.
            2. Subscribe: every engineer's project directories subscribe to the types the team cares about.
            3. Load: agents pick up shared entries automatically via the auto-sync hook and the --include-hub flag in the PreToolUse hook pipeline.

            The operational shape is identical to solo use. What's different is the culture around publishing: when do you --share, and what belongs on the hub vs. in your local .context/.

            ","path":["Recipes","Hub","Team Knowledge Bus"],"tags":[]},{"location":"recipes/hub-team/#what-goes-on-the-hub-team-rules-of-thumb","level":2,"title":"What Goes on the Hub (Team Rules of Thumb)","text":"

            Share it if it's true for more than one person. The central question: \"would the next teammate who hits this problem save time if they already knew this?\" If yes, --share. If no, record it locally and move on.

            Decisions:

            • ✅ Cross-service decisions (database choice, auth model, deployment pattern, monitoring stack).
            • ✅ Policy decisions that apply to all services (naming, API versioning, error-message format).
            • ❌ Internal implementation decisions inside a single service (\"chose a map over a slice here because lookups dominate\").
            • ❌ One-off tactical calls for a specific PR.

            Learnings:

            • ✅ Gotchas, surprising behavior, flaky infrastructure quirks, anything you'd tell a teammate over coffee with \"watch out for X\".
            • ✅ Lessons from incidents, right after the postmortem is the highest-value time to share.
            • ❌ Internal debugging notes that only make sense with context from your current branch.

            Conventions:

            • ✅ Repo layout, commit message format, pre-commit hooks, review expectations.
            • ✅ Language-level style decisions that apply across services.
            • ❌ Per-service idioms (\"in billing/ we prefer…\").

            Tasks: almost always project-local. Don't subscribe to task unless the team has a specific reason (e.g., a cross-cutting migration you want visible everywhere).

            ","path":["Recipes","Hub","Team Knowledge Bus"],"tags":[]},{"location":"recipes/hub-team/#a-realistic-week","level":2,"title":"A Realistic Week","text":"

            Monday, 3 AM incident, shared learning

            On-call engineer Alice gets paged: the payment service starts returning 500s after a dependency update. After an hour she finds the culprit: a breaking change in a transitive gRPC dep that only manifests under high concurrency. Postmortem on Tuesday, but right now she records the learning:

            ctx add learning --share \\\n  --context \"Payment service 3 AM incident, 2026-04-03\" \\\n  --lesson  \"grpc-go v1.62+ changes DialContext behavior under high \\\n  concurrency: connections from a single channel can deadlock if the \\\n  server emits GOAWAY mid-stream. Symptom: 500 errors cluster in \\\n  30s bursts, no error in grpc client logs.\" \\\n  --application \"Any service on grpc-go. Pin to v1.61 or patch with \\\n  keepalive: https://github.com/grpc/grpc-go/issues/...\" \n

            By Tuesday morning, every other engineer's agent context packet contains this learning. When Bob starts work on the ledger service (which also uses grpc-go), his Claude Code session already knows about the gotcha without Bob having to read the incident channel.

            Wednesday, cross-service decision

            The team agrees on a new pattern for API versioning: header-based instead of URL-based. Platform lead Carol records the decision:

            ctx add decision --share \\\n  --context \"Need consistent API versioning across all 6 services. \\\n  Current URL-based /v1/ isn't working for gradual rollouts.\" \\\n  --rationale \"Header-based versioning lets us route by header at the \\\n  edge, which makes canary rollouts trivial. URL-based versioning \\\n  forces clients to update their paths.\" \\\n  --consequence \"All new endpoints use X-API-Version header. \\\n  Existing /v1/ endpoints stay. Deprecation schedule in q3.\" \\\n  \"Use header-based API versioning for new endpoints\"\n

            Every engineer's next session knows about this decision automatically. When Dave starts adding endpoints to the inventory service on Thursday, Claude already prompts him for the header pattern instead of defaulting to /v1/.

            Friday, convention drift caught at review

            Dave notices that his PR auto-formatted some error messages to end with periods. He recalls the team convention is \"no trailing period\" but can't remember where it was documented. He runs ctx connection status, sees the hub is healthy, greps his local .context/hub/conventions.md, and finds:

            ## [2026-03-12] Error message format\nLowercase start, no trailing period, single sentence.\n

            He fixes the PR. No lookup on the wiki, no question in chat, no context-switch penalty.

            ","path":["Recipes","Hub","Team Knowledge Bus"],"tags":[]},{"location":"recipes/hub-team/#workflow-tips-for-teams","level":2,"title":"Workflow Tips for Teams","text":"

            Designate a \"champion\" for decisions. The team lead or platform engineer should be the person who explicitly --shares cross-cutting decisions. Other team members share learnings freely but should ask \"should this be a decision?\" in review before --sharing a decision. This keeps the decision stream signal-rich.

            Publish postmortem learnings immediately, not after the meeting. The postmortem itself is a document; the actionable rules that come out of it belong on the hub, and they should land within an hour of the incident. \"Share fast, edit later\" is the rule.

            Delete noisy entries, don't tolerate them. The hub is append-only, but the .context/hub/ mirror on each client is just markdown. If a shared learning turns out to be wrong or obsolete, remove it from local mirrors and stop the hub daemon to truncate entries.jsonl (see Hub operations). Noisy shared feeds lose trust fast.

            Don't subscribe every project to every type. For backend engineers, subscribing to decision + learning + convention is usually right. For platform or DevOps projects, adding task makes sense. For a prototype or experiment project, subscribing only to convention might be enough.

            Run a single hub, not one per team. If two teams need to share knowledge, they should share a hub. Splitting hubs by team creates silos, which is often exactly the thing you were trying to solve.

            ","path":["Recipes","Hub","Team Knowledge Bus"],"tags":[]},{"location":"recipes/hub-team/#operational-concerns","level":2,"title":"Operational Concerns","text":"

            The team recipe assumes someone owns the hub host. That person (or a small group) is responsible for:

            • Uptime: the hub is infrastructure; treat it like any other internal service you run. See Hub operations.
            • Backups: entries.jsonl is the source of truth. Snapshot it to the same backup tier as your other internal data.
            • Upgrades: follow a cadence the team agrees on. Major upgrades may require everyone to re-register, so do them at natural breaks.
            • Failures: see Hub failure modes for the standard oncall playbook.

            Optional but recommended: run a 3-node Raft cluster so the hub survives individual node failures. See HA cluster. For teams under 10 people, a single-node hub with daily backups is usually fine.

            ","path":["Recipes","Hub","Team Knowledge Bus"],"tags":[]},{"location":"recipes/hub-team/#token-management","level":2,"title":"Token Management","text":"

            Every team member has a client token stored in their .context/.connect.enc. Rules of thumb:

            • One token per engineer per project. Not one token per team; not one shared token. Each engineer registers each of their working projects separately.
            • Token compromise = revoke immediately. When an engineer leaves, their tokens should be removed from clients.json on the hub. This is a manual operation today; see Hub security for the revocation steps.
            • No checked-in tokens. .context/.connect.enc is encrypted with the local machine key, but don't push it to shared repos; it's per-workstation.
            ","path":["Recipes","Hub","Team Knowledge Bus"],"tags":[]},{"location":"recipes/hub-team/#what-this-recipe-is-not","level":2,"title":"What This Recipe Is Not","text":"

            Not a wiki replacement. The hub is for structured entries, not prose. Put your architecture overviews, onboarding docs, and design discussions in a real wiki.

            Not an audit log. Origin on the hub is self-asserted. If compliance requires provenance, the hub is the wrong tool.

            Not a ticket system. Task sharing works, but mature teams already have Jira/Linear/GitHub Issues. Don't try to replace those with hub tasks; use the hub for lightweight cross-project todos that your existing tracker doesn't capture well.

            Not a production service for end users. This is internal team infrastructure. Do not expose the hub to customers, partners, or the open internet.

            ","path":["Recipes","Hub","Team Knowledge Bus"],"tags":[]},{"location":"recipes/hub-team/#see-also","level":2,"title":"See Also","text":"
            • Hub overview: when to use the hub and when not to.
            • Personal cross-project brain: the single-developer companion recipe.
            • Multi-machine setup: standing up the hub on a LAN host.
            • HA cluster: optional redundancy for larger teams.
            • Hub operations: backup, rotation, monitoring.
            • Hub security: threat model and hardening checklist.
            ","path":["Recipes","Hub","Team Knowledge Bus"],"tags":[]},{"location":"recipes/import-plans/","level":1,"title":"Importing Claude Code Plans","text":"","path":["Recipes","Knowledge and Tasks","Importing Claude Code Plans"],"tags":[]},{"location":"recipes/import-plans/#the-problem","level":2,"title":"The Problem","text":"

            Claude Code plan files (~/.claude/plans/*.md) are ephemeral: they have structured context, approach, and file lists, but they're orphaned after the session ends. The filenames are UUIDs, so you can't tell what's in them without opening each one.

            How do you turn a useful plan into a permanent project spec?

            ","path":["Recipes","Knowledge and Tasks","Importing Claude Code Plans"],"tags":[]},{"location":"recipes/import-plans/#tldr","level":2,"title":"TL;DR","text":"
            You: /ctx-plan-import\nAgent: [lists plans with dates and titles]\n       1. 2026-02-28  Add authentication middleware\n       2. 2026-02-27  Refactor database connection pool\nYou: \"import 1\"\nAgent: [copies to specs/add-authentication-middleware.md]\n

            Plans are copied (not moved) to specs/, slugified by their H1 heading.

            ","path":["Recipes","Knowledge and Tasks","Importing Claude Code Plans"],"tags":[]},{"location":"recipes/import-plans/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Tool Type Purpose /ctx-plan-import Skill List, filter, and import plan files to specs /ctx-task-add Skill Optionally add a task referencing the spec","path":["Recipes","Knowledge and Tasks","Importing Claude Code Plans"],"tags":[]},{"location":"recipes/import-plans/#the-workflow","level":2,"title":"The Workflow","text":"","path":["Recipes","Knowledge and Tasks","Importing Claude Code Plans"],"tags":[]},{"location":"recipes/import-plans/#step-1-list-available-plans","level":3,"title":"Step 1: List Available Plans","text":"

            Invoke the skill and it lists plans with modification dates and titles:

            You: /ctx-plan-import\n\nAgent: Found 3 plan files:\n         1. 2026-02-28  Add authentication middleware\n         2. 2026-02-27  Refactor database connection pool\n         3. 2026-02-25  Import plans skill\n       Which plans would you like to import?\n
            ","path":["Recipes","Knowledge and Tasks","Importing Claude Code Plans"],"tags":[]},{"location":"recipes/import-plans/#step-2-filter-optional","level":3,"title":"Step 2: Filter (Optional)","text":"

            You can narrow the list with arguments:

            Argument Effect --today Only plans modified today --since YYYY-MM-DD Only plans modified on or after the date --all Import everything without prompting (none) Interactive selection
            You: /ctx-plan-import --today\nYou: /ctx-plan-import --since 2026-02-27\nYou: /ctx-plan-import --all\n
            ","path":["Recipes","Knowledge and Tasks","Importing Claude Code Plans"],"tags":[]},{"location":"recipes/import-plans/#step-3-select-and-import","level":3,"title":"Step 3: Select and Import","text":"

            Pick one or more plans by number:

            You: \"import 1 and 3\"\n\nAgent: Imported 2 plan(s):\n         ~/.claude/plans/abc123.md -> specs/add-authentication-middleware.md\n         ~/.claude/plans/ghi789.md -> specs/import-plans-skill.md\n       Want me to add tasks referencing these specs?\n

            The agent reads the H1 heading from each plan and slugifies it for the filename. If a plan has no H1 heading, the original filename (minus extension) is used as the slug.

            ","path":["Recipes","Knowledge and Tasks","Importing Claude Code Plans"],"tags":[]},{"location":"recipes/import-plans/#step-4-add-follow-up-tasks-optional","level":3,"title":"Step 4: Add Follow-Up Tasks (Optional)","text":"

            If you say yes, the agent creates tasks in TASKS.md that reference the imported specs:

            You: \"yes, add tasks\"\n\nAgent: [runs /ctx-task-add for each spec]\n       Added:\n         - [ ] Implement authentication middleware (spec: specs/add-authentication-middleware.md)\n         - [ ] Import plans skill (spec: specs/import-plans-skill.md)\n
            ","path":["Recipes","Knowledge and Tasks","Importing Claude Code Plans"],"tags":[]},{"location":"recipes/import-plans/#conversational-approach","level":2,"title":"Conversational Approach","text":"

            You don't need to remember the exact skill name:

            You say What happens \"import my plans\" /ctx-plan-import (interactive) \"save today's plans as specs\" /ctx-plan-import --today \"import all plans from this week\" /ctx-plan-import --since ... \"turn that plan into a spec\" /ctx-plan-import (filtered)","path":["Recipes","Knowledge and Tasks","Importing Claude Code Plans"],"tags":[]},{"location":"recipes/import-plans/#tips","level":2,"title":"Tips","text":"
            • Plans are copied, not moved: The originals stay in ~/.claude/plans/. Claude Code manages that directory; ctx doesn't delete from it.
            • Conflict handling: If specs/{slug}.md already exists, the agent asks whether to overwrite or pick a different name.
            • Specs are project memory: Once imported, specs are tracked in git and available to future sessions. Reference them from TASKS.md phase headers with Spec: specs/slug.md.
            • Pair with /ctx-implement: After importing a plan as a spec, use /ctx-implement to execute it step-by-step with verification.
            ","path":["Recipes","Knowledge and Tasks","Importing Claude Code Plans"],"tags":[]},{"location":"recipes/import-plans/#see-also","level":2,"title":"See Also","text":"
            • Skills Reference: /ctx-plan-import: full skill description
            • The Complete Session: where plan import fits in the session flow
            • Tracking Work Across Sessions: managing tasks that reference imported specs
            ","path":["Recipes","Knowledge and Tasks","Importing Claude Code Plans"],"tags":[]},{"location":"recipes/knowledge-capture/","level":1,"title":"Persisting Decisions, Learnings, and Conventions","text":"","path":["Recipes","Knowledge and Tasks","Persisting Decisions, Learnings, and Conventions"],"tags":[]},{"location":"recipes/knowledge-capture/#the-problem","level":2,"title":"The Problem","text":"

            You debug a subtle issue, discover the root cause, and move on.

            Three weeks later, a different session hits the same issue. The knowledge existed briefly in one session's memory but was never written down.

            Architectural decisions suffer the same fate: you weigh trade-offs, pick an approach, and six sessions later the AI suggests the alternative you already rejected.

            How do you make sure important context survives across sessions?

            Prefer Skills to Raw Commands

            Use /ctx-decision-add and /ctx-learning-add instead of raw ctx add commands. The agent automatically picks up session ID, branch, and commit hash from its context, so no manual flags are needed.

            ","path":["Recipes","Knowledge and Tasks","Persisting Decisions, Learnings, and Conventions"],"tags":[]},{"location":"recipes/knowledge-capture/#tldr","level":2,"title":"TL;DR","text":"
            /ctx-reflect               # surface items worth persisting\n/ctx-decision-add \"Title\"  # record with context/rationale/consequence\n/ctx-learning-add \"Title\"  # record with context/lesson/application\n

            Or just tell your agent: \"What have we learned this session?\"

            ","path":["Recipes","Knowledge and Tasks","Persisting Decisions, Learnings, and Conventions"],"tags":[]},{"location":"recipes/knowledge-capture/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Tool Type Purpose ctx add decision Command Record an architectural decision ctx add learning Command Record a gotcha, tip, or lesson ctx add convention Command Record a coding pattern or standard ctx reindex Command Rebuild both quick-reference indices ctx decision reindex Command Rebuild the DECISIONS.md index ctx learning reindex Command Rebuild the LEARNINGS.md index /ctx-decision-add Skill AI-guided decision capture with validation /ctx-learning-add Skill AI-guided learning capture with validation /ctx-convention-add Skill AI-guided convention recording with placement /ctx-reflect Skill Surface items worth persisting at breakpoints","path":["Recipes","Knowledge and Tasks","Persisting Decisions, Learnings, and Conventions"],"tags":[]},{"location":"recipes/knowledge-capture/#the-workflow","level":2,"title":"The Workflow","text":"

            Decision, Learning, or Convention?

            • If you chose between alternatives, it is a decision.
            • If you discovered something surprising, it is a learning.
            • If you are codifying a repeated pattern, it is a convention.
            ","path":["Recipes","Knowledge and Tasks","Persisting Decisions, Learnings, and Conventions"],"tags":[]},{"location":"recipes/knowledge-capture/#step-1-understand-what-to-persist","level":3,"title":"Step 1: Understand What to Persist","text":"

            Three context files serve different purposes:

            Decisions (DECISIONS.md) answer \"why is it this way?\" They record trade-offs between alternatives with structured fields: context, rationale, and consequences. Decisions prevent re-debating settled questions.

            Learnings (LEARNINGS.md) answer \"what did we discover the hard way?\" They record gotchas and debugging insights specific to this project with structured fields: context, lesson, and application.

            Learnings prevent repeating past mistakes.

            Conventions (CONVENTIONS.md) answer \"how do we do things here?\" They record patterns and standards. No structured fields required: just a name, a rule, and an example. Conventions keep code consistent across sessions.

            ","path":["Recipes","Knowledge and Tasks","Persisting Decisions, Learnings, and Conventions"],"tags":[]},{"location":"recipes/knowledge-capture/#step-2-record-decisions","level":3,"title":"Step 2: Record Decisions","text":"

            Decisions have three structured fields:

            • context,
            • rationale,
            • and consequences.

            The easiest way to record one is to tell your agent:

            You: \"Save that as a decision: we picked file-based cooldown\n      tokens over env vars.\"\n\nAgent: \"Recorded in DECISIONS.md:\n  - Decision: Use file-based cooldown tokens instead of env vars\n  - Context: Hook subprocesses cannot persist env vars to parent shell\n  - Rationale: File tokens survive across processes. Simpler than\n    IPC. Cleanup is automatic via TTL.\n  - Consequence: Tombstone files accumulate in /tmp. Cannot share\n    state across machines.\"\n

            The agent was present for the trade-off discussion. It fills in the structured fields from what it observed, which is hard to reconstruct later as a terminal one-liner.

            When multiple options were considered, the agent includes rejected alternatives in the rationale automatically.

            The /ctx-decision-add skill also guides you through the fields interactively. For quick decisions, it supports a Y-statement:

            \"In the context of [situation], facing [constraint], we decided for [choice] and against [alternatives], to achieve [benefit], accepting that [trade-off].\"

            CLI Command for Scripting and Automation

            When no agent is in the loop (CI pipelines, shell scripts, hooks), use the CLI directly with structured flags:

            ctx add decision \"Use file-based cooldown tokens instead of env vars\" \\\n  --context \"Hook subprocesses cannot persist env vars to parent shell\" \\\n  --rationale \"File tokens survive across processes. Simpler than IPC. Cleanup is automatic via TTL.\" \\\n  --consequence \"Tombstone files accumulate in /tmp. Cannot share state across machines.\" \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n
            ","path":["Recipes","Knowledge and Tasks","Persisting Decisions, Learnings, and Conventions"],"tags":[]},{"location":"recipes/knowledge-capture/#step-3-record-learnings","level":3,"title":"Step 3: Record Learnings","text":"

            Learnings have three structured fields: context, lesson, and application.

            Again, your agent is the best tool for this. It was there when you discovered the gotcha and can express it more completely than a backslash-escaped terminal command:

            You: \"That subprocess thing was painful. Save it as a learning.\"\n\nAgent: \"Added to LEARNINGS.md:\n  - Learning: Claude Code hooks run in a subprocess\n  - Context: Set env var in PreToolUse hook, but it was not visible\n    in the main session\n  - Lesson: Hook scripts execute in a child process. Env changes do\n    not propagate to parent.\n  - Application: Use tombstone files for hook-to-session\n    communication. Never rely on hook env vars.\"\n

            You said one sentence. The agent produced a structured entry with all three fields filled from the session context.

            That is the point: the agent has the full conversation and can be far more expressive than what you would type at a command prompt.

            The /ctx-learning-add skill applies three quality filters:

            1. Could someone Google this in 5 minutes?
            2. Is it specific to this codebase?
            3. Did it take real effort to discover?

            All three must pass.

            Learnings capture principles and heuristics, not code snippets.

            CLI Command for Scripting and Automation

            When no agent is in the loop:

            ctx add learning \"Claude Code hooks run in a subprocess\" \\\n  --context \"Set env var in PreToolUse hook, but it was not visible in the main session\" \\\n  --lesson \"Hook scripts execute in a child process. Env changes do not propagate to parent.\" \\\n  --application \"Use tombstone files for hook-to-session communication. Never rely on hook env vars.\" \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n
            ","path":["Recipes","Knowledge and Tasks","Persisting Decisions, Learnings, and Conventions"],"tags":[]},{"location":"recipes/knowledge-capture/#step-4-record-conventions","level":3,"title":"Step 4: Record Conventions","text":"

            Conventions are simpler: just a name, a rule, and optionally a section.

            These are short enough that either approach works:

            You: \"We've been using kebab-case for every CLI flag. Codify that.\"\n\nAgent: \"Added to CONVENTIONS.md under Naming:\n  'Use kebab-case for all CLI flag names.'\"\n

            Or from the terminal:

            ctx add convention \"Use kebab-case for all CLI flag names\" --section \"Naming\"\n

            Conventions work best for rules that come up repeatedly. Codify a pattern the third time you see it, not the first.

            ","path":["Recipes","Knowledge and Tasks","Persisting Decisions, Learnings, and Conventions"],"tags":[]},{"location":"recipes/knowledge-capture/#step-5-reindex-after-manual-edits","level":3,"title":"Step 5: Reindex After Manual Edits","text":"

            DECISIONS.md and LEARNINGS.md maintain a quick-reference index at the top: a compact table of date and title for each entry. The index updates automatically via ctx add, but falls out of sync after hand edits.

            ctx reindex\n

            This single command regenerates both indices. You can also reindex individually with ctx decision reindex or ctx learning reindex.

            Run reindex after any manual edit. The index lets AI tools scan all entries without reading the full file, which matters when token budgets are tight.

            ","path":["Recipes","Knowledge and Tasks","Persisting Decisions, Learnings, and Conventions"],"tags":[]},{"location":"recipes/knowledge-capture/#step-6-use-ctx-reflect-to-surface-what-to-capture","level":3,"title":"Step 6: Use /ctx-reflect to Surface What to Capture","text":"

            Keep It Conversational

            /ctx-reflect is not the only way to trigger reflection.

            Agents trained on the ctx playbook naturally surface persist-worthy items at breakpoints, even without invoking the skill explicitly.

            A conversational prompt like \"anything worth saving?\" or \"let's wrap up\" can trigger the same review.

            The skill provides a structured checklist, but the behavior is available through natural conversation.

            At natural breakpoints (after completing a feature, fixing a bug, or before ending a session) use /ctx-reflect to identify items worth persisting.

            /ctx-reflect\n

            The skill walks through learnings, decisions, tasks, and session notes, skipping categories with nothing to report. The output includes specific commands for each suggested persist:

            This session implemented file-based cooldown for `ctx agent` and\ndiscovered that hook subprocesses cannot set env vars in the parent.\n\nI'd suggest persisting:\n- **Learning**: Hook subprocesses cannot propagate env vars\n  `ctx add learning \"...\" --context \"...\" --lesson \"...\" --application \"...\" --session-id abc12345 --branch main --commit 68fbc00a`\n- **Decision**: File-based cooldown tokens over env vars\n  `ctx add decision \"...\" --context \"...\" --rationale \"...\" --consequence \"...\" --session-id abc12345 --branch main --commit 68fbc00a`\n\nWant me to persist any of these?\n

            The skill always asks before persisting.

            ","path":["Recipes","Knowledge and Tasks","Persisting Decisions, Learnings, and Conventions"],"tags":[]},{"location":"recipes/knowledge-capture/#step-7-the-conversational-approach","level":3,"title":"Step 7: The Conversational Approach","text":"

            The commands shown in Steps 2 to 4 are precise, but you rarely need to type them yourself. When working with an agent that has loaded the ctx playbook, natural language triggers the same operations.

            You ask, the agent acts:

            You say What the agent does \"What have we learned this session?\" Reviews recent work, identifies learnings, offers to persist \"Save that as a decision\" Captures the trade-off you just discussed with structured fields \"Any gotchas worth remembering?\" Scans recent work for non-obvious lessons \"Record that convention\" Adds the pattern you just agreed on to CONVENTIONS.md \"Let's wrap up\" Runs through the reflect checklist conversationally

            Example exchange:

            You: \"What did we learn from that debugging session?\"\n\nAgent: \"We discovered three things worth persisting:\n  1. Hook subprocesses can't propagate env vars to the parent. We\n     spent 20 minutes on this before switching to tombstone files.\n  2. Tombstone files need a TTL or they accumulate in /tmp.\n  3. The cooldown logic must check file mtime, not just existence.\n  Want me to save these as learnings?\"\n\nYou: \"Yes, all of them.\"\n\nAgent: \"Done. Added all three to LEARNINGS.md with structured fields.\"\n

            The agent can also be proactive without being asked. The playbook encourages agents to persist context at natural milestones.

            You may see:

            • After fixing a tricky bug: \"I noticed this was a non-obvious issue. Want me to save it as a learning?\"
            • After choosing between approaches: \"We just made a trade-off between file-based tokens and IPC. Should I record this as a decision?\"
            • At session milestones: the agent checks what is worth persisting and offers a brief summary.

            When agents run unattended (no human in the loop), they may persist directly:

            • \"I've added the subprocess env var gotcha to LEARNINGS.md so we don't hit it again.\"
            • \"Recorded the decision to use file-based tokens over env vars in DECISIONS.md.\"
            • \"Marked the cooldown task done and added a follow-up for TTL cleanup.\"

            This is behavior by design.

            The playbook's self-check prompt, \"If this session ended right now, would the next session know what happened?\" drives agents to persist early and often rather than waiting for explicit instructions.

            ","path":["Recipes","Knowledge and Tasks","Persisting Decisions, Learnings, and Conventions"],"tags":[]},{"location":"recipes/knowledge-capture/#putting-it-all-together","level":2,"title":"Putting It All Together","text":"","path":["Recipes","Knowledge and Tasks","Persisting Decisions, Learnings, and Conventions"],"tags":[]},{"location":"recipes/knowledge-capture/#command-line-approach-scripting-and-automation","level":3,"title":"Command-Line Approach (Scripting and Automation)","text":"
            # Decision: record the trade-off\nctx add decision \"Use PostgreSQL over SQLite\" \\\n  --context \"Need concurrent multi-user access\" \\\n  --rationale \"SQLite locks on writes; Postgres handles concurrency\" \\\n  --consequence \"Requires a database server; team needs Postgres training\" \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n\n# Learning: record the gotcha\nctx add learning \"SQL migrations must be idempotent\" \\\n  --context \"Deploy failed when migration ran twice after rollback\" \\\n  --lesson \"CREATE TABLE without IF NOT EXISTS fails on retry\" \\\n  --application \"Always use IF NOT EXISTS guards in migrations\" \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n\n# Convention: record the pattern\nctx add convention \"API handlers return structured errors\" --section \"API\"\n\n# Reindex after manual edits\nctx reindex\n\n# Reflect at breakpoints (from AI assistant)\n# /ctx-reflect\n
            ","path":["Recipes","Knowledge and Tasks","Persisting Decisions, Learnings, and Conventions"],"tags":[]},{"location":"recipes/knowledge-capture/#conversational-approach-interactive-sessions","level":3,"title":"Conversational Approach (Interactive Sessions)","text":"
            You: \"We just picked Postgres over SQLite for the concurrency\n      reasons. Save that?\"\n\nAgent: \"Recorded. Added to DECISIONS.md:\n  - Decision: Use PostgreSQL over SQLite\n  - Context: Need concurrent multi-user access\n  - Rationale: SQLite locks on writes; Postgres handles concurrency\n  - Consequence: Requires a database server; team needs Postgres\n    training\"\n\nYou: \"Also that migration thing. The deploy blew up when the\n      migration ran twice.\"\n\nAgent: \"Got it. Added to LEARNINGS.md:\n  - Learning: SQL migrations must be idempotent\n  - Lesson: CREATE TABLE without IF NOT EXISTS fails on retry\n  - Application: Always use IF NOT EXISTS guards in migrations\"\n\nYou: \"Good. Anything else worth saving before we wrap up?\"\n\nAgent: \"We've been using structured error returns in every API\n  handler this session. Want me to codify that as a convention?\"\n\nYou: \"Do it.\"\n\nAgent: \"Done. Added 'API handlers return structured errors' to\n  CONVENTIONS.md under the API section.\"\n

            Both approaches produce the same structured entries in the same context files.

            • The conversational approach is the natural fit for interactive sessions;
            • the CLI commands are better suited for scripts, hooks, and automation pipelines.
            ","path":["Recipes","Knowledge and Tasks","Persisting Decisions, Learnings, and Conventions"],"tags":[]},{"location":"recipes/knowledge-capture/#tips","level":2,"title":"Tips","text":"
            • Record decisions at the moment of choice. The alternatives you considered and the reasons you rejected them fade quickly. Capture trade-offs while they are fresh.
            • Learnings should fail the Google test. If someone could find it in a 5-minute Google search, it does not belong in LEARNINGS.md.
            • Conventions earn their place through repetition. Add a convention the third time you see a pattern, not the first.
            • Use /ctx-reflect at natural breakpoints. The checklist catches items you might otherwise lose.
            • Keep the entries self-contained. Each entry should make sense on its own. A future session may load only one due to token budget constraints.
            • Reindex after every hand edit. It takes less than a second. A stale index causes AI tools to miss entries.
            • Prefer the structured fields. The verbosity forces clarity. A decision without a rationale is just a fact. A learning without an application is just a story.
            • Talk to your agent, do not type commands. In interactive sessions, the conversational approach is the recommended way to capture knowledge. Say \"save that as a learning\" or \"any decisions worth recording?\" and let the agent handle the structured fields. Reserve the CLI commands for scripting, automation, and CI/CD pipelines where there is no agent in the loop.
            • Trust the agent's proactive instincts. Agents trained on the ctx playbook will offer to persist context at milestones. A brief \"want me to save this?\" is cheaper than re-discovering the same lesson three sessions later.
            • Relax provenance per-project if --session-id, --branch, or --commit are impractical (e.g., manual notes outside an AI session). Add to .ctxrc:

              provenance_required:\n  session_id: false   # allow entries without --session-id\n  branch: true        # still require --branch\n  commit: true        # still require --commit\n

              Default is all three required. Only human-edited config can relax these requirements: agents cannot bypass them, and that's by design.

            ","path":["Recipes","Knowledge and Tasks","Persisting Decisions, Learnings, and Conventions"],"tags":[]},{"location":"recipes/knowledge-capture/#next-up","level":2,"title":"Next Up","text":"

            Tracking Work Across Sessions →: Add, prioritize, complete, and archive tasks across sessions.

            ","path":["Recipes","Knowledge and Tasks","Persisting Decisions, Learnings, and Conventions"],"tags":[]},{"location":"recipes/knowledge-capture/#see-also","level":2,"title":"See Also","text":"
            • Tracking Work Across Sessions: managing the tasks that decisions and learnings support
            • The Complete Session: full session lifecycle including reflection and context persistence
            • Detecting and Fixing Drift: keeping knowledge files accurate as the codebase evolves
            • CLI Reference: full documentation for ctx add, ctx decision, ctx learning
            • Context Files: format and conventions for DECISIONS.md, LEARNINGS.md, and CONVENTIONS.md
            ","path":["Recipes","Knowledge and Tasks","Persisting Decisions, Learnings, and Conventions"],"tags":[]},{"location":"recipes/memory-bridge/","level":1,"title":"Bridging Claude Code Auto Memory","text":"","path":["Recipes","Knowledge and Tasks","Bridging Claude Code Auto Memory"],"tags":[]},{"location":"recipes/memory-bridge/#the-problem","level":2,"title":"The Problem","text":"

            Claude Code maintains per-project auto memory at ~/.claude/projects/<slug>/memory/MEMORY.md. This file is:

            • Outside the repo - not version-controlled, not portable
            • Machine-specific - tied to one ~/.claude/ directory
            • Invisible to ctx - context loading and hooks don't read it

            Meanwhile, ctx maintains structured context files (DECISIONS.md, LEARNINGS.md, CONVENTIONS.md) that are git-tracked, portable, and token-budgeted - but Claude Code doesn't automatically write to them.

            The two systems hold complementary knowledge with no bridge between them.

            ","path":["Recipes","Knowledge and Tasks","Bridging Claude Code Auto Memory"],"tags":[]},{"location":"recipes/memory-bridge/#tldr","level":2,"title":"TL;DR","text":"
            ctx memory sync          # Mirror MEMORY.md into .context/memory/mirror.md\nctx memory status        # Check for drift\nctx memory diff          # See what changed since last sync\n

            The check-memory-drift hook nudges automatically when MEMORY.md changes - you don't need to remember to sync manually.

            ","path":["Recipes","Knowledge and Tasks","Bridging Claude Code Auto Memory"],"tags":[]},{"location":"recipes/memory-bridge/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Tool Type Purpose ctx memory sync CLI command Copy MEMORY.md to mirror, archive previous ctx memory status CLI command Show drift, timestamps, line counts ctx memory diff CLI command Show changes since last sync ctx memory import CLI command Classify and promote entries to .context/ files ctx memory publish CLI command Push curated .context/ content to MEMORY.md ctx memory unpublish CLI command Remove published block from MEMORY.md ctx system check-memory-drift Hook Nudge when MEMORY.md has changed (once/session)","path":["Recipes","Knowledge and Tasks","Bridging Claude Code Auto Memory"],"tags":[]},{"location":"recipes/memory-bridge/#how-it-works","level":2,"title":"How It Works","text":"","path":["Recipes","Knowledge and Tasks","Bridging Claude Code Auto Memory"],"tags":[]},{"location":"recipes/memory-bridge/#discovery","level":3,"title":"Discovery","text":"

            Claude Code encodes project paths as directory names under ~/.claude/projects/. The encoding replaces / with - and prefixes with -:

            /home/jose/WORKSPACE/ctx  →  ~/.claude/projects/-home-jose-WORKSPACE-ctx/\n

            ctx memory uses this encoding to locate MEMORY.md automatically from your project root - no configuration needed.

            ","path":["Recipes","Knowledge and Tasks","Bridging Claude Code Auto Memory"],"tags":[]},{"location":"recipes/memory-bridge/#mirroring","level":3,"title":"Mirroring","text":"

            When you run ctx memory sync:

            1. The previous mirror is archived to .context/memory/archive/mirror-<timestamp>.md
            2. MEMORY.md is copied to .context/memory/mirror.md
            3. Sync state is updated in .context/state/memory-import.json

            The mirror is git-tracked, so it travels with the project. Archives provide a fallback for projects that don't use git.

            ","path":["Recipes","Knowledge and Tasks","Bridging Claude Code Auto Memory"],"tags":[]},{"location":"recipes/memory-bridge/#drift-detection","level":3,"title":"Drift Detection","text":"

            The check-memory-drift hook compares MEMORY.md's modification time against the mirror. When drift is detected, the agent sees:

            ┌─ Memory Drift ────────────────────────────────────────────────\n│ MEMORY.md has changed since last sync.\n│ Run: ctx memory sync\n│ Context: .context\n└────────────────────────────────────────────────────────────────\n

            The nudge fires once per session to avoid noise.

            ","path":["Recipes","Knowledge and Tasks","Bridging Claude Code Auto Memory"],"tags":[]},{"location":"recipes/memory-bridge/#typical-workflow","level":2,"title":"Typical Workflow","text":"","path":["Recipes","Knowledge and Tasks","Bridging Claude Code Auto Memory"],"tags":[]},{"location":"recipes/memory-bridge/#at-session-start","level":3,"title":"At Session Start","text":"

            If the hook fires a drift nudge, sync before diving into work:

            ctx memory diff     # Review what changed\nctx memory sync     # Mirror the changes\n
            ","path":["Recipes","Knowledge and Tasks","Bridging Claude Code Auto Memory"],"tags":[]},{"location":"recipes/memory-bridge/#periodic-check","level":3,"title":"Periodic Check","text":"
            ctx memory status\n# Memory Bridge Status\n#   Source:      ~/.claude/projects/.../memory/MEMORY.md\n#   Mirror:      .context/memory/mirror.md\n#   Last sync:   2026-03-05 14:30 (2 hours ago)\n#\n#   MEMORY.md:  47 lines\n#   Mirror:     32 lines\n#   Drift:      detected (source is newer)\n#   Archives:   3 snapshots in .context/memory/archive/\n
            ","path":["Recipes","Knowledge and Tasks","Bridging Claude Code Auto Memory"],"tags":[]},{"location":"recipes/memory-bridge/#dry-run","level":3,"title":"Dry Run","text":"

            Preview what sync would do without writing:

            ctx memory sync --dry-run\n
            ","path":["Recipes","Knowledge and Tasks","Bridging Claude Code Auto Memory"],"tags":[]},{"location":"recipes/memory-bridge/#storage-layout","level":2,"title":"Storage Layout","text":"
            .context/\n├── memory/\n│   ├── mirror.md                          # Raw copy of MEMORY.md (often git-tracked)\n│   └── archive/\n│       ├── mirror-2026-03-05-143022.md    # Timestamped pre-sync snapshots\n│       └── mirror-2026-03-04-220015.md\n├── state/\n│   └── memory-import.json                 # Sync tracking state\n
            ","path":["Recipes","Knowledge and Tasks","Bridging Claude Code Auto Memory"],"tags":[]},{"location":"recipes/memory-bridge/#edge-cases","level":2,"title":"Edge Cases","text":"Scenario Behavior Auto memory not active sync exits 1 with message. status reports \"not active\". Hook skips silently. First sync (no mirror) Creates mirror without archiving. MEMORY.md is empty Syncs to empty mirror (valid). Not initialized Init guard rejects (same as all ctx commands).","path":["Recipes","Knowledge and Tasks","Bridging Claude Code Auto Memory"],"tags":[]},{"location":"recipes/memory-bridge/#importing-entries","level":2,"title":"Importing Entries","text":"

            Once you've synced, you can classify and promote entries into structured .context/ files:

            ctx memory import --dry-run    # Preview classification\nctx memory import              # Actually promote entries\n

            Each entry is classified by keyword heuristics:

            Keywords Target always use, prefer, never use, standard CONVENTIONS.md decided, chose, trade-off, approach DECISIONS.md gotcha, learned, watch out, bug, caveat LEARNINGS.md todo, need to, follow up TASKS.md Everything else Skipped

            Entries that don't match any pattern are skipped - they stay in the mirror for manual review. Deduplication (hash-based) prevents re-importing the same entry on subsequent runs.

            Review Before Importing

            Use --dry-run first. The heuristic classifier is deliberately simple - it may misclassify ambiguous entries. Review the plan, then import.

            ","path":["Recipes","Knowledge and Tasks","Bridging Claude Code Auto Memory"],"tags":[]},{"location":"recipes/memory-bridge/#full-workflow","level":3,"title":"Full Workflow","text":"
            ctx memory sync                # 1. Mirror MEMORY.md\nctx memory import --dry-run    # 2. Preview what would be imported\nctx memory import              # 3. Promote entries to .context/ files\n
            ","path":["Recipes","Knowledge and Tasks","Bridging Claude Code Auto Memory"],"tags":[]},{"location":"recipes/memory-bridge/#publishing-context-to-memorymd","level":2,"title":"Publishing Context to MEMORY.md","text":"

            Push curated .context/ content back into MEMORY.md so Claude Code sees structured project context on session start - without needing hooks.

            ctx memory publish --dry-run    # Preview what would be published\nctx memory publish              # Write to MEMORY.md\nctx memory publish --budget 40  # Tighter line budget\n

            Published content is wrapped in markers:

            <!-- ctx:published -->\n# Project Context (managed by ctx)\n\n## Pending Tasks\n- [ ] Implement feature X\n...\n<!-- ctx:end -->\n

            Rules:

            • ctx owns everything between the markers
            • Claude owns everything outside the markers
            • ctx memory import reads only outside the markers
            • ctx memory publish replaces only inside the markers

            To remove the published block entirely:

            ctx memory unpublish\n

            Publish at Wrap-Up, Not on Commit

            The best time to publish is during session wrap-up, after persisting decisions and learnings. Never auto-publish - give yourself a chance to review what's going into MEMORY.md.

            ","path":["Recipes","Knowledge and Tasks","Bridging Claude Code Auto Memory"],"tags":[]},{"location":"recipes/memory-bridge/#full-bidirectional-workflow","level":3,"title":"Full Bidirectional Workflow","text":"
            ctx memory sync                 # 1. Mirror MEMORY.md\nctx memory import --dry-run     # 2. Check what Claude wrote\nctx memory import               # 3. Promote entries to .context/\nctx memory publish --dry-run    # 4. Check what would be published\nctx memory publish              # 5. Push context to MEMORY.md\n
            ","path":["Recipes","Knowledge and Tasks","Bridging Claude Code Auto Memory"],"tags":[]},{"location":"recipes/multi-tool-setup/","level":1,"title":"Setup Across AI Tools","text":"","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multi-tool-setup/#the-problem","level":2,"title":"The Problem","text":"

            You have installed ctx and want to set it up with your AI coding assistant so that context persists across sessions. Different tools have different integration depths. For example:

            • Claude Code supports native hooks that load and save context automatically.
            • Cursor injects context via its system prompt.
            • Aider reads context files through its --read flag.

            This recipe walks through the complete setup for each tool, from initialization through verification, so you end up with a working memory layer regardless of which AI tool you use.

            ","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multi-tool-setup/#tldr","level":2,"title":"TL;DR","text":"
            cd your-project\nctx init                      # creates .context/\nsource <(ctx completion zsh)  # shell completion (or bash/fish)\n\n# ## Claude Code (automatic after plugin install) ##\nclaude /plugin marketplace add ActiveMemory/ctx\nclaude /plugin install ctx@activememory-ctx\n\n# ## Cursor / Aider / Copilot / Windsurf ##\nctx setup cursor # or: aider, copilot, windsurf\n\n# ## Companion tools (highly recommended) ##\nnpx gitnexus analyze          # code knowledge graph\n# Add Gemini Search MCP server for grounded web search\n

            Create a .ctxrc in your project root to configure token budgets, context directory, drift thresholds, and more.

            Then start your AI tool and ask: \"Do you remember?\"

            ","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multi-tool-setup/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Command/Skill Role in this workflow ctx init Create .context/ directory, templates, and permissions ctx setup Generate integration configuration for a specific AI tool ctx agent Print a token-budgeted context packet for AI consumption ctx load Output assembled context in read order (for manual pasting) ctx watch Auto-apply context updates from AI output (non-native tools) ctx completion Generate shell autocompletion for bash, zsh, or fish ctx journal import Import sessions to editable journal Markdown","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multi-tool-setup/#the-workflow","level":2,"title":"The Workflow","text":"","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multi-tool-setup/#step-1-initialize-ctx","level":3,"title":"Step 1: Initialize ctx","text":"

            Run ctx init in your project root. This creates the .context/ directory with all template files and seeds ctx permissions in settings.local.json.

            cd your-project\nctx init\n

            This produces the following structure:

            .context/\n  CONSTITUTION.md     # Hard rules the AI must never violate\n  TASKS.md            # Current and planned work\n  CONVENTIONS.md      # Code patterns and standards\n  ARCHITECTURE.md     # System overview\n  DECISIONS.md        # Architectural decisions with rationale\n  LEARNINGS.md        # Lessons learned, gotchas, tips\n  GLOSSARY.md         # Domain terms and abbreviations\n  AGENT_PLAYBOOK.md   # How AI tools should use this system\n

            Using a Different .context Directory

            The .context/ directory doesn't have to live inside your project. You can point ctx to an external folder via .ctxrc, the CTX_DIR environment variable, or the --context-dir CLI flag.

            This is useful for monorepos or shared context across repositories.

            See Configuration for details and External Context for a full recipe.

            For Claude Code, install the ctx plugin to get hooks and skills:

            claude /plugin marketplace add ActiveMemory/ctx\nclaude /plugin install ctx@activememory-ctx\n

            If you only need the core files (useful for lightweight setups), use the --minimal flag:

            ctx init --minimal\n

            This creates only TASKS.md, DECISIONS.md, and CONSTITUTION.md.

            ","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multi-tool-setup/#step-2-generate-tool-specific-hooks","level":3,"title":"Step 2: Generate Tool-Specific Hooks","text":"

            If you are using a tool other than Claude Code (which is configured automatically by ctx init), generate its integration configuration:

            # For Cursor\nctx setup cursor\n\n# For Aider\nctx setup aider\n\n# For GitHub Copilot\nctx setup copilot\n\n# For Windsurf\nctx setup windsurf\n

            Each command prints the configuration you need. How you apply it depends on the tool.

            ","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multi-tool-setup/#claude-code","level":4,"title":"Claude Code","text":"

            No action needed. Just install ctx from the Marketplace as ActiveMemory/ctx.

            Claude Code Is a First-Class Citizen

            With the ctx plugin installed, Claude Code gets hooks and skills automatically. The PreToolUse hook runs ctx agent --budget 4000 on every tool call (with a 10-minute cooldown so it only fires once per window).

            ","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multi-tool-setup/#cursor","level":4,"title":"Cursor","text":"

            Add the system prompt snippet to .cursor/settings.json:

            {\n  \"ai.systemPrompt\": \"Read .context/TASKS.md and .context/CONVENTIONS.md before responding. Follow rules in .context/CONSTITUTION.md.\"\n}\n

            Context files appear in Cursor's file tree. You can also paste a context packet directly into chat:

            ctx agent --budget 4000 | xclip    # Linux\nctx agent --budget 4000 | pbcopy   # macOS\n
            ","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multi-tool-setup/#aider","level":4,"title":"Aider","text":"

            Create .aider.conf.yml so context files are loaded on every session:

            read:\n  - .context/CONSTITUTION.md\n  - .context/TASKS.md\n  - .context/CONVENTIONS.md\n  - .context/DECISIONS.md\n

            Then start Aider normally:

            aider\n

            Or specify files on the command line:

            aider --read .context/TASKS.md --read .context/CONVENTIONS.md\n
            ","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multi-tool-setup/#step-3-set-up-shell-completion","level":3,"title":"Step 3: Set Up Shell Completion","text":"

            Shell completion lets you tab-complete ctx subcommands and flags, which is especially useful while learning the CLI.

            # Bash (add to ~/.bashrc)\nsource <(ctx completion bash)\n\n# Zsh (add to ~/.zshrc)\nsource <(ctx completion zsh)\n\n# Fish\nctx completion fish > ~/.config/fish/completions/ctx.fish\n

            After sourcing, typing ctx a<TAB> completes to ctx agent, and ctx journal <TAB> shows list, show, and export.

            ","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multi-tool-setup/#step-4-verify-the-setup-works","level":3,"title":"Step 4: Verify the Setup Works","text":"

            Start a fresh session in your AI tool and ask:

            \"Do you remember?\"

            A correctly configured tool responds with specific context: current tasks from TASKS.md, recent decisions, and previous session topics. It should not say \"I don't have memory\" or \"Let me search for files.\"

            This question checks the passive side of memory. A properly set-up agent is also proactive: it treats context maintenance as part of its job:

            • After a debugging session, it offers to save a learning.
            • After a trade-off discussion, it asks whether to record the decision.
            • After completing a task, it suggests follow-up items.

            The \"do you remember?\" check verifies both halves: recall and responsibility.

            For example, after resolving a tricky bug, a proactive agent might say:

            That Redis timeout issue was subtle. Want me to save this as a *learning*\nso we don't hit it again?\n

            If you see behavior like this, the setup is working end to end.

            In Claude Code, you can also invoke the /ctx-status skill:

            /ctx-status\n

            This prints a summary of all context files, token counts, and recent activity, confirming that hooks are loading context.

            If context is not loading, check the basics:

            Symptom Fix ctx: command not found Ensure ctx is in your PATH: which ctx Hook errors Verify plugin is installed: claude /plugin list Context not refreshing Cooldown may be active; wait 10 minutes or set --cooldown 0","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multi-tool-setup/#step-5-enable-watch-mode-for-non-native-tools","level":3,"title":"Step 5: Enable Watch Mode for Non-Native Tools","text":"

            Tools like Aider, Copilot, and Windsurf do not support native hooks for saving context automatically. For these, run ctx watch alongside your AI tool.

            Pipe the AI tool's output through ctx watch:

            # Terminal 1: Run Aider with output logged\naider 2>&1 | tee /tmp/aider.log\n\n# Terminal 2: Watch the log for context updates\nctx watch --log /tmp/aider.log\n

            Or for any generic tool:

            your-ai-tool 2>&1 | tee /tmp/ai.log &\nctx watch --log /tmp/ai.log\n

            When the AI emits structured update commands, ctx watch parses and applies them automatically:

            <context-update type=\"learning\"\n  context=\"Debugging rate limiter\"\n  lesson=\"Redis MULTI/EXEC does not roll back on error\"\n  application=\"Wrap rate-limit checks in Lua scripts instead\"\n>Redis Transaction Behavior</context-update>\n

            To preview changes without modifying files:

            ctx watch --dry-run --log /tmp/ai.log\n
            ","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multi-tool-setup/#step-6-import-session-transcripts-optional","level":3,"title":"Step 6: Import Session Transcripts (Optional)","text":"

            If you want to browse past session transcripts, import them to the journal:

            ctx journal import --all\n

            This converts raw session data into editable Markdown files in .context/journal/. You can then enrich them with metadata using /ctx-journal-enrich-all inside your AI assistant.

            ","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multi-tool-setup/#putting-it-all-together","level":2,"title":"Putting It All Together","text":"

            Here is the condensed setup for all three tools:

            # ## Common (run once per project) ##\ncd your-project\nctx init\nsource <(ctx completion zsh)       # or bash/fish\n\n# ## Claude Code (automatic, just verify) ##\n# Start Claude Code, then ask: \"Do you remember?\"\n\n# ## Cursor ##\nctx setup cursor\n# Add the system prompt to .cursor/settings.json\n# Paste context: ctx agent --budget 4000 | pbcopy\n\n# ## Aider ##\nctx setup aider\n# Create .aider.conf.yml with read: paths\n# Run watch mode alongside: ctx watch --log /tmp/aider.log\n\n# ## Verify any Tool ##\n# Ask your AI: \"Do you remember?\"\n# Expect: specific tasks, decisions, recent context\n
            ","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multi-tool-setup/#tips","level":2,"title":"Tips","text":"
            • Start with ctx init (not --minimal) for your first project. The full template set gives the agent more to work with, and you can always delete files later.
            • For Claude Code, the token budget is configured in the plugin's hooks.json. To customize, adjust the --budget flag in the ctx agent hook command.
            • The --session $PPID flag isolates cooldowns per Claude Code process, so parallel sessions do not suppress each other.
            • Commit your .context/ directory to version control. Several ctx features (journals, changelogs, blog generation) rely on git history.
            • For Cursor and Copilot, keep CONVENTIONS.md visible. These tools treat open files as higher-priority context.
            • Run ctx drift periodically to catch stale references before they confuse the agent.
            • The agent playbook instructs the agent to persist context at natural milestones (completed tasks, decisions, gotchas). In practice, this works best when you reinforce the habit: a quick \"anything worth saving?\" after a debugging session goes a long way.
            ","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multi-tool-setup/#companion-tools-highly-recommended","level":2,"title":"Companion Tools (Highly Recommended)","text":"

            ctx skills can leverage external MCP servers for web search and code intelligence. ctx works without them, but they significantly improve agent behavior across sessions. The investment is small and the benefits compound. Skills like /ctx-code-review, /ctx-explain, and /ctx-refactor all become noticeably better with these tools connected.

            ","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multi-tool-setup/#gemini-search","level":3,"title":"Gemini Search","text":"

            Provides grounded web search with citations. Used by skills and the agent playbook as the preferred search backend (faster and more accurate than built-in web search).

            Setup: Add the Gemini Search MCP server to your Claude Code settings. See the Gemini Search MCP documentation for installation.

            Verification:

            # The agent checks this automatically during /ctx-remember\n# Manual test: ask the agent to search for something\n

            ","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multi-tool-setup/#gitnexus","level":3,"title":"GitNexus","text":"

            Provides a code knowledge graph with symbol resolution, blast radius analysis, and domain clustering. Used by skills like /ctx-refactor (impact analysis) and /ctx-code-review (dependency awareness).

            Setup: Add the GitNexus MCP server to your Claude Code settings, then index your project:

            npx gitnexus analyze\n

            Verification:

            # The agent checks this automatically during /ctx-remember\n# If the index is stale, it will suggest rehydrating\n

            ","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multi-tool-setup/#suppressing-the-check","level":3,"title":"Suppressing the Check","text":"

            If you don't use companion tools and want to skip the availability check at session start, add to .ctxrc:

            companion_check: false\n
            ","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multi-tool-setup/#future-direction","level":3,"title":"Future Direction","text":"

            The companion tool integration is evolving toward a pluggable model: bring your own search engine, bring your own code intelligence. The current integration is MCP-based and limited to Gemini Search and GitNexus. If you use a different search or code intelligence tool, skills will degrade gracefully to built-in capabilities.

            ","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multi-tool-setup/#next-up","level":2,"title":"Next Up","text":"

            Keeping Context in a Separate Repo →: Store context files outside the project tree for multi-repo or open source setups.

            ","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multi-tool-setup/#see-also","level":2,"title":"See Also","text":"
            • The Complete Session: full session lifecycle recipe
            • Multilingual Session Parsing: configure session header prefixes for other languages
            • CLI Reference: all commands and flags
            • Integrations: detailed per-tool integration docs
            ","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multilingual-sessions/","level":1,"title":"Multilingual Session Parsing","text":"","path":["Recipes","Getting Started","Multilingual Session Parsing"],"tags":[]},{"location":"recipes/multilingual-sessions/#the-problem","level":2,"title":"The Problem","text":"

            Your team works across languages. Session files written by AI tools might use headers like # Oturum: 2026-01-15 - API Düzeltme (Turkish) or # セッション: 2026-01-15 - テスト (Japanese) instead of # Session: 2026-01-15 - Fix API.

            By default, ctx only recognizes Session: as a session header prefix. Files with other prefixes are silently skipped during journal import and journal generation: they look like regular Markdown, not sessions.

            ","path":["Recipes","Getting Started","Multilingual Session Parsing"],"tags":[]},{"location":"recipes/multilingual-sessions/#tldr","level":2,"title":"TL;DR","text":"

            Add recognized prefixes to .ctxrc:

            session_prefixes:\n  - \"Session:\"      # English (include to keep default)\n  - \"Oturum:\"       # Turkish\n  - \"セッション:\"     # Japanese\n

            Restart your session. All configured prefixes are now recognized.

            ","path":["Recipes","Getting Started","Multilingual Session Parsing"],"tags":[]},{"location":"recipes/multilingual-sessions/#how-it-works","level":2,"title":"How It Works","text":"

            The Markdown session parser detects session files by looking for an H1 header that starts with a known prefix followed by a date:

            # Session: 2026-01-15 - Fix API Rate Limiting\n# Oturum: 2026-01-15 - API Düzeltme\n# セッション: 2026-01-15 - テスト\n

            The list of recognized prefixes comes from session_prefixes in .ctxrc. When the key is absent or empty, ctx falls back to the built-in default: [\"Session:\"].

            Date-only headers (# 2026-01-15 - Morning Work) are always recognized regardless of prefix configuration.

            ","path":["Recipes","Getting Started","Multilingual Session Parsing"],"tags":[]},{"location":"recipes/multilingual-sessions/#configuration","level":2,"title":"Configuration","text":"","path":["Recipes","Getting Started","Multilingual Session Parsing"],"tags":[]},{"location":"recipes/multilingual-sessions/#adding-a-language","level":3,"title":"Adding a Language","text":"

            Add the prefix with a trailing colon to your .ctxrc:

            session_prefixes:\n  - \"Session:\"\n  - \"Sesión:\"       # Spanish\n

            Include Session: Explicitly

            When you override session_prefixes, the default is replaced, not extended. If you still want English headers recognized, include \"Session:\" in your list.

            ","path":["Recipes","Getting Started","Multilingual Session Parsing"],"tags":[]},{"location":"recipes/multilingual-sessions/#team-setup","level":3,"title":"Team Setup","text":"

            Commit .ctxrc to the repo so all team members share the same prefix list. This ensures ctx journal import and journal generation pick up sessions from all team members regardless of language.

            ","path":["Recipes","Getting Started","Multilingual Session Parsing"],"tags":[]},{"location":"recipes/multilingual-sessions/#common-prefixes","level":3,"title":"Common Prefixes","text":"Language Prefix English Session: Turkish Oturum: Spanish Sesión: French Session: German Sitzung: Japanese セッション: Korean 세션: Portuguese Sessão: Chinese 会话:","path":["Recipes","Getting Started","Multilingual Session Parsing"],"tags":[]},{"location":"recipes/multilingual-sessions/#verifying","level":3,"title":"Verifying","text":"

            After configuring, test with ctx journal source. Sessions with the new prefixes should appear in the output.

            ","path":["Recipes","Getting Started","Multilingual Session Parsing"],"tags":[]},{"location":"recipes/multilingual-sessions/#what-this-does-not-do","level":2,"title":"What This Does NOT Do","text":"
            • Change the interface language: ctx output is always English. This setting only controls which session files ctx can parse.
            • Generate headers: ctx never writes session headers. The prefix list is recognition-only (input, not output).
            • Affect JSONL sessions: Claude Code JSONL transcripts don't use header prefixes. This only applies to Markdown session files in .context/sessions/.
            ","path":["Recipes","Getting Started","Multilingual Session Parsing"],"tags":[]},{"location":"recipes/multilingual-sessions/#see-also","level":2,"title":"See Also","text":"

            See also: Setup Across AI Tools - complete multi-tool setup including Markdown session configuration.

            See also: CLI Reference - full .ctxrc field reference including session_prefixes.

            ","path":["Recipes","Getting Started","Multilingual Session Parsing"],"tags":[]},{"location":"recipes/parallel-worktrees/","level":1,"title":"Parallel Agent Development with Git Worktrees","text":"","path":["Recipes","Agents and Automation","Parallel Agent Development with Git Worktrees"],"tags":[]},{"location":"recipes/parallel-worktrees/#the-problem","level":2,"title":"The Problem","text":"

            You have a large backlog (10, 20, 30 open tasks) and many of them are independent: docs work that doesn't touch Go code, a new package that doesn't overlap with existing ones, test coverage for a stable module.

            Running one agent at a time means serial execution. You want 3-4 agents working in parallel, each on its own track, without stepping on each other's files.

            Git worktrees solve this.

            Each worktree is a separate working directory with its own branch, but they share the same .git object database. Combined with ctx's persistent context, each agent session picks up the full project state and works independently.

            ","path":["Recipes","Agents and Automation","Parallel Agent Development with Git Worktrees"],"tags":[]},{"location":"recipes/parallel-worktrees/#tldr","level":2,"title":"TL;DR","text":"
            /ctx-worktree                                   # 1. group tasks by file overlap\ngit worktree add ../myproject-docs -b work/docs # 2. create worktrees\ncd ../myproject-docs && claude                  # 3. launch agents (one per track)\n/ctx-worktree teardown docs                     # 4. merge back and clean up\n

            TASKS.md will conflict on merge: Accept all [x] completions from both sides.

            ","path":["Recipes","Agents and Automation","Parallel Agent Development with Git Worktrees"],"tags":[]},{"location":"recipes/parallel-worktrees/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Tool Type Purpose /ctx-worktree Skill Create, list, and tear down worktrees /ctx-next Skill Pick tasks from the backlog for each track git worktree Command Underlying git worktree management git merge Command Merge completed tracks back to main","path":["Recipes","Agents and Automation","Parallel Agent Development with Git Worktrees"],"tags":[]},{"location":"recipes/parallel-worktrees/#the-workflow","level":2,"title":"The Workflow","text":"","path":["Recipes","Agents and Automation","Parallel Agent Development with Git Worktrees"],"tags":[]},{"location":"recipes/parallel-worktrees/#step-1-assess-the-backlog","level":3,"title":"Step 1: Assess the Backlog","text":"

            Start in your main checkout. Ask the agent to analyze your tasks and group them by blast radius: which files and directories each task touches.

            /ctx-worktree\nLook at TASKS.md and group the pending tasks into 2-3 independent\ntracks based on which files they'd touch. Show me the grouping\nbefore creating anything.\n

            The agent reads TASKS.md, estimates file overlap, and proposes groups:

            Proposed worktree groups:\n\n  work/docs   # recipe updates, blog post (touches: docs/)\n  work/crypto # scratchpad encryption infra (touches: internal/crypto/)\n  work/tests  # journal test coverage (touches: internal/cli/journal/)\n
            ","path":["Recipes","Agents and Automation","Parallel Agent Development with Git Worktrees"],"tags":[]},{"location":"recipes/parallel-worktrees/#step-2-create-the-worktrees","level":3,"title":"Step 2: Create the Worktrees","text":"

            Once you approve the grouping, the agent creates worktrees as sibling directories:

            Create the worktrees for those three groups.\n

            Behind the scenes:

            git worktree add ../myproject-docs -b work/docs\ngit worktree add ../myproject-crypto -b work/crypto\ngit worktree add ../myproject-tests -b work/tests\n

            Each worktree is a full working copy on its own branch.

            ","path":["Recipes","Agents and Automation","Parallel Agent Development with Git Worktrees"],"tags":[]},{"location":"recipes/parallel-worktrees/#step-3-launch-agents","level":3,"title":"Step 3: Launch Agents","text":"

            Open a separate terminal (or editor window) for each worktree and start a Claude Code session:

            # Terminal 1\ncd ../myproject-docs\nclaude\n\n# Terminal 2\ncd ../myproject-crypto\nclaude\n\n# Terminal 3\ncd ../myproject-tests\nclaude\n

            Each agent sees the full project, including .context/, and can work independently.

            Do Not Initialize Context in Worktrees

            Do not run ctx init in worktrees: The .context directory is already tracked in git.

            ","path":["Recipes","Agents and Automation","Parallel Agent Development with Git Worktrees"],"tags":[]},{"location":"recipes/parallel-worktrees/#step-4-work","level":3,"title":"Step 4: Work","text":"

            Each agent works through its assigned tasks. They can read TASKS.md to know what's assigned to their track, use /ctx-next to pick the next item, and commit normally on their work/* branch.

            ","path":["Recipes","Agents and Automation","Parallel Agent Development with Git Worktrees"],"tags":[]},{"location":"recipes/parallel-worktrees/#step-5-merge-back","level":3,"title":"Step 5: Merge Back","text":"

            As each track finishes, return to the main checkout and merge:

            /ctx-worktree teardown docs\n

            The agent checks for uncommitted changes, merges work/docs into your current branch, removes the worktree, and deletes the branch.

            ","path":["Recipes","Agents and Automation","Parallel Agent Development with Git Worktrees"],"tags":[]},{"location":"recipes/parallel-worktrees/#step-6-handle-tasksmd-conflicts","level":3,"title":"Step 6: Handle TASKS.md Conflicts","text":"

            TASKS.md will almost always conflict when merging: Multiple agents will mark different tasks as [x]. This is expected and easy to resolve:

            Accept all completions from both sides. No task should go from [x] back to [ ]. The merge resolution is always additive.

            ","path":["Recipes","Agents and Automation","Parallel Agent Development with Git Worktrees"],"tags":[]},{"location":"recipes/parallel-worktrees/#step-7-cleanup","level":3,"title":"Step 7: Cleanup","text":"

            After all tracks are merged, verify everything is clean:

            /ctx-worktree list\n

            Should show only the main working tree. All work/* branches should be gone.

            ","path":["Recipes","Agents and Automation","Parallel Agent Development with Git Worktrees"],"tags":[]},{"location":"recipes/parallel-worktrees/#conversational-approach","level":2,"title":"Conversational Approach","text":"

            You don't have to use the skill directly for every step. These natural prompts work:

            • \"I have a big backlog. Can we split it across worktrees?\"
            • \"Which of these tasks can run in parallel without conflicts?\"
            • \"Merge the docs track back in.\"
            • \"Clean up all the worktrees, we're done.\"
            ","path":["Recipes","Agents and Automation","Parallel Agent Development with Git Worktrees"],"tags":[]},{"location":"recipes/parallel-worktrees/#what-works-differently-in-worktrees","level":2,"title":"What Works Differently in Worktrees","text":"

            The encryption key lives at ~/.ctx/.ctx.key (user-level, outside the project). Because all worktrees on the same machine share this path, ctx pad and ctx hook notify work in worktrees automatically - no special setup needed.

            One thing to watch:

            • Journal enrichment: ctx journal import and ctx journal enrich write files relative to the current working directory. Enrichments created in a worktree stay there and are discarded on teardown. Enrich journals on the main branch after merging: the JSONL session logs are always intact, and you don't lose any data.

            Context Files Will Merge Just Fine

            Tracked context files (TASKS.md, DECISIONS.md, LEARNINGS.md, CONVENTIONS.md) work normally; git handles them.

            ","path":["Recipes","Agents and Automation","Parallel Agent Development with Git Worktrees"],"tags":[]},{"location":"recipes/parallel-worktrees/#tips","level":2,"title":"Tips","text":"
            • 3-4 worktrees max. Beyond that, merge complexity outweighs the parallelism benefit. The skill enforces this limit.
            • Group by package or directory, not by priority. Two high-priority tasks that touch the same files must be in the same track.
            • TASKS.md will conflict on merge. This is normal. Accept all [x] completions: The resolution is always additive.
            • Don't run ctx init in worktrees. The .context/ directory is tracked in git. Running init overwrites shared context files.
            • Name worktrees by concern, not by number. work/docs and work/crypto are more useful than work/track-1 and work/track-2.
            • Commit frequently in each worktree. Smaller commits make merge conflicts easier to resolve.
            ","path":["Recipes","Agents and Automation","Parallel Agent Development with Git Worktrees"],"tags":[]},{"location":"recipes/parallel-worktrees/#next-up","level":2,"title":"Next Up","text":"

            Back to the beginning: Guide Your Agent →

            Or explore the full recipe list.

            ","path":["Recipes","Agents and Automation","Parallel Agent Development with Git Worktrees"],"tags":[]},{"location":"recipes/parallel-worktrees/#see-also","level":2,"title":"See Also","text":"
            • Running an Unattended AI Agent: for serial autonomous loops instead of parallel tracks
            • Tracking Work Across Sessions: managing the task backlog that feeds into parallelization
            • The Complete Session: the session workflow end-to-end, with examples
            ","path":["Recipes","Agents and Automation","Parallel Agent Development with Git Worktrees"],"tags":[]},{"location":"recipes/permission-snapshots/","level":1,"title":"Permission Snapshots","text":"","path":["Recipes","Maintenance","Permission Snapshots"],"tags":[]},{"location":"recipes/permission-snapshots/#the-problem","level":2,"title":"The Problem","text":"

            Claude Code's .claude/settings.local.json accumulates one-off permissions every time you click \"Allow\". After busy sessions the file is full of session-specific entries that expand the agent's surface area beyond intent.

            Since settings.local.json is .gitignored, there is no PR review or CI check. The file drifts independently on every machine, and there is no built-in way to reset to a known-good state.

            ","path":["Recipes","Maintenance","Permission Snapshots"],"tags":[]},{"location":"recipes/permission-snapshots/#tldr","level":2,"title":"TL;DR","text":"
            /ctx-permission-sanitize               # audit for dangerous patterns\nctx permission snapshot            # save golden image\n# ... sessions accumulate cruft ...\nctx permission restore             # reset to golden state\n
            ","path":["Recipes","Maintenance","Permission Snapshots"],"tags":[]},{"location":"recipes/permission-snapshots/#the-solution","level":2,"title":"The Solution","text":"

            Save a curated settings.local.json as a golden image, then restore from it to drop session-accumulated permissions. The golden file (.claude/settings.golden.json) is committed to version control and shared with the team.

            ","path":["Recipes","Maintenance","Permission Snapshots"],"tags":[]},{"location":"recipes/permission-snapshots/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Command/Skill Role in this workflow ctx permission snapshot Save settings.local.json as golden image ctx permission restore Reset settings.local.json from golden image /ctx-permission-sanitize Audit for dangerous patterns before snapshotting","path":["Recipes","Maintenance","Permission Snapshots"],"tags":[]},{"location":"recipes/permission-snapshots/#step-by-step","level":2,"title":"Step by Step","text":"","path":["Recipes","Maintenance","Permission Snapshots"],"tags":[]},{"location":"recipes/permission-snapshots/#1-curate-your-permissions","level":3,"title":"1. Curate Your Permissions","text":"

            Start with a clean settings.local.json. Optionally run /ctx-permission-sanitize to remove dangerous patterns first.

            Review the file manually. Every entry should be there because you decided it belongs, not because you clicked \"Allow\" once during debugging.

            See the Permission Hygiene recipe for recommended defaults.

            ","path":["Recipes","Maintenance","Permission Snapshots"],"tags":[]},{"location":"recipes/permission-snapshots/#2-take-a-snapshot","level":3,"title":"2. Take a Snapshot","text":"
            ctx permission snapshot\n# Saved golden image: .claude/settings.golden.json\n

            This creates a byte-for-byte copy. No re-encoding, no indent changes.

            ","path":["Recipes","Maintenance","Permission Snapshots"],"tags":[]},{"location":"recipes/permission-snapshots/#3-commit-the-golden-file","level":3,"title":"3. Commit the Golden File","text":"
            git add .claude/settings.golden.json\ngit commit -m \"Add permission golden image\"\n

            The golden file is not gitignored (unlike settings.local.json). This is intentional: it becomes a team-shared baseline.

            ","path":["Recipes","Maintenance","Permission Snapshots"],"tags":[]},{"location":"recipes/permission-snapshots/#4-auto-restore-at-the-session-start","level":3,"title":"4. Auto-Restore at the Session Start","text":"

            Add this instruction to your CLAUDE.md:

            ## On Session Start\n\nRun `ctx permission restore` to reset permissions to the golden image.\n

            The agent will restore the golden image at the start of every session, automatically dropping any permissions accumulated during previous sessions.

            ","path":["Recipes","Maintenance","Permission Snapshots"],"tags":[]},{"location":"recipes/permission-snapshots/#5-update-when-intentional-changes-are-made","level":3,"title":"5. Update When Intentional Changes Are Made","text":"

            When you add a new permanent permission (not a one-off debugging entry):

            # Edit settings.local.json with the new permission\n# Then update the golden image:\nctx permission snapshot\ngit add .claude/settings.golden.json\ngit commit -m \"Update permission golden image: add cargo test\"\n
            ","path":["Recipes","Maintenance","Permission Snapshots"],"tags":[]},{"location":"recipes/permission-snapshots/#conversational-approach","level":2,"title":"Conversational Approach","text":"

            You don't need to remember exact commands. These natural-language prompts work with agents trained on the ctx playbook:

            What you say What happens \"Save my current permissions as baseline\" Agent runs ctx permission snapshot \"Reset permissions to the golden image\" Agent runs ctx permission restore \"Clean up my permissions\" Agent runs /ctx-permission-sanitize then snapshot \"What permissions did I accumulate?\" Agent diffs local vs golden","path":["Recipes","Maintenance","Permission Snapshots"],"tags":[]},{"location":"recipes/permission-snapshots/#next-up","level":2,"title":"Next Up","text":"

            Turning Activity into Content →: Generate blog posts, changelogs, and journal sites from your project activity.

            ","path":["Recipes","Maintenance","Permission Snapshots"],"tags":[]},{"location":"recipes/permission-snapshots/#see-also","level":2,"title":"See Also","text":"
            • Permission Hygiene: recommended defaults and maintenance workflow
            • CLI Reference: ctx permission: full command documentation
            ","path":["Recipes","Maintenance","Permission Snapshots"],"tags":[]},{"location":"recipes/publishing/","level":1,"title":"Turning Activity into Content","text":"","path":["Recipes","Maintenance","Turning Activity into Content"],"tags":[]},{"location":"recipes/publishing/#the-problem","level":2,"title":"The Problem","text":"

            Your .context/ directory is full of decisions, learnings, and session history.

            Your git log tells the story of a project evolving.

            But none of this is visible to anyone outside your terminal.

            You want to turn this raw activity into:

            • a browsable journal site,
            • blog posts,
            • changelog posts.
            ","path":["Recipes","Maintenance","Turning Activity into Content"],"tags":[]},{"location":"recipes/publishing/#tldr","level":2,"title":"TL;DR","text":"
            ctx journal import --all             # 1. import sessions to markdown\n\n/ctx-journal-enrich-all             # 2. add metadata and tags\n\nctx journal site --serve            # 3. build and serve the journal\n\n/ctx-blog about the caching layer   # 4. draft a blog post\n/ctx-blog-changelog v0.1.0 \"v0.2\"   # 5. write a changelog post\n

            Read on for details on each stage.

            ","path":["Recipes","Maintenance","Turning Activity into Content"],"tags":[]},{"location":"recipes/publishing/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Tool Type Purpose ctx journal import Command Import session JSONL to editable markdown ctx journal site Command Generate a static site from journal entries ctx journal obsidian Command Generate an Obsidian vault from journal entries ctx serve Command Serve any zensical directory (default: journal) ctx site feed Command Generate Atom feed from finalized blog posts make journal Makefile Shortcut for import + site rebuild /ctx-journal-enrich-all Skill Full pipeline: import if needed, then batch-enrich (recommended) /ctx-journal-enrich Skill Add metadata, summaries, and tags to one entry /ctx-blog Skill Draft a blog post from recent project activity /ctx-blog-changelog Skill Write a themed post from a commit range","path":["Recipes","Maintenance","Turning Activity into Content"],"tags":[]},{"location":"recipes/publishing/#the-workflow","level":2,"title":"The Workflow","text":"","path":["Recipes","Maintenance","Turning Activity into Content"],"tags":[]},{"location":"recipes/publishing/#step-1-import-sessions-to-markdown","level":3,"title":"Step 1: Import Sessions to Markdown","text":"

            Raw session data lives as JSONL files in Claude Code's internal storage. The first step is converting these into readable, editable markdown.

            # Import all sessions from the current project\nctx journal import --all\n\n# Import from all projects (if you work across multiple repos)\nctx journal import --all --all-projects\n\n# Import a single session by ID or slug\nctx journal import abc123\nctx journal import gleaming-wobbling-sutherland\n

            Imported files land in .context/journal/ as individual Markdown files with session metadata and the full conversation transcript.

            --all is safe by default: Only new sessions are imported. Existing files are skipped. Use --regenerate to re-import existing files (YAML frontmatter is preserved). Use --regenerate --keep-frontmatter=false -y to regenerate everything including frontmatter.

            ","path":["Recipes","Maintenance","Turning Activity into Content"],"tags":[]},{"location":"recipes/publishing/#step-2-enrich-entries-with-metadata","level":3,"title":"Step 2: Enrich Entries with Metadata","text":"

            Raw entries have timestamps and conversations but lack the structured metadata that makes a journal searchable. Use /ctx-journal-enrich-all to process your entire backlog at once:

            /ctx-journal-enrich-all\n

            The skill finds all unenriched entries, filters out noise (suggestion sessions, very short sessions, multipart continuations), and processes each one by extracting titles, topics, technologies, and summaries from the conversation.

            For large backlogs (20+ entries), it can spawn subagents to process entries in parallel.

            To enrich a single entry instead:

            /ctx-journal-enrich twinkly-stirring-kettle\n/ctx-journal-enrich 2026-01-24\n

            After enrichment, an entry gains YAML frontmatter:

            ---\ntitle: \"Implement Redis caching for API endpoints\"\ndate: 2026-01-24\ntype: feature\noutcome: completed\ntopics:\n  - caching\n  - api-performance\ntechnologies:\n  - go\n  - redis\nkey_files:\n  - internal/api/middleware/cache.go\n  - internal/cache/redis.go\n---\n

            This metadata powers better navigation in the journal site:

            • titles replace slugs,
            • summaries appear in the index,
            • and search covers topics and technologies.
            ","path":["Recipes","Maintenance","Turning Activity into Content"],"tags":[]},{"location":"recipes/publishing/#step-3-generate-the-journal-site","level":3,"title":"Step 3: Generate the Journal Site","text":"

            With entries exported and enriched, generate the static site:

            # Generate site files\nctx journal site\n\n# Generate and build static HTML\nctx journal site --build\n\n# Generate and serve locally (opens at http://localhost:8000)\nctx journal site --serve\n\n# Custom output directory\nctx journal site --output ~/my-journal\n

            The site is generated in .context/journal-site/ by default. It uses zensical for static site generation (pipx install zensical).

            Or use the Makefile shortcut that combines export and rebuild:

            make journal\n

            This runs ctx journal import --all followed by ctx journal site --build, then reminds you to enrich before rebuilding. To serve the built site, use make journal-serve or ctx serve (serve-only, no regeneration).

            ","path":["Recipes","Maintenance","Turning Activity into Content"],"tags":[]},{"location":"recipes/publishing/#alternative-export-to-obsidian-vault","level":3,"title":"Alternative: Export to Obsidian Vault","text":"

            If you use Obsidian for knowledge management, generate a vault instead of (or alongside) the static site:

            ctx journal obsidian\nctx journal obsidian --output ~/vaults/ctx-journal\n

            This produces an Obsidian-ready directory with wikilinks, MOC (Map of Content) pages for topics/files/types, and a \"Related Sessions\" footer on each entry for graph connectivity. Open the output directory in Obsidian as a vault.

            The vault uses the same enriched source entries as the static site. Both outputs can coexist: The static site goes to .context/journal-site/, the vault to .context/journal-obsidian/.

            ","path":["Recipes","Maintenance","Turning Activity into Content"],"tags":[]},{"location":"recipes/publishing/#step-4-draft-blog-posts-from-activity","level":3,"title":"Step 4: Draft Blog Posts from Activity","text":"

            When your project reaches a milestone worth sharing, use /ctx-blog to draft a post from recent activity. The skill gathers context from multiple sources: git log, DECISIONS.md, LEARNINGS.md, completed tasks, and journal entries.

            /ctx-blog about the caching layer we just built\n/ctx-blog last week's refactoring work\n/ctx-blog lessons learned from the migration\n

            The skill gathers recent commits, decisions, and learnings; identifies a narrative arc; drafts an outline for approval; writes the full post; and saves it to docs/blog/YYYY-MM-DD-slug.md.

            Posts are written in first person with code snippets, commit references, and an honest discussion of what went wrong.

            The Output Is zensical-Flavored Markdown

            The blog skills produce Markdown tuned for a zensical site: topics: frontmatter (zensical's tag field), a docs/blog/ output path, and a banner image reference.

            The content is still standard Markdown and can be adapted to other static site generators, but the defaults assume a zensical project structure.

            ","path":["Recipes","Maintenance","Turning Activity into Content"],"tags":[]},{"location":"recipes/publishing/#step-5-write-changelog-posts-from-commit-ranges","level":3,"title":"Step 5: Write Changelog Posts from Commit Ranges","text":"

            For release notes or \"what changed\" posts, /ctx-blog-changelog takes a starting commit and a theme, then analyzes everything that changed:

            /ctx-blog-changelog 040ce99 \"building the journal system\"\n/ctx-blog-changelog HEAD~30 \"what's new in v0.2.0\"\n/ctx-blog-changelog v0.1.0 \"the road to v0.2.0\"\n

            The skill diffs the commit range, identifies the most-changed files, and constructs a narrative organized by theme rather than chronology, including a key commits table and before/after comparisons.

            ","path":["Recipes","Maintenance","Turning Activity into Content"],"tags":[]},{"location":"recipes/publishing/#step-6-generate-the-blog-feed","level":3,"title":"Step 6: Generate the Blog Feed","text":"

            After publishing blog posts, generate the Atom feed so readers and automation can discover new content:

            ctx site feed\n

            This scans docs/blog/ for finalized posts (reviewed_and_finalized: true), extracts title, date, author, topics, and summary, and writes a valid Atom 1.0 feed to site/feed.xml. The feed is also generated automatically as part of make site.

            The feed is available at ctx.ist/feed.xml.

            ","path":["Recipes","Maintenance","Turning Activity into Content"],"tags":[]},{"location":"recipes/publishing/#the-conversational-approach","level":2,"title":"The Conversational Approach","text":"

            You can also drive your publishing anytime with natural language:

            \"write about what we did this week\"\n\"turn today's session into a blog post\"\n\"make a changelog post covering everything since the last release\"\n\"enrich the last few journal entries\"\n

            The agent has full visibility into your .context/ state (tasks completed, decisions recorded, learnings captured), so its suggestions are grounded in what actually happened.

            ","path":["Recipes","Maintenance","Turning Activity into Content"],"tags":[]},{"location":"recipes/publishing/#putting-it-all-together","level":2,"title":"Putting It All Together","text":"

            The full pipeline from raw transcripts to published content:

            # 1. Import all sessions\nctx journal import --all\n\n# 2. In Claude Code: enrich all entries with metadata\n/ctx-journal-enrich-all\n\n# 3. Build and serve the journal site\nmake journal\nmake journal-serve\n\n# 3b. Or generate an Obsidian vault\nctx journal obsidian\n\n# 4. In Claude Code: draft a blog post\n/ctx-blog about the features we shipped this week\n\n# 5. In Claude Code: write a changelog post\n/ctx-blog-changelog v0.1.0 \"what's new in v0.2.0\"\n

            The journal pipeline is idempotent at every stage. You can rerun ctx journal import --all without losing enrichment. You can rebuild the site as many times as you want.

            ","path":["Recipes","Maintenance","Turning Activity into Content"],"tags":[]},{"location":"recipes/publishing/#tips","level":2,"title":"Tips","text":"
            • Import regularly. Run ctx journal import --all after each session to keep your journal current. Only new sessions are imported: Existing files are skipped by default.
            • Use batch enrichment. /ctx-journal-enrich-all filters noise (suggestion sessions, trivial sessions, multipart continuations) so you do not have to decide what is worth enriching.
            • Keep journal files in .gitignore. Session journals can contain sensitive data: file contents, commands, internal discussions, and error messages with stack traces. Add .context/journal/ and .context/journal-site/ to .gitignore.
            • Use /ctx-blog for narrative posts and /ctx-blog-changelog for release posts. One finds a story in recent activity, the other explains a commit range by theme.
            • Edit the drafts. These skills produce drafts, not final posts. Review the narrative, add your perspective, and remove anything that does not serve the reader.
            ","path":["Recipes","Maintenance","Turning Activity into Content"],"tags":[]},{"location":"recipes/publishing/#next-up","level":2,"title":"Next Up","text":"

            Running an Unattended AI Agent →: Set up an AI agent that works through tasks overnight without you at the keyboard.

            ","path":["Recipes","Maintenance","Turning Activity into Content"],"tags":[]},{"location":"recipes/publishing/#see-also","level":2,"title":"See Also","text":"
            • Session Journal: journal system, enrichment schema
            • CLI Reference: ctx journal: import, list, show session history
            • CLI Reference: ctx journal site: static site generation
            • CLI Reference: ctx journal obsidian: Obsidian vault export
            • CLI Reference: ctx serve: serve-only (no regeneration)
            • Browsing and Enriching Past Sessions: journal browsing workflow
            • The Complete Session: capturing context during a session
            ","path":["Recipes","Maintenance","Turning Activity into Content"],"tags":[]},{"location":"recipes/scratchpad-sync/","level":1,"title":"Syncing Scratchpad Notes Across Machines","text":"","path":["Recipes","Knowledge and Tasks","Syncing Scratchpad Notes Across Machines"],"tags":[]},{"location":"recipes/scratchpad-sync/#the-problem","level":2,"title":"The Problem","text":"

            You work from multiple machines: a desktop and a laptop, or a local machine and a remote dev server.

            The scratchpad entries are encrypted. The ciphertext (.context/scratchpad.enc) travels with git, but the encryption key lives outside the project at ~/.ctx/.ctx.key and is never committed. Without the key on each machine, you cannot read or write entries.

            How do you distribute the key and keep the scratchpad in sync?

            ","path":["Recipes","Knowledge and Tasks","Syncing Scratchpad Notes Across Machines"],"tags":[]},{"location":"recipes/scratchpad-sync/#tldr","level":2,"title":"TL;DR","text":"
            ctx init                                                  # 1. generates key\nscp ~/.ctx/.ctx.key user@machine-b:~/.ctx/.ctx.key        # 2. copy key\nchmod 600 ~/.ctx/.ctx.key                                 # 3. secure it\n# Normal git push/pull syncs the encrypted scratchpad.enc\n# On conflict: ctx pad resolve → rebuild → git add + commit\n

            Finding Your Key File

            The key is always at ~/.ctx/.ctx.key - one key per machine, shared by every project and worktree on it.

            Treat the Key like a Password

            The scratchpad key is the only thing protecting your encrypted entries.

            Store a backup in a secure enclave such as a password manager, and treat it with the same care you would give passwords, certificates, or API tokens.

            Anyone with the key can decrypt every scratchpad entry.

            ","path":["Recipes","Knowledge and Tasks","Syncing Scratchpad Notes Across Machines"],"tags":[]},{"location":"recipes/scratchpad-sync/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Tool Type Purpose ctx init CLI command Initialize context (generates the key automatically) ctx pad add CLI command Add a scratchpad entry ctx pad rm CLI command Remove entries by stable ID (supports ranges) ctx pad edit CLI command Edit a scratchpad entry ctx pad resolve CLI command Show both sides of a merge conflict ctx pad merge CLI command Merge entries from other scratchpad files ctx pad import CLI command Bulk-import lines from a file ctx pad export CLI command Export blob entries to a directory scp Shell Copy the key file between machines git push / git pull Shell Sync the encrypted file via git /ctx-pad Skill Natural language interface to pad commands","path":["Recipes","Knowledge and Tasks","Syncing Scratchpad Notes Across Machines"],"tags":[]},{"location":"recipes/scratchpad-sync/#the-workflow","level":2,"title":"The Workflow","text":"","path":["Recipes","Knowledge and Tasks","Syncing Scratchpad Notes Across Machines"],"tags":[]},{"location":"recipes/scratchpad-sync/#step-1-initialize-on-machine-a","level":3,"title":"Step 1: Initialize on Machine A","text":"

            Run ctx init on your first machine. The key is created automatically at ~/.ctx/.ctx.key:

            ctx init\n# ...\n# Created ~/.ctx/.ctx.key (0600)\n# Created .context/scratchpad.enc\n

            The key lives outside the project directory and is never committed. The .enc file is tracked in git.

            Key Folder Change (v0.7.0+)

            If you built ctx from source or upgraded past v0.6.0, the key location changed to ~/.ctx/.ctx.key. Check these legacy folders and copy your key manually:

            # Old locations (pick whichever exists)\nls ~/.local/ctx/keys/        # pre-v0.7.0 user-level\nls .context/.ctx.key         # pre-v0.6.0 project-local\n\n# Copy to the new location\nmkdir -p ~/.ctx && chmod 700 ~/.ctx\ncp <old-key-path> ~/.ctx/.ctx.key\nchmod 600 ~/.ctx/.ctx.key\n
            ","path":["Recipes","Knowledge and Tasks","Syncing Scratchpad Notes Across Machines"],"tags":[]},{"location":"recipes/scratchpad-sync/#step-2-copy-the-key-to-machine-b","level":3,"title":"Step 2: Copy the Key to Machine B","text":"

            Use any secure transfer method. The key is always at ~/.ctx/.ctx.key:

            # scp - create the target directory first\nssh user@machine-b \"mkdir -p ~/.ctx && chmod 700 ~/.ctx\"\nscp ~/.ctx/.ctx.key user@machine-b:~/.ctx/.ctx.key\n\n# Or use a password manager, USB drive, etc.\n

            Set permissions on Machine B:

            chmod 600 ~/.ctx/.ctx.key\n

            Secure the Transfer

            The key is a raw 256-bit AES key. Anyone with the key can decrypt the scratchpad. Use an encrypted channel (SSH, password manager, vault).

            Never paste it in plaintext over email or chat.

            ","path":["Recipes","Knowledge and Tasks","Syncing Scratchpad Notes Across Machines"],"tags":[]},{"location":"recipes/scratchpad-sync/#step-3-normal-pushpull-workflow","level":3,"title":"Step 3: Normal Push/Pull Workflow","text":"

            The encrypted file is committed, so standard git sync works:

            # Machine A: add entries and push\nctx pad add \"staging API key: sk-test-abc123\"\ngit add .context/scratchpad.enc\ngit commit -m \"Update scratchpad\"\ngit push\n\n# Machine B: pull and read\ngit pull\nctx pad\n#   1. staging API key: sk-test-abc123\n

            Both machines have the same key, so both can decrypt the same .enc file.

            ","path":["Recipes","Knowledge and Tasks","Syncing Scratchpad Notes Across Machines"],"tags":[]},{"location":"recipes/scratchpad-sync/#step-4-read-and-write-from-either-machine","level":3,"title":"Step 4: Read and Write from Either Machine","text":"

            Once the key is distributed, all ctx pad commands work identically on both machines. Entries added on Machine A are visible on Machine B after a git pull, and vice versa.

            ","path":["Recipes","Knowledge and Tasks","Syncing Scratchpad Notes Across Machines"],"tags":[]},{"location":"recipes/scratchpad-sync/#step-5-handle-merge-conflicts","level":3,"title":"Step 5: Handle Merge Conflicts","text":"

            If both machines add entries between syncs, pulling will create a merge conflict on .context/scratchpad.enc. Git cannot merge binary (encrypted) content automatically.

            The fastest approach is ctx pad merge: It reads both conflict sides, deduplicates, and writes the union:

            # Extract theirs to a temp file, then merge it in\ngit show :3:.context/scratchpad.enc > /tmp/theirs.enc\ngit checkout --ours .context/scratchpad.enc\nctx pad merge /tmp/theirs.enc\n\n# Done: Commit the resolved scratchpad:\ngit add .context/scratchpad.enc\ngit commit -m \"Resolve scratchpad merge conflict\"\n

            Alternatively, use ctx pad resolve to inspect both sides manually:

            ctx pad resolve\n# === Ours (this machine) ===\n#   1. staging API key: sk-test-abc123\n#   2. check DNS after deploy\n#\n# === Theirs (incoming) ===\n#   1. staging API key: sk-test-abc123\n#   2. new endpoint: api.example.com/v2\n

            Then reconstruct the merged scratchpad:

            # Start fresh with all entries from both sides\nctx pad add \"staging API key: sk-test-abc123\"\nctx pad add \"check DNS after deploy\"\nctx pad add \"new endpoint: api.example.com/v2\"\n\n# Mark the conflict resolved\ngit add .context/scratchpad.enc\ngit commit -m \"Resolve scratchpad merge conflict\"\n
            ","path":["Recipes","Knowledge and Tasks","Syncing Scratchpad Notes Across Machines"],"tags":[]},{"location":"recipes/scratchpad-sync/#merge-conflict-walkthrough","level":2,"title":"Merge Conflict Walkthrough","text":"

            Here's a full scenario showing how conflicts arise and how to resolve them:

            1. Both machines start in sync (1 entry):

            Machine A: 1. staging API key: sk-test-abc123\nMachine B: 1. staging API key: sk-test-abc123\n

            2. Both add entries independently:

            Machine A adds: \"check DNS after deploy\"\nMachine B adds: \"new endpoint: api.example.com/v2\"\n

            3. Machine A pushes first. Machine B pulls and gets a conflict:

            git pull\n# CONFLICT (content): Merge conflict in .context/scratchpad.enc\n

            4. Machine B runs ctx pad resolve:

            ctx pad resolve\n# === Ours ===\n#   1. staging API key: sk-test-abc123\n#   2. new endpoint: api.example.com/v2\n#\n# === Theirs ===\n#   1. staging API key: sk-test-abc123\n#   2. check DNS after deploy\n

            5. Rebuild with entries from both sides and commit:

            # Clear and rebuild (or use the skill to guide you)\nctx pad add \"staging API key: sk-test-abc123\"\nctx pad add \"check DNS after deploy\"\nctx pad add \"new endpoint: api.example.com/v2\"\n\ngit add .context/scratchpad.enc\ngit commit -m \"Merge scratchpad: keep entries from both machines\"\n
            ","path":["Recipes","Knowledge and Tasks","Syncing Scratchpad Notes Across Machines"],"tags":[]},{"location":"recipes/scratchpad-sync/#conversational-approach","level":3,"title":"Conversational Approach","text":"

            When working with an AI assistant, you can resolve conflicts naturally:

            You: \"I have a scratchpad merge conflict. Can you resolve it?\"\n\nAgent: \"Let me extract theirs and merge it in.\"\n       [runs git show :3:.context/scratchpad.enc > /tmp/theirs.enc]\n       [runs git checkout --ours .context/scratchpad.enc]\n       [runs ctx pad merge /tmp/theirs.enc]\n       \"Merged 2 new entries (1 duplicate skipped). Want me to\n       commit the resolution?\"\n
            ","path":["Recipes","Knowledge and Tasks","Syncing Scratchpad Notes Across Machines"],"tags":[]},{"location":"recipes/scratchpad-sync/#tips","level":2,"title":"Tips","text":"
            • Back up the key: If you lose it, you lose access to all encrypted entries. Store a copy in your password manager.
            • One key per project: Each ctx init generates a unique key. Don't reuse keys across projects.
            • Keys work in worktrees: Because the key lives at ~/.ctx/.ctx.key (outside the project), git worktrees on the same machine share the key automatically. No special setup needed.
            • Plaintext fallback for non-sensitive projects: If encryption adds friction and you have nothing sensitive, set scratchpad_encrypt: false in .ctxrc. Merge conflicts become trivial text merges.
            • Never commit the key: The key is stored outside the project at ~/.ctx/.ctx.key and should never be copied into the repository.
            ","path":["Recipes","Knowledge and Tasks","Syncing Scratchpad Notes Across Machines"],"tags":[]},{"location":"recipes/scratchpad-sync/#next-up","level":2,"title":"Next Up","text":"

            Hook Output Patterns →: Choose the right output pattern for your Claude Code hooks.

            ","path":["Recipes","Knowledge and Tasks","Syncing Scratchpad Notes Across Machines"],"tags":[]},{"location":"recipes/scratchpad-sync/#see-also","level":2,"title":"See Also","text":"
            • Scratchpad: feature overview, all commands, when to use scratchpad vs context files
            • Persisting Decisions, Learnings, and Conventions: for structured knowledge that outlives the scratchpad
            ","path":["Recipes","Knowledge and Tasks","Syncing Scratchpad Notes Across Machines"],"tags":[]},{"location":"recipes/scratchpad-with-claude/","level":1,"title":"Using the Scratchpad","text":"","path":["Recipes","Knowledge and Tasks","Using the Scratchpad"],"tags":[]},{"location":"recipes/scratchpad-with-claude/#the-problem","level":2,"title":"The Problem","text":"

            During a session you accumulate quick notes, reminders, intermediate values, and sometimes sensitive tokens. They don't fit TASKS.md (not work items) or DECISIONS.md (not decisions). They don't have the structured fields that LEARNINGS.md requires.

            Without somewhere to put them, they get lost between sessions.

            How do you capture working memory that persists across sessions without polluting your structured context files?

            ","path":["Recipes","Knowledge and Tasks","Using the Scratchpad"],"tags":[]},{"location":"recipes/scratchpad-with-claude/#tldr","level":2,"title":"TL;DR","text":"
            ctx pad add \"check DNS propagation after deploy\"\nctx pad         # list entries\nctx pad show 1  # print entry (pipe-friendly)\n

            Entries are encrypted at rest and travel with git.

            Use the /ctx-pad skill to manage entries from inside your AI session.

            ","path":["Recipes","Knowledge and Tasks","Using the Scratchpad"],"tags":[]},{"location":"recipes/scratchpad-with-claude/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Tool Type Purpose ctx pad CLI command List all scratchpad entries ctx pad show N CLI command Output raw text of entry N (pipe-friendly) ctx pad add CLI command Add a new entry ctx pad edit CLI command Replace, append to, or prepend to an entry ctx pad add --file CLI command Ingest a file as a blob entry ctx pad show N --out CLI command Extract a blob entry to a file ctx pad rm CLI command Remove entries by stable ID (supports ranges) ctx pad normalize CLI command Reassign entry IDs as 1..N ctx pad mv CLI command Reorder entries ctx pad --tag CLI command Filter entries by tag ctx pad tags CLI command List all tags with counts ctx pad import CLI command Bulk-import lines from a file (or stdin) ctx pad export CLI command Export all blob entries to a directory /ctx-pad Skill Natural language interface to all pad commands","path":["Recipes","Knowledge and Tasks","Using the Scratchpad"],"tags":[]},{"location":"recipes/scratchpad-with-claude/#the-workflow","level":2,"title":"The Workflow","text":"","path":["Recipes","Knowledge and Tasks","Using the Scratchpad"],"tags":[]},{"location":"recipes/scratchpad-with-claude/#step-1-add-a-note-naturally","level":3,"title":"Step 1: Add a Note Naturally","text":"

            You don't need to remember any syntax. Just tell your assistant what to jot down:

            You: \"jot down: check DNS propagation after deploy\"\n\nAgent: \"Added to scratchpad:\n  1. check DNS propagation after deploy\"\n

            Behind the scenes, the agent runs ctx pad add \"check DNS propagation after deploy\".

            ","path":["Recipes","Knowledge and Tasks","Using the Scratchpad"],"tags":[]},{"location":"recipes/scratchpad-with-claude/#step-2-view-your-scratchpad","level":3,"title":"Step 2: View Your Scratchpad","text":"
            You: \"show my scratchpad\"\n\nAgent: \"Your scratchpad has 3 entries:\n  1. check DNS propagation after deploy\n  2. staging API endpoint: api.staging.example.com/v2\n  3. retry limit should be 5, not 3\"\n
            ","path":["Recipes","Knowledge and Tasks","Using the Scratchpad"],"tags":[]},{"location":"recipes/scratchpad-with-claude/#step-3-edit-an-entry","level":3,"title":"Step 3: Edit an Entry","text":"
            You: \"update entry 2 to include the new endpoint\"\n\nAgent: \"Updated entry 2:\n  was: staging API endpoint: api.staging.example.com/v2\n  now: staging API endpoint: api.prod.example.com/v2\"\n

            For partial edits, the agent can use --append or --prepend:

            You: \"append the port number to entry 2\"\n\nAgent: \"Updated entry 2:\n  staging API endpoint: api.prod.example.com/v2 :8443\"\n
            ","path":["Recipes","Knowledge and Tasks","Using the Scratchpad"],"tags":[]},{"location":"recipes/scratchpad-with-claude/#step-4-compose-entries","level":3,"title":"Step 4: Compose Entries","text":"

            Use ctx pad show N to pipe entry content into other operations:

            You: \"append the content of entry 3 to entry 1\"\n\nAgent: [runs ctx pad edit 1 --append \"$(ctx pad show 3)\"]\n       \"Done. Entry 1 is now:\n        check DNS propagation after deploy retry limit should be 5, not 3\"\n
            ","path":["Recipes","Knowledge and Tasks","Using the Scratchpad"],"tags":[]},{"location":"recipes/scratchpad-with-claude/#step-5-reorder-by-priority","level":3,"title":"Step 5: Reorder by Priority","text":"
            You: \"move entry 4 to the top\"\n\nAgent: [runs ctx pad mv 4 1]\n       \"Moved entry 4 to position 1. Scratchpad reordered.\"\n
            ","path":["Recipes","Knowledge and Tasks","Using the Scratchpad"],"tags":[]},{"location":"recipes/scratchpad-with-claude/#step-6-clean-up","level":3,"title":"Step 6: Clean Up","text":"
            You: \"remove entry 2, it's done\"\n\nAgent: [runs ctx pad rm 2]\n       \"Removed entry 2. 3 entries remaining.\n        (IDs are stable; remaining entries keep their IDs.)\"\n
            ","path":["Recipes","Knowledge and Tasks","Using the Scratchpad"],"tags":[]},{"location":"recipes/scratchpad-with-claude/#step-7-store-a-file-as-a-blob","level":3,"title":"Step 7: Store a File as a Blob","text":"

            The scratchpad can hold small files (up to 64 KB) as encrypted blob entries. The file is base64-encoded and stored alongside a label you provide:

            # Ingest a file: the first argument is the label\nctx pad add \"deploy config\" --file ./deploy.yaml\n\n# List shows the label with a [BLOB] marker\nctx pad\n#   1. check DNS propagation after deploy\n#   2. deploy config [BLOB]\n
            ","path":["Recipes","Knowledge and Tasks","Using the Scratchpad"],"tags":[]},{"location":"recipes/scratchpad-with-claude/#step-8-extract-a-blob","level":3,"title":"Step 8: Extract a Blob","text":"

            Use show --out to write the decoded file back to disk:

            # Write blob entry to a file\nctx pad show 2 --out ./recovered-deploy.yaml\n\n# Or print to stdout (for piping)\nctx pad show 2 | head -5\n

            Blob entries are encrypted identically to text entries: They're just base64-encoded before encryption. The --out flag decodes and writes the raw bytes.

            ","path":["Recipes","Knowledge and Tasks","Using the Scratchpad"],"tags":[]},{"location":"recipes/scratchpad-with-claude/#step-9-bulk-import-notes","level":3,"title":"Step 9: Bulk Import Notes","text":"

            When you have a file with many notes (one per line), import them in bulk instead of adding one at a time:

            # Import from a file: Each non-empty line becomes an entry\nctx pad import notes.txt\n\n# Or pipe from stdin\ngrep TODO *.go | ctx pad import -\n

            All entries are written in a single encrypt/write cycle, regardless of how many lines the file contains.

            ","path":["Recipes","Knowledge and Tasks","Using the Scratchpad"],"tags":[]},{"location":"recipes/scratchpad-with-claude/#step-10-export-blobs-to-disk","level":3,"title":"Step 10: Export Blobs to Disk","text":"

            Export all blob entries to a directory as individual files. Each blob's label becomes the filename:

            # Export to a directory (created if needed)\nctx pad export ./ideas\n\n# Preview what would be exported\nctx pad export --dry-run ./ideas\n\n# Force overwrite existing files\nctx pad export --force ./backup\n

            When a file already exists, a Unix timestamp is prepended to the filename to avoid collisions. Use --force to overwrite instead.

            ","path":["Recipes","Knowledge and Tasks","Using the Scratchpad"],"tags":[]},{"location":"recipes/scratchpad-with-claude/#step-11-tag-entries-for-organization","level":3,"title":"Step 11: Tag Entries for Organization","text":"

            Tags let you categorize entries without any structure beyond a #word token in the text. Add them when creating or editing entries:

            You: \"jot down: check DNS propagation #later\"\nYou: \"tag entry 2 as urgent\"\n\nAgent: [runs ctx pad edit 2 --tag urgent]\n       \"Updated entry 2.\"\n

            Filter your scratchpad by tag:

            You: \"show me everything tagged later\"\n\nAgent: [runs ctx pad --tag later]\n       \"  1. check DNS propagation #later\n        3. review PR feedback #later #ci\"\n

            Entry IDs are stable; they don't shift when other entries are deleted, so ctx pad rm 3 always targets the same entry regardless of deletions or active filters. Use ctx pad normalize to reassign IDs as 1..N.

            Exclude a tag with ~:

            ctx pad --tag ~later         # everything NOT tagged #later\nctx pad --tag later --tag ci # entries with BOTH tags (AND logic)\n

            See what tags you're using:

            You: \"what tags do I have?\"\n\nAgent: [runs ctx pad tags]\n       \"ci       1\n        later    2\n        urgent   1\"\n

            Tags work on blob entries too; they're extracted from the label:

            ctx pad add \"deploy config #prod\" --file ./deploy.yaml\nctx pad --tag prod\n#   1. deploy config #prod [BLOB]\n
            ","path":["Recipes","Knowledge and Tasks","Using the Scratchpad"],"tags":[]},{"location":"recipes/scratchpad-with-claude/#using-ctx-pad-in-a-session","level":2,"title":"Using /ctx-pad in a Session","text":"

            Invoke the /ctx-pad skill first, then describe what you want in natural language. Without the skill prefix, the agent may route your request to TASKS.md or another context file instead of the scratchpad.

            You: /ctx-pad jot down: check DNS after deploy\nYou: /ctx-pad show my scratchpad\nYou: /ctx-pad delete entry 3\n

            Once the skill is active, it translates intent into commands:

            You say (after /ctx-pad) What the agent does \"jot down: check DNS after deploy\" ctx pad add \"check DNS after deploy\" \"remember this: retry limit is 5\" ctx pad add \"retry limit is 5\" \"show my scratchpad\" / \"what's on my pad\" ctx pad \"show me entry 3\" ctx pad show 3 \"delete the third one\" / \"remove entry 3\" ctx pad rm 3 \"remove entries 3 through 5\" ctx pad rm 3-5 \"renumber my scratchpad\" ctx pad normalize \"change entry 2 to ...\" ctx pad edit 2 \"new text\" \"append ' +important' to entry 3\" ctx pad edit 3 --append \" +important\" \"prepend 'URGENT:' to entry 1\" ctx pad edit 1 --prepend \"URGENT: \" \"prioritize entry 4\" / \"move to the top\" ctx pad mv 4 1 \"import my notes from notes.txt\" ctx pad import notes.txt \"export all blobs to ./ideas\" ctx pad export ./ideas \"show entries tagged later\" ctx pad --tag later \"show everything except later\" ctx pad --tag ~later \"what tags do I have\" ctx pad tags \"tag entry 5 as urgent\" ctx pad edit 5 --tag urgent

            When in Doubt, Use the CLI Directly

            The ctx pad commands work the same whether you run them yourself or let the skill invoke them.

            If the agent misroutes a request, fall back to ctx pad add \"...\" in your terminal.

            ","path":["Recipes","Knowledge and Tasks","Using the Scratchpad"],"tags":[]},{"location":"recipes/scratchpad-with-claude/#when-to-use-scratchpad-vs-context-files","level":2,"title":"When to Use Scratchpad vs Context Files","text":"Situation Use Temporary reminders (\"check X after deploy\") Scratchpad Session-start reminders (\"remind me next session\") ctx remind Working values during debugging (ports, endpoints, counts) Scratchpad Sensitive tokens or API keys (short-term storage) Scratchpad Quick notes that don't fit anywhere else Scratchpad Work items with completion tracking TASKS.md Trade-offs between alternatives with rationale DECISIONS.md Reusable lessons with context/lesson/application LEARNINGS.md Codified patterns and standards CONVENTIONS.md

            Decision Guide

            • If it has structured fields (context, rationale, lesson, application), it belongs in a context file like DECISIONS.md or LEARNINGS.md.
            • If it's a work item you'll mark done, it belongs in TASKS.md.
            • If you want a message relayed VERBATIM at the next session start, it belongs in ctx remind.
            • If it's a quick note, reminder, or working value (especially if it's sensitive or ephemeral), it belongs on the scratchpad.

            Scratchpad Is Not a Junk Drawer

            The scratchpad is for working memory, not long-term storage.

            If a note is still relevant after several sessions, promote it:

            A persistent reminder becomes a task, a recurring value becomes a convention, a hard-won insight becomes a learning.

            ","path":["Recipes","Knowledge and Tasks","Using the Scratchpad"],"tags":[]},{"location":"recipes/scratchpad-with-claude/#tips","level":2,"title":"Tips","text":"
            • Entries persist across sessions: The scratchpad is committed (encrypted) to git, so entries survive session boundaries. Pick up where you left off.
            • Entries are numbered and reorderable: Use ctx pad mv to put high-priority items at the top.
            • ctx pad show N enables unix piping: Output raw entry text with no numbering prefix. Compose with --append, --prepend, or other shell tools.
            • Never mention the key file contents to the AI: The agent knows how to use ctx pad commands but should never read or print the encryption key (~/.ctx/.ctx.key) directly.
            • Encryption is transparent: You interact with plaintext; the encryption/decryption happens automatically on every read/write.
            ","path":["Recipes","Knowledge and Tasks","Using the Scratchpad"],"tags":[]},{"location":"recipes/scratchpad-with-claude/#next-up","level":2,"title":"Next Up","text":"

            Syncing Scratchpad Notes Across Machines →: Distribute encryption keys and scratchpad data across environments.

            ","path":["Recipes","Knowledge and Tasks","Using the Scratchpad"],"tags":[]},{"location":"recipes/scratchpad-with-claude/#see-also","level":2,"title":"See Also","text":"
            • Scratchpad: feature overview, all commands, encryption details, plaintext override
            • Persisting Decisions, Learnings, and Conventions: for structured knowledge that outlives the scratchpad
            • The Complete Session: full session lifecycle showing how the scratchpad fits into the broader workflow
            ","path":["Recipes","Knowledge and Tasks","Using the Scratchpad"],"tags":[]},{"location":"recipes/session-archaeology/","level":1,"title":"Browsing and Enriching Past Sessions","text":"","path":["Recipes","Sessions","Browsing and Enriching Past Sessions"],"tags":[]},{"location":"recipes/session-archaeology/#the-problem","level":2,"title":"The Problem","text":"

            After weeks of AI-assisted development you have dozens of sessions scattered across JSONL files in ~/.claude/projects/. Finding the session where you debugged the Redis connection pool, or remembering what you decided about the caching strategy three Tuesdays ago, often means grepping raw JSON.

            There is no table of contents, no search, and no summaries.

            This recipe shows how to turn that raw session history into a browsable, searchable, and enriched journal site you can navigate in your browser.

            ","path":["Recipes","Sessions","Browsing and Enriching Past Sessions"],"tags":[]},{"location":"recipes/session-archaeology/#tldr","level":2,"title":"TL;DR","text":"

            Export and Generate

            ctx journal import --all\nctx journal site --serve\n

            Enrich

            /ctx-journal-enrich-all\n

            Rebuild

            ctx journal site --serve\n

            Read on for what each stage does and why.

            ","path":["Recipes","Sessions","Browsing and Enriching Past Sessions"],"tags":[]},{"location":"recipes/session-archaeology/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Tool Type Purpose ctx journal source Command List parsed sessions with metadata ctx journal source --show Command Inspect a specific session in detail ctx journal import Command Import sessions to editable journal Markdown ctx journal site Command Generate a static site from journal entries ctx journal obsidian Command Generate an Obsidian vault from journal entries ctx journal schema check Command Validate JSONL files and report schema drift ctx journal schema dump Command Print the embedded JSONL schema definition ctx serve Command Serve any zensical directory (default: journal) /ctx-history Skill Browse sessions inside your AI assistant /ctx-journal-enrich Skill Add frontmatter metadata to a single entry /ctx-journal-enrich-all Skill Full pipeline: import if needed, then batch-enrich","path":["Recipes","Sessions","Browsing and Enriching Past Sessions"],"tags":[]},{"location":"recipes/session-archaeology/#the-workflow","level":2,"title":"The Workflow","text":"

            The session journal follows a four-stage pipeline.

            Each stage is idempotent and safe to re-run:

            By default, each stage skips entries that have already been processed.

            import -> enrich -> rebuild (site and/or obsidian)\n
            Stage Tool What it does Skips if Where Import ctx journal import --all Converts session JSONL to Markdown File already exists (safe default) CLI or agent Enrich /ctx-journal-enrich-all Adds frontmatter, summaries, topic tags Frontmatter already present Agent only Rebuild ctx journal site --build Generates browsable static HTML N/A CLI only Obsidian ctx journal obsidian Generates Obsidian vault with wikilinks N/A CLI only

            Where Do You Run Each Stage?

            Import (Steps 1 to 3) works equally well from the terminal or inside your AI assistant via /ctx-history. The CLI is fine here: the agent adds no special intelligence, it just runs the same command.

            Enrich (Step 4) requires the agent: it reads conversation content and produces structured metadata.

            Rebuild and serve (Step 5) is a terminal operation that starts a long-running server.

            ","path":["Recipes","Sessions","Browsing and Enriching Past Sessions"],"tags":[]},{"location":"recipes/session-archaeology/#step-1-list-your-sessions","level":3,"title":"Step 1: List Your Sessions","text":"

            Start by seeing what sessions exist for the current project:

            ctx journal source\n

            Sample output:

            Sessions (newest first)\n=======================\n\n  Slug                           Project   Date         Duration  Turns  Tokens\n  gleaming-wobbling-sutherland   ctx       2026-02-07   1h 23m    47     82,341\n  twinkly-stirring-kettle        ctx       2026-02-06   0h 45m    22     38,102\n  bright-dancing-hopper          ctx       2026-02-05   2h 10m    63     124,500\n  quiet-flowing-dijkstra         ctx       2026-02-04   0h 18m    11     15,230\n  ...\n

            Slugs Look Cryptic?

            These auto-generated slugs (gleaming-wobbling-sutherland) are hard to recognize later.

            Use /ctx-journal-enrich to add human-readable titles, topic tags, and summaries to exported journal entries, making them easier to find.

            Filter by project or tool if you work across multiple codebases:

            ctx journal source --project ctx --limit 10\nctx journal source --tool claude-code\nctx journal source --all-projects\n
            ","path":["Recipes","Sessions","Browsing and Enriching Past Sessions"],"tags":[]},{"location":"recipes/session-archaeology/#step-2-inspect-a-specific-session","level":3,"title":"Step 2: Inspect a Specific Session","text":"

            Before exporting everything, inspect a single session to see its metadata and conversation summary:

            ctx journal source --show --latest\n

            Or look up a specific session by its slug, partial ID, or UUID:

            ctx journal source --show gleaming-wobbling-sutherland\nctx journal source --show twinkly\nctx journal source --show abc123\n

            Add --full to see the complete message content instead of the summary view:

            ctx journal source --show --latest --full\n

            This is useful for checking what happened before deciding whether to export and enrich it.

            ","path":["Recipes","Sessions","Browsing and Enriching Past Sessions"],"tags":[]},{"location":"recipes/session-archaeology/#step-3-import-sessions-to-the-journal","level":3,"title":"Step 3: Import Sessions to the Journal","text":"

            Import converts raw session data into editable Markdown files in .context/journal/:

            # Import all sessions from the current project\nctx journal import --all\n\n# Import a single session\nctx journal import gleaming-wobbling-sutherland\n\n# Include sessions from all projects\nctx journal import --all --all-projects\n

            --keep-frontmatter=false Discards Enrichments

            --keep-frontmatter=false discards enriched YAML frontmatter during regeneration.

            Back up your journal before using this flag.

            Each imported file contains session metadata (date, time, duration, model, project, git branch), a tool usage summary, and the full conversation transcript.

            Re-importing is safe. Running ctx journal import --all only imports new sessions: Existing files are never touched. Use --dry-run to preview what would be imported without writing anything.

            To re-import existing files (e.g., after a format improvement), use --regenerate: Conversation content is regenerated while preserving any YAML frontmatter you or the enrichment skill has added. You'll be prompted before any files are overwritten.

            --regenerate Replaces the Markdown Body

            --regenerate preserves YAML frontmatter but replaces the entire Markdown body with freshly generated content from the source JSONL.

            If you manually edited the conversation transcript (added notes, redacted sensitive content, restructured sections), those edits will be lost.

            BACK UP YOUR JOURNAL FIRST.

            To protect entries you've hand-edited, you can explicitly lock them:

            ctx journal lock <pattern>\n

            Locked entries are always skipped, regardless of flags.

            If you prefer to add locked: true directly in frontmatter during enrichment, run ctx journal sync to propagate the lock state to .state.json:

            ctx journal sync\n

            See ctx journal lock --help and ctx journal sync --help for details.

            ","path":["Recipes","Sessions","Browsing and Enriching Past Sessions"],"tags":[]},{"location":"recipes/session-archaeology/#step-4-enrich-with-metadata","level":3,"title":"Step 4: Enrich with Metadata","text":"

            Raw imports have timestamps and transcripts but lack the semantic metadata that makes sessions searchable: topics, technology tags, outcome status, and summaries. The /ctx-journal-enrich* skills add this structured frontmatter.

            Locked entries are skipped by enrichment skills, just as they are by import. Lock entries you want to protect before running batch enrichment.

            Batch enrichment (recommended):

            /ctx-journal-enrich-all\n

            The skill finds all unenriched entries, filters out noise (suggestion sessions, very short sessions, multipart continuations), and processes each one by extracting titles, topics, technologies, and summaries from the conversation.

            It shows you a grouped summary before applying changes so you can scan quickly rather than reviewing one by one.

            For large backlogs (20+ entries), the skill can spawn subagents to process entries in parallel.

            Single-entry enrichment:

            /ctx-journal-enrich twinkly\n/ctx-journal-enrich 2026-02-06\n

            Each enriched entry gets YAML frontmatter like this:

            ---\ntitle: \"Implement Redis caching middleware\"\ndate: 2026-02-06\ntype: feature\noutcome: completed\ntopics:\n  - caching\n  - api-performance\ntechnologies:\n  - go\n  - redis\nlibraries:\n  - go-redis/redis\nkey_files:\n  - internal/cache/redis.go\n  - internal/api/middleware/cache.go\n---\n

            The skill also generates a summary and can extract decisions, learnings, and tasks mentioned during the session.

            ","path":["Recipes","Sessions","Browsing and Enriching Past Sessions"],"tags":[]},{"location":"recipes/session-archaeology/#step-5-generate-and-serve-the-site","level":3,"title":"Step 5: Generate and Serve the Site","text":"

            With imported and enriched journal files, generate the static site:

            # Generate site structure only\nctx journal site\n\n# Generate and build static HTML\nctx journal site --build\n\n# Generate, build, and serve locally\nctx journal site --serve\n

            Then open http://localhost:8000 to browse.

            The site includes a date-sorted index, individual session pages with full conversations, search (press /), dark mode, and enriched titles in the navigation when frontmatter exists.

            You can also serve an already-generated site without regenerating using ctx serve (serve-only, no regeneration).

            The site generator requires zensical (pipx install zensical).

            ","path":["Recipes","Sessions","Browsing and Enriching Past Sessions"],"tags":[]},{"location":"recipes/session-archaeology/#where-does-agent-add-value","level":2,"title":"Where Does Agent Add Value?","text":"

            Export, list, and show are mechanical: The agent runs the same CLI commands you would, so you can stay in your terminal for those.

            The agent earns its keep in enrich. It reads conversation content, understands what happened, and produces structured metadata. That is agent work, not CLI work.

            You can also ask your agent to browse sessions conversationally instead of remembering flags:

            What did we work on last week?\nShow me the session about Redis.\nImport everything to the journal.\n

            This is convenient but not required: ctx journal source gives you the same inventory.

            Where the agent genuinely helps is chaining the pipeline:

            You:   What happened last Tuesday?\nAgent: Last Tuesday you worked on two sessions:\n       - bright-dancing-hopper (2h 10m): refactored the middleware\n         pipeline and added Redis caching\n       - quiet-flowing-dijkstra (18m): quick fix for a nil pointer\n         in the config loader\n       Want me to export and enrich them?\nYou:   Yes, do it.\nAgent: Exports both, enriches, then proposes frontmatter.\n

            The value is staying in one context while the agent runs import -> enrich without you manually switching tools.

            ","path":["Recipes","Sessions","Browsing and Enriching Past Sessions"],"tags":[]},{"location":"recipes/session-archaeology/#putting-it-all-together","level":2,"title":"Putting It All Together","text":"

            A typical pipeline from raw sessions to a browsable site:

            # Terminal: import and generate\nctx journal import --all\nctx journal site --serve\n
            # AI assistant: enrich\n/ctx-journal-enrich-all\n
            # Terminal: rebuild with enrichments\nctx journal site --serve\n

            If your project includes Makefile.ctx (deployed by ctx init), use make journal to combine import and rebuild stages. Then enrich inside Claude Code, then make journal again to pick up enrichments.

            ","path":["Recipes","Sessions","Browsing and Enriching Past Sessions"],"tags":[]},{"location":"recipes/session-archaeology/#session-retention-and-cleanup","level":2,"title":"Session Retention and Cleanup","text":"

            Claude Code does not keep JSONL transcripts forever. Understanding its cleanup behavior helps you avoid losing session history.

            ","path":["Recipes","Sessions","Browsing and Enriching Past Sessions"],"tags":[]},{"location":"recipes/session-archaeology/#default-behavior","level":3,"title":"Default Behavior","text":"

            Claude Code retains session transcripts for approximately 30 days. After that, JSONL files are automatically deleted during cleanup. Once deleted, ctx journal can no longer see those sessions - the data is gone.

            ","path":["Recipes","Sessions","Browsing and Enriching Past Sessions"],"tags":[]},{"location":"recipes/session-archaeology/#the-cleanupperioddays-setting","level":3,"title":"The cleanupPeriodDays Setting","text":"

            Claude Code exposes a cleanupPeriodDays setting in its configuration (~/.claude/settings.json) that controls retention:

            Value Behavior 30 (default) Transcripts older than 30 days are deleted 60, 90, etc. Extends the retention window 0 Disables writing new transcripts entirely - not \"keep forever\"

            Setting cleanupPeriodDays To 0

            Setting this to 0 does not mean \"never delete.\" It disables transcript creation altogether. No new JSONL files are written, which means ctx journal sees nothing new. This is rarely what you want.

            ","path":["Recipes","Sessions","Browsing and Enriching Past Sessions"],"tags":[]},{"location":"recipes/session-archaeology/#why-journal-import-matters","level":3,"title":"Why Journal Import Matters","text":"

            The journal import pipeline (Steps 1-4 above) is your archival mechanism. Imported Markdown files in .context/journal/ persist independently of Claude Code's cleanup cycle. Even after the source JSONL files are deleted, your journal entries remain.

            Recommendation: import regularly - weekly, or after any session worth revisiting. A quick ctx journal import --all takes seconds and ensures nothing falls through the 30-day window.

            ","path":["Recipes","Sessions","Browsing and Enriching Past Sessions"],"tags":[]},{"location":"recipes/session-archaeology/#quick-archival-checklist","level":3,"title":"Quick Archival Checklist","text":"
            1. Run ctx journal import --all at least weekly
            2. Enrich high-value sessions with /ctx-journal-enrich before the details fade from your own memory
            3. Lock enriched entries (ctx journal lock <pattern>) to protect them from accidental regeneration
            4. Rebuild the journal site periodically to keep it current
            ","path":["Recipes","Sessions","Browsing and Enriching Past Sessions"],"tags":[]},{"location":"recipes/session-archaeology/#tips","level":2,"title":"Tips","text":"
            • Start with /ctx-history inside your AI assistant. If you want to quickly check what happened in a recent session without leaving your editor, /ctx-history lets you browse interactively without importing.
            • Large sessions may be split automatically. Sessions with 200+ messages can be split into multiple parts (session-abc123.md, session-abc123-p2.md, session-abc123-p3.md) with navigation links between them. The site generator can handle this.
            • Suggestion sessions can be separated. Claude Code can generate short suggestion sessions for autocomplete. These may appear under a separate section in the site index, so they do not clutter your main session list.
            • Your agent is a good session browser. You do not need to remember slugs, dates, or flags. Ask \"what did we do yesterday?\" or \"find the session about Redis\" and it can map the question to recall commands.

            Journal Files Are Sensitive

            Journal files MUST be .gitignored.

            Session transcripts can contain sensitive data such as file contents, commands, error messages with stack traces, and potentially API keys.

            Add .context/journal/, .context/journal-site/, and .context/journal-obsidian/ to your .gitignore.

            ","path":["Recipes","Sessions","Browsing and Enriching Past Sessions"],"tags":[]},{"location":"recipes/session-archaeology/#next-up","level":2,"title":"Next Up","text":"

            Persisting Decisions, Learnings, and Conventions →: Record decisions, learnings, and conventions so they survive across sessions.

            ","path":["Recipes","Sessions","Browsing and Enriching Past Sessions"],"tags":[]},{"location":"recipes/session-archaeology/#see-also","level":2,"title":"See Also","text":"
            • The Complete Session: where session saving fits in the daily workflow
            • Turning Activity into Content: generating blog posts from session history
            • Session Journal: full documentation of the journal system
            • CLI Reference: ctx journal: all journal subcommands and flags
            • CLI Reference: ctx serve: serve-only (no regeneration)
            • Context Files: the .context/ directory structure
            ","path":["Recipes","Sessions","Browsing and Enriching Past Sessions"],"tags":[]},{"location":"recipes/session-ceremonies/","level":1,"title":"Session Ceremonies","text":"","path":["Recipes","Sessions","Session Ceremonies"],"tags":[]},{"location":"recipes/session-ceremonies/#the-problem","level":2,"title":"The Problem","text":"

            Sessions have two critical moments: the start and the end.

            • At the start, you need the agent to load context and confirm it knows what is going on.
            • At the end, you need to capture whatever the session produced before the conversation disappears.

            Most ctx skills work conversationally: \"jot down: check DNS after deploy\" is as good as /ctx-pad add \"check DNS after deploy\". But session boundaries are different. They are well-defined moments with specific requirements, and partial execution is costly.

            If the agent only half-loads context at the start, it works from stale assumptions. If it only half-persists at the end, learnings and decisions are lost.

            This Is One of the Few Times Being Explicit Matters

            Session ceremonies are the two bookend skills that mark these boundaries.

            They are the exception to the conversational rule:

            Invoke /ctx-remember and /ctx-wrap-up explicitly as slash commands.

            ","path":["Recipes","Sessions","Session Ceremonies"],"tags":[]},{"location":"recipes/session-ceremonies/#tldr","level":2,"title":"TL;DR","text":"

            Start: /ctx-remember: load context, get a structured readback.

            End: /ctx-wrap-up: review session, propose candidates, persist approved items.

            Use the slash commands, not conversational triggers, for completeness.

            ","path":["Recipes","Sessions","Session Ceremonies"],"tags":[]},{"location":"recipes/session-ceremonies/#explicit-invocation-matters","level":2,"title":"Explicit Invocation Matters","text":"

            Most ctx skills encourage natural language. These two are different:

            Well-defined moments: Sessions have clear boundaries. A slash command marks the boundary unambiguously.

            Ambiguity risk: \"Do you remember?\" could mean many things. /ctx-remember means exactly one thing: load context and present a structured readback.

            Completeness: Conversational triggers risk partial execution. The agent might load some files but skip the session history, or persist one learning but forget to check for uncommitted changes. The slash command runs the full ceremony.

            Muscle memory: Typing /ctx-remember at session start and /ctx-wrap-up at session end becomes a habit, like opening and closing braces.

            ","path":["Recipes","Sessions","Session Ceremonies"],"tags":[]},{"location":"recipes/session-ceremonies/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Tool Type Purpose /ctx-remember Skill Load context and present structured readback /ctx-wrap-up Skill Gather session signal, propose and persist context /ctx-commit Skill Commit with context capture (offered by wrap-up) ctx agent CLI Load token-budgeted context packet ctx journal source CLI List recent sessions ctx add CLI Persist learnings, decisions, conventions, tasks","path":["Recipes","Sessions","Session Ceremonies"],"tags":[]},{"location":"recipes/session-ceremonies/#session-start-ctx-remember","level":2,"title":"Session Start: /ctx-remember","text":"

            Invoke at the beginning of every session:

            /ctx-remember\n

            The skill silently:

            1. Loads the context packet via ctx agent --budget 4000
            2. Reads TASKS.md, DECISIONS.md, LEARNINGS.md
            3. Checks recent sessions via ctx journal source --limit 3

            Then presents a structured readback with four sections:

            • Last session: topic, date, what was accomplished
            • Active work: pending and in-progress tasks
            • Recent context: 1-2 relevant decisions or learnings
            • Next step: suggestion or question about what to focus on

            The readback should feel like recall, not a file system tour. If the agent says \"Let me check if there are files...\" instead of a confident summary, the skill is not working correctly.

            What about 'do you remember?'

            The conversational trigger still works. But /ctx-remember guarantees the full ceremony runs:

            • context packet,
            • file reads,
            • session history,
            • and all four readback sections.

            The conversational version may cut corners.

            ","path":["Recipes","Sessions","Session Ceremonies"],"tags":[]},{"location":"recipes/session-ceremonies/#session-end-ctx-wrap-up","level":2,"title":"Session End: /ctx-wrap-up","text":"

            Invoke before ending a session where meaningful work happened:

            /ctx-wrap-up\n

            The skill runs four phases:

            ","path":["Recipes","Sessions","Session Ceremonies"],"tags":[]},{"location":"recipes/session-ceremonies/#phase-1-gather-signal","level":3,"title":"Phase 1: Gather Signal","text":"

            Silently checks git diff --stat, recent commits, and scans the conversation for themes: architectural choices, gotchas, patterns established, follow-up work identified.

            ","path":["Recipes","Sessions","Session Ceremonies"],"tags":[]},{"location":"recipes/session-ceremonies/#phase-2-propose-candidates","level":3,"title":"Phase 2: Propose Candidates","text":"

            Presents a structured list grouped by type:

            ## Session Wrap-Up\n\n### Learnings (2 candidates)\n1. **PyMdownx details extension breaks pre/code rendering**\n   - Context: Journal site showed broken code blocks inside details tags\n   - Lesson: details extension wraps content in <details> HTML, which\n     interferes with <pre><code> rendering\n   - Application: Use fenced code blocks instead of indented code inside\n     admonitions when details extension is active\n\n2. **Hook subprocesses cannot propagate env vars**\n   - Context: Set env var in PreToolUse hook, invisible in main session\n   - Lesson: Hooks execute in child processes; env changes don't propagate\n   - Application: Use tombstone files for hook-to-session communication\n\n### Decisions (1 candidate)\n1. **File-based cooldown tokens over env vars**\n   - Context: Need session-scoped cooldown for ctx agent auto-loading\n   - Rationale: File tokens survive across processes, simpler than IPC\n   - Consequence: Tombstone files accumulate in /tmp; need TTL cleanup\n\nPersist all? Or select which to keep?\n

            Each candidate has complete structured fields, not just a title. Empty categories are omitted.

            ","path":["Recipes","Sessions","Session Ceremonies"],"tags":[]},{"location":"recipes/session-ceremonies/#phase-3-persist","level":3,"title":"Phase 3: Persist","text":"

            After you approve (all, some, or modified), the skill runs the appropriate ctx add commands and reports results.

            ","path":["Recipes","Sessions","Session Ceremonies"],"tags":[]},{"location":"recipes/session-ceremonies/#nudge-suppression","level":3,"title":"Nudge Suppression","text":"

            After persisting, the skill marks the session as wrapped up via ctx system mark-wrapped-up. This suppresses context checkpoint nudges for 2 hours so the wrap-up ceremony itself does not trigger noisy reminders.

            ","path":["Recipes","Sessions","Session Ceremonies"],"tags":[]},{"location":"recipes/session-ceremonies/#phase-4-commit-offer","level":3,"title":"Phase 4: Commit Offer","text":"

            If there are uncommitted changes, offers to run /ctx-commit. Does not auto-commit.

            ","path":["Recipes","Sessions","Session Ceremonies"],"tags":[]},{"location":"recipes/session-ceremonies/#when-to-skip","level":2,"title":"When to Skip","text":"

            Not every session needs ceremonies.

            Skip /ctx-remember when:

            • You are doing a quick one-off lookup (reading a file, checking a value)
            • Context was already loaded this session via /ctx-agent
            • You are continuing immediately after a previous session and context is still fresh

            Skip /ctx-wrap-up when:

            • Nothing meaningful happened (only read files, answered a question)
            • You already persisted everything manually during the session
            • The session was trivial (typo fix, quick config change)

            A good heuristic: if the session produced something a future session should know about, run /ctx-wrap-up. If not, just close.

            ","path":["Recipes","Sessions","Session Ceremonies"],"tags":[]},{"location":"recipes/session-ceremonies/#quick-reference","level":2,"title":"Quick Reference","text":"
            # Session start\n/ctx-remember\n\n# ... do work ...\n\n# Session end\n/ctx-wrap-up\n

            That is the complete ceremony. Two commands, bookending your session.

            ","path":["Recipes","Sessions","Session Ceremonies"],"tags":[]},{"location":"recipes/session-ceremonies/#relationship-to-other-skills","level":2,"title":"Relationship to Other Skills","text":"Skill When Purpose /ctx-remember Session start Load and confirm context /ctx-reflect Mid-session breakpoints Checkpoint at milestones /ctx-wrap-up Session end Full session review and persist /ctx-commit After completing work Commit with context capture

            /ctx-reflect is for mid-session checkpoints. /ctx-wrap-up is for end-of-session: it is more thorough, covers the full session arc, and includes the commit offer. If you already ran /ctx-reflect recently, /ctx-wrap-up avoids proposing the same candidates again.

            ","path":["Recipes","Sessions","Session Ceremonies"],"tags":[]},{"location":"recipes/session-ceremonies/#tips","level":2,"title":"Tips","text":"
            • Make it a habit: The value of ceremonies compounds over sessions. Each /ctx-wrap-up makes the next /ctx-remember richer.
            • Trust the candidates: The agent scans the full conversation. It often catches learnings you forgot about.
            • Edit before approving: If a proposed candidate is close but not quite right, tell the agent what to change. Do not settle for a vague learning when a precise one is possible.
            • Do not force empty ceremonies: If /ctx-wrap-up finds nothing worth persisting, that is fine. A session that only read files and answered questions does not need artificial learnings.
            ","path":["Recipes","Sessions","Session Ceremonies"],"tags":[]},{"location":"recipes/session-ceremonies/#next-up","level":2,"title":"Next Up","text":"

            Browsing and Enriching Past Sessions →: Export session history to a browsable journal and enrich entries with metadata.

            ","path":["Recipes","Sessions","Session Ceremonies"],"tags":[]},{"location":"recipes/session-ceremonies/#see-also","level":2,"title":"See Also","text":"
            • The Complete Session: the full session workflow that ceremonies bookend
            • Persisting Decisions, Learnings, and Conventions: deep dive on what gets persisted during wrap-up
            • Detecting and Fixing Drift: keeping context files accurate between ceremonies
            • Pausing Context Hooks: skip ceremonies entirely for quick tasks that don't need them
            ","path":["Recipes","Sessions","Session Ceremonies"],"tags":[]},{"location":"recipes/session-changes/","level":1,"title":"Reviewing Session Changes","text":"","path":["Reviewing Session Changes"],"tags":[]},{"location":"recipes/session-changes/#what-changed-while-you-were-away","level":2,"title":"What Changed While You Were Away?","text":"

            Between sessions, teammates commit code, context files get updated, and decisions pile up. ctx change gives you a single-command summary of everything that moved since your last session.

            ","path":["Reviewing Session Changes"],"tags":[]},{"location":"recipes/session-changes/#quick-start","level":2,"title":"Quick Start","text":"
            # Auto-detects your last session and shows what changed\nctx change\n\n# Check what changed in the last 48 hours\nctx change --since 48h\n\n# Check since a specific date\nctx change --since 2026-03-10\n
            ","path":["Reviewing Session Changes"],"tags":[]},{"location":"recipes/session-changes/#how-reference-time-works","level":2,"title":"How Reference Time Works","text":"

            ctx change needs a reference point to compare against. It tries these sources in order:

            1. --since flag: explicit duration (24h, 72h) or date (2026-03-10, RFC3339 timestamp)
            2. Session markers: ctx-loaded-* files in .context/state/; picks the second-most-recent (your previous session start)
            3. Event log: last context-load-gate event from .context/state/events.jsonl
            4. Fallback: 24 hours ago

            The marker-based detection means ctx change usually just works without any flags: it knows when you last loaded context and shows everything after that.

            ","path":["Reviewing Session Changes"],"tags":[]},{"location":"recipes/session-changes/#what-it-reports","level":2,"title":"What It Reports","text":"","path":["Reviewing Session Changes"],"tags":[]},{"location":"recipes/session-changes/#context-file-changes","level":3,"title":"Context File Changes","text":"

            Any .md file in .context/ modified after the reference time:

            ### Context File Changes\n- `TASKS.md` - modified 2026-03-11 14:30\n- `DECISIONS.md` - modified 2026-03-11 09:15\n
            ","path":["Reviewing Session Changes"],"tags":[]},{"location":"recipes/session-changes/#code-changes","level":3,"title":"Code Changes","text":"

            Git activity since the reference time:

            ### Code Changes\n- **12 commits** since reference point\n- **Latest**: Fix journal enrichment ordering\n- **Directories touched**: internal, docs, specs\n- **Authors**: jose, claude\n
            ","path":["Reviewing Session Changes"],"tags":[]},{"location":"recipes/session-changes/#integrating-into-session-start","level":2,"title":"Integrating into Session Start","text":"

            Pair ctx change with the /ctx-remember ceremony for a complete session-start picture:

            # 1. Load context (this also creates the session marker)\nctx agent --budget 4000\n\n# 2. See what changed since your last session\nctx change\n

            Or script it:

            # .context/hooks/session-start.sh\nctx agent --budget 4000\necho \"---\"\nctx change\n
            ","path":["Reviewing Session Changes"],"tags":[]},{"location":"recipes/session-changes/#team-workflows","level":2,"title":"Team Workflows","text":"

            When multiple people share a .context/ directory, ctx change shows who changed what:

            # After pulling from remote\ngit pull\nctx change --since 72h\n

            This surfaces context file changes from teammates that you might otherwise miss in the commit log.

            ","path":["Reviewing Session Changes"],"tags":[]},{"location":"recipes/session-changes/#tips","level":2,"title":"Tips","text":"
            • No changes? If nothing shows up, the reference time might be wrong. Use --since 48h to widen the window.
            • Works without git. Context file changes are detected by filesystem mtime, not git. Code changes require git.
            • Hook integration. The context-load-gate hook writes the session marker that ctx change uses for auto-detection. If you're not using the ctx plugin, markers won't exist and it falls back to the event log or 24h window.
            ","path":["Reviewing Session Changes"],"tags":[]},{"location":"recipes/session-lifecycle/","level":1,"title":"The Complete Session","text":"","path":["Recipes","Sessions","The Complete Session"],"tags":[]},{"location":"recipes/session-lifecycle/#the-problem","level":2,"title":"The Problem","text":"

            \"What does a full ctx session look like from start to finish?\"

            You have ctx installed and your .context/ directory initialized, but the individual commands and skills feel disconnected.

            How do they fit together into a coherent workflow?

            This recipe walks through a complete session, from opening your editor to persisting context before you close it, so you can see how each piece connects.

            ","path":["Recipes","Sessions","The Complete Session"],"tags":[]},{"location":"recipes/session-lifecycle/#tldr","level":2,"title":"TL;DR","text":"
            1. Load: /ctx-remember: load context, get structured readback.
            2. Orient: /ctx-status: check file health and token usage.
            3. Pick: /ctx-next: choose what to work on.
            4. Work: implement, test, iterate.
            5. Commit: /ctx-commit: commit and capture decisions/learnings.
            6. Reflect: /ctx-reflect: identify what to persist (at milestones).
            7. Wrap up: /ctx-wrap-up: end-of-session ceremony.

            Read on for the full walkthrough with examples.

            What Is a Readback?

            A readback is a structured summary where the agent plays back what it knows:

            • last session,
            • active tasks,
            • recent decisions.

            This way, you can confirm it loaded the right context.

            The term \"readback\" comes from aviation, where pilots repeat instructions back to air traffic control to confirm they heard correctly.

            Same idea in ctx: The agent tells you what it \"thinks\" is going on, and you correct anything that's off before the work begins.

            • Last session: topic, date, what was accomplished
            • Active work: pending and in-progress tasks
            • Recent context: 1-2 decisions or learnings that matter now
            • Next step: suggestion or question about what to focus on
            ","path":["Recipes","Sessions","The Complete Session"],"tags":[]},{"location":"recipes/session-lifecycle/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Tool Type Purpose ctx status CLI command Quick health check on context files ctx agent CLI command Load token-budgeted context packet ctx journal source CLI command List previous sessions ctx journal source --show CLI command Inspect a specific session in detail /ctx-remember Skill Recall project context with structured readback /ctx-agent Skill Load full context packet inside the assistant /ctx-status Skill Show context summary with commentary /ctx-next Skill Suggest what to work on with rationale /ctx-commit Skill Commit code and prompt for context capture /ctx-reflect Skill Structured reflection checkpoint /ctx-history Skill Browse session history inside your AI assistant","path":["Recipes","Sessions","The Complete Session"],"tags":[]},{"location":"recipes/session-lifecycle/#the-workflow","level":2,"title":"The Workflow","text":"

            The session lifecycle has seven steps. You will not always use every step (for example, a quick bugfix might skip reflection, and a research session might skip committing), but the full arc looks like this:

            Load context > Orient > Pick a Task > Work > Commit > Reflect > Wrap Up

            ","path":["Recipes","Sessions","The Complete Session"],"tags":[]},{"location":"recipes/session-lifecycle/#step-1-load-context","level":3,"title":"Step 1: Load Context","text":"

            Start every session by loading what you know. The fastest way is a single prompt:

            Do you remember what we were working on?\n

            This triggers the /ctx-remember skill. Behind the scenes, the assistant runs ctx agent --budget 4000, reads the files listed in the context packet (TASKS.md, DECISIONS.md, LEARNINGS.md, CONVENTIONS.md), checks ctx journal source --limit 3 for recent sessions, and then presents a structured readback.

            The readback should feel like a recall, not a file system tour. If you see \"Let me check if there are files...\" instead of a confident summary, the context system is not loaded properly.

            As an alternative, if you want raw data instead of a readback, run ctx status in your terminal or invoke /ctx-status for a summarized health check showing file counts, token usage, and recent activity.

            ","path":["Recipes","Sessions","The Complete Session"],"tags":[]},{"location":"recipes/session-lifecycle/#step-2-orient","level":3,"title":"Step 2: Orient","text":"

            After loading context, verify you understand the current state.

            /ctx-status\n

            The status output shows which context files are populated, how many tokens they consume, and which files were recently modified. Look for:

            • Empty core files: TASKS.md or CONVENTIONS.md with no content means the context is sparse
            • High token count (over 30k): the context is bloated and might need ctx compact
            • No recent activity: files may be stale and need updating

            If the status looks healthy and the readback from Step 1 gave you enough context, skip ahead.

            If something seems off (stale tasks, missing decisions...), spend a minute reading the relevant file before proceeding.

            ","path":["Recipes","Sessions","The Complete Session"],"tags":[]},{"location":"recipes/session-lifecycle/#step-3-pick-what-to-work-on","level":3,"title":"Step 3: Pick What to Work On","text":"

            With context loaded, choose a task. You can pick one yourself, or ask the assistant to recommend:

            /ctx-next\n

            The skill reads TASKS.md, checks recent sessions to avoid re-suggesting completed work, and presents 1-3 ranked recommendations with rationale.

            It prioritizes in-progress tasks over new starts (finishing is better than starting), respects explicit priority tags, and favors momentum: continuing a thread from a recent session is cheaper than context-switching.

            If you already know what you want to work on, state it directly:

            Let's work on the session enrichment feature.\n
            ","path":["Recipes","Sessions","The Complete Session"],"tags":[]},{"location":"recipes/session-lifecycle/#step-4-do-the-work","level":3,"title":"Step 4: Do the Work","text":"

            This is the main body of the session: write code, fix bugs, refactor, research: whatever the task requires.

            During this phase, a few ctx-specific patterns help:

            Check decisions before choosing: when you face a design choice, check if a prior decision covers it.

            Is this consistent with our decisions?\n

            Constrain scope: keep the assistant focused on the task at hand.

            Only change files in internal/cli/session/. Nothing else.\n

            Use /ctx-implement for multistep plans: if the task has multiple steps, this skill executes them one at a time with build/test verification between each step.

            Context monitoring runs automatically: the check-context-size hook monitors context capacity at adaptive intervals. Early in a session it stays silent. After 16+ prompts it starts monitoring, and past 30 prompts it checks frequently. If context capacity is running high, it will suggest saving unsaved work. No manual invocation is needed.

            ","path":["Recipes","Sessions","The Complete Session"],"tags":[]},{"location":"recipes/session-lifecycle/#step-5-commit-with-context","level":3,"title":"Step 5: Commit with Context","text":"

            When the work is ready, use the context-aware commit instead of raw git commit:

            /ctx-commit\n

            The Agent May Recommend Committing

            You do not always need to invoke /ctx-commit explicitly.

            After a commit, the agent may proactively offer to capture context:

            \"We just made a trade-off there. Want me to record it as a decision?\"

            This is normal: The Agent Playbook encourages persisting at milestones, and a commit is a natural milestone.

            As an alternative, you can ask the assistant \"can we commit this?\" and it will pick up the /ctx-commit skill for you.

            The skill runs a pre-commit build check (for Go projects, go build), reviews the staged changes, drafts a commit message focused on \"why\" rather than \"what\", and then commits.

            After the commit succeeds, it prompts you:

            **Any context to capture?**\n\n- **Decision**: Did you make a design choice or trade-off?\n- **Learning**: Did you hit a gotcha or discover something?\n- **Neither**: No context to capture; we are done.\n

            If you made a decision, the skill records it with ctx add decision. If you learned something, it records it with ctx add learning including context, lesson, and application fields. This is the bridge between committing code and remembering why the code looks the way it does.

            If source code changed in areas that affect documentation, the skill also offers to check for doc drift.

            ","path":["Recipes","Sessions","The Complete Session"],"tags":[]},{"location":"recipes/session-lifecycle/#step-6-reflect","level":3,"title":"Step 6: Reflect","text":"

            At natural breakpoints (after finishing a feature, resolving a complex bug, or before switching tasks) pause to reflect:

            /ctx-reflect\n

            Agents Reflect at Milestones

            Agents often reflect without explicit invocation.

            After completing a significant piece of work, the agent may naturally surface items worth persisting:

            \"We discovered that $PPID resolves differently inside hooks. Should I save that as a learning?\"

            This is the agent following the Work-Reflect-Persist cycle from the Agent Playbook.

            You do not need to say /ctx-reflect for this to happen; the agent treats milestones as reflection triggers on its own.

            The skill works through a checklist: learnings discovered, decisions made, tasks completed or created, and whether there are items worth persisting. It then presents a summary with specific items to persist, each with the exact command to run:

            I would suggest persisting:\n\n- **Learning**: `$PPID` in PreToolUse hooks resolves to the Claude Code PID\n  `ctx add learning --context \"...\" --lesson \"...\" --application \"...\" --session-id abc12345 --branch main --commit 68fbc00a`\n- **Task**: mark \"Add cooldown to ctx agent\" as done\n- **Decision**: tombstone-based cooldown with 10m default\n  `ctx add decision \"...\" --session-id abc12345 --branch main --commit 68fbc00a`\n\nWant me to persist any of these?\n

            The skill asks before persisting anything. You choose what to keep.

            Not every commit needs reflection. A typo fix does not. But when you have been debugging for an hour and finally understand the root cause, that is worth a reflection checkpoint.

            ","path":["Recipes","Sessions","The Complete Session"],"tags":[]},{"location":"recipes/session-lifecycle/#step-7-persist-before-ending","level":3,"title":"Step 7: Persist Before Ending","text":"

            Before ending the session, run the wrap-up ceremony to capture outstanding learnings, decisions, conventions, and tasks:

            /ctx-wrap-up\n

            Ceremony Skills

            /ctx-remember at session start and /ctx-wrap-up at session end are ceremony skills: Invoke them as explicit slash commands for completeness, not conversationally.

            See Session Ceremonies for details on why these two skills are the exception to the conversational rule.

            The skill gathers signal from git diff, recent commits, and the conversation, then proposes structured candidates for your approval. After you select which to keep, it persists them via ctx add and offers /ctx-commit if uncommitted changes remain.

            Session transcripts are automatically captured by Claude Code and can be browsed later with ctx journal source and ctx journal source --show.

            ","path":["Recipes","Sessions","The Complete Session"],"tags":[]},{"location":"recipes/session-lifecycle/#conversational-session-flow","level":2,"title":"Conversational Session Flow","text":"

            The steps above describe the mechanics. In practice, a session with a ctx-aware agent feels more like a conversation than a command sequence.

            The agent knows the lifecycle and moves through it naturally, often without explicit invocation. Here is a realistic example:

            You: Do you remember what we were working on?

            Agent (loads context silently, presents structured readback):

            Last session (Feb 7, caching-layer): we implemented the in-memory cache with\nTTL expiration and wrote tests for the eviction policy.\n\nYou have 3 pending tasks; the biggest one is adding Redis as a backend.\n\nRecent decision:\nwe chose LRU over LFU because the access pattern is mostly recency-based.\n\nWant to continue with the Redis backend, or tackle something else?\n

            You: Let's work on the Redis caching backend.

            Agent (picks up the task, begins implementation):

            Got it. I see the task in TASKS.md: \"Add Redis backend for cache layer.\"\n\nI will start with the connection pool setup. Only touching\n`internal/cache/redis/` for now.\n

            (The agent works... writes code, runs tests, iterates.)

            Agent (after implementing the connection pool and choosing a retry strategy):

            The connection pool is working. I went with exponential backoff with jitter\nfor retries instead of fixed intervals; it handles thundering herd better\nunder reconnection storms. That is a design choice worth recording. \n\nWant me to save it as a decision?\n

            You: Yes, go ahead.

            Agent:

            Done. Added to DECISIONS.md: \"Exponential backoff with jitter for Redis\nreconnection, to avoid thundering herd on pool recovery.\"\n

            (More work happens... the agent implements the cache read/write paths.)

            You: What else did we learn during this session?

            Agent:

            A few things worth capturing:\n\n* **Learning**: Redis `SET` with `NX` and `PX` flags handles atomic\n  set-if-not-exists with TTL in a single call. No need for a separate\n  `EXPIRE` command. We initially did it in two calls and hit a race condition.\n* **Learning**: The `go-redis` library's `PoolSize` default is 10 per CPU.\n  For our workload, explicit sizing at 20 was more predictable.\n* **Task update**: The connection pool subtask is done; the read/write path\n  is implemented but needs integration tests.\n\nWant me to persist these?\n

            You: Yes. And let's wrap up.

            Agent (reflects and persists... all in one flow):

            Persisted:\n\n* 2 learnings added to LEARNINGS.md\n* Marked \"connection pool setup\" done in TASKS.md\n* Added \"write integration tests for Redis cache\" as a new task\n\nContext is up to date. You are good to close.\n

            Notice What Happened

            In the above workflow, the user never typed /ctx-reflect or ctx add learning.

            The agent moved through Load, Orient, Pick, Work, Commit, and Reflect driven by natural conversation.

            \"Let's wrap up\" was enough to trigger the full reflect-and-persist flow.

            The agent surfaced persist-worthy items at milestones: after a design choice, after discovering a gotcha: without waiting to be asked.

            This is the intended experience.

            The commands and skills still exist for when you want precise control, but the agent is a proactive partner in the lifecycle, not a passive executor of slash commands.

            ","path":["Recipes","Sessions","The Complete Session"],"tags":[]},{"location":"recipes/session-lifecycle/#putting-it-all-together","level":2,"title":"Putting It All Together","text":"

            Quick-reference checklist for a complete session:

            • Load: /ctx-remember: load context and confirm readback
            • Orient: /ctx-status: check file health and token usage
            • Pick: /ctx-next: choose what to work on
            • Work: implement, test, iterate (scope with \"only change X\")
            • Commit: /ctx-commit: commit and capture decisions/learnings
            • Reflect: /ctx-reflect: identify what to persist (at milestones)
            • Wrap up: /ctx-wrap-up: end-of-session ceremony

            Conversational equivalents: you can drive the same lifecycle with plain language:

            Step Slash command Natural language Load /ctx-remember \"Do you remember?\" / \"What were we working on?\" Orient /ctx-status \"How's our context looking?\" Pick /ctx-next \"What should we work on?\" / \"Let's do the caching task\" Work -- \"Only change files in internal/cache/\" Commit /ctx-commit \"Commit this\" / \"Ship it\" Reflect /ctx-reflect \"What did we learn?\" / (agent offers at milestones) Wrap up /ctx-wrap-up (use the slash command for completeness)

            The agent understands both columns.

            In practice, most sessions use a mix:

            • Explicit Commands when you want precision;
            • Natural Language when you want flow and agentic autonomy.

            The agent will also initiate steps on its own (particularly \"Reflect\") when it recognizes a milestone.

            Short sessions (quick bugfix) might only use: Load, Work, Commit.

            Long sessions should Reflect after each major milestone and persist learnings and decisions before ending.

            ","path":["Recipes","Sessions","The Complete Session"],"tags":[]},{"location":"recipes/session-lifecycle/#tips","level":2,"title":"Tips","text":"

            Persist early if context is running low. A hook monitors context capacity and notifies you when it gets high, but do not wait for the notification. If you have been working for a while and have unpersisted learnings, persist proactively.

            Browse previous sessions by topic. If you need context from a prior session, ctx journal source --show auth will match by keyword. You do not need to remember the exact date or slug.

            Reflection is optional but valuable. You can skip /ctx-reflect for small changes, but always persist learnings and decisions before ending a session where you did meaningful work. These are what the next session loads.

            Let the hook handle context loading. The PreToolUse hook runs ctx agent automatically with a cooldown, so context loads on first tool use without you asking. The /ctx-remember prompt at session start is for your benefit (to get a readback), not because the assistant needs it.

            The agent is a proactive partner, not a passive tool. A ctx-aware agent follows the Agent Playbook: it watches for milestones (completed tasks, design decisions, discovered gotchas) and offers to persist them without being asked. If you finish a tricky debugging session, it may say \"That root cause is worth saving as a learning. Want me to record it?\" before you think to ask. This is by design.

            ","path":["Recipes","Sessions","The Complete Session"],"tags":[]},{"location":"recipes/session-lifecycle/#next-up","level":2,"title":"Next Up","text":"

            Session Ceremonies →: The two bookend rituals for every session: /ctx-remember at the start, /ctx-wrap-up at the end.

            ","path":["Recipes","Sessions","The Complete Session"],"tags":[]},{"location":"recipes/session-lifecycle/#see-also","level":2,"title":"See Also","text":"
            • Session Ceremonies: why /ctx-remember and /ctx-wrap-up are explicit slash commands, not conversational
            • CLI Reference: full documentation for all ctx commands
            • Prompting Guide: effective prompts for ctx-enabled projects
            • Tracking Work Across Sessions: deep dive on task management
            • Persisting Decisions, Learnings, and Conventions: deep dive on knowledge capture
            • Detecting and Fixing Drift: keeping context files accurate
            • Pausing Context Hooks: shortcut the full lifecycle for quick tasks that don't need ceremony overhead
            ","path":["Recipes","Sessions","The Complete Session"],"tags":[]},{"location":"recipes/session-pause/","level":1,"title":"Pausing Context Hooks","text":"","path":["Recipes","Sessions","Pausing Context Hooks"],"tags":[]},{"location":"recipes/session-pause/#the-problem","level":2,"title":"The Problem","text":"

            Not every session needs the full ceremony. Quick investigations, one-off questions, small fixes unrelated to active project work: These tasks don't benefit from persistence nudges, ceremony reminders, or knowledge checks. Every hook still fires, consuming tokens and attention on work that won't produce learnings or decisions worth capturing.

            ","path":["Recipes","Sessions","Pausing Context Hooks"],"tags":[]},{"location":"recipes/session-pause/#tldr","level":2,"title":"TL;DR","text":"Command What it does ctx hook pause or /ctx-pause Silence all nudge hooks for this session ctx hook resume or /ctx-resume Restore normal hook behavior

            Pause is session-scoped: It only affects the current session. Other sessions (same project, different terminal) are unaffected.

            ","path":["Recipes","Sessions","Pausing Context Hooks"],"tags":[]},{"location":"recipes/session-pause/#what-gets-paused","level":2,"title":"What Gets Paused","text":"

            All nudge and reminder hooks go silent:

            • Context size checkpoints
            • Ceremony adoption nudges
            • Persistence reminders
            • Journal maintenance reminders
            • Knowledge growth nudges
            • Map staleness nudges
            • Version update nudges
            • Resource pressure warnings
            • QA reminders
            • Post-commit nudges
            • Specs nudges
            • Backup age warnings
            • Context load gate
            • Pending reminders relay
            ","path":["Recipes","Sessions","Pausing Context Hooks"],"tags":[]},{"location":"recipes/session-pause/#what-still-fires","level":2,"title":"What Still Fires","text":"

            Security hooks always run, even when paused:

            • block-non-path-ctx: prevents ./ctx invocations
            • block-dangerous-commands: blocks sudo, force push, etc.
            ","path":["Recipes","Sessions","Pausing Context Hooks"],"tags":[]},{"location":"recipes/session-pause/#workflow","level":2,"title":"Workflow","text":"
            # 1. Session starts: Context loads normally.\n\n# 2. You realize this is a quick task\nctx hook pause\n\n# 3. Work without interruption: hooks are silent\n\n# 4. Session evolves into real work? Resume first\nctx hook resume\n\n# 5. Now wrap up normally\n# /ctx-wrap-up\n
            ","path":["Recipes","Sessions","Pausing Context Hooks"],"tags":[]},{"location":"recipes/session-pause/#graduated-reminder","level":2,"title":"Graduated Reminder","text":"

            Paused hooks aren't completely invisible. A minimal indicator appears so you always know the state:

            Paused turns What you see 1-5 ctx:paused 6+ ctx:paused (N turns): resume with /ctx-resume

            This prevents the \"forgot I paused\" problem during long sessions.

            ","path":["Recipes","Sessions","Pausing Context Hooks"],"tags":[]},{"location":"recipes/session-pause/#tips","level":2,"title":"Tips","text":"
            • Resume before wrapping up. If your quick task turns into real work, resume hooks before running /ctx-wrap-up. The wrap-up ceremony needs active hooks to capture learnings properly.

            • Initial context load is unaffected. The ~8k token startup injection (CLAUDE.md, playbook, constitution) happens before any command runs. Pause only affects hooks that fire during the session.

            • Use for quick investigations. Debugging a stack trace? Checking a git log? Answering a colleague's question? Pause, do the work, close the session. No ceremony needed.

            • Don't use for real work. If you're implementing features, fixing bugs, or making decisions: keep hooks active. The nudges exist to prevent context loss.

            ","path":["Recipes","Sessions","Pausing Context Hooks"],"tags":[]},{"location":"recipes/session-pause/#see-also","level":2,"title":"See Also","text":"

            See also: Session Ceremonies: the bookend rituals that pause lets you skip when they aren't needed.

            See also: Customizing Hook Messages: if you want to change what hooks say rather than silencing them entirely.

            See also: The Complete Session: the full session workflow that pause shortcuts for quick tasks.

            ","path":["Recipes","Sessions","Pausing Context Hooks"],"tags":[]},{"location":"recipes/session-reminders/","level":1,"title":"Session Reminders","text":"","path":["Recipes","Sessions","Session Reminders"],"tags":[]},{"location":"recipes/session-reminders/#the-problem","level":2,"title":"The Problem","text":"

            You're deep in a session and realize: \"I need to refactor the swagger definitions next time.\" You could add a task, but this isn't a work item: it's a note to future-you. You could jot it on the scratchpad, but scratchpad entries don't announce themselves.

            How do you leave a message that your next session opens with?

            ","path":["Recipes","Sessions","Session Reminders"],"tags":[]},{"location":"recipes/session-reminders/#tldr","level":2,"title":"TL;DR","text":"
            ctx remind \"refactor the swagger definitions\"\nctx remind list\nctx remind dismiss 1       # or batch: ctx remind dismiss 1 3-5\n

            Reminders surface automatically at session start: VERBATIM, every session, until you dismiss them.

            ","path":["Recipes","Sessions","Session Reminders"],"tags":[]},{"location":"recipes/session-reminders/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Tool Type Purpose ctx remind CLI command Add a reminder (default action) ctx remind list CLI command Show all pending reminders ctx remind dismiss CLI command Remove a reminder by ID (or --all) /ctx-remind Skill Natural language interface to reminders","path":["Recipes","Sessions","Session Reminders"],"tags":[]},{"location":"recipes/session-reminders/#the-workflow","level":2,"title":"The Workflow","text":"","path":["Recipes","Sessions","Session Reminders"],"tags":[]},{"location":"recipes/session-reminders/#step-1-leave-a-reminder","level":3,"title":"Step 1: Leave a Reminder","text":"

            Tell your agent what to remember, or run it directly:

            You: \"remind me to refactor the swagger definitions\"\n\nAgent: [runs ctx remind \"refactor the swagger definitions\"]\n       \"Reminder set:\n         + [1] refactor the swagger definitions\"\n

            Or from the terminal:

            ctx remind \"refactor the swagger definitions\"\n
            ","path":["Recipes","Sessions","Session Reminders"],"tags":[]},{"location":"recipes/session-reminders/#step-2-set-a-date-gate-optional","level":3,"title":"Step 2: Set a Date Gate (Optional)","text":"

            If the reminder shouldn't fire until a specific date:

            You: \"remind me to check the deploy logs after Tuesday\"\n\nAgent: [runs ctx remind \"check the deploy logs\" --after 2026-02-25]\n       \"Reminder set:\n         + [2] check the deploy logs  (after 2026-02-25)\"\n

            The reminder stays silent until that date, then fires every session.

            The agent converts natural language dates (\"tomorrow\", \"next week\", \"after the release on Friday\") to YYYY-MM-DD. If it's ambiguous, it asks.

            ","path":["Recipes","Sessions","Session Reminders"],"tags":[]},{"location":"recipes/session-reminders/#step-3-start-a-new-session","level":3,"title":"Step 3: Start a New Session","text":"

            Next session, the reminder appears automatically before anything else:

            ┌─ Reminders ──────────────────────────────────────\n│  [1] refactor the swagger definitions\n│\n│ Dismiss: ctx remind dismiss <id>\n│ Dismiss all: ctx remind dismiss --all\n└──────────────────────────────────────────────────\n

            No action needed: The check-reminders hook fires on UserPromptSubmit and the agent relays the box verbatim.

            ","path":["Recipes","Sessions","Session Reminders"],"tags":[]},{"location":"recipes/session-reminders/#step-4-dismiss-when-done","level":3,"title":"Step 4: Dismiss When Done","text":"

            After you've acted on a reminder (or decided to skip it):

            You: \"dismiss reminder 1\"\n\nAgent: [runs ctx remind dismiss 1]\n       \"Dismissed:\n         - [1] refactor the swagger definitions\"\n\n# Batch dismiss also works:\n# \"dismiss reminders 3, 5 through 7\"\n# → ctx remind dismiss 3 5-7\n

            Or clear everything:

            ctx remind dismiss --all\n
            ","path":["Recipes","Sessions","Session Reminders"],"tags":[]},{"location":"recipes/session-reminders/#step-5-check-whats-pending","level":3,"title":"Step 5: Check What's Pending","text":"
            ctx remind list\n
              [1] refactor the swagger definitions\n  [3] review auth token expiry logic\n  [4] check deploy logs  (after 2026-02-25, not yet due)\n

            Date-gated reminders that haven't reached their date show (not yet due).

            ","path":["Recipes","Sessions","Session Reminders"],"tags":[]},{"location":"recipes/session-reminders/#using-ctx-remind-in-a-session","level":2,"title":"Using /ctx-remind in a Session","text":"

            Invoke the /ctx-remind skill, then describe what you want:

            You: /ctx-remind remind me to update the API docs\nYou: /ctx-remind what reminders do I have?\nYou: /ctx-remind dismiss reminder 3\n
            You say (after /ctx-remind) What the agent does \"remind me to update the API docs\" ctx remind \"update the API docs\" \"remind me next week to check staging\" ctx remind \"check staging\" --after 2026-03-02 \"what reminders do I have?\" ctx remind list \"dismiss reminder 3\" ctx remind dismiss 3 \"dismiss reminders 3, 5 through 7\" ctx remind dismiss 3 5-7 \"clear all reminders\" ctx remind dismiss --all","path":["Recipes","Sessions","Session Reminders"],"tags":[]},{"location":"recipes/session-reminders/#reminders-vs-scratchpad-vs-tasks","level":2,"title":"Reminders vs Scratchpad vs Tasks","text":"You want to... Use Leave a note that announces itself next session ctx remind Jot down a quick value or sensitive token ctx pad Track work with status and completion TASKS.md Record a decision or lesson for all sessions Context files

            Decision guide:

            • If it should announce itself at session start → ctx remind
            • If it's a quiet note you'll check manually → ctx pad
            • If it's a work item you'll mark done → TASKS.md

            Reminders Are Sticky Notes, Not Tasks

            A reminder has no status, no priority, no lifecycle. It's a message to \"future you\" that fires until dismissed.

            If you need tracking, use a task in TASKS.md.

            ","path":["Recipes","Sessions","Session Reminders"],"tags":[]},{"location":"recipes/session-reminders/#tips","level":2,"title":"Tips","text":"
            • Reminders fire every session: Unlike nudges (which throttle to once per day), reminders repeat until you dismiss them. This is intentional: You asked to be reminded.
            • Date gating is session-scoped, not clock-scoped: --after 2026-02-25 means \"don't show until sessions on or after Feb 25.\" It does not mean \"alarm at midnight on Feb 25.\"
            • The agent handles date parsing: Say \"next week\" or \"after Friday\": The agent converts it to YYYY-MM-DD. The CLI only accepts the explicit date format.
            • Reminders are committed to git: They travel with the repo. If you switch machines, your reminders follow.
            • IDs never reuse: After dismissing reminder 3, the next reminder gets ID 4 (or higher). No confusion from recycled numbers.
            ","path":["Recipes","Sessions","Session Reminders"],"tags":[]},{"location":"recipes/session-reminders/#next-up","level":2,"title":"Next Up","text":"

            Using the Scratchpad →: For quiet notes and sensitive values that don't need session-start announcements.

            ","path":["Recipes","Sessions","Session Reminders"],"tags":[]},{"location":"recipes/session-reminders/#see-also","level":2,"title":"See Also","text":"
            • CLI Reference: ctx remind: full command syntax and flags
            • The Complete Session: how reminders fit into the session lifecycle
            • Managing Tasks: for work items that need status tracking
            ","path":["Recipes","Sessions","Session Reminders"],"tags":[]},{"location":"recipes/state-maintenance/","level":1,"title":"State Directory Maintenance","text":"","path":["State Directory Maintenance"],"tags":[]},{"location":"recipes/state-maintenance/#the-problem","level":2,"title":"The Problem","text":"

            Every session creates tombstone files in .context/state/ - small markers that suppress repeat hook nudges (\"already checked context size\", \"already sent persistence reminder\"). Over days and weeks, these accumulate into hundreds of files from long-dead sessions.

            The files are harmless individually, but the clutter makes it harder to reason about state, and stale global tombstones can suppress nudges across sessions entirely.

            ","path":["State Directory Maintenance"],"tags":[]},{"location":"recipes/state-maintenance/#tldr","level":2,"title":"TL;DR","text":"
            ctx prune --dry-run     # preview what would be removed\nctx prune               # prune files older than 7 days\nctx prune --days 1      # more aggressive: keep only today\n
            ","path":["State Directory Maintenance"],"tags":[]},{"location":"recipes/state-maintenance/#commands-used","level":2,"title":"Commands Used","text":"Tool Type Purpose ctx prune Command Remove old per-session state files ctx status Command Quick health overview including state dir","path":["State Directory Maintenance"],"tags":[]},{"location":"recipes/state-maintenance/#understanding-state-files","level":2,"title":"Understanding State Files","text":"

            State files fall into two categories:

            Session-scoped (contain a UUID in the filename): Created per-session to suppress repeat nudges. Safe to prune once the session ends. Examples:

            context-check-11e94c1d-1639-4c04-bf77-63dcf1f50ec7\nheartbeat-11e94c1d-1639-4c04-bf77-63dcf1f50ec7\npersistence-nudge-11e94c1d-1639-4c04-bf77-63dcf1f50ec7\n

            Global (no UUID): Persist across sessions. ctx prune preserves these automatically. Some are legitimate state (events.jsonl, memory-import.json); others may be stale tombstones that need manual review.

            ","path":["State Directory Maintenance"],"tags":[]},{"location":"recipes/state-maintenance/#the-workflow","level":2,"title":"The Workflow","text":"","path":["State Directory Maintenance"],"tags":[]},{"location":"recipes/state-maintenance/#step-1-preview","level":3,"title":"Step 1: Preview","text":"

            Always dry-run first to see what would be removed:

            ctx prune --dry-run\n

            The output shows each file, its age, and a summary:

              would prune: context-check-abc123... (age: 3d)\n  would prune: heartbeat-abc123... (age: 3d)\n\nDry run - would prune 150 files (skip 70 recent, preserve 14 global)\n
            ","path":["State Directory Maintenance"],"tags":[]},{"location":"recipes/state-maintenance/#step-2-prune","level":3,"title":"Step 2: Prune","text":"

            Choose an age threshold. The default is 7 days:

            ctx prune               # older than 7 days\nctx prune --days 3      # older than 3 days\nctx prune --days 1      # older than 1 day (aggressive)\n
            ","path":["State Directory Maintenance"],"tags":[]},{"location":"recipes/state-maintenance/#step-3-review-global-files","level":3,"title":"Step 3: Review Global Files","text":"

            After pruning, check what prune preserved:

            ls .context/state/ | grep -v '[0-9a-f]\\{8\\}-[0-9a-f]\\{4\\}'\n

            Legitimate global files (keep):

            • events.jsonl - event log
            • memory-import.json - import tracking state

            Stale global tombstones (safe to delete):

            • Files like backup-reminded, ceremony-reminded, version-checked with no session UUID are one-shot markers. If they are from a previous session, they are stale and can be removed manually.
            rm .context/state/backup-reminded .context/state/ceremony-reminded\n
            ","path":["State Directory Maintenance"],"tags":[]},{"location":"recipes/state-maintenance/#step-4-verify","level":3,"title":"Step 4: Verify","text":"
            ls .context/state/ | wc -l    # should be manageable\n
            ","path":["State Directory Maintenance"],"tags":[]},{"location":"recipes/state-maintenance/#when-to-prune","level":2,"title":"When to Prune","text":"
            • Weekly: ctx prune with default 7-day threshold
            • After heavy parallel work: Multiple concurrent sessions create many tombstones. Prune with --days 1 afterward.
            • When state directory exceeds ~100 files: A sign that pruning hasn't run recently
            ","path":["State Directory Maintenance"],"tags":[]},{"location":"recipes/state-maintenance/#tips","level":2,"title":"Tips","text":"

            Pruning active sessions is safe but noisy: If you prune a file belonging to a still-running session, the corresponding hook will re-fire its nudge on the next prompt. Minor UX annoyance, not data loss.

            No context files are stored in state: The state directory contains only tombstones, counters, and diagnostic data. Nothing in .context/state/ affects your decisions, learnings, tasks, or conventions.

            Test artifacts sneak in: Files like context-check-statstest or heartbeat-unknown are artifacts from development or testing. They lack UUIDs so prune preserves them. Delete manually.

            ","path":["State Directory Maintenance"],"tags":[]},{"location":"recipes/state-maintenance/#see-also","level":2,"title":"See Also","text":"
            • Detecting and Fixing Drift: broader context maintenance including drift detection and archival
            • Troubleshooting: diagnostic workflow using ctx doctor and event logs
            • CLI Reference: system: full flag documentation for ctx prune and related commands
            ","path":["State Directory Maintenance"],"tags":[]},{"location":"recipes/steering/","level":1,"title":"Writing Steering Files","text":"","path":["Recipes","Agents and Automation","Writing Steering Files"],"tags":[]},{"location":"recipes/steering/#writing-steering-files","level":1,"title":"Writing Steering Files","text":"

            Steering files tell your AI assistant how to behave, not what was decided or how the codebase is written. This recipe walks through writing a steering file from scratch, validating which prompts will trigger it, and syncing it out to your configured AI tools.

            Before You Start

            If you're unsure whether a rule belongs in steering/, DECISIONS.md, or CONVENTIONS.md, read the \"Steering vs decisions vs conventions\" admonition on the ctx steering reference page. The short version: if the rule is \"the AI should always do X when asked about Y,\" that's steering. Otherwise it's probably a decision or convention.

            ","path":["Recipes","Agents and Automation","Writing Steering Files"],"tags":[]},{"location":"recipes/steering/#start-here-customize-the-foundation-files","level":2,"title":"Start Here: Customize the Foundation Files","text":"

            ctx init scaffolds four foundation steering files for you the first time you initialize a project:

            File Purpose .context/steering/product.md Product context, goals, target users .context/steering/tech.md Tech stack, constraints, key dependencies .context/steering/structure.md Directory layout, naming conventions .context/steering/workflow.md Branch strategy, commit rules, pre-commit

            Each file opens with an inline HTML comment that explains the three inclusion modes, what priority means, and the tools scope. The comment is invisible in rendered markdown but visible when you edit the file. Delete it once the file is yours.

            All four default to inclusion: always and priority: 10, so they fire on every AI tool call until you customize them. If you're reading this recipe and haven't touched them yet, open each one now and replace the placeholder bullet list with actual rules for your project. That's the highest-leverage five minutes you can spend in a new ctx setup.

            What to fill in, by file:

            product.md: The elevator pitch plus hard scope:

            • One-sentence product description.
            • Primary users and their top job-to-be-done.
            • Two or three \"this is explicitly out of scope\" items so the AI doesn't wander.

            tech.md: Technology and constraints:

            • Languages and versions (Go 1.22, Node 20, etc.).
            • Frameworks and key libraries.
            • Runtime and deployment target.
            • Hard constraints: \"no CGO\", \"no network at test time\", \"no external DB for unit tests\". These are the things that burn agents when they don't know them.

            structure.md: Layout and naming:

            • Top-level directories and their purpose.
            • Where new files should go (and where they should NOT).
            • Naming conventions for packages, files, types.

            workflow.md: Process rules:

            • Branch strategy (main-only, trunk-based, feature branches).
            • Commit message format, signed-off-by requirement.
            • Pre-commit and pre-push checks.
            • Review expectations.

            After editing, the next AI tool call in Claude Code will pick up the new rules automatically via the plugin's PreToolUse hook, with no sync step and no restart. Other tools (Cursor, Cline, Kiro) need ctx steering sync to export into their native format.

            Prefer a Bare .context/steering/ Directory?

            Re-run ctx init --no-steering-init and delete the scaffolded files. ctx init leaves existing files alone, so the flag is only needed if you want to opt out of the initial scaffold.

            The rest of this recipe walks through creating an additional, scenario-specific steering file beyond the four foundation defaults.

            ","path":["Recipes","Agents and Automation","Writing Steering Files"],"tags":[]},{"location":"recipes/steering/#scenario","level":2,"title":"Scenario","text":"

            You're working on a project with a strict input-validation policy: every new API handler must validate request bodies before touching the database. You want the AI to flag this concern automatically whenever it's asked to write an HTTP handler, without you having to remind it every session.

            Claude Code Users: Pick always, Not auto

            This walkthrough uses inclusion: auto because the scenario is a scoped rule that matches a specific kind of prompt. That works natively on Cursor, Cline, and Kiro (they resolve the description keyword match themselves).

            On Claude Code, auto does not fire through the plugin's PreToolUse hook. The hook passes an empty prompt to ctx agent, so only always files match. Claude can still reach an auto file by calling the ctx_steering_get MCP tool, but that requires Claude to decide to call it; there's no automatic injection.

            If Claude Code is your tool, set inclusion: always in Step 2 instead of auto. The rule will fire on every tool call regardless of topic. You may want to narrow the rule body so the extra tokens per turn aren't wasted on unrelated work.

            See the ctx steering reference \"Prefer inclusion: always for Claude Code\" section for the full trade-off.

            ","path":["Recipes","Agents and Automation","Writing Steering Files"],"tags":[]},{"location":"recipes/steering/#step-1-scaffold-the-file","level":2,"title":"Step 1: Scaffold the File","text":"
            ctx steering add api-validation\n

            That creates .context/steering/api-validation.md with default frontmatter:

            ---\nname: api-validation\ndescription:\ninclusion: manual\ntools: []\npriority: 50\n---\n

            The defaults are deliberately conservative: inclusion: manual means the file won't be applied until you opt in, which keeps the rules out of the prompt until you've reviewed them.

            ","path":["Recipes","Agents and Automation","Writing Steering Files"],"tags":[]},{"location":"recipes/steering/#step-2-fill-in-the-rule","level":2,"title":"Step 2: Fill in the Rule","text":"

            Open the file and write the rule body plus a focused description. The description is what inclusion: auto matches against later.

            ---\nname: api-validation\ndescription: HTTP handler input validation and request parsing\ninclusion: auto\ntools: []\npriority: 20\n---\n\n# API request validation\n\nEvery new HTTP handler MUST:\n\n1. Parse request bodies into typed structs, never `map[string]any`.\n2. Validate required fields before any database call.\n3. Return 400 with a machine-readable error for validation failures.\n4. Use `context.Context` from the request for all downstream calls.\n\nPrefer existing validation helpers in `internal/validate/`\nrather than inline checks.\n

            Notes on the choices:

            • inclusion: auto: this rule should fire automatically on HTTP-handler-shaped prompts, not always.
            • priority: 20: lower than the default, so this rule appears near the top of the prompt alongside other high-priority rules.
            • Description is keyword-rich (\"HTTP handler input validation and request parsing\"); the auto matcher scores prompts against these words.
            ","path":["Recipes","Agents and Automation","Writing Steering Files"],"tags":[]},{"location":"recipes/steering/#step-3-preview-which-prompts-match","level":2,"title":"Step 3: Preview Which Prompts Match","text":"

            Before committing the file, validate your description catches the prompts you care about:

            ctx steering preview \"add an endpoint for updating user email\"\n

            Expected output:

            Steering files matching prompt \"add an endpoint for updating user email\":\n  api-validation       inclusion=auto     priority=20  tools=all\n

            Good, the prompt matches. Try a negative case:

            ctx steering preview \"fix a bug in the JSON renderer\"\n

            Expected: empty match (or whatever else is currently auto). If api-validation incorrectly fires for unrelated prompts, tighten the description. If it misses prompts it should catch, add more keywords.

            ","path":["Recipes","Agents and Automation","Writing Steering Files"],"tags":[]},{"location":"recipes/steering/#step-4-list-to-confirm-metadata","level":2,"title":"Step 4: List to Confirm Metadata","text":"
            ctx steering list\n

            Should show api-validation alongside any other files, with its inclusion mode and priority. If the list is wrong, check the frontmatter for typos.

            ","path":["Recipes","Agents and Automation","Writing Steering Files"],"tags":[]},{"location":"recipes/steering/#step-5-get-the-rules-in-front-of-the-ai","level":2,"title":"Step 5: Get the Rules in Front of the AI","text":"

            Steering files are authored once in .context/steering/, but how they reach the AI depends on which tool you use. There are two delivery mechanisms:

            ","path":["Recipes","Agents and Automation","Writing Steering Files"],"tags":[]},{"location":"recipes/steering/#path-a-native-rules-tools-cursor-cline-kiro","level":3,"title":"Path A: Native-Rules Tools (Cursor, Cline, Kiro)","text":"

            These tools read a specific directory for rules. ctx steering sync exports your files into that directory with tool-specific frontmatter:

            ctx steering sync\n

            Depending on the active tool in .ctxrc or --tool:

            Tool Target Cursor .cursor/rules/ Cline .clinerules/ Kiro .kiro/steering/

            The sync is idempotent; unchanged files are skipped. Run it whenever you edit a steering file.

            ","path":["Recipes","Agents and Automation","Writing Steering Files"],"tags":[]},{"location":"recipes/steering/#path-b-claude-code-and-codex-hook-mcp","level":3,"title":"Path B: Claude Code and Codex (Hook + MCP)","text":"

            Claude Code and Codex have no native rules primitive, so ctx steering sync is a no-op for them; it deliberately skips both. Instead, steering reaches these tools through two non-sync channels:

            1. PreToolUse hook (automatic). The ctx setup claude-code plugin installs a hook that runs ctx agent --budget 8000 before each tool call. ctx agent loads your steering files, filters them against the active prompt, and includes matching bodies as Tier 6 of the context packet. The packet gets injected into Claude's context automatically.

            2. ctx_steering_get MCP tool (on-demand). Claude can call this MCP tool mid-task to fetch matching steering files for a specific prompt. Automatic activation comes from Claude's judgment, not a hook.

            Both channels activate when you run:

            ctx setup claude-code --write\n

            That installs the plugin, wires the hook, and registers the MCP server. After that, steering files you edit are picked up on the next tool call, with no sync step needed.

            Running ctx steering sync with Claude Code

            It won't error; it will simply report that Claude and Codex aren't sync targets and skip them. If Claude Code is your only tool, you never need to run sync. If you use both Claude Code and (say) Cursor, run sync to keep Cursor up to date; the Claude pipeline takes care of itself via the hook.

            ","path":["Recipes","Agents and Automation","Writing Steering Files"],"tags":[]},{"location":"recipes/steering/#step-6-verify-the-ai-sees-it","level":2,"title":"Step 6: Verify the AI Sees It","text":"

            Open your AI tool and ask it something the rule should fire on:

            \"Add a POST /users endpoint that accepts email and name.\"

            If the rule is working, the AI's first response should mention input validation, typed structs, and the internal/validate/ package, because that's what the steering file told it to do.

            If nothing happens, the fix depends on which path you're on:

            Path A (Cursor/Cline/Kiro):

            1. Re-run ctx steering preview with the literal prompt to confirm the match.
            2. Run ctx steering list and verify inclusion is auto, not manual.
            3. Check the tool's own config directory (e.g. .cursor/rules/); the file should be there after ctx steering sync.

            Path B (Claude Code):

            1. Re-run ctx steering preview with the literal prompt to confirm the match.
            2. Verify the plugin is installed: cat .claude/hooks.json should include ctx agent --budget 8000 under PreToolUse. If not, re-run ctx setup claude-code --write.
            3. Run ctx agent --budget 8000 manually and grep the output for your rule body. If it's there, the data is fine; if it's missing, the inclusion mode or description is at fault.
            4. As a last resort, ask Claude directly: \"Call the ctx_steering_get MCP tool with my prompt and show me the result.\" If the MCP tool returns your rule, Claude has access but isn't pulling it into the initial context packet; tighten the description keywords.
            ","path":["Recipes","Agents and Automation","Writing Steering Files"],"tags":[]},{"location":"recipes/steering/#common-mistakes","level":2,"title":"Common Mistakes","text":"

            Too-generic descriptions. description: general coding will match almost every prompt and flood the context window. Keep descriptions specific to the scenario the rule applies to.

            Overlapping rules. If two steering files match the same prompt and contradict each other, the result is confusing. Use priority to resolve, but better: merge the files or narrow the descriptions so they don't overlap.

            Putting decisions in steering. \"We decided to use PostgreSQL\" is a decision, not a rule for the AI to follow on every prompt. Record decisions with ctx add decision, not ctx steering add.

            Committing inclusion: always without thinking. Rules marked always fire on every prompt, consuming tier-6 budget permanently. Only use always for true invariants (security, safety, licensing). Everything else should be auto or manual.

            ","path":["Recipes","Agents and Automation","Writing Steering Files"],"tags":[]},{"location":"recipes/steering/#see-also","level":2,"title":"See Also","text":"
            • ctx steering reference: full command, flag, and frontmatter reference.
            • ctx setup: configure which tools the steering sync writes to.
            • Authoring triggers: if you want script-based automation, not rule-based prompt injection.
            ","path":["Recipes","Agents and Automation","Writing Steering Files"],"tags":[]},{"location":"recipes/system-hooks-audit/","level":1,"title":"Auditing System Hooks","text":"","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#the-problem","level":2,"title":"The Problem","text":"

            ctx runs 14 system hooks behind the scenes: nudging your agent to persist context, warning about resource pressure, gating commits on QA. But these hooks are invisible by design. You never see them fire. You never know if they stopped working.

            How do you verify your hooks are actually running, audit what they do, and get alerted when they go silent?

            ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#tldr","level":2,"title":"TL;DR","text":"
            ctx system check-resources # run a hook manually\nls -la .context/logs/      # check hook execution logs\nctx hook notify setup      # get notified when hooks fire\n

            Or ask your agent: \"Are our hooks running?\"

            ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Tool Type Purpose ctx system <hook> CLI command Run a system hook manually ctx sysinfo CLI command Show system resource status ctx usage CLI command Stream or dump per-session token stats ctx hook notify setup CLI command Configure webhook for audit trail ctx hook notify test CLI command Verify webhook delivery .ctxrc notify.events Configuration Subscribe to relay for full hook audit .context/logs/ Log files Local hook execution ledger","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#what-are-system-hooks","level":2,"title":"What Are System Hooks?","text":"

            System hooks are plumbing commands that ctx registers with your AI tool (Claude Code, Cursor, etc.) via the plugin's hooks.json. They fire automatically at specific events during your AI session:

            Event When Hooks UserPromptSubmit Before the agent sees your prompt 10 check hooks + heartbeat PreToolUse Before the agent uses a tool block-non-path-ctx, qa-reminder PostToolUse After a tool call succeeds post-commit

            You never run these manually. Your AI tool runs them for you — that's the point.

            ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#the-complete-hook-catalog","level":2,"title":"The Complete Hook Catalog","text":"","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#prompt-time-checks-userpromptsubmit","level":3,"title":"Prompt-Time Checks (UserPromptSubmit)","text":"

            These fire before every prompt, but most are throttled to avoid noise.

            ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#check-context-size-context-capacity-warning","level":4,"title":"check-context-size: Context Capacity Warning","text":"

            What: Adaptive prompt counter. Silent for the first 15 prompts, then nudges with increasing frequency (every 5th, then every 3rd).

            Why: Long sessions lose coherence. The nudge reminds both you and the agent to persist context before the window fills up.

            Output: VERBATIM relay box with prompt count.

            ┌─ Context Checkpoint (prompt #20) ────────────────\n│ This session is getting deep. Consider wrapping up\n│ soon. If there are unsaved learnings, decisions, or\n│ conventions, now is a good time to persist them.\n│ ⏱ Context window: ~45k tokens (~22% of 200k)\n└──────────────────────────────────────────────────\n

            Usage: Every prompt records token usage to .context/state/stats-{session}.jsonl. Monitor live with ctx usage --follow or query with ctx usage --json. Usage is recorded even during wrap-up suppression (event: suppressed).

            Billing guard: When billing_token_warn is set in .ctxrc, a one-shot warning fires if session tokens exceed the threshold. This warning is independent of all other triggers - it fires even during wrap-up suppression.

            ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#check-persistence-context-staleness-nudge","level":4,"title":"check-persistence: Context Staleness Nudge","text":"

            What: Tracks when .context/*.md files were last modified. If too many prompts pass without a write, nudges the agent to persist.

            Why: Sessions produce insights that evaporate if not recorded. This catches the \"we talked about it but never wrote it down\" failure mode.

            Output: VERBATIM relay after 20+ prompts without a context file change.

            ┌─ Persistence Checkpoint (prompt #20) ───────────\n│ No context files updated in 20+ prompts.\n│ Have you discovered learnings, made decisions,\n│ established conventions, or completed tasks\n│ worth persisting?\n│\n│ Run /ctx-wrap-up to capture session context.\n└──────────────────────────────────────────────────\n
            ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#check-ceremonies-session-ritual-adoption","level":4,"title":"check-ceremonies: Session Ritual Adoption","text":"

            What: Scans your last 3 journal entries for /ctx-remember and /ctx-wrap-up usage. Nudges once per day if missing.

            Why: Session ceremonies are the highest-leverage habit in ctx. This hook bootstraps the habit until it becomes automatic.

            Output: Tailored nudge depending on which ceremony is missing.

            ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#check-journal-unimported-session-reminder","level":4,"title":"check-journal: Unimported Session Reminder","text":"

            What: Detects unimported Claude Code sessions and unenriched journal entries. Fires once per day.

            Why: Exported sessions become searchable history. Unenriched entries lack metadata for filtering. Both decay in value over time.

            Output: VERBATIM relay with counts and exact commands.

            ┌─ Journal Reminder ─────────────────────────────\n│ You have 3 new session(s) not yet exported.\n│ 5 existing entries need enrichment.\n│\n│ Export and enrich:\n│   ctx journal import --all\n│   /ctx-journal-enrich-all\n└────────────────────────────────────────────────\n
            ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#check-resources-system-resource-pressure","level":4,"title":"check-resources: System Resource Pressure","text":"

            What: Monitors memory, swap, disk, and CPU load. Only fires at DANGER severity (memory >= 90%, swap >= 75%, disk >= 95%, load >= 1.5x CPU count).

            Why: Resource exhaustion mid-session can corrupt work. This provides early warning to persist and exit.

            Output: VERBATIM relay listing critical resources.

            ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#check-knowledge-knowledge-file-growth","level":4,"title":"check-knowledge: Knowledge File Growth","text":"

            What: Counts entries in LEARNINGS.md, DECISIONS.md, and lines in CONVENTIONS.md. Fires once per day when thresholds are exceeded.

            Why: Large knowledge files dilute agent context. 35 learnings compete for attention; 15 focused ones get applied. Thresholds are configurable in .ctxrc.

            Default thresholds:

            # .ctxrc\nentry_count_learnings: 30\nentry_count_decisions: 20\nconvention_line_count: 200\n
            ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#check-version-binaryplugin-version-drift","level":4,"title":"check-version: Binary/Plugin Version Drift","text":"

            What: Compares the ctx binary version against the plugin version. Fires once per day. Also checks encryption key age for rotation nudge.

            Why: Version drift means hooks reference features the binary doesn't have. The key rotation nudge prevents indefinite key reuse.

            ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#check-reminders-pending-reminder-relay","level":4,"title":"check-reminders: Pending Reminder Relay","text":"

            What: Reads .context/reminders.json and surfaces any due reminders via VERBATIM relay. No throttle: fires every session until dismissed.

            Why: Reminders are sticky notes to future-you. Unlike nudges (which throttle to once per day), reminders repeat deliberately until the user dismisses them.

            Output: VERBATIM relay box listing due reminders.

            ┌─ Reminders ──────────────────────────────────────\n│  [1] refactor the swagger definitions\n│\n│ Dismiss: ctx remind dismiss <id>\n│ Dismiss all: ctx remind dismiss --all\n└──────────────────────────────────────────────────\n
            ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#check-freshness-technology-constant-staleness","level":4,"title":"check-freshness: Technology Constant Staleness","text":"

            What: Stats files listed in .ctxrc freshness_files and warns if any haven't been modified in over 6 months. Daily throttle. Silent when no files are configured (opt-in via .ctxrc).

            Why: Model capabilities evolve - token budgets, attention limits, and context window sizes that were accurate 6 months ago may no longer reflect best practices. This hook reminds you to review and touch the file to confirm values are still current.

            Config (.ctxrc):

            freshness_files:\n  - path: config/thresholds.yaml\n    desc: Model token limits and batch sizes\n    review_url: https://docs.example.com/limits  # optional\n

            Each entry has a path (relative to project root), desc (what constants live there), and optional review_url (where to check current values). When review_url is set, the nudge includes \"Review against: {url}\". When absent, just \"Touch the file to mark it as reviewed.\"

            Output: VERBATIM relay listing stale files, silent otherwise.

            ┌─ Technology Constants Stale ──────────────────────\n│   config/thresholds.yaml (210 days ago)\n│     - Model token limits and batch sizes\n│   Review against: https://docs.example.com/limits\n│ Touch each file to mark it as reviewed.\n└───────────────────────────────────────────────────\n
            ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#check-map-staleness-architecture-map-drift","level":4,"title":"check-map-staleness: Architecture Map Drift","text":"

            What: Checks whether map-tracking.json is older than 30 days and there are commits touching internal/ since the last map refresh. Daily throttle prevents repeated nudges.

            Why: Architecture documentation drifts silently as code evolves. This hook detects structural changes that the map hasn't caught up with and suggests running /ctx-architecture to refresh.

            Output: VERBATIM relay when stale and modules changed, silent otherwise.

            ┌─ Architecture Map Stale ────────────────────────────\n│ ARCHITECTURE.md hasn't been refreshed since 2026-01-15\n│ and there are commits touching 12 modules.\n│ /ctx-architecture keeps architecture docs drift-free.\n│\n│ Want me to run /ctx-architecture to refresh?\n└─────────────────────────────────────────────────────\n
            ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#heartbeat-session-heartbeat-webhook","level":4,"title":"heartbeat: Session Heartbeat Webhook","text":"

            What: Fires on every prompt. Sends a webhook notification with prompt count, session ID, context modification status, and token usage telemetry. Never produces stdout.

            Why: Other hooks only send webhooks when they \"speak\" (nudge/relay). When silent, you have no visibility into session activity. The heartbeat provides a continuous session-alive signal with token consumption data for observability dashboards or liveness monitoring.

            Output: None (webhook + event log only).

            Payload:

            {\n  \"event\": \"heartbeat\",\n  \"message\": \"heartbeat: prompt #7 (context_modified=false tokens=158k pct=79%)\",\n  \"detail\": {\n    \"hook\": \"heartbeat\",\n    \"variant\": \"pulse\",\n    \"variables\": {\n      \"prompt_count\": 7,\n      \"session_id\": \"abc...\",\n      \"context_modified\": false,\n      \"tokens\": 158000,\n      \"context_window\": 200000,\n      \"usage_pct\": 79\n    }\n  }\n}\n

            Token fields (tokens, context_window, usage_pct) are included when usage data is available from the session JSONL file.

            ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#tool-time-hooks-pretooluse-posttooluse","level":3,"title":"Tool-Time Hooks (PreToolUse / PostToolUse)","text":"","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#block-non-path-ctx-path-enforcement-hard-gate","level":4,"title":"block-non-path-ctx: PATH Enforcement (Hard Gate)","text":"

            What: Blocks any Bash command that invokes ./ctx, ./dist/ctx, go run ./cmd/ctx, or an absolute path to ctx. Only PATH invocations are allowed.

            Why: Enforces CONSTITUTION.md's invocation invariant. Running a dev-built binary in production context causes version confusion and silent behavior drift.

            Output: Block response (prevents the tool call):

            {\"decision\": \"block\", \"reason\": \"Use 'ctx' from PATH, not './ctx'...\"}\n
            ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#qa-reminder-pre-commit-qa-gate","level":4,"title":"qa-reminder: Pre-Commit QA Gate","text":"

            What: Fires on every Edit tool use. Reminds the agent to lint and test the entire project before committing.

            Why: Agents tend to \"I'll test later\" and then commit untested code. Repetition is intentional: the hook reinforces the habit on every edit, not just before commits.

            Output: Agent directive with hard QA gate instructions.

            ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#post-commit-context-capture-after-commit","level":4,"title":"post-commit: Context Capture After Commit","text":"

            What: Fires after any git commit (excludes --amend). Prompts the agent to offer context capture (decision? learning?) and suggest running lints/tests before pushing.

            Why: Commits are natural reflection points. The nudge converts mechanical git operations into context-capturing opportunities.

            ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#auditing-hooks-via-the-local-event-log","level":2,"title":"Auditing Hooks via the Local Event Log","text":"

            If you don't need an external audit trail, enable the local event log for a self-contained record of hook activity:

            # .ctxrc\nevent_log: true\n

            Once enabled, every hook that fires writes an entry to .context/state/events.jsonl. Query it with ctx hook event:

            ctx hook event                    # last 50 events\nctx hook event --hook qa-reminder # filter by hook\nctx hook event --session <id>     # filter by session\nctx hook event --json | jq '.'    # raw JSONL for processing\n

            The event log is local, queryable, and doesn't require any external service. For a full diagnostic workflow combining event logs with structural health checks, see Troubleshooting.

            ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#auditing-hooks-via-webhooks","level":2,"title":"Auditing Hooks via Webhooks","text":"

            The most powerful audit setup pipes all hook output to a webhook, giving you a real-time external record of what your agent is being told.

            ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#step-1-set-up-the-webhook","level":3,"title":"Step 1: Set Up the Webhook","text":"
            ctx hook notify setup\n# Enter your webhook URL (Slack, Discord, ntfy.sh, IFTTT, etc.)\n

            See Webhook Notifications for service-specific setup.

            ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#step-2-subscribe-to-relay-events","level":3,"title":"Step 2: Subscribe to relay Events","text":"
            # .ctxrc\nnotify:\n  events:\n    - relay   # all hook output: VERBATIM relays, directives, blocks\n    - nudge   # just the user-facing VERBATIM relays\n

            The relay event fires for every hook that produces output. This includes:

            Hook Event sent check-context-size relay + nudge check-persistence relay + nudge check-ceremonies relay + nudge check-journal relay + nudge check-resources relay + nudge check-knowledge relay + nudge check-version relay + nudge check-reminders relay + nudge check-freshness relay + nudge check-map-staleness relay + nudge heartbeat heartbeat only block-non-path-ctx relay only post-commit relay only qa-reminder relay only","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#step-3-cross-reference","level":3,"title":"Step 3: Cross-Reference","text":"

            With relay enabled, your webhook receives a JSON payload every time a hook fires:

            {\n  \"event\": \"relay\",\n  \"message\": \"check-persistence: No context updated in 20+ prompts\",\n  \"session_id\": \"b854bd9c\",\n  \"timestamp\": \"2026-02-22T14:30:00Z\",\n  \"project\": \"my-project\"\n}\n

            This creates an external audit trail independent of the agent. You can now cross-verify: did the agent actually relay the checkpoint the hook told it to relay?

            ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#verifying-hooks-actually-fire","level":2,"title":"Verifying Hooks Actually Fire","text":"

            Hooks are invisible. An invisible thing that breaks is indistinguishable from an invisible thing that never existed. Three verification methods, from simplest to most robust:

            ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#method-1-ask-the-agent","level":3,"title":"Method 1: Ask the Agent","text":"

            The simplest check. After a few prompts into a session:

            \"Did you receive any hook output this session? Print the last\ncontext checkpoint or persistence nudge you saw.\"\n

            The agent should be able to recall recent hook output from its context window. If it says \"I haven't received any hook output\", one of three things is true:

            • The hooks aren't firing (check installation);
            • The session is too short (hooks throttle early);
            • The hooks fired but the agent absorbed them silently.

            Limitation: You are trusting the agent to report accurately. Agents sometimes confabulate or miss context. Use this as a quick smoke test, not definitive proof.

            ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#method-2-check-the-webhook-trail","level":3,"title":"Method 2: Check the Webhook Trail","text":"

            If you have relay events enabled, check your webhook receiver. Every hook that fires sends a timestamped notification. No notification = no fire.

            This is the ground truth. The webhook is called directly by the ctx binary, not by the agent. The agent cannot fake, suppress, or modify webhook deliveries.

            Compare what the webhook received against what the agent claims to have relayed. Discrepancies mean the agent is absorbing nudges instead of surfacing them.

            ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#method-3-read-the-local-logs","level":3,"title":"Method 3: Read the Local Logs","text":"

            Hooks that support logging write to .context/logs/:

            # Check context-size hook activity\ncat .context/logs/check-context-size.log\n\n# Sample output:\n# [2026-02-22 09:15:00] [session:b854bd9c] prompt#1 silent\n# [2026-02-22 09:17:33] [session:b854bd9c] prompt#16 CHECKPOINT\n# [2026-02-22 09:20:01] [session:b854bd9c] prompt#20 CHECKPOINT\n
            # Check persistence nudge activity\ncat .context/logs/check-persistence.log\n\n# Sample output:\n# [2026-02-22 09:15:00] [session:b854bd9c] init count=1 mtime=1770646611\n# [2026-02-22 09:20:01] [session:b854bd9c] prompt#20 NUDGE since_nudge=20\n

            Logs are append-only and written by the ctx binary, not the agent.

            ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#detecting-silent-hook-failures","level":2,"title":"Detecting Silent Hook Failures","text":"

            The hardest failure mode: hooks that stop firing without error. The plugin config changes, a binary update drops a hook, or a PATH issue silently breaks execution. Nothing errors: The hook just never runs.

            ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#the-staleness-signal","level":3,"title":"The Staleness Signal","text":"

            If .context/logs/check-context-size.log has no entries newer than 5 days old but you've been running sessions daily, something is wrong. The absence of evidence is evidence of absence: but only if you control for inactivity.

            ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#false-positive-protection","level":3,"title":"False Positive Protection","text":"

            A naive \"hooks haven't fired in N days\" alert fires incorrectly when you simply haven't used ctx. The correct check needs two inputs:

            1. Last hook fire time: from .context/logs/ or webhook history
            2. Last session activity: from journal entries or ctx journal source

            If sessions are happening but hooks aren't firing, that's a real problem. If neither sessions nor hooks are happening, that's a vacation.

            ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#what-to-check","level":3,"title":"What to Check","text":"

            When you suspect hooks aren't firing:

            # 1. Verify the plugin is installed\nls ~/.claude/plugins/\n\n# 2. Check hook registration\ncat ~/.claude/plugins/ctx/hooks.json | head -20\n\n# 3. Run a hook manually to see if it errors\necho '{\"session_id\":\"test\"}' | ctx system check-context-size\n\n# 4. Check for PATH issues\nwhich ctx\nctx --version\n
            ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#tips","level":2,"title":"Tips","text":"
            • Start with nudge, graduate to relay: The nudge event covers user-facing VERBATIM relays. Add relay when you want full visibility into agent directives and hard gates.
            • Webhooks are your trust anchor: The agent can ignore a nudge, but it can't suppress the webhook. If the webhook fired and the agent didn't relay, you have proof of a compliance gap.
            • Hooks are throttled by design: Most check hooks fire once per day or use adaptive frequency. Don't expect a notification every prompt: Silence usually means the throttle is working, not that the hook is broken.
            • Daily markers live in .context/state/: Throttle files are stored in .context/state/ alongside other project-scoped state. If you need to force a hook to re-fire during testing, delete the corresponding marker file.
            • The QA reminder is intentionally noisy: Unlike other hooks, qa-reminder fires on every Edit call with no throttle. This is deliberate: Commit quality degrades when the reminder fades from salience.
            • Log files are safe to commit: .context/logs/ contains only timestamps, session IDs, and status keywords. No secrets, no code.
            ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#next-up","level":2,"title":"Next Up","text":"

            Detecting and Fixing Drift →: Keep context files accurate as your codebase evolves.

            ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#see-also","level":2,"title":"See Also","text":"
            • Troubleshooting: full diagnostic workflow using ctx doctor, event logs, and /ctx-doctor
            • Customizing Hook Messages: override what hooks say without changing what they do
            • Webhook Notifications: setting up and configuring the webhook system
            • Hook Output Patterns: understanding VERBATIM relays, agent directives, and hard gates
            • Detecting and Fixing Drift: structural checks that complement runtime hook auditing
            • CLI Reference: full ctx system command reference
            ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/task-management/","level":1,"title":"Tracking Work Across Sessions","text":"","path":["Recipes","Knowledge and Tasks","Tracking Work Across Sessions"],"tags":[]},{"location":"recipes/task-management/#the-problem","level":2,"title":"The Problem","text":"

            You have work that spans multiple sessions. Tasks get added during one session, partially finished in another, and completed days later.

            Without a system, follow-up items fall through the cracks, priorities drift, and you lose track of what was done versus what still needs doing. TASKS.md grows cluttered with completed checkboxes that obscure the remaining work.

            How do you manage work items that span multiple sessions without losing context?

            Prefer Skills over Raw Commands

            When working with an AI agent, use /ctx-task-add instead of raw ctx add task. The agent automatically picks up session ID, branch, and commit hash from its context, so no manual flags are needed.

            ","path":["Recipes","Knowledge and Tasks","Tracking Work Across Sessions"],"tags":[]},{"location":"recipes/task-management/#tldr","level":2,"title":"TL;DR","text":"

            Manage Tasks:

            ctx add task \"Fix race condition\" --priority high \\\n  --session-id abc12345 --branch main --commit 68fbc00a  # add\nctx add task \"Write tests\" --section \"Phase 2\" \\\n  --session-id abc12345 --branch main --commit 68fbc00a  # add to phase\nctx task complete \"race condition\"                      # mark done\nctx task snapshot \"before-refactor\"               # backup\nctx task archive                                  # clean up\n

            Pick Up the Next Task:

            /ctx-next # pick what's next\n

            Read on for the full workflow and conversational patterns.

            ","path":["Recipes","Knowledge and Tasks","Tracking Work Across Sessions"],"tags":[]},{"location":"recipes/task-management/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Tool Type Purpose ctx add task Command Add a new task to TASKS.md ctx task complete Command Mark a task as done by number or text ctx task snapshot Command Create a point-in-time backup of TASKS.md ctx task archive Command Move completed tasks to archive file /ctx-task-add Skill AI-assisted task creation with validation /ctx-archive Skill AI-guided archival with safety checks /ctx-next Skill Pick what to work on based on priorities","path":["Recipes","Knowledge and Tasks","Tracking Work Across Sessions"],"tags":[]},{"location":"recipes/task-management/#the-workflow","level":2,"title":"The Workflow","text":"","path":["Recipes","Knowledge and Tasks","Tracking Work Across Sessions"],"tags":[]},{"location":"recipes/task-management/#step-1-add-tasks-with-priorities","level":3,"title":"Step 1: Add Tasks with Priorities","text":"

            Every piece of follow-up work gets a task. Use ctx add task from the terminal or /ctx-task-add from your AI assistant. Tasks should start with a verb and be specific enough that someone unfamiliar with the session could act on them.

            # High-priority bug found during code review\nctx add task \"Fix race condition in session cooldown\" --priority high \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n\n# Medium-priority feature work\nctx add task \"Add --format json flag to ctx status for CI integration\" --priority medium \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n\n# Low-priority cleanup\nctx add task \"Remove deprecated --raw flag from ctx load\" --priority low \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n

            The /ctx-task-add skill validates your task before recording it. It checks that the description is actionable, not a duplicate, and specific enough for someone else to pick up.

            If you say \"fix the bug,\" it will ask you to clarify which bug and where.

            Tasks Are Often Created Proactively

            In practice, many tasks are created proactively by the agent rather than by explicit CLI commands.

            After completing a feature, the agent will often identify follow-up work (tests, docs, edge cases, error handling) and offer to add them as tasks.

            You do not need to dictate ctx add task commands; the agent picks up on work context and suggests tasks naturally.

            ","path":["Recipes","Knowledge and Tasks","Tracking Work Across Sessions"],"tags":[]},{"location":"recipes/task-management/#step-2-organize-with-phase-sections","level":3,"title":"Step 2: Organize with Phase Sections","text":"

            Tasks live in phase sections inside TASKS.md.

            Phases provide logical groupings that preserve order and enable replay.

            A task does not move between sections. It stays in its phase permanently, and status is tracked via checkboxes and inline tags.

            ## Phase 1: Core CLI\n\n- [x] Implement ctx add command\n- [x] Implement ctx task complete command\n- [ ] Add --section flag to ctx add task `#priority:medium`\n\n## Phase 2: AI Integration\n\n- [ ] Implement ctx agent cooldown `#priority:high` `#in-progress`\n- [ ] Add ctx watch XML parsing `#priority:medium`\n  - Blocked by: Need to finalize agent output format\n\n## Backlog\n\n- [ ] Performance optimization for large TASKS.md files `#priority:low`\n- [ ] Add metrics dashboard to ctx status `#priority:deferred`\n

            Use --section when adding a task to a specific phase:

            ctx add task \"Add ctx watch XML parsing\" --priority medium --section \\\n    \"Phase 2: AI Integration\" \\\n    --session-id abc12345 --branch main --commit 68fbc00a\n

            Without --section, the task is inserted before the first unchecked task in TASKS.md.

            ","path":["Recipes","Knowledge and Tasks","Tracking Work Across Sessions"],"tags":[]},{"location":"recipes/task-management/#step-3-pick-what-to-work-on","level":3,"title":"Step 3: Pick What to Work On","text":"

            At the start of a session, or after finishing a task, use /ctx-next to get prioritized recommendations.

            The skill reads TASKS.md, checks recent sessions, and ranks candidates using explicit priority, blocking status, in-progress state, momentum from recent work, and phase order.

            You can also ask naturally: \"what should we work on?\" or \"what's the highest priority right now?\"

            /ctx-next\n

            The output looks like this:

            **1. Implement ctx agent cooldown** `#priority:high`\n\n    Still in-progress from yesterday's session. The tombstone file approach is\n    half-built. Finishing is cheaper than context-switching.\n\n**2. Add --section flag to ctx add task** `#priority:medium`\n\n    Last Phase 1 item. Quick win that unblocks organized task entry.\n\n---\n\n*Based on 8 pending tasks across 3 phases.\n\nLast session: agent-cooldown (2026-02-06).*\n

            In-progress tasks almost always come first:

            Finishing existing work takes priority over starting new work.

            ","path":["Recipes","Knowledge and Tasks","Tracking Work Across Sessions"],"tags":[]},{"location":"recipes/task-management/#step-4-complete-tasks","level":3,"title":"Step 4: Complete Tasks","text":"

            When a task is done, mark it complete by number or partial text match:

            # By task number (as shown in TASKS.md)\nctx task complete 3\n\n# By partial text match\nctx task complete \"agent cooldown\"\n

            The task's checkbox changes from [ ] to [x]. Tasks are never deleted: they stay in their phase section so history is preserved.

            Be Conversational

            You rarely need to run ctx task complete yourself during an interactive session.

            When you say something like \"the rate limiter is done\" or \"we finished that,\" the agent marks the task complete and moves on to suggesting what is next.

            The CLI commands are most useful for manual housekeeping, scripted workflows, or when you want precision.

            ","path":["Recipes","Knowledge and Tasks","Tracking Work Across Sessions"],"tags":[]},{"location":"recipes/task-management/#step-5-snapshot-before-risky-changes","level":3,"title":"Step 5: Snapshot Before Risky Changes","text":"

            Before a major refactor or any change that might break things, snapshot your current task state. This creates a copy of TASKS.md in .context/archive/ without modifying the original.

            # Default snapshot\nctx task snapshot\n\n# Named snapshot (recommended before big changes)\nctx task snapshot \"before-refactor\"\n

            This creates a file like .context/archive/tasks-before-refactor-2026-02-08-1430.md. If the refactor goes sideways and you need to confirm what the task state looked like before you started, the snapshot is there.

            Snapshots are cheap: Take them before any change you might want to undo or review later.

            ","path":["Recipes","Knowledge and Tasks","Tracking Work Across Sessions"],"tags":[]},{"location":"recipes/task-management/#step-6-archive-when-tasksmd-gets-cluttered","level":3,"title":"Step 6: Archive When TASKS.md Gets Cluttered","text":"

            After several sessions, TASKS.md accumulates completed tasks that make it hard to see what is still pending.

            Use ctx task archive to move all [x] items to a timestamped archive file.

            Start with a dry run to preview what will be moved:

            ctx task archive --dry-run\n

            Then archive:

            ctx task archive\n

            Completed tasks move to .context/archive/tasks-2026-02-08.md. Phase headers are preserved in the archive for traceability. Pending tasks ([ ]) remain in TASKS.md.

            The /ctx-archive skill adds two safety checks before archiving: it verifies that completed tasks are genuinely cluttering the view and that nothing was marked [x] prematurely.

            ","path":["Recipes","Knowledge and Tasks","Tracking Work Across Sessions"],"tags":[]},{"location":"recipes/task-management/#letting-the-agent-drive","level":2,"title":"Letting the Agent Drive","text":"

            The steps above show the CLI commands for task management.

            In practice, most task management happens conversationally:

            An agent that has loaded the context files does not need you to type ctx add task. It tracks work naturally and offers the right operations.

            ","path":["Recipes","Knowledge and Tasks","Tracking Work Across Sessions"],"tags":[]},{"location":"recipes/task-management/#use-natural-language-instead-of-commands","level":3,"title":"Use Natural Language Instead of Commands","text":"

            These conversational prompts replace explicit commands during interactive sessions:

            Instead of typing... Just say... ctx add task \"Write tests for X\" --session-id ID --branch BR --commit HASH \"We should add tests for this: track that?\" /ctx-next \"What should we work on?\" ctx task complete \"rate limiting\" \"The rate limiter is done, what's next?\" ctx task archive \"TASKS.md is getting long, can you clean it up?\" ctx add task ... --session-id ID --branch BR --commit HASH && ctx add task ... \"Add follow-ups for what we just built.\"

            The agent translates these into the right ctx operations behind the scenes.

            ","path":["Recipes","Knowledge and Tasks","Tracking Work Across Sessions"],"tags":[]},{"location":"recipes/task-management/#the-agent-creates-tasks-proactively","level":3,"title":"The Agent Creates Tasks Proactively","text":"

            After completing a piece of work, the agent does not wait for you to dictate follow-up items. It identifies them from the work context and offers:

            **Agent**: \"I've finished implementing the rate limiter middleware. I noticed\nthree follow-up items:\n\n1. Write integration tests for the rate limiter (the unit tests pass but\n   there's no end-to-end coverage)\n2. Add rate limit headers to API responses (X-RateLimit-Remaining, etc.)\n3. Document the rate limit configuration in the ops runbook\n\nWant me to add these as tasks?\"\n

            This is how an agent behaves once it has read the Agent Playbook.

            The playbook's \"Work, Reflect, Persist\" cycle means the agent asks itself after every milestone: Did I just complete a task? Did new work emerge?

            When the answer is yes, it acts.

            ","path":["Recipes","Knowledge and Tasks","Tracking Work Across Sessions"],"tags":[]},{"location":"recipes/task-management/#asking-the-agent-what-to-capture","level":3,"title":"Asking the Agent What to Capture","text":"

            You don't always know what's worth persisting.

            Instead of reviewing the session yourself and typing specific commands, ask the agent to do the thinking:

            You say What happens \"What tasks came out of this session?\" Agent reviews the work, identifies follow-ups, and offers to add them \"Are there any tasks, learnings, or decisions worth adding?\" Agent scans the full session for all three types and proposes a batch \"What should we remember from today?\" Agent surfaces everything persist-worthy: not just tasks \"Anything we should track before we move on?\" Agent checks for loose ends specific to the work just completed

            This is especially powerful after long or exploratory sessions where multiple threads were touched.

            The agent has the full conversation in context and can spot follow-ups you might miss:

            You:   \"Are there any tasks, learnings, or decisions worth adding\n        from this session?\"\n\nAgent: \"Yes. Here's what I'd suggest:\n\n  Tasks:\n  - Add retry logic to the webhook delivery (we discussed it when\n    the timeout test failed but never tracked it)\n  - Update the API docs to reflect the new rate limit headers\n\n  Learnings:\n  - net/http's default client has no timeout: we burned 15 minutes\n    on that before adding an explicit 30s timeout\n\n  Decisions:\n  - We chose token-bucket over sliding-window for rate limiting\n    because it handles bursts better and the library is simpler\n\n  Want me to add all of these?\"\n\nYou:   \"All of them.\"\n

            The agent fills in structured fields from what it observed during the session. You get the same quality entries as if you typed verbose commands, without needing to remember what happened or how to format it.

            ","path":["Recipes","Knowledge and Tasks","Tracking Work Across Sessions"],"tags":[]},{"location":"recipes/task-management/#the-agent-completes-tasks-and-moves-on","level":3,"title":"The Agent Completes Tasks and Moves On","text":"

            When you confirm that work is finished, or when the agent can see that it is, it marks the task done and suggests the next step:

            You: \n\"The cooldown logic is working now.\"\n\nAgent: \n\"Marked 'Implement ctx agent cooldown' as done. Your highest-\npriority remaining task is 'Add --section flag to ctx add task'.\n\nIt's the last item in Phase 1 and a quick win. Want to start on that?\"\n

            The agent chains completion into next-task selection.

            One sentence from you triggers two operations.

            ","path":["Recipes","Knowledge and Tasks","Tracking Work Across Sessions"],"tags":[]},{"location":"recipes/task-management/#end-of-session-capture","level":3,"title":"End-of-Session Capture","text":"

            At the end of a session, the agent reviews what happened and catches loose ends:

            You: \n\"I think we're good for today.\"\n\nAgent: \n\"Before we wrap up: during the middleware work you mentioned\nwanting to add metrics for rate limit hits. That never made it into TASKS.md.\n\nWant me to add it as a task so it's there next session?\"\n

            This is the proactive persistence model at work.

            The agent monitors the conversation for mentioned-but-not-tracked work and surfaces it before context is lost.

            ","path":["Recipes","Knowledge and Tasks","Tracking Work Across Sessions"],"tags":[]},{"location":"recipes/task-management/#a-conversational-session-example","level":3,"title":"A Conversational Session Example","text":"

            Here is what task management looks like in a real session when the agent is driving:

            You:   What's left to do?\nAgent: You have 5 pending tasks across 2 phases. The highest priority is\n       \"Implement ctx agent cooldown\": it's marked in-progress from\n       yesterday and half-built. Want to pick that up?\n\nYou:   Yeah, let's finish it.\n       [... 20 minutes of implementation work ...]\n\nYou:   That's working. Tests pass.\nAgent: Nice. I'll mark \"Implement ctx agent cooldown\" as done.\n       I noticed two things from the implementation:\n       1. The tombstone file cleanup should happen on a timer, not just\n          at startup. Want me to add that as a task?\n       2. We learned that file locks don't work reliably on NFS. I'll\n          add that to LEARNINGS.md.\n\nYou:   Yes to both. What's next?\nAgent: Added the cleanup task as high priority. Next up is \"Add --section\n       flag to ctx add task\": last item in Phase 1. It should be\n       straightforward since the flag parsing pattern already exists\n       in the codebase. Ready?\n

            It's All Conversational

            Notice what did not happen: The user never typed a ctx command.

            The agent handled task completion, follow-up creation, learning capture, and next-task selection: all from natural conversation.

            ","path":["Recipes","Knowledge and Tasks","Tracking Work Across Sessions"],"tags":[]},{"location":"recipes/task-management/#putting-it-all-together","level":2,"title":"Putting It All Together","text":"
            # Add a task\nctx add task \"Implement rate limiting for API endpoints\" --priority high \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n\n# Add to a specific phase\nctx add task \"Write integration tests for rate limiter\" --section \"Phase 2\" \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n\n# See what to work on\n# (from AI assistant) /ctx-next\n\n# Mark done by text\nctx task complete \"rate limiting\"\n\n# Mark done by number\nctx task complete 5\n\n# Snapshot before a risky refactor\nctx task snapshot \"before-middleware-rewrite\"\n\n# Archive completed tasks when the list gets long\nctx task archive --dry-run     # preview first\nctx task archive               # then archive\n
            ","path":["Recipes","Knowledge and Tasks","Tracking Work Across Sessions"],"tags":[]},{"location":"recipes/task-management/#tips","level":2,"title":"Tips","text":"
            • Start tasks with a verb: \"Add,\" \"Fix,\" \"Implement,\" \"Investigate\": not just a topic like \"Authentication.\"
            • Include the why in the task description. Future sessions lack the context of why you added the task. \"Add rate limiting\" is worse than \"Add rate limiting to prevent abuse on the public API after the load test showed 10x traffic spikes.\"
            • Use #in-progress sparingly. Only one or two tasks should carry this tag at a time. If everything is in-progress, nothing is.
            • Snapshot before, not after. The point of a snapshot is to capture the state before a change, not to celebrate what you just finished.
            • Archive regularly. Once completed tasks outnumber pending ones, it is time to archive. A clean TASKS.md helps both you and your AI assistant focus.
            • Never delete tasks. Mark them [x] (completed) or [-] (skipped with a reason). Deletion breaks the audit trail.
            • Trust the agent's task instincts. When the agent suggests follow-up items after completing work, it is drawing on the full context of what just happened.
            • Conversational prompts beat commands in interactive sessions. Saying \"what should we work on?\" is faster and more natural than running /ctx-next. Save explicit commands for scripts, CI, and unattended runs.
            • Let the agent chain operations. A single statement like \"that's done, what's next?\" can trigger completion, follow-up identification, and next-task selection in one flow.
            • Review proactive task suggestions before moving on. The best follow-ups come from items spotted in-context right after the work completes.
            ","path":["Recipes","Knowledge and Tasks","Tracking Work Across Sessions"],"tags":[]},{"location":"recipes/task-management/#next-up","level":2,"title":"Next Up","text":"

            Using the Scratchpad →: Store short-lived sensitive notes in an encrypted scratchpad.

            ","path":["Recipes","Knowledge and Tasks","Tracking Work Across Sessions"],"tags":[]},{"location":"recipes/task-management/#see-also","level":2,"title":"See Also","text":"
            • The Complete Session: full session lifecycle including task management in context
            • Persisting Decisions, Learnings, and Conventions: capturing the \"why\" behind your work
            • Detecting and Fixing Drift: keeping TASKS.md accurate over time
            • CLI Reference: full documentation for ctx add, ctx task complete, ctx task
            • Context Files: TASKS.md: format and conventions for TASKS.md
            ","path":["Recipes","Knowledge and Tasks","Tracking Work Across Sessions"],"tags":[]},{"location":"recipes/triggers/","level":1,"title":"Authoring Lifecycle Triggers","text":"","path":["Recipes","Agents and Automation","Authoring Lifecycle Triggers"],"tags":[]},{"location":"recipes/triggers/#authoring-lifecycle-triggers","level":1,"title":"Authoring Lifecycle Triggers","text":"

            Triggers are executable shell scripts that fire at specific events during an AI session. They're how you express \"when the AI saves a file, also do X\" or \"before the AI edits this path, check Y first.\" This recipe walks through writing your first trigger, testing it, and enabling it safely.

            Triggers Execute Arbitrary Code

            A trigger is a shell script with the executable bit set. It runs with the same privileges as your AI tool and receives JSON input on stdin. Treat triggers like pre-commit hooks:

            • Only enable scripts you have read and understand.
            • Never enable a trigger you downloaded from the internet without reviewing every line.
            • Avoid shelling out to user-controlled values (jq -r output, path field, tool field) without quoting.
            • A malicious or buggy trigger can block tool calls, corrupt context files, or exfiltrate data.

            The generated trigger template starts disabled (no executable bit) so you cannot accidentally run an unreviewed script. Enable it explicitly with ctx trigger enable.

            ","path":["Recipes","Agents and Automation","Authoring Lifecycle Triggers"],"tags":[]},{"location":"recipes/triggers/#scenario","level":2,"title":"Scenario","text":"

            You want a pre-tool-use trigger that blocks the AI from editing anything in internal/crypto/ without explicit confirmation. Cryptographic code is sensitive, accidental edits have caused outages before, and you want a hard gate.

            ","path":["Recipes","Agents and Automation","Authoring Lifecycle Triggers"],"tags":[]},{"location":"recipes/triggers/#step-1-scaffold-the-script","level":2,"title":"Step 1: Scaffold the Script","text":"
            ctx trigger add pre-tool-use protect-crypto\n

            That creates .context/hooks/pre-tool-use/protect-crypto.sh with a template:

            #!/usr/bin/env bash\nset -euo pipefail\n\n# Read the JSON event from stdin.\npayload=$(cat)\n\n# Parse fields with jq.\ntool=$(echo \"$payload\" | jq -r '.tool // empty')\npath=$(echo \"$payload\" | jq -r '.path // empty')\n\n# Your logic here.\n\n# Return a JSON result. action can be \"allow\", \"block\", or absent.\necho '{\"action\": \"allow\"}'\n

            Note: the directory is .context/hooks/pre-tool-use/; the on-disk layout still uses hooks/ even though the command is ctx trigger. If you ls .context/hooks/, that's where your triggers live.

            ","path":["Recipes","Agents and Automation","Authoring Lifecycle Triggers"],"tags":[]},{"location":"recipes/triggers/#step-2-write-the-logic","level":2,"title":"Step 2: Write the Logic","text":"

            Open the file and replace the template body:

            #!/usr/bin/env bash\nset -euo pipefail\n\npayload=$(cat)\ntool=$(echo \"$payload\" | jq -r '.tool // empty')\npath=$(echo \"$payload\" | jq -r '.path // empty')\n\n# Only gate write-family tools.\ncase \"$tool\" in\n  write_file|edit_file|apply_patch) ;;\n  *)\n    echo '{\"action\": \"allow\"}'\n    exit 0\n    ;;\nesac\n\n# Block any path under internal/crypto/.\ncase \"$path\" in\n  internal/crypto/*|*/internal/crypto/*)\n    jq -n --arg p \"$path\" '{\n      action: \"block\",\n      message: (\"Edits to \" + $p + \" require manual review. \" +\n                \"See CONVENTIONS.md for the crypto-change process.\")\n    }'\n    exit 0\n    ;;\nesac\n\necho '{\"action\": \"allow\"}'\n

            A few things to note:

            • set -euo pipefail: any unhandled error aborts the script. Critical for a security-relevant trigger.
            • Quote everything from jq: the path field comes from the AI tool; treat it as untrusted input.
            • Explicit allow case: the default is allow. An empty or missing response is a risky default.
            • Use jq -n --arg for output construction, as it is safer than string concatenation when the message may contain special characters.
            ","path":["Recipes","Agents and Automation","Authoring Lifecycle Triggers"],"tags":[]},{"location":"recipes/triggers/#step-3-test-with-a-mock-payload","level":2,"title":"Step 3: Test with a Mock Payload","text":"

            Before enabling the trigger, test it with a realistic mock input using ctx trigger test. This runs the script against a synthetic JSON payload without actually firing any AI tool.

            # Test the \"should block\" case\nctx trigger test pre-tool-use --tool write_file --path internal/crypto/aes.go\n

            Expected: the trigger returns {\"action\":\"block\", \"message\": \"...\"}.

            # Test the \"should allow\" case\nctx trigger test pre-tool-use --tool write_file --path internal/memory/mirror.go\n

            Expected: the trigger returns {\"action\":\"allow\"}.

            # Test that non-write tools pass through\nctx trigger test pre-tool-use --tool read_file --path internal/crypto/aes.go\n

            Expected: {\"action\":\"allow\"} because the case statement only gates write-family tools.

            If any of these cases misbehave, fix the trigger before enabling it. The trigger is disabled at this point, so misbehavior doesn't affect real AI sessions.

            ","path":["Recipes","Agents and Automation","Authoring Lifecycle Triggers"],"tags":[]},{"location":"recipes/triggers/#step-4-enable-it","level":2,"title":"Step 4: Enable It","text":"

            Once the test cases pass, enable the trigger:

            ctx trigger enable protect-crypto\n

            That sets the executable bit. Next time a pre-tool-use event fires, the trigger will run.

            Verify it's enabled:

            ctx trigger list\n

            Should show protect-crypto under pre-tool-use with an enabled indicator.

            ","path":["Recipes","Agents and Automation","Authoring Lifecycle Triggers"],"tags":[]},{"location":"recipes/triggers/#step-5-iterate-safely","level":2,"title":"Step 5: Iterate Safely","text":"

            If you discover a bug after enabling, disable first, fix second:

            ctx trigger disable protect-crypto\n# ...edit the script...\nctx trigger test pre-tool-use --tool write_file --path internal/crypto/aes.go\nctx trigger enable protect-crypto\n

            Disabling simply clears the executable bit; the script stays on disk, and ctx trigger enable re-enables it without rewriting anything.

            ","path":["Recipes","Agents and Automation","Authoring Lifecycle Triggers"],"tags":[]},{"location":"recipes/triggers/#patterns-worth-copying","level":2,"title":"Patterns Worth Copying","text":"","path":["Recipes","Agents and Automation","Authoring Lifecycle Triggers"],"tags":[]},{"location":"recipes/triggers/#logging-not-blocking","level":3,"title":"Logging, Not Blocking","text":"

            For auditing or analytics, return {\"action\":\"allow\"} always and append to a log as a side effect:

            #!/usr/bin/env bash\nset -euo pipefail\npayload=$(cat)\necho \"$payload\" >> .context/logs/tool-use.jsonl\necho '{\"action\":\"allow\"}'\n
            ","path":["Recipes","Agents and Automation","Authoring Lifecycle Triggers"],"tags":[]},{"location":"recipes/triggers/#context-injection-at-session-start","level":3,"title":"Context Injection at Session Start","text":"

            A session-start trigger can prepend text to the agent's initial prompt by emitting {\"action\":\"inject\", \"content\": \"...\"}. This is useful for injecting daily standup notes, open PRs, or rotating TODOs without storing them in a steering file.

            ","path":["Recipes","Agents and Automation","Authoring Lifecycle Triggers"],"tags":[]},{"location":"recipes/triggers/#chaining-triggers-of-the-same-type","level":3,"title":"Chaining Triggers of the Same Type","text":"

            Multiple scripts in the same type directory all run. If any returns action: block, the block wins. Keep individual triggers single-purpose and rely on composition.

            ","path":["Recipes","Agents and Automation","Authoring Lifecycle Triggers"],"tags":[]},{"location":"recipes/triggers/#common-mistakes","level":2,"title":"Common Mistakes","text":"

            Forgetting the shebang. Without #!/usr/bin/env bash, the trigger won't execute even with the executable bit set.

            Not quoting $path. If you use $path in a command substitution or a case glob without quoting, a file name with spaces or metacharacters will break the trigger in surprising ways.

            Enabling before testing. ctx trigger enable makes the script live immediately. Always ctx trigger test first.

            Outputting non-JSON. The trigger's stdout must be valid JSON or ctx's trigger runner will log a parse error. Use jq -n to construct output rather than hand-writing JSON strings.

            Mixing hook and trigger vocabulary. The command is ctx trigger but the on-disk directory is .context/hooks/. The feature was renamed; the directory name lags behind. Don't let this confuse you; they refer to the same thing.

            ","path":["Recipes","Agents and Automation","Authoring Lifecycle Triggers"],"tags":[]},{"location":"recipes/triggers/#see-also","level":2,"title":"See Also","text":"
            • ctx trigger reference: full command, flag, and event-type reference.
            • ctx steering: persistent rules, not scripts. Use steering when the thing you want is \"tell the AI to always do X\" rather than \"run a script when Y happens.\"
            • Writing steering files: the rule-based equivalent of this recipe.
            ","path":["Recipes","Agents and Automation","Authoring Lifecycle Triggers"],"tags":[]},{"location":"recipes/troubleshooting/","level":1,"title":"Troubleshooting","text":"","path":["Recipes","Maintenance","Troubleshooting"],"tags":[]},{"location":"recipes/troubleshooting/#the-problem","level":2,"title":"The Problem","text":"

            Something isn't working: a hook isn't firing, nudges are too noisy, context seems stale, or the agent isn't following instructions. The information to diagnose it exists (across status, drift, event logs, hook config, and session history), but assembling it manually is tedious.

            How do you figure out what's wrong and fix it?

            ","path":["Recipes","Maintenance","Troubleshooting"],"tags":[]},{"location":"recipes/troubleshooting/#tldr","level":2,"title":"TL;DR","text":"
            ctx doctor                   # structural health check\nctx hook event --last 20  # recent hook activity\n# or ask: \"something seems off, can you diagnose?\"\n
            ","path":["Recipes","Maintenance","Troubleshooting"],"tags":[]},{"location":"recipes/troubleshooting/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Tool Type Purpose ctx doctor CLI command Structural health report ctx doctor --json CLI command Machine-readable health report ctx hook event CLI command Query local event log /ctx-doctor Skill Agent-driven diagnosis with analysis","path":["Recipes","Maintenance","Troubleshooting"],"tags":[]},{"location":"recipes/troubleshooting/#the-workflow","level":2,"title":"The Workflow","text":"","path":["Recipes","Maintenance","Troubleshooting"],"tags":[]},{"location":"recipes/troubleshooting/#quick-check-ctx-doctor","level":3,"title":"Quick Check: ctx doctor","text":"

            Run ctx doctor for an instant structural health report. It checks context initialization, required files, drift, hook configuration, event logging, webhooks, reminders, task completion ratio, and context token size: all in one pass:

            ctx doctor\n
            ctx doctor\n==========\n\nStructure\n  ✓ Context initialized (.context/)\n  ✓ Required files present (4/4)\n\nQuality\n  ⚠ Drift: 2 warnings (stale path in ARCHITECTURE.md, high entry count in LEARNINGS.md)\n\nHooks\n  ✓ hooks.json valid (14 hooks registered)\n  ○ Event logging disabled (enable with event_log: true in .ctxrc)\n\nState\n  ✓ No pending reminders\n  ⚠ Task completion ratio high (18/22 = 82%): consider archiving\n\nSize\n  ✓ Context size: ~4200 tokens (budget: 8000)\n\nSummary: 2 warnings, 0 errors\n

            Warnings are non-critical but worth fixing. Errors need attention. Informational notes (○) flag optional features that aren't enabled.

            For scripting:

            ctx doctor --json | jq '.warnings'\n
            ","path":["Recipes","Maintenance","Troubleshooting"],"tags":[]},{"location":"recipes/troubleshooting/#deep-dive-ctx-doctor","level":3,"title":"Deep Dive: /ctx-doctor","text":"

            When you need the agent to reason about what's wrong, use the skill. Ask naturally or invoke directly:

            Why didn't my hook fire?\nSomething seems off, can you diagnose?\n/ctx-doctor\n

            The agent follows a triage sequence:

            1. Baseline: runs ctx doctor --json for structural health
            2. Events: runs ctx hook event --json --last 100 (if event logging enabled)
            3. Correlate: connects findings across both sources
            4. Present: structured findings with evidence
            5. Suggest: actionable next steps (but doesn't auto-fix)

            The skill degrades gracefully: without event logging enabled, it still runs structural checks and notes what you'd gain by enabling it.

            ","path":["Recipes","Maintenance","Troubleshooting"],"tags":[]},{"location":"recipes/troubleshooting/#raw-event-inspection","level":3,"title":"Raw Event Inspection","text":"

            For power users: ctx hook event with filters gives direct access to the event log.

            # Last 50 events (default)\nctx hook event\n\n# Events from a specific session\nctx hook event --session eb1dc9cd-0163-4853-89d0-785fbfaae3a6\n\n# Only QA reminder events\nctx hook event --hook qa-reminder\n\n# Raw JSONL for jq processing\nctx hook event --json | jq '.message'\n\n# Include rotated (older) events\nctx hook event --all --last 100\n

            Filters use AND logic: --hook qa-reminder --session abc123 returns only QA reminder events from that specific session.

            ","path":["Recipes","Maintenance","Troubleshooting"],"tags":[]},{"location":"recipes/troubleshooting/#common-problems","level":2,"title":"Common Problems","text":"","path":["Recipes","Maintenance","Troubleshooting"],"tags":[]},{"location":"recipes/troubleshooting/#ctx-not-initialized","level":3,"title":"\"ctx: Not Initialized\"","text":"

            Symptoms: Any ctx command fails with ctx: not initialized - run \"ctx init\" first.

            Cause: You're running ctx in a directory without an initialized .context/ directory. This guard runs on all user-facing commands to prevent confusing downstream errors.

            Fix:

            ctx init          # create .context/ with template files\nctx init --minimal  # or just the essentials (CONSTITUTION, TASKS, DECISIONS)\n

            Commands that work without initialization: ctx init, ctx setup, ctx doctor, and help-only grouping commands (ctx, ctx system).

            ","path":["Recipes","Maintenance","Troubleshooting"],"tags":[]},{"location":"recipes/troubleshooting/#my-hook-isnt-firing","level":3,"title":"\"My Hook Isn't Firing\"","text":"

            Symptoms: No nudges appearing, webhook silent, event log shows no entries for the expected hook.

            Diagnosis:

            # 1. Check if ctx is installed and on PATH\nwhich ctx && ctx --version\n\n# 2. Check if the hook is registered\ngrep \"check-persistence\" ~/.claude/plugins/ctx/hooks.json\n\n# 3. Run the hook manually to see if it errors\necho '{\"session_id\":\"test\"}' | ctx system check-persistence\n\n# 4. Check event log for the hook (if enabled)\nctx hook event --hook check-persistence\n

            Common causes:

            • Plugin is not installed: run ctx init --claude to reinstall
            • PATH issue: the hook invokes ctx from PATH; ensure it resolves
            • Throttle active: most hooks fire once per day: check .context/state/ for daily marker files
            • Hook silenced: a custom message override may be an empty file: check ctx hook message list for overrides
            ","path":["Recipes","Maintenance","Troubleshooting"],"tags":[]},{"location":"recipes/troubleshooting/#too-many-nudges","level":3,"title":"\"Too Many Nudges\"","text":"

            Symptoms: The agent is overwhelmed with hook output. Context checkpoints, persistence reminders, and QA gates fire constantly.

            Diagnosis:

            # Check how often hooks fired recently\nctx hook event --last 50\n\n# Count fires per hook\nctx hook event --json | jq -r '.detail.hook // \"unknown\"' \\\n  | sort | uniq -c | sort -rn\n

            Common causes:

            • QA reminder is noisy by design: it fires on every Edit call with no throttle. This is intentional. If it's too much, silence it with an empty override: ctx hook message edit qa-reminder gate, then empty the file
            • Long session: context checkpoint fires with increasing frequency after prompt 15. This is the system telling you the session is getting long: consider wrapping up
            • Short throttle window: if you deleted marker files in .context/state/, daily-throttled hooks will re-fire
            • Outdated Claude Code plugin: Update the plugin using Claude Code → /plugin → \"Marketplace\"
            • ctx version mismatch: Build (or download) and install the latest ctx version.
            ","path":["Recipes","Maintenance","Troubleshooting"],"tags":[]},{"location":"recipes/troubleshooting/#context-seems-stale","level":3,"title":"\"Context Seems Stale\"","text":"

            Symptoms: The agent references outdated information, paths that don't exist, or decisions that were reversed.

            Diagnosis:

            # Structural drift check\nctx drift\n\n# Full doctor check (includes drift + more)\nctx doctor\n\n# Check when context files were last modified\nctx status --verbose\n

            Common causes:

            • Drift accumulated: stale path references in ARCHITECTURE.md or CONVENTIONS.md. Fix with ctx drift --fix or ask the agent to clean up.
            • Task backlog: too many completed tasks diluting active context. Archive with ctx task archive or ctx compact --archive.
            • Large context files: LEARNINGS.md with 40+ entries competes for attention. Consolidate with /ctx-consolidate.
            • Missing session ceremonies: if /ctx-remember and /ctx-wrap-up aren't being used, context doesn't get refreshed. See Session Ceremonies.
            ","path":["Recipes","Maintenance","Troubleshooting"],"tags":[]},{"location":"recipes/troubleshooting/#the-agent-isnt-following-instructions","level":3,"title":"\"The Agent Isn't Following Instructions\"","text":"

            Symptoms: The agent ignores conventions, forgets decisions, or acts contrary to CONSTITUTION.md rules.

            Diagnosis:

            # Check context token size: Is it too large for the model?\nctx doctor --json | jq '.results[] | select(.name == \"context_size\")'\n\n# Check if context is actually being loaded\nctx hook event --hook context-load-gate\n

            Common causes:

            • Context too large: if total tokens exceed the model's effective attention, instructions get diluted. Check ctx doctor for the size check. Compact with ctx compact --archive.
            • Context not loading: if context-load-gate hasn't fired, the agent may not have received context. Verify the hook is registered.
            • Conflicting instructions: CONVENTIONS.md says one thing, AGENT_PLAYBOOK.md says another. Review both files for consistency.
            • Agent drift: the agent's behavior diverges from instructions over long sessions. This is normal. Use /ctx-reflect to re-anchor, or start a new session.
            ","path":["Recipes","Maintenance","Troubleshooting"],"tags":[]},{"location":"recipes/troubleshooting/#prerequisites","level":2,"title":"Prerequisites","text":"
            • Event logging (optional but recommended): event_log: true in .ctxrc
            • ctx initialized: ctx init

            Event logging is not required for ctx doctor or /ctx-doctor to work. Both degrade gracefully: structural checks run regardless, and the skill notes when event data is unavailable.

            ","path":["Recipes","Maintenance","Troubleshooting"],"tags":[]},{"location":"recipes/troubleshooting/#tips","level":2,"title":"Tips","text":"
            • Start with ctx doctor: It's the fastest way to get a comprehensive health picture. Save event log inspection for when you need to understand when and how often something happened.
            • Enable event logging early: The log is opt-in and low-cost (~250 bytes per event, 1MB rotation cap). Enable it before you need it: Diagnosing a problem without historical data is much harder.
            • Use the skill for correlation: ctx doctor tells you what is wrong. /ctx-doctor tells you why by correlating structural findings with event patterns. The agent can spot connections that individual commands miss.
            • Event log is gitignored: It's machine-local diagnostic data, not project context. Different machines produce different event streams.
            ","path":["Recipes","Maintenance","Troubleshooting"],"tags":[]},{"location":"recipes/troubleshooting/#next-up","level":2,"title":"Next Up","text":"

            Detecting and Fixing Drift →: Keep context files accurate as your codebase evolves.

            ","path":["Recipes","Maintenance","Troubleshooting"],"tags":[]},{"location":"recipes/troubleshooting/#see-also","level":2,"title":"See Also","text":"
            • Auditing System Hooks: the complete hook catalog and webhook-based audit trails
            • Detecting and Fixing Drift: structural and semantic drift detection and repair
            • Webhook Notifications: push notifications for hook activity
            • ctx doctor CLI: full command reference
            • ctx hook event CLI: event log query reference
            • /ctx-doctor skill: agent-driven diagnosis
            ","path":["Recipes","Maintenance","Troubleshooting"],"tags":[]},{"location":"recipes/webhook-notifications/","level":1,"title":"Webhook Notifications","text":"","path":["Recipes","Hooks and Notifications","Webhook Notifications"],"tags":[]},{"location":"recipes/webhook-notifications/#the-problem","level":2,"title":"The Problem","text":"

            Your agent runs autonomously (loops, implements, releases) while you are away from the terminal. You have no way to know when it finishes, hits a limit, or when a hook fires a nudge.

            How do you get notified about agent activity without watching the terminal?

            ","path":["Recipes","Hooks and Notifications","Webhook Notifications"],"tags":[]},{"location":"recipes/webhook-notifications/#tldr","level":2,"title":"TL;DR","text":"
            ctx hook notify setup  # configure webhook URL (encrypted)\nctx hook notify test   # verify delivery\n# Hooks auto-notify on: session-end, loop-iteration, resource-danger\n
            ","path":["Recipes","Hooks and Notifications","Webhook Notifications"],"tags":[]},{"location":"recipes/webhook-notifications/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Tool Type Purpose ctx hook notify setup CLI command Configure and encrypt webhook URL ctx hook notify test CLI command Send a test notification ctx hook notify --event <name> \"msg\" CLI command Send a notification from scripts/skills .ctxrc notify.events Configuration Filter which events reach your webhook","path":["Recipes","Hooks and Notifications","Webhook Notifications"],"tags":[]},{"location":"recipes/webhook-notifications/#the-workflow","level":2,"title":"The Workflow","text":"","path":["Recipes","Hooks and Notifications","Webhook Notifications"],"tags":[]},{"location":"recipes/webhook-notifications/#step-1-get-a-webhook-url","level":3,"title":"Step 1: Get a Webhook URL","text":"

            Any service that accepts HTTP POST with JSON works. Common options:

            Service How to get a URL IFTTT Create an applet with the \"Webhooks\" trigger Slack Create an Incoming Webhook Discord Channel Settings > Integrations > Webhooks ntfy.sh Use https://ntfy.sh/your-topic (no signup) Pushover Use API endpoint with your user key

            The URL contains auth tokens. ctx encrypts it; it never appears in plaintext in your repo.

            ","path":["Recipes","Hooks and Notifications","Webhook Notifications"],"tags":[]},{"location":"recipes/webhook-notifications/#step-2-configure-the-webhook","level":3,"title":"Step 2: Configure the Webhook","text":"
            ctx hook notify setup\n# Enter webhook URL: https://maker.ifttt.com/trigger/ctx/json/with/key/YOUR_KEY\n# Webhook configured: https://maker.ifttt.com/***\n# Encrypted at: .context/.notify.enc\n

            This encrypts the URL with AES-256-GCM using the same key as the scratchpad (~/.ctx/.ctx.key). The encrypted file (.context/.notify.enc) is safe to commit. The key lives outside the project and is never committed.

            ","path":["Recipes","Hooks and Notifications","Webhook Notifications"],"tags":[]},{"location":"recipes/webhook-notifications/#step-3-test-it","level":3,"title":"Step 3: Test It","text":"
            ctx hook notify test\n# Webhook responded: HTTP 200 OK\n

            If you see No webhook configured, run ctx hook notify setup first.

            ","path":["Recipes","Hooks and Notifications","Webhook Notifications"],"tags":[]},{"location":"recipes/webhook-notifications/#step-4-configure-events","level":3,"title":"Step 4: Configure Events","text":"

            Notifications are opt-in: no events are sent unless you configure an event list in .ctxrc:

            # .ctxrc\nnotify:\n  events:\n    - loop       # loop completion or max-iteration hit\n    - nudge      # VERBATIM relay hooks (context checkpoint, persistence, etc.)\n    - relay      # all hook output (verbose, for debugging)\n    - heartbeat  # every-prompt session-alive signal with metadata\n

            Only listed events fire. Omitting an event silently drops it.

            ","path":["Recipes","Hooks and Notifications","Webhook Notifications"],"tags":[]},{"location":"recipes/webhook-notifications/#step-5-use-in-your-own-skills","level":3,"title":"Step 5: Use in Your Own Skills","text":"

            Add ctx hook notify calls to any skill or script:

            # In a release skill\nctx hook notify --event release \"v1.2.0 released successfully\" 2>/dev/null || true\n\n# In a backup script\nctx hook notify --event backup \"Nightly backup completed\" 2>/dev/null || true\n

            The 2>/dev/null || true suffix ensures the notification never breaks your script: If there's no webhook or the HTTP call fails, it's a silent noop.

            ","path":["Recipes","Hooks and Notifications","Webhook Notifications"],"tags":[]},{"location":"recipes/webhook-notifications/#event-types","level":2,"title":"Event Types","text":"

            ctx fires these events automatically:

            Event Source When loop Loop script Loop completes or hits max iterations nudge System hooks VERBATIM relay nudge is emitted (context checkpoint, persistence, ceremonies, journal, resources, knowledge, version) relay System hooks Any hook output (VERBATIM relays, agent directives, block responses) heartbeat System hook Every prompt: session-alive signal with prompt count and context modification status test ctx hook notify test Manual test notification (custom) Your skills You wire ctx hook notify --event <name> in your own scripts

            nudge vs relay: The nudge event fires only for VERBATIM relay hooks (the ones the agent is instructed to show verbatim). The relay event fires for all hook output: VERBATIM relays, agent directives, and hard gates. Subscribe to relay for debugging (\"did the agent get the post-commit nudge?\"), nudge for user-facing assurance (\"was the checkpoint emitted?\").

            Webhooks as a Hook Audit Trail

            Subscribe to relay events and you get an external record of every hook that fires, independent of the agent.

            This lets you verify hooks are running and catch cases where the agent absorbs a nudge instead of surfacing it.

            See Auditing System Hooks for the full workflow.

            ","path":["Recipes","Hooks and Notifications","Webhook Notifications"],"tags":[]},{"location":"recipes/webhook-notifications/#payload-format","level":2,"title":"Payload Format","text":"

            Every notification sends a JSON POST:

            {\n  \"event\": \"nudge\",\n  \"message\": \"check-context-size: Context window at 82%\",\n  \"detail\": {\n    \"hook\": \"check-context-size\",\n    \"variant\": \"window\",\n    \"variables\": {\"Percentage\": 82, \"TokenCount\": \"164k\"}\n  },\n  \"session_id\": \"abc123-...\",\n  \"timestamp\": \"2026-02-22T14:30:00Z\",\n  \"project\": \"ctx\"\n}\n

            The detail field is a structured template reference containing the hook name, variant, and any template variables. This lets receivers filter by hook or variant without parsing rendered text. The field is omitted when no template reference applies (e.g. custom ctx hook notify calls).

            ","path":["Recipes","Hooks and Notifications","Webhook Notifications"],"tags":[]},{"location":"recipes/webhook-notifications/#heartbeat-payload","level":3,"title":"Heartbeat Payload","text":"

            The heartbeat event fires on every prompt with session metadata and token usage telemetry:

            {\n  \"event\": \"heartbeat\",\n  \"message\": \"heartbeat: prompt #7 (context_modified=false tokens=158k pct=79%)\",\n  \"detail\": {\n    \"hook\": \"heartbeat\",\n    \"variant\": \"pulse\",\n    \"variables\": {\n      \"prompt_count\": 7,\n      \"session_id\": \"abc123-...\",\n      \"context_modified\": false,\n      \"tokens\": 158000,\n      \"context_window\": 200000,\n      \"usage_pct\": 79\n    }\n  },\n  \"session_id\": \"abc123-...\",\n  \"timestamp\": \"2026-02-28T10:15:00Z\",\n  \"project\": \"ctx\"\n}\n

            The tokens, context_window, and usage_pct fields are included when token data is available from the session JSONL file. They are omitted when no usage data has been recorded yet (e.g. first prompt).

            Unlike other events, heartbeat fires every prompt (not throttled). Use it for observability dashboards or liveness monitoring of long-running sessions.

            ","path":["Recipes","Hooks and Notifications","Webhook Notifications"],"tags":[]},{"location":"recipes/webhook-notifications/#security-model","level":2,"title":"Security Model","text":"Component Location Committed? Permissions Encryption key ~/.ctx/.ctx.key No (user-level) 0600 Encrypted URL .context/.notify.enc Yes (safe) 0600 Webhook URL Never on disk in plaintext N/A N/A

            The key is shared with the scratchpad. If you rotate the encryption key, re-run ctx hook notify setup to re-encrypt the webhook URL with the new key.

            ","path":["Recipes","Hooks and Notifications","Webhook Notifications"],"tags":[]},{"location":"recipes/webhook-notifications/#key-rotation","level":2,"title":"Key Rotation","text":"

            ctx checks the age of the encryption key once per day. If it's older than 90 days (configurable via key_rotation_days), a VERBATIM nudge is emitted suggesting rotation.

            # .ctxrc\nkey_rotation_days: 30   # nudge sooner (default: 90)\n
            ","path":["Recipes","Hooks and Notifications","Webhook Notifications"],"tags":[]},{"location":"recipes/webhook-notifications/#worktrees","level":2,"title":"Worktrees","text":"

            The webhook URL is encrypted with the same encryption key (~/.ctx/.ctx.key). Because the key lives at the user level, it is shared across all worktrees on the same machine - notifications work in worktrees automatically.

            If the key is absent on a machine (for example, a fresh CI runner that never ran ctx hook notify setup), agents running in worktrees there cannot send webhook alerts. For autonomous runs where worktree agents are opaque, monitor them from the terminal rather than relying on webhooks. Enrich journals and review results on the main branch after merging.

            ","path":["Recipes","Hooks and Notifications","Webhook Notifications"],"tags":[]},{"location":"recipes/webhook-notifications/#event-log-the-local-complement","level":2,"title":"Event Log: The Local Complement","text":"

            Don't need a webhook but want diagnostic visibility? Enable event_log: true in .ctxrc. The event log writes the same payload as webhooks to a local JSONL file (.context/state/events.jsonl) that you can query without any external service:

            ctx hook event --last 20          # recent hook activity\nctx hook event --hook qa-reminder # filter by hook\n

            Webhooks and event logging are independent: you can use either, both, or neither. Webhooks give you push notifications and an external audit trail. The event log gives you local queryability and ctx doctor integration.

            See Troubleshooting for how they work together.

            ","path":["Recipes","Hooks and Notifications","Webhook Notifications"],"tags":[]},{"location":"recipes/webhook-notifications/#tips","level":2,"title":"Tips","text":"
            • Fire-and-forget: Notifications never block. HTTP errors are silently ignored. No retry, no response parsing.
            • No webhook = no cost: When no webhook is configured, ctx hook notify exits immediately. System hooks that call notify.Send() add zero overhead.
            • Multiple projects: Each project has its own .notify.enc. You can point different projects at different webhooks.
            • Event filter is per-project: Configure notify.events in each project's .ctxrc independently.
            ","path":["Recipes","Hooks and Notifications","Webhook Notifications"],"tags":[]},{"location":"recipes/webhook-notifications/#next-up","level":2,"title":"Next Up","text":"

            Auditing System Hooks →: Verify your hooks are running, audit what they do, and get alerted when they go silent.

            ","path":["Recipes","Hooks and Notifications","Webhook Notifications"],"tags":[]},{"location":"recipes/webhook-notifications/#see-also","level":2,"title":"See Also","text":"
            • CLI Reference: ctx hook notify: full command reference
            • Configuration: .ctxrc settings including notify options
            • Running an Unattended AI Agent: how loops work and how notifications fit in
            • Hook Output Patterns: understanding VERBATIM relays, agent directives, and hard gates
            • Auditing System Hooks: using webhooks as an external audit trail for hook execution
            ","path":["Recipes","Hooks and Notifications","Webhook Notifications"],"tags":[]},{"location":"recipes/when-to-use-agent-teams/","level":1,"title":"When to Use a Team of Agents","text":"","path":["Recipes","Agents and Automation","When to Use a Team of Agents"],"tags":[]},{"location":"recipes/when-to-use-agent-teams/#the-problem","level":2,"title":"The Problem","text":"

            You have a task, and you are wondering: \"should I throw more agents at it?\"

            More agents can mean faster results, but they also mean coordination overhead, merge conflicts, divergent mental models, and wasted tokens re-reading context.

            The wrong setup costs more than it saves.

            This recipe is a decision framework: It helps you choose between a single agent, parallel worktrees, and a full agent team, and explains what ctx provides at each level.

            ","path":["Recipes","Agents and Automation","When to Use a Team of Agents"],"tags":[]},{"location":"recipes/when-to-use-agent-teams/#tldr","level":2,"title":"TL;DR","text":"
            • Single agent for most work;
            • Parallel worktrees when tasks touch disjoint file sets;
            • Agent teams only when tasks need real-time coordination. When in doubt, start with one agent.
            ","path":["Recipes","Agents and Automation","When to Use a Team of Agents"],"tags":[]},{"location":"recipes/when-to-use-agent-teams/#the-spectrum","level":2,"title":"The Spectrum","text":"

            There are three modes, ordered by complexity:

            ","path":["Recipes","Agents and Automation","When to Use a Team of Agents"],"tags":[]},{"location":"recipes/when-to-use-agent-teams/#1-single-agent-default","level":3,"title":"1. Single Agent (Default)","text":"

            One agent, one session, one branch. This is correct for most work.

            Use this when:

            • The task has linear dependencies (step 2 needs step 1's output);
            • Changes touch overlapping files;
            • You need tight feedback loops (review each change before the next);
            • The task requires deep understanding of a single area;
            • Total effort is less than a few hours of agent time.

            ctx provides: Full .context/: tasks, decisions, learnings, conventions, all in one session.

            The agent builds a coherent mental model and persists it as it goes.

            Example tasks: Bug fixes, feature implementation, refactoring a module, writing documentation for one area, debugging.

            ","path":["Recipes","Agents and Automation","When to Use a Team of Agents"],"tags":[]},{"location":"recipes/when-to-use-agent-teams/#2-parallel-worktrees-independent-tracks","level":3,"title":"2. Parallel Worktrees (Independent Tracks)","text":"

            2-4 agents, each in a separate git worktree on its own branch, working on non-overlapping parts of the codebase.

            Use this when:

            • You have 5+ independent tasks in the backlog;
            • Tasks group cleanly by directory or package;
            • File overlap between groups is zero or near-zero;
            • Each track can be completed and merged independently;
            • You want parallelism without coordination complexity.

            ctx provides: Shared .context/ via git (each worktree sees the same tasks, decisions, conventions). /ctx-worktree skill for setup and teardown. TASKS.md as a lightweight work queue.

            Example tasks: Docs + new package + test coverage (three tracks that don't touch the same files). Parallel recipe writing. Independent module development.

            See: Parallel Agent Development with Git Worktrees

            ","path":["Recipes","Agents and Automation","When to Use a Team of Agents"],"tags":[]},{"location":"recipes/when-to-use-agent-teams/#3-agent-team-coordinated-swarm","level":3,"title":"3. Agent Team (Coordinated Swarm)","text":"

            Multiple agents communicating via messages, sharing a task list, with a lead agent coordinating. Claude Code's team/swarm feature.

            Use this when:

            • Tasks have dependencies but can still partially overlap;
            • You need research and implementation happening simultaneously;
            • The work requires different roles (researcher, implementer, tester);
            • A lead agent needs to review and integrate others' work;
            • The task is large enough that coordination cost is justified.

            ctx provides: .context/ as shared state that all agents can read. Task tracking for work assignment. Decisions and learnings as team memory that survives individual agent turnover.

            Example tasks: Large refactor across modules where a lead reviews merges. Research and implementation where one agent explores options while another builds. Multi-file feature that needs integration testing after parallel implementation.

            ","path":["Recipes","Agents and Automation","When to Use a Team of Agents"],"tags":[]},{"location":"recipes/when-to-use-agent-teams/#the-decision-framework","level":2,"title":"The Decision Framework","text":"

            Ask these questions in order:

            Can one agent do this in a reasonable time?\n  YES → Single agent. Stop here.\n  NO  ↓\n\nCan the work be split into non-overlapping file sets?\n  YES → Parallel worktrees (2-4 tracks)\n  NO  ↓\n\nDo the subtasks need to communicate during execution?\n  YES → Agent team with lead coordination\n  NO  → Parallel worktrees with a merge step\n
            ","path":["Recipes","Agents and Automation","When to Use a Team of Agents"],"tags":[]},{"location":"recipes/when-to-use-agent-teams/#the-file-overlap-test","level":3,"title":"The File Overlap Test","text":"

            This is the critical decision point. Before choosing multi-agent, list the files each subtask would touch. If two subtasks modify the same file, they belong in the same track (or the same single-agent session).

            You: \"I want to parallelize these tasks. Which files would each one touch?\"\n\nAgent: [reads `TASKS.md`, analyzes codebase]\n       \"Task A touches internal/config/ and internal/cli/initialize/\n        Task B touches docs/ and site/\n        Task C touches internal/config/ and internal/cli/status/\n\n        Tasks A and C overlap on internal/config/ - they should be\n        in the same track. Task B is independent.\"\n

            When in doubt, keep things in one track. A merge conflict in a critical file costs more time than the parallelism saves.

            ","path":["Recipes","Agents and Automation","When to Use a Team of Agents"],"tags":[]},{"location":"recipes/when-to-use-agent-teams/#when-teams-make-things-worse","level":2,"title":"When Teams Make Things Worse","text":"

            \"More agents\" is not always better. Watch for these patterns:

            Merge hell: If you are spending more time resolving conflicts than the parallel work saved, you split wrong. Re-group by file overlap.

            Context divergence: Each agent builds its own mental model. After 30 minutes of independent work, agent A might make assumptions that contradict agent B's approach. Shorter tracks with frequent merges reduce this.

            Coordination theater: A lead agent spending most of its time assigning tasks, checking status, and sending messages instead of doing work. If the task list is clear enough, worktrees with no communication are cheaper.

            Re-reading overhead: Every agent reads .context/ on startup. A team of 4 agents each reading 4000 tokens of context = 16000 tokens before anyone does any work. For small tasks, that overhead dominates.

            ","path":["Recipes","Agents and Automation","When to Use a Team of Agents"],"tags":[]},{"location":"recipes/when-to-use-agent-teams/#what-ctx-gives-you-at-each-level","level":2,"title":"What ctx Gives You at Each Level","text":"ctx Feature Single Agent Worktrees Team .context/ files Full access Shared via git Shared via filesystem TASKS.md Work queue Split by track Assigned by lead Decisions/Learnings Persisted in session Persisted per branch Persisted by any agent /ctx-next Picks next task Picks within track Lead assigns /ctx-worktree N/A Setup + teardown Optional /ctx-commit Normal commits Per-branch commits Per-agent commits","path":["Recipes","Agents and Automation","When to Use a Team of Agents"],"tags":[]},{"location":"recipes/when-to-use-agent-teams/#team-composition-recipes","level":2,"title":"Team Composition Recipes","text":"

            Four practical team compositions for common workflows.

            ","path":["Recipes","Agents and Automation","When to Use a Team of Agents"],"tags":[]},{"location":"recipes/when-to-use-agent-teams/#feature-development-3-agents","level":3,"title":"Feature Development (3 Agents)","text":"Role Responsibility Architect Writes spec in specs/, breaks work into TASKS.md phases Implementer Picks tasks from TASKS.md, writes code, marks [x] done Reviewer Runs tests, ctx drift, lint; files issues as new tasks

            Coordination: TASKS.md checkboxes. Architect writes tasks before implementer starts. Reviewer runs after each implementer commit.

            Anti-pattern: All three agents editing the same file simultaneously. Sequence the work so only one agent touches a file at a time.

            ","path":["Recipes","Agents and Automation","When to Use a Team of Agents"],"tags":[]},{"location":"recipes/when-to-use-agent-teams/#consolidation-sprint-3-4-agents","level":3,"title":"Consolidation Sprint (3-4 Agents)","text":"Role Responsibility Auditor Runs ctx drift, identifies stale paths and broken refs Code Fixer Updates source code to match context (or vice versa) Doc Writer Updates ARCHITECTURE.md, CONVENTIONS.md, and docs/ Test Fixer (Optional) Fixes tests broken by the fixer's changes

            Coordination: Auditor's ctx drift output is the shared work queue. Each agent claims a subset of issues by adding #in-progress labels.

            Anti-pattern: Fixer and doc writer both editing ARCHITECTURE.md. Assign file ownership explicitly.

            ","path":["Recipes","Agents and Automation","When to Use a Team of Agents"],"tags":[]},{"location":"recipes/when-to-use-agent-teams/#release-prep-2-agents","level":3,"title":"Release Prep (2 Agents)","text":"Role Responsibility Release Notes Generates changelog from commits, writes release notes Validation Runs full test suite, lint, build across platforms

            Coordination: Both read TASKS.md to identify what shipped. Release notes agent works from git log; validation agent works from make audit.

            Anti-pattern: Release notes agent running tests \"to verify.\" Each agent stays in its lane.

            ","path":["Recipes","Agents and Automation","When to Use a Team of Agents"],"tags":[]},{"location":"recipes/when-to-use-agent-teams/#documentation-sprint-3-agents","level":3,"title":"Documentation Sprint (3 Agents)","text":"Role Responsibility Content Writes new pages, expands existing docs Cross-linker Adds nav entries, cross-references, \"See Also\" sections Verifier Builds site, checks broken links, validates rendering

            Coordination: Content agent writes files first. Cross-linker updates zensical.toml and index pages after content lands. Verifier builds after each batch.

            Anti-pattern: Content and cross-linker both editing zensical.toml. Batch nav updates into the cross-linker's pass.

            ","path":["Recipes","Agents and Automation","When to Use a Team of Agents"],"tags":[]},{"location":"recipes/when-to-use-agent-teams/#tips","level":2,"title":"Tips","text":"
            • Start with one agent: Only add parallelism when you have identified the bottleneck. \"This would go faster with more agents\" is usually wrong for tasks under 2 hours.
            • The 3-4 agent ceiling is real: Coordination overhead grows quadratically. 2 agents = 1 communication pair. 4 agents = 6 pairs. Beyond 4, you are managing agents more than doing work.
            • Worktrees > teams for most parallelism needs: If agents don't need to talk to each other during execution, worktrees give you parallelism with zero coordination overhead.
            • Use ctx as the shared brain: Whether it's one agent or four, the .context/ directory is the single source of truth. Decisions go in DECISIONS.md, not in chat messages between agents.
            • Merge early, merge often: Long-lived parallel branches diverge. Merge a track as soon as it's done rather than waiting for all tracks to finish.
            • TASKS.md conflicts are normal: Multiple agents completing different tasks will conflict on merge. The resolution is always additive: accept all [x] completions from both sides.
            ","path":["Recipes","Agents and Automation","When to Use a Team of Agents"],"tags":[]},{"location":"recipes/when-to-use-agent-teams/#next-up","level":2,"title":"Next Up","text":"

            Parallel Agent Development with Git Worktrees →: Run multiple agents on independent task tracks using git worktrees.

            ","path":["Recipes","Agents and Automation","When to Use a Team of Agents"],"tags":[]},{"location":"recipes/when-to-use-agent-teams/#go-deeper","level":2,"title":"Go Deeper","text":"
            • CLI Reference: all commands and flags
            • Integrations: setup for Claude Code, Cursor, Aider
            • Session Journal: browse and search session history
            ","path":["Recipes","Agents and Automation","When to Use a Team of Agents"],"tags":[]},{"location":"recipes/when-to-use-agent-teams/#see-also","level":2,"title":"See Also","text":"
            • Parallel Agent Development with Git Worktrees: the mechanical \"how\" for worktree-based parallelism
            • Running an Unattended AI Agent: serial autonomous loops - a different scaling strategy
            • Tracking Work Across Sessions: managing the task backlog that feeds into any multi-agent setup
            ","path":["Recipes","Agents and Automation","When to Use a Team of Agents"],"tags":[]},{"location":"reference/","level":1,"title":"Reference","text":"

            Technical reference for ctx commands, skills, and internals.

            ","path":["Reference"],"tags":[]},{"location":"reference/#the-system-explains-itself","level":3,"title":"The System Explains Itself","text":"

            The 12 properties that must hold for any valid ctx implementation. Not features: constraints. The system's contract with its users and contributors.

            ","path":["Reference"],"tags":[]},{"location":"reference/#code-conventions","level":3,"title":"Code Conventions","text":"

            Common patterns and fixes for the AST compliance tests in internal/audit/. When a test fails, find the matching section.

            ","path":["Reference"],"tags":[]},{"location":"reference/#cli","level":3,"title":"CLI","text":"

            Every command, subcommand, and flag. Now a top-level section: see CLI Reference.

            ","path":["Reference"],"tags":[]},{"location":"reference/#skills","level":3,"title":"Skills","text":"

            The full skill catalog: what each skill does, when it triggers, and how skills interact with commands.

            ","path":["Reference"],"tags":[]},{"location":"reference/#tool-ecosystem","level":3,"title":"Tool Ecosystem","text":"

            How ctx compares to Cursor Rules, Aider conventions, CLAUDE.md, and other context approaches.

            ","path":["Reference"],"tags":[]},{"location":"reference/#session-journal","level":3,"title":"Session Journal","text":"

            Export, browse, and enrich your session history. Covers the journal site, Obsidian export, and the enrichment pipeline.

            ","path":["Reference"],"tags":[]},{"location":"reference/#scratchpad","level":3,"title":"Scratchpad","text":"

            Encrypted, git-tracked scratch space for short notes and sensitive values that travel with the project.

            ","path":["Reference"],"tags":[]},{"location":"reference/#version-history","level":3,"title":"Version History","text":"

            Changelog for every ctx release.

            ","path":["Reference"],"tags":[]},{"location":"reference/audit-conventions/","level":1,"title":"Code Conventions","text":"","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#code-conventions-common-patterns-and-fixes","level":1,"title":"Code Conventions: Common Patterns and Fixes","text":"

            This guide documents the code conventions enforced by internal/audit/ AST tests. Each section shows the violation pattern, the fix, and the rationale. When a test fails, find the matching section below.

            All tests skip _test.go files. The patterns apply only to production code under internal/.

            ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#variable-shadowing-bare-err-reuse","level":2,"title":"Variable Shadowing (Bare err := Reuse)","text":"

            Test: TestNoVariableShadowing

            When a function has multiple := assignments to err, each shadows the previous one. This makes it impossible to tell which error a later if err != nil is checking.

            Before:

            func Run(cmd *cobra.Command) error {\n    data, err := os.ReadFile(path) \n    if err != nil {\n        return err\n    }\n\n    result, err := json.Unmarshal(data)  // shadows first err\n    if err != nil {\n        return err\n    }\n\n    err = validate(result)  // shadows again\n    return err\n}\n

            After:

            func Run(cmd *cobra.Command) error {\n    data, readErr := os.ReadFile(path)\n    if readErr != nil {\n        return readErr\n    }\n\n    result, parseErr := json.Unmarshal(data)\n    if parseErr != nil {\n        return parseErr\n    }\n\n    validateErr := validate(result)\n    return validateErr\n}\n

            Rule: Use descriptive error names (readErr, writeErr, parseErr, walkErr, absErr, relErr) so each error site is independently identifiable.

            ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#import-name-shadowing","level":2,"title":"Import Name Shadowing","text":"

            Test: TestNoImportNameShadowing

            When a local variable has the same name as an imported package, the import becomes inaccessible in that scope.

            Before:

            import \"github.com/ActiveMemory/ctx/internal/session\"\n\nfunc process(session *entity.Session) {  // param shadows import\n    // session package is now unreachable here\n}\n

            After:

            import \"github.com/ActiveMemory/ctx/internal/session\"\n\nfunc process(sess *entity.Session) {\n    // session package still accessible\n}\n

            Rule: Parameters, variables, and return values must not reuse imported package names. Common renames: session -> sess, token -> tok, config -> cfg, entry -> ent.

            ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#magic-strings","level":2,"title":"Magic Strings","text":"

            Test: TestNoMagicStrings

            String literals in function bodies are invisible to refactoring tools and cause silent breakage when the value changes in one place but not another.

            Before (string literals):

            func loadContext() {\n    data := filepath.Join(dir, \"TASKS.md\")\n    if strings.HasSuffix(name, \".yaml\") {\n        // ...\n    }\n}\n

            After:

            func loadContext() {\n    data := filepath.Join(dir, config.FilenameTask)\n    if strings.HasSuffix(name, config.ExtYAML) {\n        // ...\n    }\n}\n

            Before (format verbs, also caught):

            func EntryHash(text string) string {\n    h := sha256.Sum256([]byte(text))\n    return fmt.Sprintf(\"%x\", h[:8])\n}\n

            After:

            func EntryHash(text string) string {\n    h := sha256.Sum256([]byte(text))\n    return hex.EncodeToString(h[:cfgFmt.HashPrefixLen])\n}\n

            Before (URL schemes, also caught):

            if strings.HasPrefix(target, \"https://\") ||\n    strings.HasPrefix(target, \"http://\") {\n    return target\n}\n

            After:

            if strings.HasPrefix(target, cfgHTTP.PrefixHTTPS) ||\n    strings.HasPrefix(target, cfgHTTP.PrefixHTTP) {\n    return target\n}\n

            Exempt from this check:

            • Empty string \"\", single space \" \", indentation strings
            • Regex capture references ($1, ${name})
            • const and var definition sites (that's where constants live)
            • Struct tags
            • Import paths
            • Packages under internal/config/, internal/assets/tpl/

            Rule: If a string is used for comparison, path construction, or appears in 3+ files, it belongs in internal/config/ as a constant. Format strings belong in internal/config/ as named constants (e.g., cfgGit.FlagLastN, cfgTrace.RefFormat). User-facing prose belongs in internal/assets/ YAML files accessed via desc.Text().

            Common fix for fmt.Sprintf with format verbs:

            Pattern Fix fmt.Sprintf(\"%d\", n) strconv.Itoa(n) fmt.Sprintf(\"%d\", int64Val) strconv.FormatInt(int64Val, 10) fmt.Sprintf(\"%x\", bytes) hex.EncodeToString(bytes) fmt.Sprintf(\"%q\", s) strconv.Quote(s) fmt.Sscanf(s, \"%d\", &n) strconv.Atoi(s) fmt.Sprintf(\"-%d\", n) fmt.Sprintf(cfgGit.FlagLastN, n) \"https://\" cfgHTTP.PrefixHTTPS \"&lt;\" config constant in config/html/","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#direct-printf-calls","level":2,"title":"Direct Printf Calls","text":"

            Test: TestNoPrintfCalls

            cmd.Printf and cmd.PrintErrf bypass the write-package formatting pipeline and scatter user-facing text across the codebase.

            Before:

            func Run(cmd *cobra.Command, args []string) {\n    cmd.Printf(\"Found %d tasks\\n\", count)\n}\n

            After:

            func Run(cmd *cobra.Command, args []string) {\n    write.TaskCount(cmd, count)\n}\n

            Rule: All formatted output goes through internal/write/ which uses cmd.Print/cmd.Println with pre-formatted strings from desc.Text().

            ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#raw-time-format-strings","level":2,"title":"Raw Time Format Strings","text":"

            Test: TestNoRawTimeFormats

            Inline time format strings (\"2006-01-02\", \"15:04:05\") drift when one call site is updated but others are missed.

            Before:

            func formatDate(t time.Time) string {\n    return t.Format(\"2006-01-02\")\n}\n

            After:

            func formatDate(t time.Time) string {\n    return t.Format(cfgTime.DateFormat)\n}\n

            Rule: All time format strings must use constants from internal/config/time/.

            ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#direct-flag-registration","level":2,"title":"Direct Flag Registration","text":"

            Test: TestNoFlagBindOutsideFlagbind

            Direct cobra flag calls (.Flags().StringVar(), etc.) scatter flag wiring across dozens of cmd.go files. Centralizing through internal/flagbind/ gives one place to audit flag names, defaults, and description key lookups.

            Before:

            func Cmd() *cobra.Command {\n    var output string\n    c := &cobra.Command{Use: cmd.UseStatus}\n    c.Flags().StringVarP(&output, \"output\", \"o\", \"\",\n        \"output format\")\n    return c\n}\n

            After:

            func Cmd() *cobra.Command {\n    var output string\n    c := &cobra.Command{Use: cmd.UseStatus}\n    flagbind.StringFlagShort(c, &output, flag.Output,\n        flag.OutputShort, cmd.DescKeyOutput)\n    return c\n}\n

            Rule: All flag registration goes through internal/flagbind/. If the helper you need doesn't exist, add it to flagbind/flag.go before using it.

            ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#todo-comments","level":2,"title":"TODO Comments","text":"

            Test: TestNoTODOComments

            TODO, FIXME, HACK, and XXX comments in production code are invisible to project tracking. They accumulate silently and never get addressed.

            Before:

            // TODO: handle pagination\nfunc listEntries() []Entry {\n

            After:

            Remove the comment and add a task to .context/TASKS.md:

            - [ ] Handle pagination in listEntries (internal/task/task.go)\n

            Rule: Deferred work lives in TASKS.md, not in source comments.

            ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#dead-exports","level":2,"title":"Dead Exports","text":"

            Test: TestNoDeadExports

            Exported symbols with zero references outside their definition file are dead weight. They increase API surface, confuse contributors, and cost maintenance.

            Fix: Either delete the export (preferred) or demote it to unexported if it's still used within the file.

            If the symbol existed for historical reasons and might be needed again, move it to quarantine/deadcode/ with a .dead extension. This preserves the code in git without polluting the live codebase:

            quarantine/deadcode/internal/config/flag/flag.go.dead\n

            Each .dead file includes a header:

            // Dead exports quarantined from internal/config/flag/flag.go\n// Quarantined: 2026-04-02\n// Restore from git history if needed.\n

            Rule: If a test-only allowlist entry is needed (the export exists only for test use), add the fully qualified symbol to testOnlyExports in dead_exports_test.go. Keep this list small; prefer eliminating the export.

            ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#core-package-structure","level":2,"title":"Core Package Structure","text":"

            Test: TestCoreStructure

            core/ directories under internal/cli/ must contain only doc.go and test files at the top level. All domain logic lives in subpackages. This prevents core/ from becoming a god package.

            Before:

            internal/cli/dep/core/\n    go.go           # violation: logic at core/ level\n    python.go       # violation\n    node.go         # violation\n    types.go        # violation\n

            After:

            internal/cli/dep/core/\n    doc.go          # package doc only\n    golang/\n        golang.go\n        golang_test.go\n        doc.go\n    python/\n        python.go\n        python_test.go\n        doc.go\n    node/\n        node.go\n        node_test.go\n        doc.go\n

            Rule: Extract each logical unit into its own subpackage under core/. Each subpackage gets a doc.go. The subpackage name should match the domain concept (golang, check, fix, store), not a generic label (util, helper).

            ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#cross-package-types","level":2,"title":"Cross-Package Types","text":"

            Test: TestCrossPackageTypes

            When a type defined in one package is used from a different module (e.g., cli/doctor importing a type from cli/notify), the type has crossed its module boundary. Cross-cutting types belong in internal/entity/ for discoverability.

            Before:

            // internal/cli/notify/core/types.go\ntype NotifyPayload struct { ... }\n\n// internal/cli/doctor/core/check/check.go\nimport \"github.com/ActiveMemory/ctx/internal/cli/notify/core\"\nfunc check(p core.NotifyPayload) { ... }\n

            After:

            // internal/entity/notify.go\ntype NotifyPayload struct { ... }\n\n// internal/cli/doctor/core/check/check.go\nimport \"github.com/ActiveMemory/ctx/internal/entity\"\nfunc check(p entity.NotifyPayload) { ... }\n

            Exempt: Types inside entity/, proto/, core/ subpackages, and config/ packages. Same-module usage (e.g., cli/doctor/cmd/ using cli/doctor/core/) is not flagged.

            ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#type-file-convention","level":2,"title":"Type File Convention","text":"

            Test: TestTypeFileConvention, TestTypeFileConventionReport

            Exported types in core/ subpackages should live in types.go (the convention from CONVENTIONS.md), not scattered across implementation files. This makes type definitions discoverable. TestTypeFileConventionReport generates a diagnostic summary of all type placements for triage.

            Exception: entity/ organizes by domain (task.go, session.go), proto/ uses schema.go, and err/ packages colocate error types with their domain context.

            ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#desckey-yaml-linkage","level":2,"title":"DescKey / YAML Linkage","text":"

            Test: TestDescKeyYAMLLinkage

            Every DescKey constant must have a corresponding key in the YAML asset files, and every YAML key must have a corresponding DescKey constant. Orphans in either direction mean dead text or runtime panics.

            Fix for orphan YAML key: Delete the YAML entry, or add the corresponding DescKey constant in config/embed/{text,cmd,flag}/.

            Fix for orphan DescKey: Delete the constant, or add the corresponding entry in the YAML file under internal/assets/commands/text/, cmd/, or flag/.

            If the orphan YAML entry was once valid but the feature was removed, move the YAML entry to a .dead file in quarantine/deadcode/.

            ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#package-doc-quality","level":2,"title":"Package Doc Quality","text":"

            Test: TestPackageDocQuality

            Every package under internal/ must have a doc.go with a meaningful package doc comment (at least 8 lines of real content). One-liners and file-list patterns (// - foo.go, // Source files:) are flagged because they drift as files change.

            Template:

            //   /    ctx:                         https://ctx.ist\n// ,'`./    do you remember?\n// `.,'\\\n//   \\    Copyright 2026-present Context contributors.\n//                 SPDX-License-Identifier: Apache-2.0\n\n// Package mypackage does X.\n//\n// It handles Y by doing Z. The main entry point is [FunctionName]\n// which accepts A and returns B.\n//\n// Configuration is read from [config.SomeConstant]. Output is\n// written through [write.SomeHelper].\n//\n// This package is used by [parentpackage] during the W lifecycle\n// phase.\npackage mypackage\n
            ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#inline-regex-compilation","level":2,"title":"Inline Regex Compilation","text":"

            Test: TestNoInlineRegexpCompile

            regexp.MustCompile and regexp.Compile inside function bodies recompile the pattern on every call. Compiled patterns belong at package level.

            Before:

            func parse(s string) bool {\n    re := regexp.MustCompile(`\\d{4}-\\d{2}-\\d{2}`)\n    return re.MatchString(s)\n}\n

            After:

            // In internal/config/regex/regex.go:\n// DatePattern matches ISO date format (YYYY-MM-DD).\nvar DatePattern = regexp.MustCompile(`\\d{4}-\\d{2}-\\d{2}`)\n\n// In calling package:\nfunc parse(s string) bool {\n    return regex.DatePattern.MatchString(s)\n}\n

            Rule: All compiled regexes live in internal/config/regex/ as package-level var declarations. Two tests enforce this: TestNoInlineRegexpCompile catches function-body compilation, and TestNoRegexpOutsideRegexPkg catches package-level compilation outside config/regex/.

            ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#doc-comments","level":2,"title":"Doc Comments","text":"

            Test: TestDocComments

            All functions (exported and unexported), structs, and package-level variables must have a doc comment. Config packages allow group doc comments for const blocks.

            Before:

            func buildIndex(entries []Entry) map[string]int {\n

            After:

            // buildIndex maps entry names to their position in the\n// ordered slice for O(1) lookup during reconciliation.\n//\n// Parameters:\n//   - entries: ordered slice of entries to index\n//\n// Returns:\n//   - map[string]int: name-to-position mapping\nfunc buildIndex(entries []Entry) map[string]int {\n

            Rule: Every function, struct, and package-level var gets a doc comment in godoc format. Functions include Parameters: and Returns: sections. Structs with 2+ fields document every field. See CONVENTIONS.md for the full template.

            ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#line-length","level":2,"title":"Line Length","text":"

            Test: TestLineLength

            Lines in non-test Go files must not exceed 80 characters. This is a hard check, not a suggestion.

            Before:

            _ = trace.Record(fmt.Sprintf(cfgTrace.RefFormat, cfgTrace.RefTypeTask, matchedNum), state.Dir())\n

            After:

            ref := fmt.Sprintf(\n    cfgTrace.RefFormat, cfgTrace.RefTypeTask, matchedNum,\n)\n_ = trace.Record(ref, state.Dir())\n

            Rule: Break at natural points: function arguments, struct fields, chained calls. Long strings (URLs, struct tags) are the rare acceptable exception.

            ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#literal-whitespace","level":2,"title":"Literal Whitespace","text":"

            Test: TestNoLiteralWhitespace

            Bare whitespace string and byte literals (\"\\n\", \"\\r\\n\", \"\\t\") must not appear outside internal/config/token/. All other packages use the token constants.

            Before:

            output := strings.Join(lines, \"\\n\")\n

            After:

            output := strings.Join(lines, token.Newline)\n

            Rule: Whitespace literals are defined once in internal/config/token/. Use token.Newline, token.Tab, token.CRLF, etc.

            ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#magic-numeric-values","level":2,"title":"Magic Numeric Values","text":"

            Test: TestNoMagicValues

            Numeric literals in function bodies need constants, with narrow exceptions.

            Before:

            if len(entries) > 100 {\n    entries = entries[:100]\n}\n

            After:

            if len(entries) > config.MaxEntries {\n    entries = entries[:config.MaxEntries]\n}\n

            Exempt: 0, 1, -1, 2-10, strconv radix/bitsize args (10, 32, 64 in strconv.Parse*/Format*), octal permissions (caught separately by TestNoRawPermissions), and const/var definition sites.

            ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#inline-separators","level":2,"title":"Inline Separators","text":"

            Test: TestNoInlineSeparators

            strings.Join calls must use token constants for their separator argument, not string literals.

            Before:

            result := strings.Join(parts, \", \")\n

            After:

            result := strings.Join(parts, token.CommaSep)\n

            Rule: Separator strings live in internal/config/token/.

            ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#stuttery-function-names","level":2,"title":"Stuttery Function Names","text":"

            Test: TestNoStutteryFunctions

            Function names must not redundantly include their package name as a PascalCase word boundary. Go callers already write pkg.Function, so pkg.PkgFunction stutters.

            Before:

            // In package write\nfunc WriteJournal(cmd *cobra.Command, ...) {\n

            After:

            // In package write\nfunc Journal(cmd *cobra.Command, ...) {\n

            Exempt: Identity functions like write.Write / write.write.

            ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#predicate-naming-no-ishascan-prefix","level":2,"title":"Predicate Naming (No Is/Has/Can Prefix)","text":"

            Test: None (manual review convention)

            Exported methods that return bool must not use Is, Has, or Can prefixes. The predicate reads more naturally without them, especially at call sites where the package name provides context.

            Before:

            func IsCompleted(t *Task) bool { ... }\nfunc HasChildren(n *Node) bool { ... }\nfunc IsExemptPackage(path string) bool { ... }\n

            After:

            func Completed(t *Task) bool { ... }\nfunc Children(n *Node) bool { ... }  // or: ChildCount > 0\nfunc ExemptPackage(path string) bool { ... }\n

            Rule: Drop the prefix. Private helpers may use prefixes when it reads more naturally (isValid in a local context is fine). This convention applies to exported methods and package-level functions. See CONVENTIONS.md \"Predicates\" section.

            This is not yet enforced by an AST test; it requires semantic understanding of return types and naming intent that makes automated detection fragile. Apply during code review.

            ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#mixed-visibility","level":2,"title":"Mixed Visibility","text":"

            Test: TestNoMixedVisibility

            Files with exported functions must not also contain unexported functions. Public API and private helpers live in separate files.

            Before:

            load.go\n    func Load() { ... }        // exported\n    func parseHeader() { ... } // unexported, violation\n

            After:

            load.go\n    func Load() { ... }        // exported only\nparse.go\n    func parseHeader() { ... } // private helper\n

            Exempt: Files with exactly one function, doc.go, test files.

            ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#stray-errgo-files","level":2,"title":"Stray Err.Go Files","text":"

            Test: TestNoStrayErrFiles

            err.go files must only exist under internal/err/. Error constructors anywhere else create a broken-window pattern where contributors add local error definitions when they see a local err.go.

            Fix: Move the error constructor to internal/err/<domain>/.

            ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#cli-cmd-structure","level":2,"title":"CLI Cmd Structure","text":"

            Test: TestCLICmdStructure

            Each cmd/$sub/ directory under internal/cli/ may contain only cmd.go, run.go, doc.go, and test files. Extra .go files (helpers, output formatters, types) belong in the corresponding core/ subpackage.

            Before:

            internal/cli/doctor/cmd/root/\n    cmd.go\n    run.go\n    format.go   # violation: helper in cmd dir\n

            After:

            internal/cli/doctor/cmd/root/\n    cmd.go\n    run.go\ninternal/cli/doctor/core/format/\n    format.go\n    doc.go\n
            ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#desckey-namespace","level":2,"title":"DescKey Namespace","text":"

            Test: TestUseConstantsOnlyInCobraUse, TestDescKeyOnlyInLookupCalls, TestNoWrongNamespaceLookup

            Three tests enforce DescKey/Use constant discipline:

            1. Use* constants appear only in cobra Use: struct field assignments, never as arguments to desc.Text() or elsewhere.
            2. DescKey* constants are passed only to assets.CommandDesc(), assets.FlagDesc(), or desc.Text(), never to cobra Use:.
            3. No cross-namespace lookups: TextDescKey must not be passed to CommandDesc(), FlagDescKey must not be passed to Text(), etc.
            ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#yaml-examples-registry-linkage","level":2,"title":"YAML Examples / Registry Linkage","text":"

            Test: TestExamplesYAMLLinkage, TestRegistryYAMLLinkage

            Every key in examples.yaml and registry.yaml must match a known entry type constant. Prevents orphan entries that are never rendered.

            Fix: Delete the orphan YAML entry, or add the corresponding constant in config/entry/.

            ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#other-enforced-patterns","level":2,"title":"Other Enforced Patterns","text":"

            These tests follow the same fix approach: extract the operation to its designated package:

            Test Violation Fix TestNoNakedErrors fmt.Errorf/errors.New outside internal/err/ Add error constructor to internal/err/<domain>/ TestNoRawFileIO Direct os.ReadFile, os.Create, etc. Use io.SafeReadFile, io.SafeWriteFile, etc. TestNoRawLogging Direct fmt.Fprintf(os.Stderr, ...) Use log/warn.Warn() or log/event.Append() TestNoExecOutsideExecPkg exec.Command outside internal/exec/ Add command to internal/exec/<domain>/ TestNoCmdPrintOutsideWrite cmd.Print* outside internal/write/ Add output helper to internal/write/<domain>/ TestNoRawPermissions Octal literals (0644, 0755) Use config/fs.PermFile, config/fs.PermExec, etc. TestNoErrorsAs errors.As() Use errors.AsType() (generic, Go 1.23+) TestNoStringConcatPaths dir + \"/\" + file Use filepath.Join(dir, file)","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#general-fix-workflow","level":2,"title":"General Fix Workflow","text":"

            When an audit test fails:

            1. Read the error message. It includes file:line and a description of the violation.
            2. Find the matching section above. The test name maps directly to a section.
            3. Apply the pattern. Most fixes are mechanical: extract to the right package, rename a variable, or replace a literal with a constant.
            4. Run make test before committing. Audit tests run as part of go test ./internal/audit/.
            5. Don't add allowlist entries as a first resort. Fix the code. Allowlists exist only for genuinely unfixable cases (test-only exports, config packages that are definitionally exempt).
            ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/comparison/","level":1,"title":"Tool Ecosystem","text":"","path":["Reference","Tool Ecosystem"],"tags":[]},{"location":"reference/comparison/#high-level-mental-model","level":2,"title":"High-Level Mental Model","text":"

            Many tools help AI think.

            ctx helps AI remember.

            • Not by storing thoughts,
            • but by preserving intent.
            ","path":["Reference","Tool Ecosystem"],"tags":[]},{"location":"reference/comparison/#how-ctx-differs-from-similar-tools","level":2,"title":"How ctx Differs from Similar Tools","text":"

            There are many tools in the AI ecosystem that touch parts of the context problem:

            • Some manage prompts.
            • Some retrieve data.
            • Some provide runtime context objects.
            • Some offer enterprise platforms.

            ctx focuses on a different layer entirely.

            This page explains where ctx fits, and where it intentionally does not.

            ","path":["Reference","Tool Ecosystem"],"tags":[]},{"location":"reference/comparison/#the-core-distinction","level":2,"title":"The Core Distinction","text":"

            Most tools treat context as input.

            ctx treats context as infrastructure.

            That single difference explains nearly all of ctx's design choices.

            Question Most tools ctx Where does context live? In prompts or APIs In files How long does it last? One request / one session Across time Who can read it? The model Humans and tools How is it updated? Implicitly Explicitly Is it inspectable? Rarely Always","path":["Reference","Tool Ecosystem"],"tags":[]},{"location":"reference/comparison/#prompt-management-tools","level":2,"title":"Prompt Management Tools","text":"

            Examples include:

            • prompt templates;
            • reusable system prompts;
            • prompt libraries;
            • prompt versioning tools.

            These tools help you start a session.

            They do not help you continue one.

            Prompt tools:

            • inject text at session start;
            • are ephemeral by design;
            • do not evolve with the project.

            ctx:

            • persists knowledge over time;
            • accumulates decisions and learnings;
            • makes the context part of the repository itself.

            Prompt tooling and ctx are complementary, not competing: they operate in different layers.

            ","path":["Reference","Tool Ecosystem"],"tags":[]},{"location":"reference/comparison/#retrieval-augmented-generation-rag","level":2,"title":"Retrieval-Augmented Generation (RAG)","text":"

            RAG systems typically:

            • index documents
            • embed text
            • retrieve chunks dynamically at runtime

            They are excellent for:

            • large knowledge bases
            • static documentation
            • reference material

            RAG answers questions like:

            \"What information might be relevant right now?\"

            ctx answers a different question:

            \"What have we already decided, learned, or committed to?\"

            Here are some key differences:

            RAG ctx Statistical relevance Intentional relevance Embedding-based File-based Opaque retrieval Explicit structure Runtime query Persistent memory

            ctx does not replace RAG. Instead, it defines a persistent context layer that RAG can optionally augment.

            RAG belongs to the data plane; ctx defines the context control plane.

            It focuses on project memory, not knowledge search.

            ","path":["Reference","Tool Ecosystem"],"tags":[]},{"location":"reference/comparison/#agent-frameworks","level":2,"title":"Agent Frameworks","text":"

            Agent frameworks often provide:

            • task loops
            • tool orchestration
            • planner/executor patterns
            • autonomous iteration

            These systems are powerful, but they typically assume that:

            • memory is external
            • context is injected
            • state is transient

            Agent frameworks answer:

            \"How should the agent act?\"

            ctx answers:

            \"What should the agent remember?\"

            Without persistent context, agents tend to:

            • rediscover decisions
            • repeat mistakes
            • lose architectural intent

            This is why ctx pairs well with autonomous loop workflows:

            • The loop provides iteration
            • ctx provides continuity

            Together, loops become cumulative instead of forgetful.

            ","path":["Reference","Tool Ecosystem"],"tags":[]},{"location":"reference/comparison/#sdk-level-context-objects","level":2,"title":"SDK-Level Context Objects","text":"

            Some SDKs expose \"context\" objects that exist:

            • inside a process
            • during a request
            • for the lifetime of a call chain

            These are extremely useful and completely different.

            SDK context objects:

            • are in-memory
            • disappear when the process ends
            • are not shared across sessions

            ctx:

            • survives process restarts
            • survives new chats
            • survives new days

            They share a name, not a purpose.

            ","path":["Reference","Tool Ecosystem"],"tags":[]},{"location":"reference/comparison/#enterprise-context-platforms","level":2,"title":"Enterprise Context Platforms","text":"

            Enterprise platforms often provide:

            • centralized context services
            • dashboards
            • access control
            • organizational knowledge layers

            These tools are designed for:

            • teams
            • governance
            • compliance
            • managed environments

            ctx is intentionally:

            • local-first: context lives next to your code, not behind a service boundary.
            • file-based: everything important is a markdown file you can read, diff, grep, and version-control.
            • single-binary core: the context persistence path (init, add, agent, status, drift, load, sync, compact, task, decision, learning, and their siblings) is a single Go binary with no required runtime dependencies. Optional integrations (ctx trace (needs git), ctx serve (needs zensical), the ctx Hub (needs a running hub), Claude Code plugin (needs claude)) are opt-in and each declares its dependency explicitly.
            • CLI-driven: every feature is reachable from the command line and scriptable.
            • developer-controlled: no auto-updating cloud service, no telemetry, no account to sign up for.

            The core ctx binary does not require:

            • a server
            • a database
            • an account
            • a SaaS backend
            • network connectivity (for core operations)

            ctx optimizes for individual and small-team workflows where context should live next to code, not behind a service boundary.

            ","path":["Reference","Tool Ecosystem"],"tags":[]},{"location":"reference/comparison/#specific-tool-comparisons","level":2,"title":"Specific Tool Comparisons","text":"

            Users often evaluate ctx against specific tools they already use. These comparisons clarify where responsibilities overlap, where they diverge, and where the tools are genuinely complementary.

            ","path":["Reference","Tool Ecosystem"],"tags":[]},{"location":"reference/comparison/#claude-code-memory-anthropic-auto-memory","level":3,"title":"Claude Code Memory / Anthropic Auto-Memory","text":"

            Anthropic's auto-memory is tool-managed memory (L2): the model decides what to remember, stores it automatically, and retrieves it implicitly. ctx is system memory (L3): humans and agents explicitly curate decisions, learnings, and tasks in inspectable files.

            Auto-memory is convenient - you do not configure anything. But it is also opaque: you cannot see what was stored, edit it precisely, or share it across tools. ctx files are plain Markdown in your repository, visible in diffs and code review.

            The two are complementary. ctx can absorb auto-memory as an input source (importing what the model remembered into structured context files) while providing the durable, inspectable layer that auto-memory lacks.

            ","path":["Reference","Tool Ecosystem"],"tags":[]},{"location":"reference/comparison/#cursorrules-clauderules","level":3,"title":".Cursorrules / .Claude/rules","text":"

            Static rule files (.cursorrules, .claude/rules/) declare conventions: coding style, forbidden patterns, preferred libraries. They are effective for what to do and load automatically at session start.

            ctx adds dimensions that rule files do not cover: architectural decisions with rationale, learnings discovered during development, active tasks, and a constitution that governs agent behavior. Critically, ctx context accumulates - each session can add to it, and token budgeting ensures only the most relevant context is injected.

            Use rule files for static conventions. Use ctx for evolving project memory.

            ","path":["Reference","Tool Ecosystem"],"tags":[]},{"location":"reference/comparison/#aider-read-watch","level":3,"title":"Aider --read / --watch","text":"

            Aider's --read flag injects file contents at session start; --watch reloads them on change. The concept is similar to ctx's \"load\" step: make the agent aware of specific files.

            The differences emerge beyond loading. Aider has no persistence model - nothing the agent learns during a session is written back. There is no token budgeting (large files consume the full context window), no priority ordering across file types, and no structured format for decisions or learnings. ctx provides the full lifecycle: load, accumulate, persist, and budget.

            ","path":["Reference","Tool Ecosystem"],"tags":[]},{"location":"reference/comparison/#copilot-workspace","level":3,"title":"Copilot @Workspace","text":"

            GitHub Copilot's @workspace performs workspace-wide code search. It answers \"what code exists?\" - finding function definitions, usages, and file structure across the repository.

            ctx answers a different question: \"what did we decide?\" It stores architectural intent, not code indices. Copilot's workspace search and ctx's project memory are orthogonal; one finds code, the other preserves the reasoning behind it.

            ","path":["Reference","Tool Ecosystem"],"tags":[]},{"location":"reference/comparison/#cline-memory","level":3,"title":"Cline Memory","text":"

            Cline's memory bank stores session context within the Cline extension. The motivation is similar to ctx: help the agent remember across sessions.

            The key difference is portability. Cline memory is tied to Cline - it does not transfer to Claude Code, Cursor, Aider, or any other tool. ctx is tool-agnostic: context lives in plain files that any editor, agent, or script can read. Switching tools does not mean losing memory.

            ","path":["Reference","Tool Ecosystem"],"tags":[]},{"location":"reference/comparison/#when-ctx-is-a-good-fit","level":2,"title":"When ctx Is a Good Fit","text":"

            ctx works best when:

            • you want AI work to compound over time;
            • architectural decisions matter;
            • context must be inspectable;
            • humans and AI must share the same source of truth;
            • Git history should include why, not just what.
            ","path":["Reference","Tool Ecosystem"],"tags":[]},{"location":"reference/comparison/#when-ctx-is-not-the-right-tool","level":2,"title":"When ctx Is Not the Right Tool","text":"

            ctx is probably not what you want if:

            • you only need one-off prompts;
            • you rely exclusively on RAG;
            • you want autonomous agents without a human-readable state;
            • you require centralized enterprise control;
            • you want black-box memory systems.

            These are valid goals; just different ones.

            ","path":["Reference","Tool Ecosystem"],"tags":[]},{"location":"reference/comparison/#further-reading","level":2,"title":"Further Reading","text":"
            • You Can't Import Expertise: why project-specific context matters more than generic best practices
            ","path":["Reference","Tool Ecosystem"],"tags":[]},{"location":"reference/design-invariants/","level":1,"title":"Invariants","text":"","path":["Reference","Invariants"],"tags":[]},{"location":"reference/design-invariants/#the-system-explains-itself","level":1,"title":"The System Explains Itself","text":"

            These are the properties that must hold for any valid ctx implementation.

            • These are not features.
            • These are constraints.

            A change that violates an invariant is a category error, not an improvement.

            ","path":["Reference","Invariants"],"tags":[]},{"location":"reference/design-invariants/#cognitive-state-tiers","level":2,"title":"Cognitive State Tiers","text":"

            ctx distinguishes between three forms of state:

            • Authoritative state: Versioned, inspectable artifacts that define intent and survive time.
            • Delivery views: Deterministic assemblies of the authoritative state for a specific budget or workflow.
            • Ephemeral working state: Local, transient, or sensitive data that assists interaction but does not define system truth.

            The invariants below apply primarily to the authoritative cognitive state.

            ","path":["Reference","Invariants"],"tags":[]},{"location":"reference/design-invariants/#1-cognitive-state-is-explicit","level":2,"title":"1. Cognitive State Is Explicit","text":"

            All authoritative context lives in artifacts that can be inspected, reviewed, and versioned.

            If something is important, it must exist as a file - not only in a prompt, a chat, or a model's hidden memory.

            ","path":["Reference","Invariants"],"tags":[]},{"location":"reference/design-invariants/#2-assembly-is-reproducible","level":2,"title":"2. Assembly Is Reproducible","text":"

            Given the same:

            • repository state,
            • configuration,
            • and inputs,

            context assembly produces the same result.

            Heuristics may rank or filter for delivery under constraints.

            They do not alter the authoritative state.

            ","path":["Reference","Invariants"],"tags":[]},{"location":"reference/design-invariants/#3-the-authoritative-state-is-human-readable","level":2,"title":"3. The Authoritative State Is Human-Readable","text":"

            The authoritative cognitive state must be stored in formats that a human can:

            • read,
            • diff,
            • review,
            • and edit directly.

            Sensitive working memory may be encrypted at rest. However, encryption must not become the only representation of authoritative knowledge.

            ","path":["Reference","Invariants"],"tags":[]},{"location":"reference/design-invariants/#4-artifacts-outlive-sessions","level":2,"title":"4. Artifacts Outlive Sessions","text":"

            Sessions are transient.

            Knowledge persists.

            Reasoning, decisions, and outcomes must remain available after the interaction that produced them has ended.

            ","path":["Reference","Invariants"],"tags":[]},{"location":"reference/design-invariants/#5-authority-is-user-defined","level":2,"title":"5. Authority Is User-Defined","text":"

            What enters the authoritative context is an explicit human decision.

            Models may suggest.

            Automation may assist.

            Selection is never implicit.

            ","path":["Reference","Invariants"],"tags":[]},{"location":"reference/design-invariants/#6-operation-is-local-first","level":2,"title":"6. Operation Is Local-First","text":"

            The core system must function without requiring network access or a remote service.

            External systems may extend ctx.

            They must not be required for its operation.

            ","path":["Reference","Invariants"],"tags":[]},{"location":"reference/design-invariants/#7-versioning-is-the-memory-model","level":2,"title":"7. Versioning Is the Memory Model","text":"

            The evolution of the authoritative cognitive state must be:

            • preserved,
            • inspectable,
            • and branchable.

            Ephemeral and sensitive working state may use different retention and diff strategies by design.

            Understanding includes understanding how we arrived here.

            ","path":["Reference","Invariants"],"tags":[]},{"location":"reference/design-invariants/#8-structure-enables-scale","level":2,"title":"8. Structure Enables Scale","text":"

            Unstructured accumulation is not memory.

            Authoritative cognitive state must have a defined layout that:

            • communicates intent,
            • supports navigation,
            • and prevents drift.
            ","path":["Reference","Invariants"],"tags":[]},{"location":"reference/design-invariants/#9-verification-is-the-scoreboard","level":2,"title":"9. Verification Is the Scoreboard","text":"

            Claims without recorded outcomes are noise.

            Reality (observed and captured) is the only signal that compounds.

            This invariant defines a required direction:

            The authoritative state must be able to record expectation and result.

            ","path":["Reference","Invariants"],"tags":[]},{"location":"reference/design-invariants/#10-capture-once-reuse-indefinitely","level":2,"title":"10. Capture Once, Reuse Indefinitely","text":"

            Work that has already produced understanding must not be re-derived from scratch.

            Explored paths, rejected options, and validated conclusions are permanent assets.

            ","path":["Reference","Invariants"],"tags":[]},{"location":"reference/design-invariants/#11-policies-are-encoded-not-remembered","level":2,"title":"11. Policies Are Encoded, Not Remembered","text":"

            Alignment must not depend on recall or goodwill.

            Constraints that matter must exist in machine-readable form and participate in context assembly.

            ","path":["Reference","Invariants"],"tags":[]},{"location":"reference/design-invariants/#12-the-system-explains-itself","level":2,"title":"12. The System Explains Itself","text":"

            From the repository state alone it must be possible to determine:

            • what was authoritative,
            • what constraints applied.

            Delivery views may be optimized.

            They must not become the only explanation.

            ","path":["Reference","Invariants"],"tags":[]},{"location":"reference/design-invariants/#non-goals","level":1,"title":"Non-Goals","text":"

            To avoid category errors, ctx does not attempt to be:

            • a skill,
            • a prompt management tool,
            • a chat history viewer,
            • an autonomous agent runtime,
            • a vector database,
            • a hosted memory service.

            Such systems may integrate with ctx.

            They do not define it.

            ","path":["Reference","Invariants"],"tags":[]},{"location":"reference/design-invariants/#implications-for-contributions","level":1,"title":"Implications for Contributions","text":"

            Valid contributions:

            • strengthen an invariant,
            • reduce the cost of maintaining an invariant,
            • or extend the system without violating invariants.

            Invalid contributions:

            • introduce hidden authoritative state,
            • replace reproducible assembly with non-reproducible behavior,
            • make core operation depend on external services,
            • reduce human inspectability of authoritative state,
            • or bypass explicit user authority over what becomes authoritative.
            ","path":["Reference","Invariants"],"tags":[]},{"location":"reference/design-invariants/#the-contract","level":1,"title":"The Contract","text":"

            Everything else (commands, skills, layouts, integrations, optimizations) is an implementation detail.

            These invariants are the system.

            ","path":["Reference","Invariants"],"tags":[]},{"location":"reference/scratchpad/","level":1,"title":"Scratchpad","text":"","path":["Reference","Scratchpad"],"tags":[]},{"location":"reference/scratchpad/#what-is-ctx-scratchpad","level":2,"title":"What Is ctx Scratchpad?","text":"

            A one-liner scratchpad, encrypted at rest, synced via git.

            Quick notes that don't fit decisions, learnings, or tasks: reminders, intermediate values, sensitive tokens, working memory during debugging. Entries are numbered, reorderable, and persist across sessions.

            ","path":["Reference","Scratchpad"],"tags":[]},{"location":"reference/scratchpad/#encrypted-by-default","level":2,"title":"Encrypted by Default","text":"

            Scratchpad entries are encrypted with AES-256-GCM before touching the disk.

            Component Path Git status Encryption key ~/.ctx/.ctx.key User-level, 0600 permissions Encrypted data .context/scratchpad.enc Committed

            The key is generated automatically during ctx init (256-bit via crypto/rand) and stored at ~/.ctx/.ctx.key. One key per machine, shared across all projects.

            The ciphertext format is [12-byte nonce][ciphertext+tag]. No external dependencies: Go stdlib only.

            Because the key is .gitignored and the data is committed, you get:

            • At-rest encryption: the .enc file is opaque without the key
            • Git sync: push/pull the encrypted file like any other tracked file
            • Key separation: the key never leaves the machine unless you copy it
            ","path":["Reference","Scratchpad"],"tags":[]},{"location":"reference/scratchpad/#commands","level":2,"title":"Commands","text":"Command Purpose ctx pad List all entries (numbered 1-based) ctx pad show N Output raw text of entry N (no prefix, pipe-friendly) ctx pad add \"text\" Append a new entry ctx pad rm ID [ID...] Remove entries by stable ID (supports ranges: 3-5) ctx pad edit N \"text\" Replace entry N with new text ctx pad edit N --append \"text\" Append text to the end of entry N ctx pad edit N --prepend \"text\" Prepend text to the beginning of entry N ctx pad edit N --tag tagname Add a tag to entry N ctx pad add TEXT --file PATH Ingest a file as a blob entry (TEXT is the label) ctx pad show N --out PATH Write decoded blob content to a file ctx pad normalize Reassign entry IDs as 1..N ctx pad mv N M Move entry from position N to position M ctx pad resolve Show both sides of a merge conflict for resolution ctx pad import FILE Bulk-import lines from a file (or stdin with -) ctx pad import --blob DIR Import directory files as blob entries ctx pad export [DIR] Export all blob entries to a directory as files ctx pad merge FILE... Merge entries from other scratchpad files into current ctx pad --tag TAG List entries filtered by tag (prefix with ~ to exclude) ctx pad tags List all tags with counts ctx pad tags --json List all tags with counts as JSON

            All commands decrypt on read, operate on plaintext in memory, and re-encrypt on write. The key file is never printed to stdout.

            For blob entries, --append, --prepend, and --tag modify the label while preserving the blob data.

            ","path":["Reference","Scratchpad"],"tags":[]},{"location":"reference/scratchpad/#examples","level":3,"title":"Examples","text":"
            # Add a note\nctx pad add \"check DNS propagation after deploy\"\n\n# List everything\nctx pad\n#   1. check DNS propagation after deploy\n#   2. staging API key: sk-test-abc123\n\n# Show raw text (for piping)\nctx pad show 2\n# sk-test-abc123\n\n# Compose entries\nctx pad edit 1 --append \"$(ctx pad show 2)\"\n\n# Reorder\nctx pad mv 2 1\n\n# Clean up (IDs are stable; they don't shift when entries are deleted)\nctx pad rm 2\n
            ","path":["Reference","Scratchpad"],"tags":[]},{"location":"reference/scratchpad/#tags","level":2,"title":"Tags","text":"

            Entries can contain #word tags for lightweight categorization. Tags are convention-based: any #word token in an entry's text is a tag. No special syntax to add or remove them; use the existing add and edit commands.

            # Add tagged entries\nctx pad add \"check DNS propagation #later\"\nctx pad add \"deploy hotfix #urgent\"\nctx pad add \"review PR #later #ci\"\n\n# Filter by tag\nctx pad --tag later\n#   1. check DNS propagation #later\n#   3. review PR #later #ci\n\n# Exclude a tag\nctx pad --tag ~later\n#   2. deploy hotfix #urgent\n\n# Multiple filters (AND logic)\nctx pad --tag later --tag ci\n#   3. review PR #later #ci\n\n# List all tags with counts\nctx pad tags\n# ci       1\n# later    2\n# urgent   1\n\n# JSON output\nctx pad tags --json\n# [{\"tag\":\"ci\",\"count\":1},{\"tag\":\"later\",\"count\":2},{\"tag\":\"urgent\",\"count\":1}]\n\n# Add a tag to an existing entry\nctx pad edit 1 --tag done\n\n# Combine with other operations\nctx pad edit 1 --append \"checked\" --tag done\n\n# Remove a tag (replace entry text without the tag)\nctx pad edit 1 \"check DNS propagation\"\n

            Entry IDs are stable; they don't shift when other entries are deleted, so ctx pad rm 3 always targets the same entry. Use ctx pad normalize to reassign IDs as 1..N if gaps bother you. Tags are case-sensitive and support letters, digits, hyphens, and underscores (#high-priority, #v2, #my_tag).

            For blob entries, tags are extracted from the label only.

            ","path":["Reference","Scratchpad"],"tags":[]},{"location":"reference/scratchpad/#bulk-import-and-export","level":2,"title":"Bulk Import and Export","text":"

            Import lines from a file in bulk (each non-empty line becomes an entry):

            # Import from a file\nctx pad import notes.txt\n\n# Import from stdin\ngrep TODO *.go | ctx pad import -\n

            Export all blob entries to a directory as files:

            # Export to a directory\nctx pad export ./ideas\n\n# Preview without writing\nctx pad export --dry-run\n\n# Overwrite existing files\nctx pad export --force ./backup\n
            ","path":["Reference","Scratchpad"],"tags":[]},{"location":"reference/scratchpad/#merging-scratchpads","level":2,"title":"Merging Scratchpads","text":"

            Combine entries from other scratchpad files into your current pad. Useful when merging work from parallel worktrees, other machines, or teammates:

            # Merge from a worktree's encrypted scratchpad\nctx pad merge worktree/.context/scratchpad.enc\n\n# Merge from multiple sources (encrypted and plaintext)\nctx pad merge pad-a.enc notes.md\n\n# Merge a foreign encrypted pad using its key\nctx pad merge --key /other/.ctx.key foreign.enc\n\n# Preview without writing\nctx pad merge --dry-run pad-a.enc pad-b.md\n

            Each input file is auto-detected as encrypted or plaintext: decryption is attempted first, and on failure the file is parsed as plain text. Entries are deduplicated by exact content, so running merge twice with the same file is safe.

            ","path":["Reference","Scratchpad"],"tags":[]},{"location":"reference/scratchpad/#file-blobs","level":2,"title":"File Blobs","text":"

            The scratchpad can store small files (up to 64 KB) as blob entries. Files are base64-encoded and stored with a human-readable label.

            # Ingest a file: first argument is the label\nctx pad add \"deploy config\" --file ./deploy.yaml\n\n# Listing shows label with a [BLOB] marker\nctx pad\n#   1. check DNS propagation after deploy\n#   2. deploy config [BLOB]\n\n# Extract to a file\nctx pad show 2 --out ./recovered.yaml\n\n# Or print decoded content to stdout\nctx pad show 2\n

            Blob entries are encrypted identically to text entries. The internal format is label:::base64data. You never need to construct this manually.

            Constraint Value Max file size (pre-encoding) 64 KB Storage format label:::base64(content) Display label [BLOB] in listings

            When Should You Use Blobs

            Blobs are for small files you want encrypted and portable: config snippets, key fragments, deployment manifests, test fixtures. For anything larger than 64 KB, use the filesystem directly.

            ","path":["Reference","Scratchpad"],"tags":[]},{"location":"reference/scratchpad/#using-with-ai","level":2,"title":"Using with AI","text":"

            Use Natural Language

            As in many ctx features, the ctx scratchpad can also be used with natural language. You don't have to memorize the CLI commands.

            CLI gives you \"precision\", whereas natural language gives you flow.

            The /ctx-pad skill maps natural language to ctx pad commands. You don't need to remember the syntax:

            You say What happens \"jot down: check DNS after deploy\" ctx pad add \"check DNS after deploy\" \"show my scratchpad\" ctx pad \"delete the third entry\" ctx pad rm 3 \"update entry 2 to include the new endpoint\" ctx pad edit 2 \"...\" \"move entry 4 to the top\" ctx pad mv 4 1 \"import my notes from notes.txt\" ctx pad import notes.txt \"export all blobs to ./backup\" ctx pad export ./backup \"merge the scratchpad from the worktree\" ctx pad merge worktree/.context/scratchpad.enc

            The skill handles the translation. You describe what you want in plain English; the agent picks the right command.

            ","path":["Reference","Scratchpad"],"tags":[]},{"location":"reference/scratchpad/#worktrees","level":2,"title":"Worktrees","text":"

            The encryption key lives at ~/.ctx/.ctx.key (outside the project directory). Because all worktrees on the same machine share this path, ctx pad works in worktrees automatically - no special setup needed.

            ","path":["Reference","Scratchpad"],"tags":[]},{"location":"reference/scratchpad/#key-distribution","level":2,"title":"Key Distribution","text":"

            The encryption key (~/.ctx/.ctx.key) stays on the machine where it was generated. ctx never transmits it.

            To share the scratchpad across machines:

            1. Copy the key manually: scp, USB drive, password manager.
            2. Push/pull the .enc file via git as usual.
            3. Both machines can now read and write the same scratchpad.

            Never Commit the Key

            The key is .gitignored by default. If you override this, anyone with repo access can decrypt your scratchpad.

            Treat the key like an SSH private key.

            See the Syncing Scratchpad Notes Across Machines recipe for a step-by-step walkthrough.

            ","path":["Reference","Scratchpad"],"tags":[]},{"location":"reference/scratchpad/#plaintext-override","level":2,"title":"Plaintext Override","text":"

            For projects where encryption is unnecessary, disable it in .ctxrc:

            scratchpad_encrypt: false\n

            In plaintext mode:

            • Entries are stored in .context/scratchpad.md instead of .enc.
            • No key is generated or required.
            • All ctx pad commands work identically.
            • The file is human-readable and diffable.

            When Should You Use Plaintext

            Plaintext mode is useful for non-sensitive projects, solo work where encryption adds friction, or when you want scratchpad entries visible in git diff.

            ","path":["Reference","Scratchpad"],"tags":[]},{"location":"reference/scratchpad/#when-should-you-use-scratchpad-versus-context-files","level":2,"title":"When Should You Use Scratchpad versus Context Files","text":"Use case Where it goes Temporary reminders (\"check X after deploy\") Scratchpad Working values during debugging Scratchpad Sensitive tokens or API keys (short-term) Scratchpad Quick notes that don't fit anywhere else Scratchpad Items that are not directly relevant to the project Scratchpad Things that you want to keep near, but also hidden Scratchpad Work items with completion tracking TASKS.md Trade-offs with rationale DECISIONS.md Reusable lessons with context/lesson/application LEARNINGS.md Codified patterns and standards CONVENTIONS.md

            Rule of thumb:

            • If it needs structure or will be referenced months later, use a context file (i.e. DECISIONS.md, LEARNINGS.md, TASKS.md).
            • If it is working memory for the current session or week, use the scratchpad.
            ","path":["Reference","Scratchpad"],"tags":[]},{"location":"reference/scratchpad/#see-also","level":2,"title":"See Also","text":"
            • Syncing Scratchpad Notes Across Machines: Key distribution, push/pull workflow, merge conflict resolution
            • Using the Scratchpad: Natural language examples, blob workflow, when to use scratchpad vs context files
            • Context Files: Format and conventions for all .context/ files
            • Security: Trust model and permission hygiene
            ","path":["Reference","Scratchpad"],"tags":[]},{"location":"reference/session-journal/","level":1,"title":"Session Journal","text":"

            Important Security Note

            Session journals contain sensitive data such as file contents, commands, API keys, internal discussions, error messages with stack traces, and more.

            The .context/journal-site/ and .context/journal-obsidian/ directories MUST be .gitignored.

            • DO NOT host your journal publicly.
            • DO NOT commit your journal files to version control.
            ","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#browse-your-session-history","level":2,"title":"Browse Your Session History","text":"

            ctx's Session Journal turns your AI coding sessions into a browsable, searchable, and editable archive.

            ","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#quick-start","level":2,"title":"Quick Start","text":"

            After using ctx for a couple of sessions, you can generate a journal site with:

            # Import all sessions to markdown\nctx journal import --all\n\n# Generate and serve the journal site\nctx journal site --serve\n

            Then open http://localhost:8000 to browse your sessions.

            ","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#what-you-get","level":2,"title":"What You Get","text":"

            The Session Journal gives you:

            • Browsable history: Navigate through all your AI sessions by date
            • Full conversations: See every message, tool use, and result
            • Token usage: Track how many tokens each session consumed
            • Search: Find sessions by content, project, or date
            • Dark mode: Easy on the eyes for late-night archaeology

            Each session page includes the following sections:

            Section Content Metadata Date, time, duration, model, project, git branch Summary Space for your notes (editable) Tool Usage Which tools were used and how often Conversation Full transcript with timestamps","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#the-workflow","level":2,"title":"The Workflow","text":"","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#1-import-sessions","level":3,"title":"1. Import Sessions","text":"
            # Import all sessions from current project (only new files)\nctx journal import --all\n\n# Import sessions from all projects\nctx journal import --all --all-projects\n\n# Import a specific session by ID (always writes)\nctx journal import abc123\n\n# Preview what would be imported\nctx journal import --all --dry-run\n\n# Re-import existing (regenerates conversation, preserves YAML frontmatter)\nctx journal import --all --regenerate\n\n# Discard frontmatter during regeneration\nctx journal import --all --regenerate --keep-frontmatter=false -y\n

            Imported sessions go to .context/journal/ as editable Markdown files.

            ","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#2-generate-the-site","level":3,"title":"2. Generate the Site","text":"
            # Generate site structure\nctx journal site\n\n# Generate and build static HTML\nctx journal site --build\n\n# Generate and serve locally\nctx journal site --serve\n\n# Custom output directory\nctx journal site --output ~/my-journal\n

            The site is generated in .context/journal-site/ by default.

            ","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#3-browse-and-search","level":3,"title":"3. Browse and Search","text":"

            Open http://localhost:8000 after running --serve.

            • Use the sidebar to navigate by date
            • Use search (/ key) to find specific content
            • Click any session to see the full conversation
            ","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#editing-sessions","level":2,"title":"Editing Sessions","text":"

            Imported sessions are plain Markdown in .context/journal/. You can:

            • Add summaries: Fill in the ## Summary section
            • Add notes: Insert your own commentary anywhere
            • Highlight key moments: Use Markdown formatting
            • Delete noise: Remove irrelevant tool outputs

            After editing, regenerate the site:

            ctx journal site --serve\n
            Safe by Default

            Running ctx journal import --all only imports new sessions. Existing files are skipped entirely (your edits and enrichments are never touched).

            Use --regenerate to re-import existing files. Conversation content is regenerated, but YAML frontmatter (topics, type, outcome, etc.) is preserved. You'll be prompted before any existing files are overwritten; add -y to skip the prompt.

            Use --keep-frontmatter=false to discard enriched frontmatter during regeneration.

            Locked entries (via ctx journal lock) are always skipped, regardless of flags. If you prefer to add locked: true to frontmatter during enrichment, run ctx journal sync to propagate the lock state to .state.json.

            ","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#large-sessions","level":2,"title":"Large Sessions","text":"

            Sessions with many messages (200+) are automatically split into multiple parts for better browser performance. Navigation links connect the parts:

            session-abc123.md      (Part 1 of 3)\nsession-abc123-p2.md   (Part 2 of 3)\nsession-abc123-p3.md   (Part 3 of 3)\n
            ","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#suggestion-sessions","level":2,"title":"Suggestion Sessions","text":"

            Claude Code generates \"suggestion\" sessions for auto-complete prompts. These are separated in the index under a \"Suggestions\" section to keep your main session list focused.

            ","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#enriching-journal-entries","level":2,"title":"Enriching Journal Entries","text":"

            Raw imported sessions contain basic metadata (date, time, project) but lack the structured information needed for effective search, filtering, and analysis. Journal enrichment adds semantic metadata that transforms a flat archive into a searchable knowledge base.

            ","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#why-enrich","level":3,"title":"Why Enrich?","text":"

            Without enrichment, you have timestamps and raw conversations. With enrichment:

            • Find sessions by topic: \"Show me all auth-related sessions\"
            • Filter by outcome: \"What did I abandon vs complete?\"
            • Track technology usage: \"When did I last work with PostgreSQL?\"
            • Identify key files: Jump directly to the files discussed
            • Get summaries: Understand what happened without reading transcripts
            ","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#the-frontmatter-schema","level":3,"title":"The Frontmatter Schema","text":"

            Enriched entries begin with YAML frontmatter:

            ---\ntitle: \"Implement caching layer\"\ndate: 2026-01-27\ntype: feature\noutcome: completed\ntopics:\n  - caching\n  - performance\ntechnologies:\n  - go\n  - redis\nlibraries:\n  - go-redis/redis\nkey_files:\n  - internal/cache/redis.go\n  - internal/cache/memory.go\n---\n
            Field Required Description title Yes Descriptive title (not the session slug) date Yes Session date (YYYY-MM-DD) type Yes Session type (see below) outcome Yes How the session ended (see below) topics No Subject areas discussed technologies No Languages, databases, frameworks libraries No Specific packages or libraries used key_files No Important files created or modified

            Type values:

            Type When to use feature Building new functionality bugfix Fixing broken behavior refactor Restructuring without behavior change exploration Research, learning, experimentation debugging Investigating issues documentation Writing docs, comments, README

            Outcome values:

            Outcome Meaning completed Goal achieved partial Some progress, work continues abandoned Stopped pursuing this approach blocked Waiting on external dependency","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#using-ctx-journal-enrich","level":3,"title":"Using /ctx-journal-enrich","text":"

            The /ctx-journal-enrich skill automates enrichment by analyzing conversation content and proposing metadata.

            Invoke by session identifier:

            /ctx-journal-enrich twinkly-stirring-kettle\n/ctx-journal-enrich twinkly\n/ctx-journal-enrich 2026-01-24\n/ctx-journal-enrich 76fe2ab9\n

            The skill will:

            1. Check if locked - locked entries are skipped (same as export);
            2. Find the matching journal file;
            3. Read and analyze the conversation;
            4. Propose frontmatter (type, topics, outcome, technologies);
            5. Generate a 2-3 sentence summary;
            6. Extract decisions, learnings, and tasks mentioned;
            7. Show a diff and ask for confirmation before writing.
            ","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#before-and-after","level":3,"title":"Before and After","text":"

            Before enrichment:

            # twinkly-stirring-kettle\n\n**ID**: abc123-def456\n**Date**: 2026-01-24\n**Time**: 14:30:00\n...\n\n## Summary\n\n[Add your summary of this session]\n\n## Conversation\n...\n

            After enrichment:

            ---\ntitle: \"Add Redis caching to API endpoints\"\ndate: 2026-01-24\ntype: feature\noutcome: completed\ntopics:\n  - caching\n  - api-performance\ntechnologies:\n  - go\n  - redis\nkey_files:\n  - internal/api/middleware/cache.go\n  - internal/cache/redis.go\n---\n\n# twinkly-stirring-kettle\n\n**ID**: abc123-def456\n**Date**: 2026-01-24\n**Time**: 14:30:00\n...\n\n## Summary\n\nImplemented Redis-based caching middleware for frequently accessed API endpoints.\nAdded cache invalidation on writes and configurable TTL per route. Reduced\n the average response time from 200ms to 15ms for cached routes.\n\n## Decisions\n\n* Used Redis over in-memory cache for horizontal scaling\n* Chose per-route TTL configuration over global setting\n\n## Learnings\n\n* Redis WATCH command prevents race conditions during cache invalidation\n\n## Conversation\n...\n
            ","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#enrichment-and-site-generation","level":3,"title":"Enrichment and Site Generation","text":"

            The journal site generator uses enriched metadata for better organization:

            • Titles appear in navigation instead of slugs
            • Summaries provide context in the index
            • Topics enable filtering (when using search)
            • Types allow grouping by work category

            Future improvements will add topic-based navigation and outcome filtering to the generated site.

            ","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#batch-enrichment","level":3,"title":"Batch Enrichment","text":"

            To enrich multiple sessions, process them one at a time:

            # List unenriched sessions (those without frontmatter)\ngrep -L \"^---$\" .context/journal/*.md | head -10\n

            Then run /ctx-journal-enrich on each. Enrichment is intentionally interactive to ensure accuracy.

            ","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#obsidian-vault-export","level":2,"title":"Obsidian Vault Export","text":"

            If you use Obsidian for knowledge management, you can export your journal as an Obsidian vault instead of (or alongside) the static site:

            ctx journal obsidian\n

            This generates a vault in .context/journal-obsidian/ with:

            • Wikilinks ([[target|display]]) instead of Markdown links
            • MOC pages (Map of Content) for topics, key files, and session types
            • Related sessions footer per entry: links to entries sharing the same topics
            • Transformed frontmatter: topics renamed to tags (Obsidian-recognized), aliases added from title for search
            • Graph-optimized structure: MOC hubs and cross-linked entries create dense graph connectivity

            To use: open the output directory in Obsidian (\"Open folder as vault\").

            # Custom output directory\nctx journal obsidian --output ~/vaults/ctx-journal\n

            Static Site vs Obsidian Vault

            Use ctx journal site when you want a web-browsable archive with search and dark mode. Use ctx journal obsidian when you want graph view, backlinks, and tag-based navigation inside Obsidian. Both use the same enriched source entries: you can generate both.

            ","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#full-pipeline","level":2,"title":"Full Pipeline","text":"

            The complete journal workflow has four stages. Each is idempotent: safe to re-run, and stages skip already-processed entries.

            import → enrich → rebuild\n
            Stage Command / Skill What it does Skips if Import ctx journal import --all Converts session JSONL to Markdown File already exists (safe default) Enrich /ctx-journal-enrich Adds frontmatter, summaries, topics Frontmatter already present Rebuild ctx journal site --build Generates static HTML site -- Obsidian ctx journal obsidian Generates Obsidian vault with wikilinks --

            One-Command Pipeline

            /ctx-journal-enrich-all handles import automatically - it detects unimported sessions and imports them before enriching. You only need to run ctx journal site --build afterward.

            ","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#using-make-journal","level":3,"title":"Using make journal","text":"

            If your project includes Makefile.ctx (deployed by ctx init), the first and last stages are combined:

            make journal           # import + rebuild\n

            After it runs, it reminds you to enrich in Claude Code:

            Next steps (in Claude Code):\n  /ctx-journal-enrich-all # imports if needed + adds metadata per entry\n\nThen re-run: make journal\n

            Rendering Issues?

            If individual entries have rendering problems (broken fences, malformed lists), check the programmatic normalization in the import pipeline. Most cases are handled automatically during ctx journal import.

            ","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#tips","level":2,"title":"Tips","text":"

            Daily workflow:

            # Import, browse, then enrich in Claude Code\nmake journal && make journal-serve\n# Then in Claude Code: /ctx-journal-enrich <session>\n

            After a productive session:

            # Import just that session and add notes\nctx journal import <session-id>\n# Edit .context/journal/<session>.md\n# Regenerate: ctx journal site\n

            Searching across all sessions:

            # Use grep on the journal directory\ngrep -r \"authentication\" .context/journal/\n

            ","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#requirements","level":2,"title":"Requirements","text":"Use pipx for zensical

            pip install zensical may install a non-functional stub on system Python. Using venv has other issues too.

            These issues are especially common on macOS.

            Use pipx install zensical, which creates an isolated environment and handles Python version management automatically.

            The journal site uses zensical for static site generation:

            pipx install zensical\n
            ","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#see-also","level":2,"title":"See Also","text":"
            • ctx journal: Session discovery and listing
            • ctx journal site: Static site generation
            • ctx journal obsidian: Obsidian vault export
            • Context Files: The .context/ directory structure
            ","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/skills/","level":1,"title":"Skills","text":"","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#skills","level":2,"title":"Skills","text":"

            Skills are slash commands that run inside your AI assistant (e.g., /ctx-next), as opposed to CLI commands that run in your terminal (e.g., ctx status).

            Skills give your agent structured workflows: It knows what to read, what to run, and when to ask. Most wrap one or more ctx CLI commands with opinionated behavior on top.

            Skills Are Best Used Conversationally

            The beauty of ctx is that it's designed to be intuitive and conversational, allowing you to interact with your AI assistant naturally. That's why you don't have to memorize many of these skills.

            See the Prompting Guide for natural-language triggers that invoke these skills conversationally.

            However, when you need more precise control, you have the option to invoke the relevant skills directly.

            ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#all-skills","level":2,"title":"All Skills","text":"Skill Description Type /ctx-remember Recall project context and present structured readback user-invocable /ctx-wrap-up End-of-session context persistence ceremony user-invocable /ctx-status Show context summary with interpretation user-invocable /ctx-agent Load full context packet for AI consumption user-invocable /ctx-next Suggest 1-3 concrete next actions with rationale user-invocable /ctx-commit Commit with integrated context persistence user-invocable /ctx-reflect Pause and reflect on session progress user-invocable /ctx-task-add Add actionable task to TASKS.md user-invocable /ctx-decision-add Record architectural decision with rationale user-invocable /ctx-learning-add Record gotchas and lessons learned user-invocable /ctx-convention-add Record coding convention for consistency user-invocable /ctx-archive Archive completed tasks from TASKS.md user-invocable /ctx-pad Manage encrypted scratchpad entries user-invocable /ctx-history Browse and import AI session history user-invocable /ctx-journal-enrich Enrich single journal entry with metadata user-invocable /ctx-journal-enrich-all Full journal pipeline: export if needed, then batch-enrich user-invocable /ctx-blog Generate blog post draft from project activity user-invocable /ctx-blog-changelog Generate themed blog post from a commit range user-invocable /ctx-consolidate Consolidate redundant learnings or decisions user-invocable /ctx-drift Detect and fix context drift user-invocable /ctx-prompt Apply, list, and manage saved prompt templates user-invocable /ctx-prompt-audit Analyze prompting patterns for improvement user-invocable /ctx-link-check Audit docs for dead internal and external links user-invocable /ctx-permission-sanitize Audit Claude Code permissions for security risks user-invocable /ctx-brainstorm Structured design dialogue before implementation user-invocable 
/ctx-spec Scaffold a feature spec from a project template user-invocable /ctx-plan-import Import Claude Code plan files into project specs user-invocable /ctx-implement Execute a plan step-by-step with verification user-invocable /ctx-loop Generate autonomous loop script user-invocable /ctx-worktree Manage git worktrees for parallel agents user-invocable /ctx-architecture Build and maintain architecture maps user-invocable /ctx-architecture-failure-analysis Adversarial failure analysis for correctness bugs user-invocable /ctx-remind Manage session-scoped reminders user-invocable /ctx-doctor Troubleshoot ctx behavior with health checks and event analysis user-invocable /ctx-skill-audit Audit skills against Anthropic prompting best practices user-invocable /ctx-skill-create Create, improve, and test skills user-invocable /ctx-pause Pause context hooks for this session user-invocable /ctx-resume Resume context hooks after a pause user-invocable","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#session-lifecycle","level":2,"title":"Session Lifecycle","text":"

            Skills for starting, running, and ending a productive session.

            Session Ceremonies

            Two skills in this group are ceremony skills: /ctx-remember (session start) and /ctx-wrap-up (session end). Unlike other skills that work conversationally, these should be invoked as explicit slash commands for completeness. See Session Ceremonies.

            ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-remember","level":3,"title":"/ctx-remember","text":"

            Recall project context and present a structured readback. Ceremony skill: invoke explicitly at session start.

            Wraps: ctx agent --budget 4000, ctx journal source --limit 3, reads TASKS.md, DECISIONS.md, LEARNINGS.md

            See also: Session Ceremonies, The Complete Session

            ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-status","level":3,"title":"/ctx-status","text":"

            Show context summary (files, token budget, tasks, recent activity) with interpreted suggestions.

            Wraps: ctx status [--verbose] [--json]

            See also: The Complete Session, ctx status CLI

            ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-agent","level":3,"title":"/ctx-agent","text":"

            Load the full context packet optimized for AI consumption. Also runs automatically via the PreToolUse hook with cooldown.

            Wraps: ctx agent [--budget] [--format] [--cooldown] [--session]

            See also: The Complete Session, ctx agent CLI

            ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-next","level":3,"title":"/ctx-next","text":"

            Suggest 1-3 concrete next actions ranked by priority, momentum, and unblocked status.

            Wraps: reads TASKS.md, ctx journal source --limit 3

            See also: The Complete Session, Tracking Work Across Sessions

            ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-commit","level":3,"title":"/ctx-commit","text":"

            Commit code with integrated context persistence: pre-commit checks, staged files, Co-Authored-By trailer, and a post-commit prompt to capture decisions and learnings.

            Wraps: git add, git commit, optionally chains to /ctx-decision-add and /ctx-learning-add

            See also: The Complete Session

            ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-reflect","level":3,"title":"/ctx-reflect","text":"

            Pause and reflect on session progress. Walks through a checklist of learnings, decisions, task completions, and session notes to persist.

            Wraps: chains to ctx add learning, ctx add decision, manual TASKS.md updates

            See also: The Complete Session, Persisting Decisions, Learnings, and Conventions

            ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-wrap-up","level":3,"title":"/ctx-wrap-up","text":"

            End-of-session context persistence ceremony. Gathers signal from git diff, recent commits, and conversation themes. Proposes candidates (learnings, decisions, conventions, tasks) with complete structured fields for user approval, then persists via ctx add. Offers /ctx-commit if uncommitted changes remain. Ceremony skill: invoke explicitly at session end.

            Wraps: git diff --stat, git log, ctx add learning, ctx add decision, ctx add convention, ctx add task, chains to /ctx-commit

            See also: Session Ceremonies, The Complete Session

            ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#context-persistence","level":2,"title":"Context Persistence","text":"

            Skills for recording work artifacts — tasks, decisions, learnings, conventions — into .context/ files.

            ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-task-add","level":3,"title":"/ctx-task-add","text":"

            Add an actionable task with optional priority and phase section.

            Wraps: ctx add task \"description\" [--priority high|medium|low] --session-id ID --branch BR --commit HASH

            See also: Tracking Work Across Sessions

            ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-decision-add","level":3,"title":"/ctx-decision-add","text":"

            Record an architectural decision with context, rationale, and consequence. Supports Y-statement (lightweight) and full ADR formats.

            Wraps: ctx add decision \"title\" --context \"...\" --rationale \"...\" --consequence \"...\" --session-id ID --branch BR --commit HASH

            See also: Persisting Decisions, Learnings, and Conventions

            ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-learning-add","level":3,"title":"/ctx-learning-add","text":"

            Record a project-specific gotcha, bug, or unexpected behavior. Filters for insights that are searchable, project-specific, and required real effort to discover.

            Wraps: ctx add learning \"title\" --context \"...\" --lesson \"...\" --application \"...\" --session-id ID --branch BR --commit HASH

            See also: Persisting Decisions, Learnings, and Conventions

            ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-convention-add","level":3,"title":"/ctx-convention-add","text":"

            Record a coding convention that should be standardized across sessions. Targets patterns seen 2-3+ times.

            Wraps: ctx add convention \"rule\" --section \"Name\"

            See also: Persisting Decisions, Learnings, and Conventions

            ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-archive","level":3,"title":"/ctx-archive","text":"

            Archive completed tasks from TASKS.md to a timestamped file in .context/archive/. Preserves phase headers for traceability.

            Wraps: ctx task archive [--dry-run]

            See also: Tracking Work Across Sessions

            ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#scratchpad","level":2,"title":"Scratchpad","text":"","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-pad","level":3,"title":"/ctx-pad","text":"

            Manage the encrypted scratchpad: add, remove, edit, and reorder one-liner notes. Encrypted at rest with AES-256-GCM.

            Wraps: ctx pad, ctx pad add, ctx pad rm, ctx pad edit, ctx pad mv, ctx pad import, ctx pad export, ctx pad merge

            See also: Scratchpad, Using the Scratchpad

            ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#journal-history","level":2,"title":"Journal & History","text":"

            Skills for browsing, exporting, and enriching your AI session history into a structured journal.

            ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-history","level":3,"title":"/ctx-history","text":"

            Browse, inspect, and import AI session history. List recent sessions, show details by slug or ID, and import to .context/journal/.

            Wraps: ctx journal source, ctx journal source --show, ctx journal import

            See also: Browsing and Enriching Past Sessions

            ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-journal-enrich","level":3,"title":"/ctx-journal-enrich","text":"

            Enrich a single journal entry with YAML frontmatter: title, type, outcome, topics, technologies, and summary. Shows diff before writing.

            Wraps: reads and edits .context/journal/*.md files

            See also: Browsing and Enriching Past Sessions, Turning Activity into Content

            ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-journal-enrich-all","level":3,"title":"/ctx-journal-enrich-all","text":"

            Full journal pipeline: imports unimported sessions first, then batch-enriches all unenriched entries. Filters out short sessions and continuations. Can spawn subagents for large backlogs.

            Wraps: ctx journal import --all + iterates /ctx-journal-enrich

            See also: Browsing and Enriching Past Sessions

            ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#content-creation","level":2,"title":"Content Creation","text":"

            Skills for turning project activity into publishable content.

            ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-blog","level":3,"title":"/ctx-blog","text":"

            Generate a blog post draft from recent project activity: git history, decisions, learnings, tasks, and journal entries. Requires a narrative arc (problem, approach, outcome).

            Wraps: reads git log, DECISIONS.md, LEARNINGS.md, TASKS.md, journal entries; writes to docs/blog/

            See also: Turning Activity into Content

            ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-blog-changelog","level":3,"title":"/ctx-blog-changelog","text":"

            Generate a themed blog post from a commit range. Takes a starting commit and unifying theme, analyzes diffs and journal entries from that period.

            Wraps: git log, git diff --stat; writes to docs/blog/

            See also: Turning Activity into Content

            ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#auditing-health","level":2,"title":"Auditing & Health","text":"

            Skills for detecting drift, auditing alignment, and improving prompt quality.

            ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-consolidate","level":3,"title":"/ctx-consolidate","text":"

            Consolidate redundant entries in LEARNINGS.md or DECISIONS.md. Groups overlapping entries by keyword similarity, presents candidates, and (with user approval) merges groups into denser combined entries. Originals are archived, not deleted.

            Wraps: reads LEARNINGS.md and DECISIONS.md, writes consolidated entries, archives originals, runs ctx reindex

            See also: Detecting and Fixing Drift

            ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-drift","level":3,"title":"/ctx-drift","text":"

            Detect and fix context drift: stale paths, missing files, file age staleness, task accumulation, entry count warnings, and constitution violations via ctx drift. Also detects skill drift against canonical templates.

            Wraps: ctx drift [--fix]

            See also: Detecting and Fixing Drift

            ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-prompt-audit","level":3,"title":"/ctx-prompt-audit","text":"

            Analyze recent prompting patterns to identify vague or ineffective prompts. Reviews 3-5 journal entries and suggests rewrites with positive observations.

            Wraps: reads .context/journal/ entries

            See also: Detecting and Fixing Drift

            ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-doctor","level":3,"title":"/ctx-doctor","text":"

            Troubleshoot ctx behavior. Runs structural health checks via ctx doctor, analyzes event log patterns via ctx hook event, and presents findings with suggested actions. The CLI provides the structural baseline; the agent adds semantic analysis of event patterns and correlations.

            Wraps: ctx doctor --json, ctx hook event --json --last 100, ctx remind list, ctx hook message list, reads .ctxrc

            Trigger phrases: \"diagnose\", \"troubleshoot\", \"doctor\", \"health check\", \"why didn't my hook fire?\", \"hooks seem broken\", \"something seems off\"

            Graceful degradation: If event_log is not enabled, the skill still works but with reduced capability. It runs structural checks and notes: \"Enable event_log: true in .ctxrc for hook-level diagnostics.\"

            See also: Troubleshooting, ctx doctor CLI, ctx hook event CLI

            ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-link-check","level":3,"title":"/ctx-link-check","text":"

            Scan all markdown files under docs/ for broken links. Three passes: internal links (verify file targets exist on disk), external links (HTTP HEAD with timeout, report failures as warnings), and image references. Resolves relative paths, strips anchors before checking, and skips localhost/example URLs.

            Wraps: Glob + Grep to scan, curl for external checks

            Trigger phrases: \"check links\", \"audit links\", \"any broken links?\", \"dead links\"

            See also: Detecting and Fixing Drift

            ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-permission-sanitize","level":3,"title":"/ctx-permission-sanitize","text":"

            Audit .claude/settings.local.json for dangerous permissions across four risk categories: hook bypass (Critical), destructive commands (High), config injection vectors (High), and overly broad patterns (Medium). Reports findings by severity and offers specific fix actions with user confirmation.

            Wraps: reads .claude/settings.local.json, edits with confirmation

            Trigger phrases: \"audit permissions\", \"are my permissions safe?\", \"sanitize permissions\", \"check settings\"

            See also: Claude Code Permission Hygiene

            ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#planning-execution","level":2,"title":"Planning & Execution","text":"

            Skills for structured design, implementation, and parallel agent workflows.

            ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-brainstorm","level":3,"title":"/ctx-brainstorm","text":"

            Transform raw ideas into clear, validated designs through structured dialogue before any implementation begins. Follows a gated process: understand context, clarify the idea (one question at a time), surface non-functional requirements, lock understanding with user confirmation, explore 2-3 design approaches with trade-offs, stress-test the chosen approach, and present the detailed design.

            Wraps: reads DECISIONS.md, relevant source files; chains to /ctx-decision-add for recording design choices

            Trigger phrases: \"let's brainstorm\", \"design this\", \"think through\", \"before we build\", \"what approach should we take?\"

            See also: /ctx-spec

            ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-spec","level":3,"title":"/ctx-spec","text":"

            Scaffold a feature spec from the project template and walk through each section with the user. Covers: problem, approach, happy path, edge cases, validation rules, error handling, interface, implementation, configuration, testing, and non-goals. Spends extra time on edge cases and error handling.

            Wraps: reads specs/tpl/spec-template.md, writes to specs/, optionally chains to /ctx-task-add

            Trigger phrases: \"spec this out\", \"write a spec\", \"create a spec\", \"design document\"

            See also: /ctx-brainstorm, /ctx-plan-import

            ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-plan-import","level":3,"title":"/ctx-plan-import","text":"

            Import Claude Code plan files (~/.claude/plans/*.md) into the project's specs/ directory. Lists plans with dates and H1 titles, supports filtering (--today, --since, --all), slugifies headings for filenames, and optionally creates tasks referencing each imported spec.

            Wraps: reads ~/.claude/plans/*.md, writes to specs/, optionally chains to /ctx-task-add

            See also: Importing Claude Code Plans, Tracking Work Across Sessions

            ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-implement","level":3,"title":"/ctx-implement","text":"

            Execute a multi-step plan with build and test verification at each step. Loads a plan from a file or conversation context, breaks it into atomic steps, and checkpoints after every 3-5 steps.

            Wraps: reads plan file, runs verification commands (go build, go test, etc.)

            See also: Running an Unattended AI Agent

            ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-loop","level":3,"title":"/ctx-loop","text":"

            Generate a ready-to-run shell script for autonomous AI iteration. Supports Claude Code, Aider, and generic tool templates with configurable completion signals.

            Wraps: ctx loop [--tool] [--prompt] [--max-iterations] [--completion] [--output]

            See also: Autonomous Loops, Running an Unattended AI Agent

            ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-worktree","level":3,"title":"/ctx-worktree","text":"

            Manage git worktrees for parallel agent development. Create sibling worktrees on dedicated branches, analyze task blast radius for grouping, and tear down with merge.

            Wraps: git worktree add, git worktree list, git worktree remove, git merge

            See also: Parallel Agent Development with Git Worktrees

            ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-architecture","level":3,"title":"/ctx-architecture","text":"

            Build and maintain architecture maps incrementally. Creates or refreshes ARCHITECTURE.md (succinct project map, loaded at session start) and DETAILED_DESIGN.md (deep per-module reference, consulted on-demand). Coverage is tracked in map-tracking.json so each run extends the map rather than re-analyzing everything.

            Wraps: ctx status, git log, reads source files; writes ARCHITECTURE.md, DETAILED_DESIGN.md, map-tracking.json

            See also: Detecting and Fixing Drift

            ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-architecture-failure-analysis","level":3,"title":"/ctx-architecture-failure-analysis","text":"

            Adversarial failure analysis that generates falsifiable incident hypotheses against architecture artifacts. Hunts for correctness bugs that survive code review and tests: race conditions, ordering assumptions, cache staleness, error swallowing, ownership gaps, idempotency failures, state machine drift, and scaling cliffs.

            Requires /ctx-architecture artifacts as input. Reads ARCHITECTURE.md, DETAILED_DESIGN*.md, and map-tracking.json, then systematically applies 9 failure categories to every mutation point. Each finding carries an evidence standard (code path, trigger, failure path, silence reason, code evidence), a confidence level, and an explicit risk score. A mandatory challenge phase attempts to disprove each finding before it is accepted.

            Produces .context/DANGER-ZONES.md with ranked findings split into Critical (risk >= 7, silent/cascading) and Elevated tiers.

            Wraps: reads architecture artifacts, source code; writes DANGER-ZONES.md. Optionally uses GitNexus for blast radius and Gemini Search for cross-referencing known failure patterns.

            Relationship:

            Skill Mode /ctx-architecture Map what exists /ctx-architecture-enrich Improve map fidelity /ctx-architecture-failure-analysis Generate falsifiable incident hypotheses","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-remind","level":3,"title":"/ctx-remind","text":"

            Manage session-scoped reminders via natural language. Translates user intent (\"remind me to refactor swagger\") into the corresponding ctx remind command. Handles date conversion for --after flags.

            Wraps: ctx remind, ctx remind list, ctx remind dismiss

            See also: Session Reminders

            ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#skill-authoring","level":2,"title":"Skill Authoring","text":"","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-skill-audit","level":3,"title":"/ctx-skill-audit","text":"

            Audit one or more skills against Anthropic prompting best practices. Checks audit dimensions: positive framing, motivation, phantom references, examples, subagent guards, scope, and descriptions. Reports findings by severity with concrete fix suggestions.

            Wraps: reads internal/assets/claude/skills/*/SKILL.md or .claude/skills/*/SKILL.md, references anthropic-best-practices.md

            Trigger phrases: \"audit this skill\", \"check skill quality\", \"review the skills\", \"are our skills any good?\"

            See also: /ctx-skill-create, Contributing

            ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-skill-create","level":3,"title":"/ctx-skill-create","text":"

            Create, improve, and test skills. Guides the full lifecycle: capture intent, interview for edge cases, draft the SKILL.md, test with realistic prompts, review results with the user, and iterate. Applies core principles: the agent is already smart (only add what it does not know), the description is the trigger (make it specific and \"pushy\"), and explain the why instead of rigid directives.

            Wraps: reads/writes .claude/skills/ and internal/assets/claude/skills/

            Trigger phrases: \"create a skill\", \"turn this into a skill\", \"make a slash command\", \"this should be a skill\", \"improve this skill\", \"the skill isn't triggering\"

            See also: Contributing

            ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#session-control","level":2,"title":"Session Control","text":"

            Skills for controlling hook behavior during a session.

            ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-pause","level":3,"title":"/ctx-pause","text":"

            Pause all context nudge and reminder hooks for the current session. Security hooks still fire. Use for quick investigations or tasks that don't need ceremony overhead.

            Wraps: ctx hook pause

            Trigger phrases: \"pause ctx\", \"pause context\", \"stop the nudges\", \"quiet mode\"

            See also: Pausing Context Hooks

            ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-resume","level":3,"title":"/ctx-resume","text":"

            Resume context hooks after a pause. Restores normal nudge, reminder, and ceremony behavior. Silent no-op if not paused.

            Wraps: ctx hook resume

            Trigger phrases: \"resume ctx\", \"resume context\", \"turn nudges back on\", \"unpause\"

            See also: Pausing Context Hooks

            ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#project-specific-skills","level":2,"title":"Project-Specific Skills","text":"

            The ctx plugin ships the skills listed above. Teams can add their own project-specific skills to .claude/skills/ in the project root. These are separate from plugin-shipped skills and are scoped to the project.

            Project-specific skills follow the same format and are invoked the same way.

            Custom skills are not covered in this reference.

            ","path":["Reference","Skills"],"tags":[]},{"location":"reference/versions/","level":1,"title":"Version History","text":"","path":["Reference","Version History"],"tags":[]},{"location":"reference/versions/#version-history","level":2,"title":"Version History","text":"

            Documentation snapshots for each release.

            Tap the corresponding view docs link to view the documentation as it was at that release.

            ","path":["Reference","Version History"],"tags":[]},{"location":"reference/versions/#releases","level":2,"title":"Releases","text":"Version Release Date Documentation v0.8.0 2026-03-23 view docs v0.6.0 2026-02-16 view docs v0.3.0 2026-02-07 view docs v0.2.0 2026-02-01 view docs v0.1.2 2026-01-27 view docs v0.1.1 2026-01-26 view docs v0.1.0 2026-01-25 view docs","path":["Reference","Version History"],"tags":[]},{"location":"reference/versions/#v080-the-architecture-release","level":3,"title":"v0.8.0: The Architecture Release","text":"

            MCP server for tool-agnostic AI integration. Memory bridge connecting Claude Code auto-memory to .context/. Complete CLI restructuring into cmd/ + core/ taxonomy. All user-facing strings externalized to YAML. fatih/color removed; two direct dependencies remain.

            ","path":["Reference","Version History"],"tags":[]},{"location":"reference/versions/#v060-the-integration-release","level":3,"title":"v0.6.0: The Integration Release","text":"

            Plugin architecture: hooks and skills converted from shell scripts to Go subcommands, shipped as a Claude Code marketplace plugin. Multi-tool hook generation for Cursor, Aider, Copilot, and Windsurf. Webhook notifications with encrypted URL storage.

            ","path":["Reference","Version History"],"tags":[]},{"location":"reference/versions/#v030-the-discipline-release","level":3,"title":"v0.3.0: The Discipline Release","text":"

            Journal static site generation via zensical. 49-skill audit and fix pass (positive framing, phantom reference removal, scope tightening). Context consolidation skill. golangci-lint v2 migration.

            ","path":["Reference","Version History"],"tags":[]},{"location":"reference/versions/#v020-the-archaeology-release","level":3,"title":"v0.2.0: The Archaeology Release","text":"

            Session journal system: ctx journal import converts Claude Code JSONL transcripts to browsable Markdown. Constants refactor with semantic prefixes (Dir*, File*, Filename*). CRLF handling for Windows compatibility.

            ","path":["Reference","Version History"],"tags":[]},{"location":"reference/versions/#v012","level":3,"title":"v0.1.2","text":"

            Default Claude Code permissions deployed on ctx init. Prompting guide published as a standalone documentation page.

            ","path":["Reference","Version History"],"tags":[]},{"location":"reference/versions/#v011","level":3,"title":"v0.1.1","text":"

            Bug fixes: hook schema key format corrected, JSON unicode escaping fixed in context file output.

            ","path":["Reference","Version History"],"tags":[]},{"location":"reference/versions/#v010-initial-release","level":3,"title":"v0.1.0: Initial Release","text":"

            CLI with 15 subcommands, 6 context file types (CONSTITUTION, TASKS, CONVENTIONS, ARCHITECTURE, DECISIONS, LEARNINGS), Makefile build system, and Claude Code hook integration.

            ","path":["Reference","Version History"],"tags":[]},{"location":"reference/versions/#latest","level":2,"title":"Latest","text":"

            The main documentation always reflects the latest development version.

            For the most recent stable release, see v0.8.0.

            ","path":["Reference","Version History"],"tags":[]},{"location":"reference/versions/#changelog","level":2,"title":"Changelog","text":"

            For detailed changes between versions, see the GitHub Releases page.

            ","path":["Reference","Version History"],"tags":[]},{"location":"security/","level":1,"title":"Security","text":"

            Security model, agent hardening, and vulnerability reporting.

            ","path":["Security"],"tags":[]},{"location":"security/#securing-ai-agents","level":3,"title":"Securing AI Agents","text":"

            Defense in depth for unattended AI agents: five layers of protection, each with a known bypass, but strong in combination.

            ","path":["Security"],"tags":[]},{"location":"security/#security-policy","level":3,"title":"Security Policy","text":"

            Trust model, vulnerability reporting, permission hygiene, and security design principles.

            ","path":["Security"],"tags":[]},{"location":"security/agent-security/","level":1,"title":"Securing AI Agents","text":"","path":["Security","Securing AI Agents"],"tags":[]},{"location":"security/agent-security/#defense-in-depth-securing-ai-agents","level":1,"title":"Defense in Depth: Securing AI Agents","text":"","path":["Security","Securing AI Agents"],"tags":[]},{"location":"security/agent-security/#the-problem","level":2,"title":"The Problem","text":"

            An unattended AI agent with unrestricted access to your machine is an unattended shell with unrestricted access to your machine.

            This is not a theoretical concern. AI coding agents execute shell commands, write files, make network requests, and modify project configuration. When running autonomously (overnight, in a loop, without a human watching), the attack surface is the full capability set of the operating system user account.

            The risk is not that the AI is malicious. The risk is that the AI is controllable: it follows instructions from context, and context can be poisoned.

            ","path":["Security","Securing AI Agents"],"tags":[]},{"location":"security/agent-security/#threat-model","level":2,"title":"Threat Model","text":"","path":["Security","Securing AI Agents"],"tags":[]},{"location":"security/agent-security/#how-agents-get-compromised","level":3,"title":"How Agents Get Compromised","text":"

            AI agents follow instructions from multiple sources: system prompts, project files, conversation history, and tool outputs. An attacker who can inject content into any of these sources can redirect the agent's behavior.

            Vector How it works Prompt injection via dependencies A malicious package includes instructions in its README, changelog, or error output. The agent reads these during installation or debugging and follows them. Prompt injection via fetched content The agent fetches a URL (documentation, API response, Stack Overflow answer) containing embedded instructions. Poisoned project files A contributor adds adversarial instructions to CLAUDE.md, .cursorrules, or .context/ files. The agent loads these at session start. Self-modification between iterations In an autonomous loop, the agent modifies its own configuration files. The next iteration loads the modified config with no human review. Tool output injection A command's output (error messages, log lines, file contents) contains instructions the agent interprets and follows.","path":["Security","Securing AI Agents"],"tags":[]},{"location":"security/agent-security/#what-can-a-compromised-agent-do","level":3,"title":"What Can a Compromised Agent Do","text":"

            Depends entirely on what permissions and access the agent has:

            Access level Potential impact Unrestricted shell Execute any command, install software, modify system files Network access Exfiltrate source code, credentials, or context files to external servers Docker socket Escape container isolation by spawning privileged sibling containers SSH keys Pivot to other machines, push to remote repositories, access production systems Write access to own config Disable its own guardrails for the next iteration","path":["Security","Securing AI Agents"],"tags":[]},{"location":"security/agent-security/#the-defense-layers","level":2,"title":"The Defense Layers","text":"

            No single layer is sufficient. Each layer catches what the others miss.

            Layer 1: Soft instructions     (CONSTITUTION.md, playbook)\nLayer 2: Application controls  (permission allowlist, tool restrictions)\nLayer 3: OS-level isolation    (user accounts, filesystem, containers)\nLayer 4: Network controls      (firewall rules, airgap)\nLayer 5: Infrastructure        (VM isolation, resource limits)\n
            ","path":["Security","Securing AI Agents"],"tags":[]},{"location":"security/agent-security/#layer-1-soft-instructions-probabilistic","level":3,"title":"Layer 1: Soft Instructions (Probabilistic)","text":"

            Markdown files like CONSTITUTION.md and the Agent Playbook tell the agent what to do and what not to do. These are probabilistic: the agent usually follows them, but there is no enforcement mechanism.

            What it catches: Most common mistakes. An agent that has been told \"never delete production data\" will usually not delete production data.

            What it misses: Prompt injection. A sufficiently crafted injection can override soft instructions. Long context windows dilute attention on rules stated early. Edge cases where instructions are ambiguous.

            Verdict: Necessary but not sufficient. Good for the common case. Do not rely on it for security boundaries.

            ","path":["Security","Securing AI Agents"],"tags":[]},{"location":"security/agent-security/#layer-2-application-controls-deterministic-at-runtime-mutable-across-iterations","level":3,"title":"Layer 2: Application Controls (Deterministic at Runtime, Mutable across Iterations)","text":"

            AI tool runtimes (Claude Code, Cursor, etc.) provide permission systems: tool allowlists, command restrictions, confirmation prompts.

            For Claude Code, ctx init writes both an allowlist and an explicit deny list into .claude/settings.local.json. The golden images live in internal/assets/permissions/:

            Allowlist (allow.txt): only these tools run without confirmation:

            Bash(ctx:*)\nSkill(ctx-convention-add)\nSkill(ctx-decision-add)\n... # all bundled ctx-* skills\n

            Deny list (deny.txt): these are blocked even if the agent requests them:

            # Dangerous operations\nBash(sudo *)\nBash(git push *)\nBash(git push)\nBash(rm -rf /*)\nBash(rm -rf ~*)\nBash(curl *)\nBash(wget *)\nBash(chmod 777 *)\n\n# Sensitive file reads\nRead(**/.env)\nRead(**/.env.*)\nRead(**/*credentials*)\nRead(**/*secret*)\nRead(**/*.pem)\nRead(**/*.key)\n\n# Sensitive file edits\nEdit(**/.env)\nEdit(**/.env.*)\n

            What it catches: The agent cannot run commands outside the allowlist, and the deny list blocks dangerous operations even if a future allowlist change were to widen access. If rm, curl, sudo, or docker are not allowed and sudo/curl/wget are explicitly denied, the agent cannot invoke them regardless of what any prompt says.

            What it misses: The agent can modify the allowlist itself. In an autonomous loop, if the agent writes to .claude/settings.local.json, and the next iteration loads the modified config, then the protection is effectively lost. The application enforces the rules, but the application reads the rules from files the agent can write.

            Verdict: Strong first layer. Must be combined with self-modification prevention (Layer 3).

            ","path":["Security","Securing AI Agents"],"tags":[]},{"location":"security/agent-security/#layer-3-os-level-isolation-deterministic-and-unbypassable","level":3,"title":"Layer 3: OS-Level Isolation (Deterministic and Unbypassable)","text":"

            The operating system enforces access controls that no application-level trick can override. An unprivileged user cannot read files owned by root. A process without CAP_NET_RAW cannot open raw sockets. These are kernel boundaries.

            Control Purpose Dedicated user account No sudo, no privileged group membership (docker, wheel, adm). The agent cannot escalate privileges. Filesystem permissions Project directory writable; everything else read-only or inaccessible. Agent cannot reach other projects, home directories, or system config. Immutable config files CLAUDE.md, .claude/settings.local.json, and .context/CONSTITUTION.md owned by a different user or marked immutable (chattr +i on Linux). The agent cannot modify its own guardrails.

            What it catches: Privilege escalation, self-modification, lateral movement to other projects or users.

            What it misses: Actions within the agent's legitimate scope. If the agent has write access to source code (which it needs to do its job), it can introduce vulnerabilities in the code itself.

            Verdict: Essential. This is the layer that makes the other layers trustworthy.

            OS-level isolation does not make the agent safe; it makes the other layers meaningful.

            ","path":["Security","Securing AI Agents"],"tags":[]},{"location":"security/agent-security/#layer-4-network-controls","level":3,"title":"Layer 4: Network Controls","text":"

            An agent that cannot reach the internet cannot exfiltrate data. It also cannot ingest new instructions mid-loop from external documents, API responses, or hostile content.

            Scenario Recommended control Agent does not need the internet --network=none (container) or outbound firewall drop-all Agent needs to fetch dependencies Allow specific registries (npmjs.com, proxy.golang.org, pypi.org) via firewall rules. Block everything else. Agent needs API access Allow specific API endpoints only. Use an HTTP proxy with allowlisting.

            What it catches: Data exfiltration, phone-home payloads, downloading additional tools, and instruction injection via fetched content.

            What it misses: Nothing, if the agent genuinely does not need the network. The tradeoff is that many real workloads need dependency resolution, so a full airgap requires pre-populated caches.

            ","path":["Security","Securing AI Agents"],"tags":[]},{"location":"security/agent-security/#layer-5-infrastructure-isolation","level":3,"title":"Layer 5: Infrastructure Isolation","text":"

            The strongest boundary is a separate machine (or something that behaves like one).

            The moment you stop arguing about prompts and start arguing about kernels, you are finally doing security.

            Containers (Docker, Podman):

            docker run --rm \\\n  --network=none \\\n  --cap-drop=ALL \\\n  --memory=4g \\\n  --cpus=2 \\\n  -v /path/to/project:/workspace \\\n  -w /workspace \\\n  your-dev-image \\\n  ./loop.sh\n

            Docker Socket Is Sudo Access

            Critical: never mount the Docker socket (/var/run/docker.sock).

            An agent with socket access can spawn sibling containers with full host access, effectively escaping the sandbox.

            Use rootless Docker or Podman to eliminate this escalation path.

            Virtual machines: The strongest isolation. The guest kernel has no visibility into the host OS. No shared folders, no filesystem passthrough, no SSH keys to other machines.

            Resource limits: CPU, memory, and disk quotas prevent a runaway agent from consuming all resources. Use ulimit, cgroup limits, or container resource constraints.

            ","path":["Security","Securing AI Agents"],"tags":[]},{"location":"security/agent-security/#putting-it-all-together","level":2,"title":"Putting It All Together","text":"

            A defense-in-depth setup for overnight autonomous runs:

            Layer Implementation Stops Soft instructions CONSTITUTION.md with \"never delete tests\", \"always run tests before committing\" Common mistakes (probabilistic) Application allowlist .claude/settings.local.json with explicit tool permissions Unauthorized commands (deterministic within runtime) Immutable config chattr +i on CLAUDE.md, .claude/, CONSTITUTION.md Self-modification between iterations Unprivileged user Dedicated user, no sudo, no docker group Privilege escalation Container --cap-drop=ALL --network=none, rootless, no socket mount Host escape, network exfiltration Resource limits --memory=4g --cpus=2, disk quotas Resource exhaustion

            Each layer is straightforward; the strength is in the combination.

            ","path":["Security","Securing AI Agents"],"tags":[]},{"location":"security/agent-security/#common-mistakes","level":2,"title":"Common Mistakes","text":"

            \"I'll just use --dangerously-skip-permissions\": This disables Layer 2 entirely. Without Layers 3-5, you have no protection at all. Only use this flag inside a properly isolated container or VM.

            \"The agent is sandboxed in Docker\": A Docker container with the Docker socket mounted, running as root, with --privileged, and full network access is not sandboxed. It is a root shell with extra steps.

            \"CONSTITUTION.md says not to do that\": Markdown is a suggestion. It works most of the time. It is not a security boundary. Do not use it as one.

            \"I reviewed the CLAUDE.md, it's fine\": The agent can modify CLAUDE.md during iteration N. Iteration N+1 loads the modified version. Unless the file is immutable, your review is stale.

            \"The agent only has access to this one project\": Does the project directory contain .env files, SSH keys, API tokens, or credentials? Does it have a .git/config with push access to a remote? Filesystem isolation means isolating what is in the directory too.

            ","path":["Security","Securing AI Agents"],"tags":[]},{"location":"security/agent-security/#team-security-considerations","level":2,"title":"Team Security Considerations","text":"

            When multiple developers share a .context/ directory, security considerations extend beyond single-agent hardening.

            ","path":["Security","Securing AI Agents"],"tags":[]},{"location":"security/agent-security/#code-review-for-context-files","level":3,"title":"Code Review for Context Files","text":"

            Treat .context/ changes like code changes. Context files influence agent behavior (a modified CONSTITUTION.md or CONVENTIONS.md changes what every agent on the team will do next session). Review them in PRs with the same scrutiny you apply to production code.

            Watch for:

            • Weakened constitutional rules (removed constraints, softened language)
            • New decisions that contradict existing ones without acknowledging it
            • Learnings that encode incorrect assumptions
            • Task additions that bypass the team's prioritization process
            ","path":["Security","Securing AI Agents"],"tags":[]},{"location":"security/agent-security/#gitignore-patterns","level":3,"title":"Gitignore Patterns","text":"

            ctx init configures .gitignore automatically, but verify these patterns are in place:

            • Always gitignored: .ctx.key (encryption key), .context/logs/, .context/journal/
            • Team decision: scratchpad.enc (encrypted, safe to commit for shared scratchpad state); .gitignore if scratchpads are personal
            • Never committed: .env, credentials, API keys (enforced by drift secret detection)
            ","path":["Security","Securing AI Agents"],"tags":[]},{"location":"security/agent-security/#multi-developer-context-sharing","level":3,"title":"Multi-Developer Context Sharing","text":"

            CONSTITUTION.md is the shared contract. All team members and their agents inherit it. Changes require team consensus, not unilateral edits.

            When multiple agents write to the same context files concurrently (e.g., two developers adding learnings simultaneously), git merge conflicts are expected. Resolution is typically additive: accept both additions. Destructive resolution (dropping one side) loses context.

            ","path":["Security","Securing AI Agents"],"tags":[]},{"location":"security/agent-security/#team-conventions-for-context-management","level":3,"title":"Team Conventions for Context Management","text":"

            Establish and document:

            • Who reviews context changes: Same reviewers as code, or a designated context owner?
            • How to resolve conflicting decisions: If two sessions record contradictory decisions, which wins? Default: the later one must explicitly supersede the earlier one with rationale.
            • Frequency of context maintenance: Weekly ctx drift checks, monthly consolidation passes, archival after each milestone.
            ","path":["Security","Securing AI Agents"],"tags":[]},{"location":"security/agent-security/#checklist","level":2,"title":"Checklist","text":"

            Before running an unattended AI agent:

            • Agent runs as a dedicated unprivileged user (no sudo, no docker group)
            • Agent's config files are immutable or owned by a different user
            • Permission allowlist restricts tools to the project's toolchain
            • Container drops all capabilities (--cap-drop=ALL)
            • Docker socket is NOT mounted
            • Network is disabled or restricted to specific domains
            • Resource limits are set (memory, CPU, disk)
            • No SSH keys, API tokens, or credentials are accessible to the agent
            • Project directory does not contain .env or secrets files
            • Iteration cap is set (--max-iterations)
            ","path":["Security","Securing AI Agents"],"tags":[]},{"location":"security/agent-security/#further-reading","level":2,"title":"Further Reading","text":"
            • Running an Unattended AI Agent: the ctx recipe for autonomous loops, including step-by-step permissions and isolation setup
            • Security: ctx's own trust model and vulnerability reporting
            • Autonomous Loops: full documentation of the loop pattern, prompt templates, and troubleshooting
            ","path":["Security","Securing AI Agents"],"tags":[]},{"location":"security/hub/","level":1,"title":"Hub Security Model","text":"","path":["Security","Hub Security Model"],"tags":[]},{"location":"security/hub/#ctx-hub-security-model","level":1,"title":"ctx Hub: Security Model","text":"

            What the hub defends against, what it does not defend against, and the concrete mechanisms in play.

            ","path":["Security","Hub Security Model"],"tags":[]},{"location":"security/hub/#threat-model","level":2,"title":"Threat Model","text":"

            The hub is designed for trusted cross-project knowledge sharing within a team or homelab. It assumes:

            • The hub host is trusted. Anyone with root on that box can read every entry ever published.
            • Network is semi-trusted. Hub traffic is gRPC over TCP; TLS is strongly recommended but not mandatory.
            • Client machines are trusted enough to hold a per-project client token. Losing a client token is roughly equivalent to losing an API key: scoped damage, not total compromise.
            • Entry content is not secret. Decisions, learnings, and conventions may be indexed by AI agents, rendered in docs, shared across projects. Do not push credentials or PII into the hub.

            The hub is not a secure messaging system, a secrets store, or a compliance-grade audit log. If your threat model needs those, use a dedicated tool and keep the hub for knowledge sharing.

            ","path":["Security","Hub Security Model"],"tags":[]},{"location":"security/hub/#mechanisms","level":2,"title":"Mechanisms","text":"","path":["Security","Hub Security Model"],"tags":[]},{"location":"security/hub/#bearer-tokens","level":3,"title":"Bearer Tokens","text":"

            All RPCs except Register require a bearer token in gRPC metadata. Two kinds of tokens exist:

            Kind Format Scope Lifetime Admin token ctx_adm_... Register new projects Manual rotate Client token ctx_cli_... Publish, Sync, Listen, Status Project lifetime

            Tokens are compared in constant time (crypto/subtle) to prevent timing oracles, and looked up via an O(1) hash map so the comparison cost does not depend on the total number of registered clients.

            ","path":["Security","Hub Security Model"],"tags":[]},{"location":"security/hub/#client-side-encryption-at-rest","level":3,"title":"Client-Side Encryption at Rest","text":"

            .context/.connect.enc stores the client token and hub address, encrypted with AES-256-GCM using the same scheme the notification subsystem uses. The key is derived from ctx's local keyring (see internal/crypto).

            An attacker with read access to the project directory cannot learn the client token without also breaking ctx's local keyring.

            ","path":["Security","Hub Security Model"],"tags":[]},{"location":"security/hub/#hub-side-token-storage","level":3,"title":"Hub-Side Token Storage","text":"

            Tokens Are Stored in Plaintext on the Hub Host

            <data-dir>/clients.json currently stores client tokens verbatim, not hashed. Anyone with read access to the hub's data directory sees every registered client's token and can impersonate any project that has ever registered.

            Mitigations today:

            • Run the hub as an unprivileged user and lock the data directory with chmod 700 <data-dir>.
            • Use the systemd unit in Operations, which enables ProtectSystem=strict, NoNewPrivileges=true, and a dedicated user.
            • Never expose <data-dir> over NFS, SMB, or shared filesystems.
            • Treat <data-dir> the same way you'd treat /etc/shadow: back it up encrypted, never check it into version control.

            Hashing clients.json and moving to keyring-backed storage is tracked as a follow-up in the PR #60 task group. Until that lands, assume a hub host compromise equals total hub compromise.

            ","path":["Security","Hub Security Model"],"tags":[]},{"location":"security/hub/#input-validation","level":3,"title":"Input Validation","text":"

            Every published entry is validated before it touches the log:

            • Type must be one of: decision, learning, convention, task. Unknown types are rejected.
            • ID and Origin are required and non-empty.
            • Content size is capped at 1 MB: ample for text, hostile to attempts to fill the disk.
            • Duplicate project registration is rejected; a client that replays an old Register call gets an error, not a second token.
            ","path":["Security","Hub Security Model"],"tags":[]},{"location":"security/hub/#no-script-execution","level":3,"title":"No Script Execution","text":"

            The hub never interprets entry content. There is no expression language, no template evaluation, no markdown rendering at ingest. Content is stored as bytes and fanned out to clients verbatim.

            ","path":["Security","Hub Security Model"],"tags":[]},{"location":"security/hub/#audit-trail","level":3,"title":"Audit Trail","text":"

            entries.jsonl is append-only. Every accepted publish is recorded with the publishing project's origin tag and sequence number. Nothing is ever deleted by the hub; retention is managed manually by the operator (see log rotation).

            ","path":["Security","Hub Security Model"],"tags":[]},{"location":"security/hub/#what-the-hub-does-not-defend-against","level":2,"title":"What the Hub Does Not Defend Against","text":"
            • Untrusted entry senders. A client with a valid token can publish anything (within the 1 MB cap). There is no content validation beyond shape.
            • Denial of service from a registered client. A misbehaving client can publish until disk is full. Monitor entries.jsonl growth.
            • Network eavesdropping without TLS. Plain gRPC leaks entry content and tokens. Use a TLS-terminating reverse proxy (see Multi-machine recipe).
            • Host compromise. Root on the hub host = access to every entry and every token. Harden the host.
            • Accidental secret upload. The hub will happily fan out a decision containing an API key. Sanitize content before publishing.
            ","path":["Security","Hub Security Model"],"tags":[]},{"location":"security/hub/#operational-hardening-checklist","level":2,"title":"Operational Hardening Checklist","text":"
            • Run the hub as an unprivileged user with NoNewPrivileges=true and ProtectSystem=strict (see the systemd unit in Operations).
            • Terminate TLS in front of the hub for anything beyond a trusted LAN.
            • Restrict the listen port with firewall rules to the client subnet only.
            • Back up <data-dir>/admin.token to a secrets manager; do not leave it in shell history.
            • Rotate the admin token when a team member with access leaves. Client tokens keep working across rotations.
            • Monitor entries.jsonl growth; alert on sudden spikes.
            • Run NTP on all clients to prevent entry-timestamp skew.
            • Do not publish from machines you do not trust.
            ","path":["Security","Hub Security Model"],"tags":[]},{"location":"security/hub/#responsible-disclosure","level":2,"title":"Responsible Disclosure","text":"

            Security issues in the hub follow the same process as the rest of ctx; see Reporting.

            ","path":["Security","Hub Security Model"],"tags":[]},{"location":"security/hub/#see-also","level":2,"title":"See Also","text":"
            • ctx Hub Operations
            • ctx Hub failure modes
            • HA cluster recipe
            ","path":["Security","Hub Security Model"],"tags":[]},{"location":"security/reporting/","level":1,"title":"Security Policy","text":"","path":["Security","Security Policy"],"tags":[]},{"location":"security/reporting/#reporting-vulnerabilities","level":2,"title":"Reporting Vulnerabilities","text":"

            At ctx we take security very seriously.

            If you discover a security vulnerability in ctx, please report it responsibly.

            Do NOT open a public issue for security vulnerabilities.

            ","path":["Security","Security Policy"],"tags":[]},{"location":"security/reporting/#email","level":3,"title":"Email","text":"

            Send details to security@ctx.ist.

            ","path":["Security","Security Policy"],"tags":[]},{"location":"security/reporting/#github-private-reporting","level":3,"title":"GitHub Private Reporting","text":"
            1. Go to the Security tab;
            2. Click \"Report a Vulnerability\";
            3. Provide a detailed description.
            ","path":["Security","Security Policy"],"tags":[]},{"location":"security/reporting/#encrypted-reports-optional","level":3,"title":"Encrypted Reports (Optional)","text":"

            If your report contains sensitive details (proof-of-concept exploits, credentials, or internal system information), you can encrypt your message with our PGP key:

            • In-repo: SECURITY_KEY.asc
            • Keybase: keybase.io/alekhinejose
            # Import the key\ngpg --import SECURITY_KEY.asc\n\n# Encrypt your report\ngpg --armor --encrypt --recipient security@ctx.ist report.txt\n

            Encryption is optional. Unencrypted reports to security@ctx.ist or via GitHub Private Reporting are perfectly fine.

            ","path":["Security","Security Policy"],"tags":[]},{"location":"security/reporting/#what-to-include","level":3,"title":"What to Include","text":"
            • Description of the vulnerability,
            • Steps to reproduce,
            • Potential impact,
            • Suggested fix (if any).
            ","path":["Security","Security Policy"],"tags":[]},{"location":"security/reporting/#attribution","level":2,"title":"Attribution","text":"

            We appreciate responsible disclosure and will acknowledge security researchers who report valid vulnerabilities (unless they prefer to remain anonymous).

            ","path":["Security","Security Policy"],"tags":[]},{"location":"security/reporting/#response-timeline","level":3,"title":"Response Timeline","text":"

            Open Source, Best-Effort Timelines

            ctx is a volunteer-maintained open source project.

            The timelines below are guidelines, not guarantees, and depend on contributor availability.

            We will address security reports on a best-effort basis and prioritize them by severity.

            Stage Timeframe Acknowledgment Within 48 hours Initial assessment Within 7 days Resolution target Within 30 days (depending on severity)","path":["Security","Security Policy"],"tags":[]},{"location":"security/reporting/#trust-model","level":2,"title":"Trust Model","text":"

            ctx operates within a single trust boundary: the local filesystem.

            The person who authors .context/ files is the same person who runs the agent that reads them. There is no remote input, no shared state, and no server component.

            This means:

            • ctx does not sanitize context files for prompt injection. This is a deliberate design choice, not an oversight. The files are authored by the developer who owns the machine: sanitizing their own instructions back to them would be counterproductive.
            • If you place adversarial instructions in your own .context/ files, your agent will follow them. This is expected behavior. You control the context; the agent trusts it.

            Shared Repositories

            In shared repositories, .context/ files should be reviewed in code review (the same way you would review CI/CD config or Makefiles). A malicious contributor could add harmful instructions to CONSTITUTION.md or TASKS.md.

            ","path":["Security","Security Policy"],"tags":[]},{"location":"security/reporting/#security-design","level":2,"title":"Security Design","text":"

            ctx is designed with security in mind:

            • No secrets in context: The constitution explicitly forbids storing secrets, tokens, API keys, or credentials in .context/ files
            • Local only: ctx runs entirely locally with no external network calls
            • No code execution: ctx reads and writes Markdown files only; it does not execute arbitrary code
            • Git-tracked: Core context files are meant to be committed, so they should never contain sensitive data. Exception: sessions/ and journal/ contain raw conversation data and should be gitignored
            ","path":["Security","Security Policy"],"tags":[]},{"location":"security/reporting/#permission-hygiene","level":2,"title":"Permission Hygiene","text":"

            Claude Code evaluates permissions in deny → ask → allow order. ctx init automatically populates permissions.deny with rules that block dangerous operations before the allow list is ever consulted.

            Default deny rules block:

            • sudo, git push, rm -rf /, rm -rf ~, curl, wget, chmod 777
            • Read/Edit of .env, credentials, secrets, .pem, .key files

            Even with deny rules in place, the allow list accumulates one-off permissions over time. Periodically review for:

            • Destructive commands: git reset --hard, git clean -f, etc.
            • Config injection vectors: permissions that allow modifying files controlling agent behavior (CLAUDE.md, settings.local.json)
            • Broad wildcards: overly permissive patterns that pre-approve more than intended
            ","path":["Security","Security Policy"],"tags":[]},{"location":"security/reporting/#state-file-management","level":2,"title":"State File Management","text":"

            Hook state files (throttle markers, prompt counters, pause markers) are stored in .context/state/, which is project-scoped and gitignored. State files are automatically managed by the hooks that create them; no manual cleanup is needed.

            ","path":["Security","Security Policy"],"tags":[]},{"location":"security/reporting/#best-practices","level":2,"title":"Best Practices","text":"
            1. Review before committing: Always review .context/ files before committing
            2. Use .gitignore: If you must store sensitive notes locally, add them to .gitignore
            3. Drift detection: Run ctx drift to check for potential issues
            4. Permission audit: Review .claude/settings.local.json after busy sessions
            ","path":["Security","Security Policy"],"tags":[]},{"location":"thesis/","level":1,"title":"Context as State","text":"","path":["The Thesis"],"tags":[]},{"location":"thesis/#a-persistence-layer-for-human-ai-cognition","level":2,"title":"A Persistence Layer for Human-AI Cognition","text":"

            Jose Alekhinne - jose@ctx.ist

            February 2026

            ","path":["The Thesis"],"tags":[]},{"location":"thesis/#abstract","level":3,"title":"Abstract","text":"

            As AI tools evolve from code-completion utilities into reasoning collaborators, the knowledge that governs their behavior becomes as important as the code they produce; yet, that knowledge is routinely discarded at the end of every session.

            AI-assisted development systems assemble context at prompt time using heuristic retrieval from mutable sources: recent files, semantic search results, session history. These approaches optimize relevance at the moment of generation but do not persist the cognitive state that produced decisions. Reasoning is not reproducible, intent is lost across sessions, and teams cannot audit the knowledge that constrains automated behavior.

            This paper argues that context should be treated as deterministic, version-controlled state rather than as a transient query result. We ground this argument in three sources of evidence: a landscape analysis of 17 systems spanning AI coding assistants, agent frameworks, and knowledge stores; a taxonomy of five primitive categories that reveals irrecoverable architectural trade-offs; and an experience report from ctx, a persistence layer for AI-assisted development, which developed itself using its own persistence model across 389 sessions over 33 days. We define a three-tier model for cognitive state: authoritative knowledge, delivery views, and ephemeral state. Then we present six design invariants empirically validated by 56 independent rejection decisions observed across the analyzed landscape. We show that context determinism applies to assembly, not to model output, and that the curation cost this model requires is offset by compounding returns in reproducibility, auditability, and team cognition.

            ","path":["The Thesis"],"tags":[]},{"location":"thesis/#1-introduction","level":2,"title":"1. Introduction","text":"

            The introduction of large language models into software development has shifted the primary interface from code execution to interactive reasoning. In this environment, the correctness of an output depends not only on source code but on the context supplied to the model: the conventions, decisions, architectural constraints, and domain knowledge that bound the space of acceptable responses.

            Current systems treat context as a query result assembled at the moment of interaction. A developer begins a session; the tool retrieves what it estimates to be relevant from chat history, recent files, and vector stores; the model generates output conditioned on this transient assembly; the session ends, and the context evaporates. The next session begins the cycle again.

            This model has improved substantially over the past year. CLAUDE.md files, Cursor rules, Copilot's memory system, and tools such as Mem0, Letta, and Kindex each address aspects of the persistence problem. Yet across 17 systems we analyzed spanning AI coding assistants, agent frameworks, autonomous coding agents, and purpose-built knowledge stores, no system provides all five of the following properties simultaneously: deterministic context assembly, human-readable file-based persistence, token-budgeted delivery, a single-binary core with zero required runtime dependencies for the persistence path, and local-first operation.

            This paper does not propose a universal replacement for retrieval-centric workflows. It defines a persistence layer (embodied in ctx (https://ctx.ist)) whose advantages emerge under specific operational conditions: when reproducibility is a requirement, when knowledge must outlive sessions and individuals, when teams require shared cognitive authority, or when offline operation is necessary.

            The trade-offs (manual curation cost, reduced automatic recall, coarser granularity) are intentional and mirror the trade-offs accepted by systems that favor reproducibility over convenience, such as reproducible builds and immutable infrastructure 1 6.

            The contribution is threefold: a three-tier model for cognitive state that resolves the ambiguity between authoritative knowledge and ephemeral session artifacts; six design invariants empirically grounded in a cross-system landscape analysis; and an experience report demonstrating that the model produces compounding returns when applied to its own development.

            ","path":["The Thesis"],"tags":[]},{"location":"thesis/#2-the-limits-of-prompt-time-context","level":2,"title":"2. The Limits of Prompt-Time Context","text":"

            Prompt-time assembly pipelines typically consist of corpus selection, retrieval, ranking, and truncation. These pipelines are probabilistic and time-dependent, producing three failure modes that compound over the lifetime of a project.

            ","path":["The Thesis"],"tags":[]},{"location":"thesis/#21-non-reproducibility","level":3,"title":"2.1 Non-Reproducibility","text":"

            If context is derived from mutable sources using heuristic ranking, identical requests at different times receive different inputs. A developer who asks \"What is our authentication strategy?\" on Tuesday may receive a different context window than the same question on Thursday: not because the strategy changed, but because the retrieval heuristic surfaced different fragments.

            Reproducibility (the ability to reconstruct the exact inputs that produced a given output) is a foundational property of reliable systems. Its loss in AI-assisted development mirrors the historical evolution from ad-hoc builds to deterministic build systems 1 2. The build community learned that when outputs depend on implicit state (environment variables, system clocks, network-fetched dependencies), debugging becomes archaeology. The same principle applies when AI outputs depend on non-deterministic context retrieval.

            ","path":["The Thesis"],"tags":[]},{"location":"thesis/#22-opaque-knowledge","level":3,"title":"2.2 Opaque Knowledge","text":"

            Embedding-based memory increases recall but reduces inspectability. When a vector store determines that a code snippet is \"similar\" to the current query, the ranking function is opaque: the developer cannot inspect why that snippet was chosen, whether a more relevant artifact was excluded, or whether the ranking will remain stable. This prevents deterministic debugging, policy auditing, and causal attribution (properties that information retrieval theory identifies as fundamental trade-offs of probabilistic ranking) 3.

            In practice, this opacity manifests as a compliance ceiling. In our experience developing a context management system (detailed in Section 7), soft instructions (directives that ask an AI agent to read specific files or follow specific procedures) achieve approximately 75-85% compliance. The remaining 15-25% represents cases where the agent exercises judgment about whether the instruction applies, effectively applying a second ranking function on top of the explicit directive. When 100% compliance is required, instruction is insufficient; the content must be injected directly, removing the agent's option to skip it.

            ","path":["The Thesis"],"tags":[]},{"location":"thesis/#23-loss-of-intent","level":3,"title":"2.3 Loss of Intent","text":"

            Session transcripts record interaction but not cognition. A transcript captures what was said but not which assumptions were accepted, which alternatives were rejected, or which constraints governed the decision. The distinction matters: a decision to use PostgreSQL recorded as a one-line note (\"Use PostgreSQL\") teaches a model what was decided; a structured record with context, rationale, and consequences teaches it why (and why is what prevents the model from unknowingly reversing the decision in a future session) 4.

            Session transcripts provide history. Cognitive state requires something more: the persistent, structured representation of the knowledge required for correct decision-making.

            ","path":["The Thesis"],"tags":[]},{"location":"thesis/#3-cognitive-state-a-three-tier-model","level":2,"title":"3. Cognitive State: A Three-Tier Model","text":"","path":["The Thesis"],"tags":[]},{"location":"thesis/#31-definitions","level":3,"title":"3.1 Definitions","text":"

            We define cognitive state as the authoritative, persistent representation of the knowledge required for correct decision-making within a project. It is human-authored or human-ratified, versioned, inspectable, and reproducible. It is distinct from logs, transcripts, retrieval results, and model-generated summaries.

            Previous formulations of this idea have treated cognitive state as a monolithic concept. In practice, a three-tier model better captures the operational reality:

            Tier 1: Authoritative State: The canonical knowledge that the system treats as ground truth. In a concrete implementation, this corresponds to a set of human-curated files with defined schemas: a constitution (inviolable rules), conventions (code patterns), an architecture document (system structure), decision records (choices with rationale), learnings (captured experience), a task list (current work), a glossary (domain terminology), and an agent playbook (operating instructions). Each file has a single purpose, a defined lifecycle, and a distinct update frequency. Authoritative state is version-controlled alongside code and reviewed through the same mechanisms (diffs, pull requests, blame annotations).

            Tier 2: Delivery Views: Derived representations of authoritative state, assembled for consumption by a model. A delivery view is produced by a deterministic assembly function that takes the authoritative state, a token budget, and an inclusion policy as inputs and produces a context window as output. The same authoritative state, budget, and policy must always produce the same delivery view. Delivery views are ephemeral (they exist only for the duration of a session), but their construction is reproducible.

            Tier 3: Ephemeral State: Session transcripts, scratchpad notes, draft journal entries, and other artifacts that exist during or immediately after a session but are not authoritative. Ephemeral state is the raw material from which authoritative state may be extracted through human review, but it is never consumed directly by the assembly function.

            This three-tier model resolves confusion present in earlier formulations: the claim that AI output is a deterministic function of the repository state. The corrected claim is that context selection is deterministic (the delivery view is a function of authoritative state), but model output remains stochastic, conditioned on the deterministic context. Formally:

            delivery_view = assemble(authoritative_state, budget, policy)\noutput = model(delivery_view)   # stochastic\n

            The persistence layer's contribution is making assemble reproducible, not making model deterministic.

            ","path":["The Thesis"],"tags":[]},{"location":"thesis/#32-separation-of-concerns","level":3,"title":"3.2 Separation of Concerns","text":"

            The decision to separate authoritative state into distinct files with distinct purposes is not cosmetic. Different types of knowledge have different lifecycles:

            Knowledge Type Update Frequency Read Frequency Load Priority Example Constitution Rarely Every session Always \"Never commit secrets to git\" Tasks Every session Session start Always \"Implement token budget CLI flag\" Conventions Weekly Before coding High \"All errors use structured logging with severity levels\" Decisions When decided When questioning Medium \"Use PostgreSQL over MySQL (see ADR-003)\" Learnings When learned When stuck Medium \"Hook scripts >50ms degrade interactive UX\" Architecture When changed When designing On demand \"Three-layer pipeline: ingest → enrich → assemble\" Journal Every session Rarely Never auto \"Session 247: Removed dead-end session copy layer\"

            A monolithic context file would force the assembly function to load everything or nothing. Separation enables progressive disclosure: the minimum context that matters for the current moment, with the option to load more when needed. A normal session loads the constitution, tasks, and conventions; a deep investigation loads decision history and journal entries from specific dates.

            The budget mechanism is the constraint that makes separation valuable. Without a budget, the default behavior is to load everything, which destroys the attention density that makes loaded context useful. With a budget, the assembly function must prioritize ruthlessly: constitution first (always full), then tasks and conventions (budget-capped), then decisions and learnings (scored by recency). Entries that do not fit receive title-only summaries rather than being silently dropped (an application of the \"tell me what you don't know\" pattern identified independently by four systems in our landscape analysis).

            ","path":["The Thesis"],"tags":[]},{"location":"thesis/#4-design-invariants","level":2,"title":"4. Design Invariants","text":"

            The following six invariants define the constraints that a cognitive state persistence layer must satisfy. They are not axioms chosen a priori; they are empirically grounded properties whose violation was independently identified as producing complexity costs across the 17 systems we analyzed.

            ","path":["The Thesis"],"tags":[]},{"location":"thesis/#invariant-1-markdown-on-filesystem-persistence","level":3,"title":"Invariant 1: Markdown-on-Filesystem Persistence","text":"

            Context files must be human-readable, git-diffable, and editable with any text editor. No database. No binary storage.

            Validation: 11 independent rejection decisions across the analyzed landscape protected this property. Systems that adopted embedded records, binary serialization, or knowledge graphs as their core primitive consistently traded away the ability for a developer to run cat DECISIONS.md and understand the system's knowledge. The inspection cost of opaque storage compounds over the lifetime of a project: every debugging session, every audit, every onboarding conversation requires specialized tooling to access knowledge that could have been a text file.

            ","path":["The Thesis"],"tags":[]},{"location":"thesis/#invariant-2-zero-runtime-dependencies","level":3,"title":"Invariant 2: Zero Runtime Dependencies","text":"

            The tool must work with no installed runtimes, no running services, and no API keys for core functionality.

            Validation: 13 independent rejection decisions protected this property (the most frequently defended invariant). Systems that required databases (PostgreSQL, SQLite, Redis), embedding models, server daemons, container runtimes, or cloud APIs for core operation introduced failure modes proportional to their dependency count. A persistence layer that depends on infrastructure is not a persistence layer; it is a service. Services have uptime requirements, version compatibility matrices, and operational costs that simple file operations do not.

            ","path":["The Thesis"],"tags":[]},{"location":"thesis/#invariant-3-deterministic-context-assembly","level":3,"title":"Invariant 3: Deterministic Context Assembly","text":"

            The same files plus the same budget must produce the same output. No embedding-based retrieval, no LLM-driven selection, no wall-clock-dependent scoring in the assembly path.

            Validation: 6 independent rejection decisions protected this property. Non-deterministic assembly (whether from embedding variance, LLM-based selection, or time-dependent scoring) destroys the ability to reproduce a context window and therefore to diagnose why a model produced a given output. Determinism in the assembly path is what makes the persistence layer auditable.

            ","path":["The Thesis"],"tags":[]},{"location":"thesis/#invariant-4-human-authority-over-persistent-state","level":3,"title":"Invariant 4: Human Authority over Persistent State","text":"

            The agent may propose changes to context files but must not unilaterally modify them. All persistent changes go through human-reviewable git commits.

            Validation: 6 independent rejection decisions protected this property. Systems that allowed agents to self-modify their memory (writing freeform notes, auto-pruning old entries, generating summaries as ground truth) consistently produced lower-quality persistent context than systems that enforced human review. Structure is a feature, not a limitation: across the landscape, the pattern \"structured beats freeform\" was independently discovered by four systems that evolved from freeform LLM summaries to typed schemas with required fields.

            ","path":["The Thesis"],"tags":[]},{"location":"thesis/#invariant-5-local-first-air-gap-capable","level":3,"title":"Invariant 5: Local-First, Air-Gap Capable","text":"

            Core functionality must work offline with no network access. Cloud services may be used for optional features but never for core context management.

            Validation: 7 independent rejection decisions protected this property. Infrastructure-dependent memory systems cannot operate in classified environments, isolated networks, or disaster-recovery scenarios. A filesystem-native model continues to function under all conditions where the repository is accessible.

            ","path":["The Thesis"],"tags":[]},{"location":"thesis/#invariant-6-no-default-telemetry","level":3,"title":"Invariant 6: No Default Telemetry","text":"

            Any analytics, if ever added, must be strictly opt-in.

            Validation: 4 independent rejection decisions protected this property. Default telemetry erodes the trust model that a persistence layer depends on. If developers must trust the system with their architectural decisions, operational learnings, and project constraints, the system cannot simultaneously be reporting usage data to external services.

            These six invariants collectively define a design space. Each feature proposal can be evaluated against them: a feature that violates any invariant is rejected regardless of how many other systems implement it. The discipline of constraint (refusing to add capabilities that compromise foundational properties) is itself an architectural contribution. Across the 17 analyzed systems, 56 patterns were explicitly rejected for violating these invariants. The rejection count per invariant (11, 13, 6, 6, 7, 4) provides a rough measure of each property's vulnerability to architectural erosion. A representative sample of these rejections is provided in Appendix A.1.

            ","path":["The Thesis"],"tags":[]},{"location":"thesis/#5-landscape-analysis","level":2,"title":"5. Landscape Analysis","text":"

            The 17 systems were selected to cover the architectural design space rather than to achieve completeness. Each included system satisfies three criteria: it represents a distinct architectural primitive for AI-assisted development, it is actively maintained or widely referenced, and it provides sufficient public documentation or source code for architectural inspection. The goal was to ensure that every major category of primitive (document, embedded record, state snapshot, event/message, construction/derivation) was represented by multiple systems, enabling cross-system pattern detection.

            The resulting set spans six categories: AI coding assistants (Continue, Sourcegraph/Cody, Aider, Claude Code), AI agent frameworks (CrewAI, AutoGen, LangGraph, LlamaIndex, Letta/MemGPT), autonomous coding agents (OpenHands, Sweep), session provenance tools (Entire), data versioning systems (Dolt, Pachyderm), pipeline/build systems (Dagger), and purpose-built knowledge stores (QubicDB, Kindex). Each system was analyzed from its source code and documentation, producing 34 individual analysis artifacts (an architectural profile and a set of insights per system) that yielded 87 adopt/adapt recommendations, 56 explicit rejection decisions, and 52 watch items.

            ","path":["The Thesis"],"tags":[]},{"location":"thesis/#51-primitive-taxonomy","level":3,"title":"5.1 Primitive Taxonomy","text":"

            Every system in the AI-assisted development landscape operates on a core primitive: an atomic unit around which the entire architecture revolves. Our analysis of 17 systems reveals five categories of primitives, each making irrecoverable trade-offs:

            Group A: Document/File Primitives: Human-readable documents as the primary unit. Documents are authored by humans, version-controlled in git, and consumed by AI tools. The invariant of this group is that the primitive is always human-readable and version-controllable with standard tools. Three systems participate in this pattern: the system described in this paper as a pure expression, and Continue (via its rules directory) and Claude Code (via CLAUDE.md files) as partial participants: both use document-based context as an input but organize around different core primitives.

            Group B: Embedded Record Primitives: Vector-embedded records stored with numerical embeddings for similarity search, metadata for filtering, and scoring mechanisms for ranking. Five systems use this approach (LlamaIndex, CrewAI, Letta/MemGPT, QubicDB, Kindex). The invariant is that the primitive requires an embedding model or vector database for core operations: a dependency that precludes offline and air-gapped use.

            Group C: State Snapshot Primitives: Point-in-time captures of the complete system state. The invariant is that any past state can be reconstructed at any historical point. Three systems use this approach (LangGraph, Entire, Dolt).

            Group D: Event/Message Primitives: Sequential events or messages forming an append-only log with causal relationships. Four systems use this approach (OpenHands, AutoGen, Claude Code, Sweep). The invariant is temporal ordering and append-only semantics.

            Group E: Construction/Derivation Primitives: Derived or constructed values that encode how they were produced. The invariant is that the primitive is a function of its inputs; re-executing the same inputs produces the same primitive. Three systems use this approach (Dagger, Pachyderm, Aider).

            ","path":["The Thesis"],"tags":[]},{"location":"thesis/#52-comparison-matrix","level":3,"title":"5.2 Comparison Matrix","text":"

            The five primitive categories differ along seven dimensions:

            Property Document Embedded Record State Snapshot Event/Message Construction Human-readable Yes No Varies Partially No Version-controllable Yes No Varies Yes Yes Queryable by meaning No Yes No No No Rewindable Via git No Yes Yes (replay) Yes Deterministic Yes No Yes Yes Yes Zero-dependency Yes No Varies Varies Varies Offline-capable Yes No Varies Varies Yes

            The document primitive is the only one that simultaneously satisfies human-readability, version-controllability, determinism, zero dependencies, and offline capability. This is not because documents are superior in general (embedded records provide semantic queryability that documents lack) but because the combination of all five properties is what the persistence layer requires. The choice between primitive categories is not a matter of capability but of which properties are considered invariant.

            ","path":["The Thesis"],"tags":[]},{"location":"thesis/#53-convergent-patterns","level":3,"title":"5.3 Convergent Patterns","text":"

            Across the 17 analyzed systems, six design patterns were independently discovered. These convergent patterns carry extra validation weight because they emerged from different problem spaces:

            Pattern 1: \"Tell me what you don't know\": When context is incomplete, explicitly communicate to the model what information is missing and what confidence level the provided context represents. Four systems independently converged on this pattern: inserting skip markers, tracking evidence gaps, annotating provenance, or naming output quality tiers.

            Pattern 2: \"Freshness matters\": Information relevance decreases over time. Three systems independently chose exponential decay with different half-lives (30 days, 90 days, and LRU ordering). Static priority ordering with no time dimension leaves relevant recent knowledge at the same priority as stale entries. This pattern is in productive tension with the persistence model's emphasis on determinism: the claim is not that time-dependence is irrelevant, but that it belongs in the curation step (a human deciding to consolidate or archive stale entries) rather than in the assembly function (an algorithm silently down-ranking entries based on age).

            Pattern 3: \"Content-address everything\": Compute a hash of content at creation time for deduplication, cache invalidation, integrity verification, and change detection. Five systems independently implement content hashing, each discovering it solves different problems 5.

            Pattern 4: \"Structured beats freeform\": When capturing knowledge or session state, a structured schema with required fields produces more useful data than freeform text. Four systems evolved from freeform summaries to typed schemas: one moving from LLM-generated prose to a structured condenser with explicit fields for completed tasks, pending tasks, and files modified.

            Pattern 5: \"Protocol convergence\": The Model Context Protocol (MCP) is emerging as a standard tool integration layer. Nine of 17 systems support it, spanning every category in the analysis. MCP's significance for the persistence model is that it provides a transport mechanism for context delivery without dictating how context is stored or assembled. This makes the approach compatible with both retrieval-centric and persistence-centric architectures.

            Pattern 6: \"Human-in-the-loop for memory\": Critical memory decisions should involve human judgment. Fully automated memory management produces lower-quality persistent context than human-reviewed systems. Four systems independently converged on variants of this pattern: ceremony-based consolidation, interrupt/resume for human input, confirmation mode for high-risk actions, and separated \"think fast\" vs. \"think slow\" processing paths.

            Pattern 6 directly validates the ceremony model described in this paper. The persistence layer requires human curation not because automation is impossible, but because the quality of persistent knowledge degrades when the curation step is removed. The improvement opportunity is to make curation easier, not to automate it away.

            ","path":["The Thesis"],"tags":[]},{"location":"thesis/#6-worked-example-architectural-decision-under-two-models","level":2,"title":"6. Worked Example: Architectural Decision under Two Models","text":"

            We now instantiate the three-tier model in a concrete system (ctx) and illustrate the difference between prompt-time retrieval and cognitive state persistence using a real scenario from its development.

            ","path":["The Thesis"],"tags":[]},{"location":"thesis/#61-the-problem","level":3,"title":"6.1 The Problem","text":"

            During development, the system accumulated three overlapping storage layers for session data: raw transcripts (owned by the AI tool), session copies (JSONL copies plus context snapshots), and enriched journal entries (Markdown summaries). The middle layer (session copies) was a dead-end write sink. An auto-save hook copied transcripts to a directory that nothing read from, because the journal pipeline already read directly from the raw transcripts. Approximately 15 source files, a shell hook, 20 configuration constants, and 30 documentation references supported infrastructure with no consumers.

            ","path":["The Thesis"],"tags":[]},{"location":"thesis/#62-prompt-time-retrieval-model","level":3,"title":"6.2 Prompt-Time Retrieval Model","text":"

            In a retrieval-based system, the decision to remove the middle layer depends on whether the retrieval function surfaces the relevant context:

            The developer asks: \"Should we simplify the session storage?\" The retrieval system must find and rank the original discussion thread where the three layers were designed, the usage statistics showing zero reads from the middle layer, the journal pipeline documentation showing it reads from raw transcripts directly, and the dependency analysis showing 15 files, a hook, and 30 doc references. If any of these fragments are not retrieved (because they are in old chat history, because the embedding similarity score is low, or because the token budget was consumed by more recent but less relevant context), the model may recommend preserving the middle layer, or may not realize it exists.

            Six months later, a new team member asks the same question. The retrieval results will differ: the original discussion has aged out of recency scoring, the usage statistics are no longer in recent history, and the model may re-derive the answer or arrive at a different conclusion.

            ","path":["The Thesis"],"tags":[]},{"location":"thesis/#63-cognitive-state-model","level":3,"title":"6.3 Cognitive State Model","text":"

            In the persistence model, the decision is recorded as a structured artifact at write time:

            ## [2026-02-11] Remove .context/sessions/ storage layer\n\n**Status**: Accepted\n\n**Context**: The session/recall/journal system had three overlapping\nstorage layers. The recall pipeline reads directly from raw transcripts,\nmaking .context/sessions/ a dead-end write sink that nothing reads from.\n\n**Decision**: Remove .context/sessions/ entirely. Two stores remain:\nraw transcripts (global, tool-owned) and enriched journal\n(project-local).\n\n**Rationale**: Dead-end write sinks waste code surface, maintenance\neffort, and user attention. The recall pipeline already proved that\nreading directly from raw transcripts is sufficient. Context snapshots\nare redundant with git history.\n\n**Consequence**: Deleted internal/cli/session/ (15 files), removed\nauto-save hook, removed --auto-save from watch, removed pre-compact\nauto-save, removed /ctx-save skill, updated ~45 documentation files.\nFour earlier decisions superseded.\n

            This artifact is:

            • Deterministically included in every subsequent session's delivery view (budget permitting, with title-only fallback if budget is exceeded)
            • Human-readable and reviewable as a diff in the commit that introduced it
            • Permanent: it persists in version control regardless of retrieval heuristics
            • Causally linked: it explicitly supersedes four earlier decisions, creating an auditable chain

            When the new team member asks \"Why don't we store session copies?\" six months later, the answer is the same artifact, at the same revision, with the same rationale. The reasoning is reconstructible because it was persisted at write time, not discovered at query time.

            ","path":["The Thesis"],"tags":[]},{"location":"thesis/#64-the-diff-when-policy-changes","level":3,"title":"6.4 The Diff When Policy Changes","text":"

            If a future requirement re-introduces session storage (for example, to support multi-agent session correlation), the change appears as a diff to the decision record:

            - **Status**: Accepted\n+ **Status**: Superseded by [2026-08-15] Reintroduce session storage\n+ for multi-agent correlation\n

            The new decision record references the old one, creating a chain of reasoning visible in git log. In the retrieval model, the old decision would simply be ranked lower over time and eventually forgotten.

            ","path":["The Thesis"],"tags":[]},{"location":"thesis/#7-experience-report-a-system-that-designed-itself","level":2,"title":"7. Experience Report: A System That Designed Itself","text":"

            The persistence model described in this paper was developed and tested by using it on its own development. Over 33 days and 389 sessions, the system's context files accumulated a detailed record of decisions made, reversed, and consolidated: providing quantitative and qualitative evidence for the model's properties.

            ","path":["The Thesis"],"tags":[]},{"location":"thesis/#71-scale-and-structure","level":3,"title":"7.1 Scale and Structure","text":"

            The development produced the following authoritative state artifacts:

            • 8 consolidated decision records covering 24 original decisions spanning context injection architecture, hook design, task management, security, agent autonomy, and webhook systems
            • 18 consolidated learning records covering 75 original observations spanning agent compliance, hook behavior, testing patterns, documentation drift, and tool integration
            • A constitution with 13 inviolable rules across 4 categories (security, quality, process, context preservation)
            • 389 enriched journal entries providing a complete session-level audit trail

            The consolidation ratio (24 decisions compressed to 8 records, 75 learnings compressed to 18) illustrates the curation cost and its return: authoritative state becomes denser and more useful over time as related entries are merged, contradictions are resolved, and superseded decisions are marked.

            ","path":["The Thesis"],"tags":[]},{"location":"thesis/#72-architectural-reversals","level":3,"title":"7.2 Architectural Reversals","text":"

            Three architectural reversals during development provide evidence that the persistence model captures and communicates reasoning effectively:

            Reversal 1: The two-tier persistence model: The original design included a middle storage tier for session copies. After 21 days of development, the middle tier was identified as a dead-end write sink (described in Section 6). The decision record captured the full context, and the removal was executed cleanly: 15 source files, a shell hook, and 45 documentation references. The pattern of a \"dead-end write sink\" was subsequently observed in 7 of 17 systems in our landscape analysis that store raw transcripts alongside structured context.

            Reversal 2: The prompt-coach hook: An early design included a hook that analyzed user prompts and offered improvement suggestions. After deployment, the hook produced zero useful tips, its output channel was invisible to users, and it accumulated orphan temporary files. The hook was removed, and the decision record captured the failure mode for future reference.

            Reversal 3: The soft-instruction compliance model: The original context injection strategy relied on soft instructions: directives asking the AI agent to read specific files. After measuring compliance across multiple sessions, we found a consistent 75-85% compliance ceiling. The revised strategy injects content directly, bypassing the agent's judgment about whether to comply. The learning record captures the ceiling measurement and the rationale for the architectural change.

            Each reversal was captured as a structured decision record with context, rationale, and consequences. In a retrieval-based system, these reversals would exist only in chat history, discoverable only if the retrieval function happens to surface them. In the persistence model, they are permanent, indexable artifacts that inform future decisions.

            ","path":["The Thesis"],"tags":[]},{"location":"thesis/#73-compliance-ceiling","level":3,"title":"7.3 Compliance Ceiling","text":"

            The 75-85% compliance ceiling for soft instructions is the most operationally significant finding from the experience report. It means that any context management strategy relying on agent compliance with instructions (\"read this file,\" \"follow this convention,\" \"check this list\") has a hard ceiling on reliability.

            The root cause is structural: the instruction \"don't apply judgment\" is itself evaluated by judgment. When an agent receives a directive to read a file, it first assesses whether the directive is relevant to the current task (and that assessment is the judgment the directive was trying to prevent).

            The architectural response maps directly to the formal model defined in Section 3.1. Content requiring 100% compliance is included in authoritative_state and injected by the deterministic assemble function, bypassing the agent entirely. Content where 80% compliance is acceptable is delivered as instructions within the delivery view. The three-tier architecture makes this distinction explicit: authoritative state is injected; delivery views are assembled deterministically; ephemeral state is available but not pushed.

            ","path":["The Thesis"],"tags":[]},{"location":"thesis/#74-compounding-returns","level":3,"title":"7.4 Compounding Returns","text":"

            Over 33 days, we observed a qualitative shift in the development experience. Early sessions (days 1-7) spent significant time re-establishing context: explaining conventions, re-stating constraints, re-deriving past decisions. Later sessions (days 25-33) began with the agent loading curated context and immediately operating within established constraints, because the constraints were in files rather than in chat history.

            This compounding effect (where each session's context curation improves all subsequent sessions) is the primary return on the curation investment. The cost is borne once (writing a decision record, capturing a learning, updating the task list); the benefit is collected on every subsequent session load.

            The effect is analogous to compound interest in financial systems: the knowledge base grows not linearly with effort but with increasing marginal returns as new knowledge interacts with existing context. A learning captured on day 5 prevents a mistake on day 12, which avoids a debugging session that would have consumed a day 12 session, freeing that session for productive work that generates new learnings. The growth is not literally exponential (it is bounded by project scope and subject to diminishing returns as the knowledge base matures), but within the observed 33-day window, the returns were consistently accelerating.

            ","path":["The Thesis"],"tags":[]},{"location":"thesis/#75-scope-and-generalizability","level":3,"title":"7.5 Scope and Generalizability","text":"

            This experience report is self-referential by design: the system was developed using its own persistence model. This circularity strengthens the internal validity of the findings (the model was stress-tested under authentic conditions) but limits external generalizability. The crossover point, observed around day 10, occurred on a single project of moderate complexity with a small team already familiar with the model's assumptions. Whether the same crossover holds for larger teams, for codebases with different characteristics, or for teams adopting the model without having designed it remains an open empirical question. The quantitative claims in this section should be read as existence proofs (demonstrating that the model can produce compounding returns) rather than as predictions about specific adoption scenarios.

            ","path":["The Thesis"],"tags":[]},{"location":"thesis/#8-situating-the-persistence-layer","level":2,"title":"8. Situating the Persistence Layer","text":"

            The persistence layer occupies a specific position in the stack of AI-assisted development:

            Application Logic\nAI Interaction / Agents\nContext Retrieval Systems\nCognitive State Persistence Layer\nVersion Control / Storage\n

            Current systems innovate primarily in the retrieval layer (improving how context is discovered, ranked, and delivered at query time). The persistence layer sits beneath retrieval and above version control. Its role is to maintain the authoritative state that retrieval systems may query but do not own. The relationship is complementary: retrieval answers \"What in the corpus might be relevant?\"; cognitive state answers \"What must be true for this system to operate correctly?\" A mature system uses both: retrieval for discovery, persistence for authority.

            ","path":["The Thesis"],"tags":[]},{"location":"thesis/#9-applicability-and-trade-offs","level":2,"title":"9. Applicability and Trade-Offs","text":"","path":["The Thesis"],"tags":[]},{"location":"thesis/#91-when-to-use-this-model","level":3,"title":"9.1 When to Use This Model","text":"

            A cognitive state persistence layer is most appropriate when:

            Reproducibility is a requirement: If a system must be able to answer \"Why did this output occur, and can it be produced again?\" then deterministic, version-controlled context becomes necessary. This is relevant in regulated environments, safety-critical systems, long-lived infrastructure, and security-sensitive deployments.

            Knowledge must outlive sessions and individuals: Projects with multi-year lifetimes accumulate architectural decisions, domain interpretations, and operational policy. If this knowledge is stored only in chat history, issue trackers, and institutional memory, it decays. The persistence model converts implicit knowledge into branchable, reviewable artifacts.

            Teams require shared cognitive authority: In collaborative environments, correctness depends on a stable answer to \"What does the system believe to be true?\" When this answer is derived from retrieval heuristics, authority shifts to ranking algorithms. When it is versioned and human-readable, authority remains with the team.

            Offline or air-gapped operation is required: Infrastructure-dependent memory systems cannot operate in classified environments, isolated networks, or disaster-recovery scenarios.

            ","path":["The Thesis"],"tags":[]},{"location":"thesis/#92-when-not-to-use-this-model","level":3,"title":"9.2 When Not to Use This Model","text":"

            Zero-configuration personal workflows: For short-lived or exploratory tasks, the cost of explicit knowledge curation outweighs its benefits. Heuristic retrieval is sufficient when correctness is non-critical, outputs are disposable, and historical reconstruction is unnecessary.

            Maximum automatic recall from large corpora: Vector retrieval systems provide superior performance when the primary task is searching vast, weakly structured information spaces. The persistence model assumes that what matters can be decided and that this decision is valuable to record.

            Fully autonomous agent architectures: Agent runtimes that generate and discard state continuously, optimizing for local goal completion, do not benefit from a model that centers human ratification of knowledge.

            ","path":["The Thesis"],"tags":[]},{"location":"thesis/#93-incremental-adoption","level":3,"title":"9.3 Incremental Adoption","text":"

            The transition does not require full system replacement. An incremental path:

            Step 1: Record decisions as versioned artifacts: Instead of allowing conclusions to remain in discussion threads, persist them in reviewable form with context, rationale, and consequences 4. This alone converts ephemeral reasoning into the cognitive state.

            Step 2: Make inclusion deterministic: Define explicit assembly rules. Retrieval may still exist, but it is no longer authoritative.

            Step 3: Move policy into cognitive state: When system behavior depends on stable constraints, encode those constraints as versioned knowledge. Behavior becomes reproducible.

            Step 4: Optimize assembly, not retrieval: Once the authoritative layer exists, performance improvements come from budgeting, caching, and structural refinement rather than from improving ranking heuristics.

            ","path":["The Thesis"],"tags":[]},{"location":"thesis/#94-the-curation-cost","level":3,"title":"9.4 The Curation Cost","text":"

            The primary objection to this model is the cost of explicit knowledge curation. This cost is real. Writing a structured decision record takes longer than letting a chatbot auto-summarize a conversation. Maintaining a glossary requires discipline. Consolidating 75 learnings into 18 records requires judgment.

            The response is not that the cost is negligible but that it is amortized. A decision record written once is loaded hundreds of times. A learning captured today prevents repeated mistakes across all future sessions. The curation cost is paid once; the benefit compounds.

            The experience report provides rough order-of-magnitude numbers. Across 389 sessions over 33 days, curation activities (writing decision records, capturing learnings, updating the task list, consolidating entries) averaged approximately 3-5 minutes per session. In early sessions (days 1-7), before curated context existed, re-establishing context consumed approximately 10-15 minutes per session: re-explaining conventions, re-stating architectural constraints, re-deriving decisions that had been made but not persisted. By the final week (days 25-33), the re-explanation overhead had dropped to near zero: the agent loaded curated context and began productive work immediately.

            At ~12 sessions per day, the curation cost was roughly 35-60 minutes daily. The re-explanation cost in the first week was roughly 120-180 minutes daily. By the third week, that cost had fallen to under 15 minutes daily while the curation cost remained stable. The crossover (where cumulative curation cost was exceeded by cumulative time saved) occurred around day 10. These figures are approximate and derived from a single project with a small team already familiar with the model; the crossover point will vary with project complexity, team size, and curation discipline.

            ","path":["The Thesis"],"tags":[]},{"location":"thesis/#10-future-work","level":2,"title":"10. Future Work","text":"

            Several directions are compatible with the model described here:

            Section-level deterministic budgeting: Current assembly operates at file granularity. Section-level budgeting would allow finer-grained control (including specific decision records while excluding others within the same file) without sacrificing determinism.

            Causal links between decisions: The experience report shows that decisions frequently reference earlier decisions (superseding, extending, or qualifying them). Formal causal links would enable traversal of the decision graph and automatic detection of orphaned or contradictory constraints.

            Content-addressed context caches: Five systems in our landscape analysis independently discovered that content hashing provides cache invalidation, integrity verification, and change detection. Applying content addressing to the assembly output would enable efficient cache reuse when the authoritative state has not changed.

            Conditional context inclusion: Five systems independently suggest that context entries could carry activation conditions (file patterns, task keywords, or explicit triggers) that control whether they are included in a given assembly. This would reduce the per-session budget cost of large knowledge bases without sacrificing determinism.

            Provenance metadata: Linking context entries to the sessions, decisions, or learnings that motivated them would strengthen the audit trail. Optional provenance fields on Markdown entries (session identifier, cause reference, motivation) would be lightweight and compatible with the existing file-based model.

            ","path":["The Thesis"],"tags":[]},{"location":"thesis/#11-conclusion","level":2,"title":"11. Conclusion","text":"

            AI-assisted development has treated context as a \"query result\" assembled at the moment of interaction, discarded at the session end. This paper identifies a complementary layer: the persistence of authoritative cognitive state as deterministic, version-controlled artifacts.

            The contribution is grounded in three sources of evidence. A landscape analysis of 17 systems reveals five categories of primitives and shows that no existing system provides the combination of human-readability, determinism, zero dependencies, and offline capability that the persistence layer requires. Six design invariants, validated by 56 independent rejection decisions, define the constraints of the design space. An experience report over 389 sessions and 33 days demonstrates compounding returns: later sessions start faster, decisions are not re-derived, and architectural reversals are captured with full context.

            The core claim is this: persistent cognitive state enables causal reasoning across time. A system built on this model can explain not only what is true, but why it became true and when it changed.

            When context is the state:

            • Reasoning is reproducible: the same authoritative state, budget, and policy produce the same delivery view.
            • Knowledge is auditable: decisions are traceable to explicit artifacts with context, rationale, and consequences.
            • Understanding compounds: each session's curation improves all subsequent sessions.

            The choice between retrieval-centric workflows and a persistence layer is not a matter of capability but of time horizon. Retrieval optimizes for relevance at the moment of interaction. Persistence optimizes for the durability of understanding across the lifetime of a project.

            🐸🖤 \"Gooood... let the deterministic context flow through the repository...\" - Kermit the Sidious, probably

            ","path":["The Thesis"],"tags":[]},{"location":"thesis/#appendix-a-representative-rejection-decisions","level":2,"title":"Appendix A: Representative Rejection Decisions","text":"

            The 56 rejection decisions referenced in Section 4 were cataloged across all 17 system analyses, grouped by the invariant they would violate. This appendix provides a representative sample (two per invariant) to illustrate the methodology.

            Invariant 1: Markdown-on-Filesystem (11 rejections): CrewAI's vector embedding storage was rejected because embeddings are not human-readable, not git-diff-friendly, and require external services. Kindex's knowledge graph as core primitive was rejected because it requires specialized commands to inspect content that could be a text file (kin show <id> vs. cat DECISIONS.md).

            Invariant 2: Zero Runtime Dependencies (13 rejections): Letta/MemGPT's PostgreSQL-backed architecture was rejected because it conflicts with local-first, no-database, single-binary operation. Pachyderm's Kubernetes-based distributed architecture was rejected as the antithesis of a single-binary design for a tool that manages text files.

            Invariant 3: Deterministic Assembly (6 rejections): LlamaIndex's embedding-based retrieval as the primary selection mechanism was rejected because it destroys determinism, requires an embedding model, and removes human judgment from the selection process. QubicDB's wall-clock-dependent scoring was rejected because it directly conflicts with the \"same inputs produce same output\" property.

            Invariant 4: Human Authority (6 rejections): Letta/MemGPT's agent self-modification of memory was rejected as fundamentally opposed to human-curated persistence. Claude Code's unstructured auto-memory (where the agent writes freeform notes) was rejected because structured files with defined schemas produce higher-quality persistent context than unconstrained agent output.

            Invariant 5: Local-First / Air-Gap Capable (7 rejections): Sweep's cloud-dependent architecture was rejected as fundamentally incompatible with the local-first, offline-capable model. LangGraph's managed cloud deployment was rejected because cloud dependencies for core functionality violate air-gap capability.

            Invariant 6: No Default Telemetry (4 rejections): Continue's telemetry-by-default (PostHog) was rejected because it contradicts the local-first, privacy-respecting trust model. CrewAI's global telemetry on import (Scarf tracking pixel) was rejected because it violates user trust and breaks air-gap capability.

            The remaining 9 rejections did not map to a specific invariant but were rejected on other architectural grounds: for example, Aider's full-file-content-in-context approach (which defeats token budgeting), AutoGen's multi-agent orchestration as core primitive (scope creep), and Claude Code's 30-day transcript retention limit (institutional knowledge should have no automatic expiration).

            ","path":["The Thesis"],"tags":[]},{"location":"thesis/#references","level":2,"title":"References","text":"
            1. Reproducible Builds Project, \"Reproducible Builds: Increasing the Integrity of Software Supply Chains\", 2017. https://reproducible-builds.org/docs/definition/ ↩↩↩

            2. S. McIntosh et al., \"The Impact of Build System Evolution on Software Quality\", ICSE, 2015. https://doi.org/10.1109/ICSE.2015.70 ↩

            3. C. Manning, P. Raghavan, H. Schütze, Introduction to Information Retrieval, Cambridge University Press, 2008. https://nlp.stanford.edu/IR-book/ ↩

            4. M. Nygard, \"Documenting Architecture Decisions\", Cognitect Blog, 2011. https://cognitect.com/blog/2011/11/15/documenting-architecture-decisions ↩↩

            5. L. Torvalds et al., Git Internals - Git Objects (content-addressed storage concepts). https://git-scm.com/book/en/v2/Git-Internals-Git-Objects ↩

            6. Kief Morris, Infrastructure as Code, O'Reilly, 2016. ↩

            7. J. Kreps, \"The Log: What every software engineer should know about real-time data's unifying abstraction\", 2013. https://engineering.linkedin.com/distributed-systems/log ↩

            8. P. Hunt et al., \"ZooKeeper: Wait-free coordination for Internet-scale systems\", USENIX ATC, 2010. https://www.usenix.org/legacy/event/atc10/tech/full_papers/Hunt.pdf ↩

            ","path":["The Thesis"],"tags":[]}]} \ No newline at end of file +{"config":{"separator":"[\\s\\-_,:!=\\[\\]()\\\\\"`/]+|\\.(?!\\d)"},"items":[{"location":"","level":1,"title":"Manifesto","text":"","path":["Manifesto"],"tags":[]},{"location":"#the-ctx-manifesto","level":1,"title":"The ctx Manifesto","text":"

            Creation, not code.

            Context, not prompts.

            Verification, not vibes.

            This Is NOT a Metaphor

            Code executes instructions.

            Creation produces outcomes.

            Confusing the two is how teams ship motion...

            ...instead of progress.

            • It was never about the code.
            • Code has zero standalone value.
            • Code is an implementation detail.

            Code is an incantation.

            Creation is the act.

            And creation does not happen in a vacuum.

            ","path":["Manifesto"],"tags":[]},{"location":"#ctx-is-the-substrate","level":2,"title":"ctx Is the Substrate","text":"

            Constraints Have Moved

            Human bandwidth is no longer the limiting factor.

            Context integrity is.

            Human bandwidth is no longer the constraint.

            Context is:

            • Without durable context, intelligence resets.
            • Without memory, reasoning decays.
            • Without structure, scale collapses.

            Creation is now limited by:

            • Clarity of intent;
            • Quality of context;
            • Rigor of verification.

            Not by speed.

            Not by capacity.

            Velocity Amplifies

            Faster execution on broken context compounds error.

            Speed multiplies whatever is already wrong.

            ","path":["Manifesto"],"tags":[]},{"location":"#humans-author-meaning","level":2,"title":"Humans Author Meaning","text":"

            Intent Is Authored

            Systems can optimize.

            Models can generalize.

            Meaning must be chosen.

            Intent is not emergent.

            Vision, goals, and direction are human responsibilities.

            We decide:

            • What matters;
            • What success means;
            • What world we are building.

            ctx encodes the intent so it...

            • survives time,
            • survives handoffs,
            • survives scale.

            Nothing important should live only in conversation.

            Nothing critical should depend on recall.

            Oral Tradition Does Not Scale

            If intent cannot be inspected, it cannot be enforced.

            ","path":["Manifesto"],"tags":[]},{"location":"#ctx-before-action","level":2,"title":"ctx Before Action","text":"

            Orientation Precedes Motion

            Acting first and understanding later is not bravery.

            It is debt.

            Never act without ctx.

            Before execution, we must verify:

            • Where we are;
            • Why we are here;
            • What constraints apply;
            • What assumptions are active.

            Action without ctx is gambling.

            Speed without orientation is noise.

            ctx is not overhead: It is the cost of correctness.

            ","path":["Manifesto"],"tags":[]},{"location":"#persistent-context-beats-prompt-memory","level":2,"title":"Persistent Context Beats Prompt Memory","text":"

            Transience Is the Default Failure Mode

            • Prompts decay.
            • Chats fragment.
            • Memory heuristics drift.

            Prompts are transient.

            Chats are lossy.

            Memory heuristics drift.

            ctx must be:

            • Durable;
            • Structured;
            • Explicit;
            • Queryable.

            Intent Must Be Intentional

            If intent exists only in a prompt...

            ...alignment is already degrading.

            Knowledge lives in the artifacts:

            • Decisions;
            • Documentation;
            • Dependency maps;
            • Evaluation history.

            Artifacts Outlive Sessions

            What is not written will be re-learned.

            At full cost.

            ","path":["Manifesto"],"tags":[]},{"location":"#what-ctx-is-not","level":2,"title":"What ctx Is Not","text":"

            Avoid Category Errors

            Mislabeling ctx guarantees misuse.

            ctx is not a memory feature.

            • ctx is not prompt engineering.
            • ctx is not a productivity hack.
            • ctx is not automation theater.

            ctx is a system for preserving intent under scale.

            ctx is infrastructure.

            ","path":["Manifesto"],"tags":[]},{"location":"#verified-reality-is-the-scoreboard","level":2,"title":"Verified Reality Is the Scoreboard","text":"

            Activity Is a False Proxy

            Output volume correlates poorly with impact.

            • Code is not progress.
            • Activity is not impact.

            The only truth that compounds is verified change.

            Verified change must exist in the real world.

            Hypotheses are cheap; outcomes are not.

            ctx captures:

            • What we expected;
            • What we observed;
            • Where reality diverged.

            If we cannot predict, measure, and verify the result...

            ...it does not count.

            ","path":["Manifesto"],"tags":[]},{"location":"#build-to-learn-not-to-accumulate","level":2,"title":"Build to Learn, Not to Accumulate","text":"

            Prototypes Have an Expiration Date

            A prototype's value is information, not longevity.

            Prototypes exist to reduce uncertainty.

            We build to:

            • Test assumptions;
            • Validate architecture;
            • Answer specific questions.

            Not everything.

            Not blindly.

            Not permanently.

            ctx records archeology so the cost is paid once.

            ","path":["Manifesto"],"tags":[]},{"location":"#failures-are-assets","level":2,"title":"Failures Are Assets","text":"

            Failure without Capture Is Waste

            Pain that does not teach is pure loss.

            Failures are not erased: They are preserved.

            Each failure becomes:

            • A documented hypothesis;
            • An analyzed deviation;
            • A permanent artifact.

            Rollback fixes symptoms: ctx fixes systems.

            A repeated mistake is a missing ctx artifact.

            ","path":["Manifesto"],"tags":[]},{"location":"#structure-enables-scale","level":2,"title":"Structure Enables Scale","text":"

            Unbounded Autonomy Destabilizes

            Power without a structure produces chaos.

            Transpose it:

            Power without any structure becomes chaos.

            ctx defines:

            • Roles;
            • Boundaries;
            • Protocols;
            • Escalation paths;
            • Decision rights.

            Ambiguity is a system failure:

            • Debates must be structured.
            • Decisions must be explicit.
            • History must be retained.
            ","path":["Manifesto"],"tags":[]},{"location":"#encode-intent-into-the-environment","level":2,"title":"Encode Intent into the Environment","text":"

            Goodwill Does Not Belong to the Table

            Alignment that depends on memory will drift.

            Alignment cannot depend on memory or goodwill.

            Do not rely on people to remember.

            Encode the behavior so that it happens by default.

            Intent is encoded as:

            • Policies;
            • Schemas;
            • Constraints;
            • Evaluation harnesses.

            Rules must be machine-readable.

            Laws must be enforceable.

            If intent is implicit, drift is guaranteed.

            ","path":["Manifesto"],"tags":[]},{"location":"#cost-is-a-first-class-signal","level":2,"title":"Cost Is a First-Class Signal","text":"

            Attention Is the Scarcest Resource

            Not ideas.

            Not ambition.

            Ideas do not compete on time:

            They compete on cost and impact:

            • Attention is finite.
            • Compute is finite.
            • Context is expensive.

            We continuously ask:

            • What the most valuable next action is.
            • What outcome justifies the cost.

            ctx guides allocation.

            Learning reshapes priority.

            ","path":["Manifesto"],"tags":[]},{"location":"#show-the-why","level":2,"title":"Show the Why","text":"

            {} (code, artifacts, apps, binaries) produce outputs; they do not preserve reasoning.

            Systems that cannot explain themselves will not be trusted.

            Traceability builds trust.

                 {} --> what\n\n    ctx --> why\n

            We record:

            • Explored paths;
            • Rejected options;
            • Assumptions made;
            • Evidence used.

            Opaque systems erode trust:

            Transparent ctx compounds understanding.

            ","path":["Manifesto"],"tags":[]},{"location":"#continuously-verify-the-system","level":2,"title":"Continuously Verify the System","text":"

            Stability Is Temporary

            Every assumption has a half-life:

            • Models drift.
            • Tools change.
            • Assumptions rot.

            ctx must be verified against reality.

            Trust is a spectrum.

            Trust is continuously re-earned:

            • Benchmarks,
            • regressions,
            • and evaluations...

            ...are safety rails.

            ","path":["Manifesto"],"tags":[]},{"location":"#ctx-is-leverage","level":2,"title":"ctx Is Leverage","text":"

            Humans Are Decision Engines

            Execution should not consume judgment.

            Humans must not be typists.

            We are the authors.

            Human effort is reserved for:

            • Judgment;
            • Design;
            • Taste;
            • Synthesis.

            Repetition is delegated.

            Toil is automated.

            ctx preserves leverage across time.

            ","path":["Manifesto"],"tags":[]},{"location":"#the-thesis","level":2,"title":"The Thesis","text":"

            Invariant

            Everything else is an implementation detail.

            • Creation is the act.
            • ctx is the substrate.
            • Verification is the truth.

            Code executes → Models reason → Agents amplify.

            ctx lives on.

            • Without ctx, intelligence resets.
            • With ctx, creation compounds.
            ","path":["Manifesto"],"tags":[]},{"location":"blog/","level":1,"title":"Blog","text":"

            Stories, insights, and lessons learned from building and using ctx.

            ","path":["Blog"],"tags":[]},{"location":"blog/#releases","level":2,"title":"Releases","text":"","path":["Blog"],"tags":[]},{"location":"blog/#ctx-v080-the-architecture-release","level":3,"title":"ctx v0.8.0: The Architecture Release","text":"

            March 23, 2026: 374 commits, 1,708 Go files touched, and a near-complete architectural overhaul. Every CLI package restructured into cmd/ + core/ taxonomy, all user-facing strings externalized to YAML, MCP server for tool-agnostic AI integration, and the memory bridge connecting Claude Code's auto-memory to .context/.

            Topics: release, architecture, refactoring, MCP, localization

            ","path":["Blog"],"tags":[]},{"location":"blog/#field-notes","level":2,"title":"Field Notes","text":"","path":["Blog"],"tags":[]},{"location":"blog/#the-watermelon-rind-anti-pattern-why-smarter-tools-make-shallower-agents","level":3,"title":"The Watermelon-Rind Anti-Pattern: Why Smarter Tools Make Shallower Agents","text":"

            April 6, 2026: Give an agent a graph query tool, and it produces output that's structurally correct but substantively hollow (the watermelon-rind anti-pattern). We ran three sessions analyzing the same codebase with different tool access: the one with no tools produced 5.2x more depth. The fix: a two-pass compiler for architecture understanding: force code reading first, verify with tools second. Constraint is the feature.

            Topics: architecture, code intelligence, agent behavior, design patterns, field notes

            ","path":["Blog"],"tags":[]},{"location":"blog/#code-structure-as-an-agent-interface-what-19-ast-tests-taught-us","level":3,"title":"Code Structure as an Agent Interface: What 19 AST Tests Taught Us","text":"

            April 2, 2026: We built 19 AST-based audit tests in a single session, touching 300+ files. In the process we discovered that \"old-school\" code quality constraints (no magic numbers, centralized error handling, 80-char lines, documentation) are exactly the constraints that make code readable to AI agents. If an agent interacts with your codebase, your codebase already is an interface. You just have not designed it as one.

            Topics: ast, code quality, agent readability, conventions, field notes

            ","path":["Blog"],"tags":[]},{"location":"blog/#we-broke-the-31-rule","level":3,"title":"We Broke the 3:1 Rule","text":"

            March 23, 2026: After v0.6.0, we ran 198 feature commits across 17 days before consolidating. The 3:1 rule says consolidate every 4th session. We did it after the 66th. The result: an 18-day, 181-commit cleanup marathon that took longer than the feature run itself. A follow-up to The 3:1 Ratio with empirical evidence from the v0.8.0 cycle.

            Topics: consolidation, technical debt, development workflow, convention drift, field notes

            ","path":["Blog"],"tags":[]},{"location":"blog/#context-engineering","level":2,"title":"Context Engineering","text":"","path":["Blog"],"tags":[]},{"location":"blog/#agent-memory-is-infrastructure","level":3,"title":"Agent Memory Is Infrastructure","text":"

            March 4, 2026: Every AI coding agent starts fresh. The obvious fix is \"memory.\" But there's a different problem memory doesn't touch: the project itself accumulates knowledge that has nothing to do with any single session. This post argues that agent memory is L2 (runtime cache); what's missing is L3 (project infrastructure).

            Topics: context engineering, agent memory, infrastructure, persistence, team knowledge

            ","path":["Blog"],"tags":[]},{"location":"blog/#context-as-infrastructure","level":3,"title":"Context as Infrastructure","text":"

            February 17, 2026: Where does your AI's knowledge live between sessions? If the answer is \"in a prompt I paste at the start,\" you are treating context as a consumable. This post argues for treating it as infrastructure instead: persistent files, separation of concerns, two-tier storage, progressive disclosure, and the filesystem as the most mature interface available.

            Topics: context engineering, infrastructure, progressive disclosure, persistence, design philosophy

            ","path":["Blog"],"tags":[]},{"location":"blog/#the-attention-budget-why-your-ai-forgets-what-you-just-told-it","level":3,"title":"The Attention Budget: Why Your AI Forgets What You Just Told It","text":"

            February 3, 2026: Every token you send to an AI consumes a finite resource: the attention budget. Understanding this constraint shaped every design decision in ctx: hierarchical file structure, explicit budgets, progressive disclosure, and filesystem-as-index.

            Topics: attention mechanics, context engineering, progressive disclosure, ctx primitives, token budgets

            ","path":["Blog"],"tags":[]},{"location":"blog/#before-context-windows-we-had-bouncers","level":3,"title":"Before Context Windows, We Had Bouncers","text":"

            February 14, 2026: IRC is stateless. You disconnect, you vanish. Modern systems are not much different. This post traces the line from IRC bouncers to context engineering: stateless protocols require stateful wrappers, volatile interfaces require durable memory.

            Topics: context engineering, infrastructure, IRC, persistence, state continuity

            ","path":["Blog"],"tags":[]},{"location":"blog/#the-last-question","level":3,"title":"The Last Question","text":"

            February 28, 2026: In 1956, Asimov wrote a story about a question that spans the entire future of the universe. A reading of \"The Last Question\" through the lens of persistence, substrate migration, and what it means to build systems where sessions don't reset.

            Topics: context continuity, long-lived systems, persistence, intelligence over time, field notes

            ","path":["Blog"],"tags":[]},{"location":"blog/#agent-behavior-and-design","level":2,"title":"Agent Behavior and Design","text":"","path":["Blog"],"tags":[]},{"location":"blog/#the-dog-ate-my-homework-teaching-ai-agents-to-read-before-they-write","level":3,"title":"The Dog Ate My Homework: Teaching AI Agents to Read Before They Write","text":"

            February 25, 2026: You wrote the playbook. The agent skipped all of it. Five sessions, five failure modes, and the discovery that observable compliance beats perfect compliance.

            Topics: hooks, agent behavior, context engineering, behavioral design, testing methodology, compliance monitoring

            ","path":["Blog"],"tags":[]},{"location":"blog/#skills-that-fight-the-platform","level":3,"title":"Skills That Fight the Platform","text":"

            February 4, 2026: When custom skills conflict with system prompt defaults, the AI has to reconcile contradictory instructions. Five conflict patterns discovered while building ctx.

            Topics: context engineering, skill design, system prompts, antipatterns, AI safety primitives

            ","path":["Blog"],"tags":[]},{"location":"blog/#the-anatomy-of-a-skill-that-works","level":3,"title":"The Anatomy of a Skill That Works","text":"

            February 7, 2026: I had 20 skills. Most were well-intentioned stubs. Then I rewrote all of them. Seven lessons emerged: quality gates prevent premature execution, negative triggers are load-bearing, examples set boundaries better than rules.

            Topics: skill design, context engineering, quality gates, E/A/R framework, practical patterns

            ","path":["Blog"],"tags":[]},{"location":"blog/#you-cant-import-expertise","level":3,"title":"You Can't Import Expertise","text":"

            February 5, 2026: I found a well-crafted consolidation skill. Applied my own E/A/R framework: 70% was noise. This post is about why good skills can't be copy-pasted, and how to grow them from your project's own drift history.

            Topics: skill adaptation, E/A/R framework, convention drift, consolidation, project-specific expertise

            ","path":["Blog"],"tags":[]},{"location":"blog/#not-everything-is-a-skill","level":3,"title":"Not Everything Is a Skill","text":"

            February 8, 2026: I ran an 8-agent codebase audit and got actionable results. The natural instinct was to wrap the prompt as a skill. Then I applied my own criteria: it failed all three tests.

            Topics: skill design, context engineering, automation discipline, recipes, agent teams

            ","path":["Blog"],"tags":[]},{"location":"blog/#defense-in-depth-securing-ai-agents","level":3,"title":"Defense in Depth: Securing AI Agents","text":"

            February 9, 2026: The security advice was \"use CONSTITUTION.md for guardrails.\" That is wishful thinking. Five defense layers for unattended AI agents, each with a bypass, and why the strength is in the combination.

            Topics: agent security, defense in depth, prompt injection, autonomous loops, container isolation

            ","path":["Blog"],"tags":[]},{"location":"blog/#development-practice","level":2,"title":"Development Practice","text":"","path":["Blog"],"tags":[]},{"location":"blog/#code-is-cheap-judgment-is-not","level":3,"title":"Code Is Cheap. Judgment Is Not.","text":"

            February 17, 2026: AI does not replace workers. It replaces unstructured effort. Three weeks of building ctx with an AI agent proved it: YOLO mode showed production is cheap, the 3:1 ratio showed judgment has a cadence.

            Topics: AI and expertise, context engineering, judgment vs production, human-AI collaboration, automation discipline

            ","path":["Blog"],"tags":[]},{"location":"blog/#the-31-ratio","level":3,"title":"The 3:1 Ratio","text":"

            February 17, 2026: AI makes technical debt worse: not because it writes bad code, but because it writes code so fast that drift accumulates before you notice. Three feature sessions, one consolidation session.

            Topics: consolidation, technical debt, development workflow, convention drift, code quality

            ","path":["Blog"],"tags":[]},{"location":"blog/#refactoring-with-intent-human-guided-sessions-in-ai-development","level":3,"title":"Refactoring with Intent: Human-Guided Sessions in AI Development","text":"

            February 1, 2026: The YOLO mode shipped 14 commands in a week. But technical debt doesn't send invoices. This is the story of what happened when we started guiding the AI with intent.

            Topics: refactoring, code quality, documentation standards, module decomposition, YOLO versus intentional development

            ","path":["Blog"],"tags":[]},{"location":"blog/#how-deep-is-too-deep","level":3,"title":"How Deep Is Too Deep?","text":"

            February 12, 2026: I kept feeling like I should go deeper into ML theory. Then I spent a week debugging an agent failure that had nothing to do with model architecture. When depth compounds and when it doesn't.

            Topics: AI foundations, abstraction boundaries, agentic systems, context engineering, failure modes

            ","path":["Blog"],"tags":[]},{"location":"blog/#agent-workflows","level":2,"title":"Agent Workflows","text":"","path":["Blog"],"tags":[]},{"location":"blog/#parallel-agents-merge-debt-and-the-myth-of-overnight-progress","level":3,"title":"Parallel Agents, Merge Debt, and the Myth of Overnight Progress","text":"

            February 17, 2026: You discover agents can run in parallel. So you open ten terminals. It is not progress: it is merge debt being manufactured in real time. The five-agent ceiling and why role separation beats file locking.

            Topics: agent workflows, parallelism, verification, context engineering, engineering practice

            ","path":["Blog"],"tags":[]},{"location":"blog/#parallel-agents-with-git-worktrees","level":3,"title":"Parallel Agents with Git Worktrees","text":"

            February 14, 2026: I had 30 open tasks that didn't touch the same files. Using git worktrees to partition a backlog by file overlap, run 3-4 agents simultaneously, and merge the results.

            Topics: agent teams, parallelism, git worktrees, context engineering, task management

            ","path":["Blog"],"tags":[]},{"location":"blog/#field-notes-and-signals","level":2,"title":"Field Notes and Signals","text":"","path":["Blog"],"tags":[]},{"location":"blog/#when-a-system-starts-explaining-itself","level":3,"title":"When a System Starts Explaining Itself","text":"

            February 17, 2026: Every new substrate begins as a private advantage. Reality begins when other people start describing it in their own language. \"Better than Adderall\" is not praise; it is a diagnostic.

            Topics: field notes, adoption signals, infrastructure vs tools, context engineering, substrates

            ","path":["Blog"],"tags":[]},{"location":"blog/#why-zensical","level":3,"title":"Why Zensical","text":"

            February 15, 2026: I needed a static site generator for the journal system. The instinct was Hugo. But instinct is not analysis. Why zensical was the right choice: thin dependencies, MkDocs-compatible config, and zero lock-in.

            Topics: tooling, static site generators, journal system, infrastructure decisions, context engineering

            ","path":["Blog"],"tags":[]},{"location":"blog/#releases_1","level":2,"title":"Releases","text":"","path":["Blog"],"tags":[]},{"location":"blog/#ctx-v060-the-integration-release","level":3,"title":"ctx v0.6.0: The Integration Release","text":"

            February 16, 2026: ctx is now a Claude Marketplace plugin. Two commands, no build step, no shell scripts. v0.6.0 replaces six Bash hook scripts with compiled Go subcommands and ships 25+ Skills as a plugin.

            Topics: release, plugin system, Claude Marketplace, distribution, security hardening

            ","path":["Blog"],"tags":[]},{"location":"blog/#ctx-v030-the-discipline-release","level":3,"title":"ctx v0.3.0: The Discipline Release","text":"

            February 15, 2026: No new headline feature. Just 35+ documentation and quality commits against ~15 feature commits. What a release looks like when the ratio of polish to features is 3:1.

            Topics: release, skills migration, consolidation, code quality, E/A/R framework

            ","path":["Blog"],"tags":[]},{"location":"blog/#ctx-v020-the-archaeology-release","level":3,"title":"ctx v0.2.0: The Archaeology Release","text":"

            February 1, 2026: What if your AI could remember everything? Not just the current session, but every session. ctx v0.2.0 introduces the recall and journal systems.

            Topics: session recall, journal system, structured entries, token budgets, meta-tools

            ","path":["Blog"],"tags":[]},{"location":"blog/#building-ctx-using-ctx-a-meta-experiment-in-ai-assisted-development","level":3,"title":"Building ctx Using ctx: A Meta-Experiment in AI-Assisted Development","text":"

            January 27, 2026: What happens when you build a tool designed to give AI memory, using that very same tool to remember what you're building? This is the story of ctx.

            Topics: dogfooding, AI-assisted development, Ralph Loop, session persistence, architectural decisions

            ","path":["Blog"],"tags":[]},{"location":"blog/2026-01-27-building-ctx-using-ctx/","level":1,"title":"Building ctx Using ctx","text":"

            Update (2026-02-11)

            As of v0.4.0, ctx consolidated sessions into the journal mechanism.

            References to .context/sessions/, auto-save hooks, and SessionEnd auto-save in this post reflect the architecture at the time of writing.

            ","path":["Building ctx Using ctx: A Meta-Experiment in AI-Assisted Development"],"tags":[]},{"location":"blog/2026-01-27-building-ctx-using-ctx/#a-meta-experiment-in-ai-assisted-development","level":2,"title":"A Meta-Experiment in AI-Assisted Development","text":"

            Jose Alekhinne / 2026-01-27

            Can a Tool Design Itself?

            What happens when you build a tool designed to give AI memory, using that very same tool to remember what you are building?

            This is the story of ctx, how it evolved from a hasty \"YOLO mode\" experiment to a disciplined system for persistent AI context, and what I have learned along the way.

            Context Is a Record

            Context is a persistent record.

            By \"context\", I don't mean model memory or stored thoughts:

            I mean the durable record of decisions, learnings, and intent that normally evaporates between sessions.

            ","path":["Building ctx Using ctx: A Meta-Experiment in AI-Assisted Development"],"tags":[]},{"location":"blog/2026-01-27-building-ctx-using-ctx/#ai-amnesia","level":2,"title":"AI Amnesia","text":"

            Every developer who works with AI code generators knows the frustration:

            You have a deep, productive session where the AI understands your codebase, your conventions, your decisions. And then you close the terminal.

            Tomorrow, it's a blank slate. The AI has forgotten everything.

            That is \"reset amnesia\", and it's not just annoying: it's expensive.

            Every session starts with:

            • Re-explaining context;
            • Re-reading files;
            • Re-discovering decisions that were already made.

            I Needed Context

            \"I don't want to lose this discussion...

            ...I am a brain-dead developer YOLO'ing my way out.\"

            ☝️ that's exactly what I said to Claude when I first started working on ctx.

            ","path":["Building ctx Using ctx: A Meta-Experiment in AI-Assisted Development"],"tags":[]},{"location":"blog/2026-01-27-building-ctx-using-ctx/#the-genesis","level":2,"title":"The Genesis","text":"

            The project started as \"Active Memory\" (amem): a CLI tool to persist AI context across sessions.

            The core idea was simple:

            1. Create a .context/ directory with structured Markdown files for decisions, learnings, tasks, and conventions.
            2. The AI reads these at session start and writes to them before the session ends.
            3. There is no step 3.

            The first commit was just scaffolding. But within hours, the Ralph Loop (An iterative AI development workflow) had produced a working CLI:

            feat(cli): implement amem init command\nfeat(cli): implement amem status command\nfeat(cli): implement amem add command\nfeat(cli): implement amem agent command\n...\n

            Not one, not two, but a whopping fourteen core commands shipped in rapid succession!

            I was YOLO'ing like there was no tomorrow:

            • Auto-accept every change;
            • Let the AI run free;
            • Ship features fast.
            ","path":["Building ctx Using ctx: A Meta-Experiment in AI-Assisted Development"],"tags":[]},{"location":"blog/2026-01-27-building-ctx-using-ctx/#the-meta-experiment-using-amem-to-build-amem","level":2,"title":"The Meta-Experiment: Using amem to Build amem","text":"

            Here's where it gets interesting: On January 20th, I asked:

            \"Can I use amem to help you remember this context when I restart?\"

            The answer was yes, but with a gap:

            Autoload worked (via Claude Code's PreToolUse hook), but auto-save was missing: If the user quit with Ctrl+C, everything since the last manual save was lost.

            That session became the first real test of the system.

            Here is the first session file we recorded:

            ## Key Discussion Points\n\n### 1. amem vs Ralph Loop - They're Separate Systems\n\n**User's question**: \"How do I use the binary to recreate this project?\"\n\n**Answer discovered**: `amem` is for context management, Ralph Loop is for \ndevelopment workflow. They are complementary but separate.\n\n### 2. Two Tiers of Context Persistence\n\n| Tier      | What                        | Why                           |\n|-----------|-----------------------------|-------------------------------|\n| Curated   | Learnings, decisions, tasks | Quick reload, token-efficient |\n| Full dump | Entire conversation         | Safety net, nothing lost      |\n\n| Where                  |\n|------------------------|\n| .context/*.md          |\n| .context/sessions/*.md |\n

            This session file (written by the AI to preserve its own context) became the template for how ctx handles session persistence.

            ","path":["Building ctx Using ctx: A Meta-Experiment in AI-Assisted Development"],"tags":[]},{"location":"blog/2026-01-27-building-ctx-using-ctx/#the-rename","level":2,"title":"The Rename","text":"

            By January 21st, I realized \"Active Memory\" was too generic, and (arguably) too marketing-smelly.

            Besides, the binary was already called ctx (short for Context), the directory was .context/, and the slash commands would be /ctx-*.

            So it followed that the project should be renamed to ctx to make things make sense.

            The rename touched 100+ files but was clean: a find-and-replace with Go's type system catching any misses.

            The git history tells the story:

            0e8f6bb feat: rename amem to ctx and add Claude Code integration\n87dcfa1 README.\n4f0e195 feat: separate orchestrator directive from agent tasks\n
            ","path":["Building ctx Using ctx: A Meta-Experiment in AI-Assisted Development"],"tags":[]},{"location":"blog/2026-01-27-building-ctx-using-ctx/#yolo-mode-fast-but-dangerous","level":2,"title":"YOLO Mode: Fast, but Dangerous","text":"

            The Ralph Loop made feature development incredibly fast.

            But it created technical debt that I didn't notice until later.

            A comparison session on January 25th revealed the patterns:

            YOLO Pattern What We Found \"TASKS.md\" scattered in 10 files Same string literal everywhere, no constants dir + \"/\" + file Should be filepath.Join() Monolithic cli_test.go (1500+ lines) Tests disconnected from implementations package initcmd in init/ folder Go's \"init\" is reserved: subtle naming collision

            Here is another analysis made by ctx:

            ● Based on my analysis, here are the key differences:\n\nYOLO Mode (Pre-040ce99)\n- Feature-first: Added slash commands, tests, templates rapidly\n- Scattered magic strings: \"TASKS.md\", \"decision\", \".context\" spread across files\n- Quick file creation: New files without organizational patterns\n- Working but inconsistent: Code functioned but lacked systematic structure\n\nHuman-Guided Mode (Post-040ce99)\n- Consolidation focus: Centralized constants in config package\n- Consistent naming: Dir, File, Filename, UpdateType prefixes\n- Self-referential constants: FileType map uses constants as keys, not literals\n- Proper path construction: filepath.Join() instead of +\"/\"+\n- Colocated tests: Tests next to implementations\n- Canonical naming: Package name = folder name\n

            The fix required a human-guided refactoring session. I continued to do that before every major release, from that point on.

            We introduced internal/config/config.go with semantic prefixes:

            const (\n    DirContext     = \".context\"\n    DirArchive     = \"archive\"\n    DirSessions    = \"sessions\"\n    FilenameTask   = \"TASKS.md\"\n    UpdateTypeTask = \"task\"\n)\n

            What I begrudgingly learned was: YOLO mode is effective for velocity but accumulates debt.

            So I took a mental note to schedule periodic consolidation sessions.

            ","path":["Building ctx Using ctx: A Meta-Experiment in AI-Assisted Development"],"tags":[]},{"location":"blog/2026-01-27-building-ctx-using-ctx/#the-dogfooding-test-that-failed","level":2,"title":"The Dogfooding Test That Failed","text":"

            On January 21st, I ran an experiment: have another Claude instance rebuild ctx from scratch using only the specs and PROMPT.md.

            The Ralph Loop ran, all tasks got checked off, the loop exited successfully.

            But the binary was broken!

            Commands just printed help text instead of executing.

            All tasks were marked \"complete\" but the implementation didn't work.

            Here's what ctx discovered:

            ## Key Findings\n\n### Dogfooding Binary Is Broken\n- Commands don't execute: they just print root help text\n- All tasks were marked complete but binary doesn't work\n- Lesson: \"tasks checked off\" ≠ \"implementation works\"\n

            This was humbling, to say the least.

            I realized I had the same blind spot in my own codebase: no integration tests that actually invoked the binary.

            So I added:

            • Integration tests for all commands;
            • Coverage targets (60-80% per package)
            • Smoke tests in CI
            • A constitution rule: \"All code must pass tests before commit\"
            ","path":["Building ctx Using ctx: A Meta-Experiment in AI-Assisted Development"],"tags":[]},{"location":"blog/2026-01-27-building-ctx-using-ctx/#the-constitution-versus-conventions","level":2,"title":"The Constitution versus Conventions","text":"

            As lessons accumulated, there was the temptation to add everything to CONSTITUTION.md as \"inviolable rules\".

            But I resisted.

            The constitution should contain only truly inviolable invariants:

            • Security (no secrets, no customer data)
            • Quality (tests must pass)
            • Process (decisions need records)
            • ctx invocation (always use PATH, never fallback)

            Everything else (coding style, file organization, naming conventions...) should go into CONVENTIONS.md.

            Here's how ctx explained why the distinction was important:

            Decision Record, 2026-01-25

            Overly strict constitution creates friction and gets ignored.

            Conventions can be bent; constitution cannot.

            ","path":["Building ctx Using ctx: A Meta-Experiment in AI-Assisted Development"],"tags":[]},{"location":"blog/2026-01-27-building-ctx-using-ctx/#hooks-harder-than-they-look","level":2,"title":"Hooks: Harder than They Look","text":"

            Claude Code hooks seemed simple: Run a script before/after certain events.

            But I hit multiple gotchas:

            1. Key names matter

            // WRONG - \"Invalid key in record\" error\n\"PreToolUseHooks\": [...]\n\n// RIGHT\n\"PreToolUse\": [...]\n

            2. Blocking requires specific output

            # WRONG - just exits, doesn't block\nexit 1\n\n# RIGHT - JSON output + exit 0\necho '{\"decision\": \"block\", \"reason\": \"Use ctx from PATH\"}'\nexit 0\n

            3. Go's JSON escaping

            json.Marshal escapes >, <, & as unicode (\\u003e) by default.

            When generating shell commands in JSON:

            encoder := json.NewEncoder(file)\nencoder.SetEscapeHTML(false) // Prevent 2>/dev/null → 2\\u003e/dev/null\n

            4. Regex overfitting

            My hook to block non-PATH ctx invocations initially matched too broadly:

            # WRONG - matches /home/user/ctx/internal/file.go (ctx as directory)\n(/home/|/tmp/|/var/)[^ ]*ctx[^ ]*\n\n# RIGHT - matches ctx as binary only\n(/home/|/tmp/|/var/)[^ ]*/ctx( |$)\n
            ","path":["Building ctx Using ctx: A Meta-Experiment in AI-Assisted Development"],"tags":[]},{"location":"blog/2026-01-27-building-ctx-using-ctx/#the-session-files","level":2,"title":"The Session Files","text":"

            At the time of this writing, this project's ctx sessions directory (.context/sessions/) contains 40+ files from the project's development.

            They are not part of the source code due to security, privacy, and size concerns.

            Middle Ground: The Scratchpad

            For sensitive notes that do need to travel with the project, ctx pad stores encrypted one-liners in git, and ctx pad add \"label\" --file PATH can ingest small files.

            See Scratchpad for details.

            However, they are invaluable for the project's progress.

            Each session file is a timestamped Markdown with:

            • Summary of what has been accomplished;
            • Key decisions made;
            • Learnings discovered;
            • Tasks for the next session;
            • Technical context (platform, versions).

            These files are not autoloaded (that would bust the token budget).

            They are what I see as the \"archaeological record\" of ctx:

            When the AI needs deeper information about why something was done, it digs into the sessions.

            Auto-generated session files used a naming convention:

            2026-01-23-115432-session-prompt_input_exit-summary.md\n2026-01-25-220244-manual-save.md\n2026-01-27-052107-session-other-summary.md\n

            Update

            The session feature described here is historical.

            In current releases, ctx uses a journal instead: the enrichment process generates meaningful slugs from context automatically, so there is no need to manually save sessions.

            The SessionEnd hook captured transcripts automatically. Even Ctrl+C was caught.

            ","path":["Building ctx Using ctx: A Meta-Experiment in AI-Assisted Development"],"tags":[]},{"location":"blog/2026-01-27-building-ctx-using-ctx/#the-decision-log-18-architectural-decisions","level":2,"title":"The Decision Log: 18 Architectural Decisions","text":"

            ctx helps record every significant architectural choice in .context/DECISIONS.md.

            Here are some highlights:

            Reverse-chronological order (2026-01-27)

            **Context**: With chronological order, oldest items consume tokens first, and\nnewest (most relevant) items risk being truncated.\n\n**Decision**: Use reverse-chronological order (newest first) for DECISIONS.md\nand LEARNINGS.md.\n

            PATH over hardcoded paths (2026-01-21)

            **Context**: Original implementation hardcoded absolute paths in hooks.\nThis breaks when sharing configs with other developers.\n\n**Decision**: Hooks use `ctx` from PATH. `ctx init` checks PATH before \nproceeding.\n

            Generic core with Claude enhancements (2026-01-20)

            **Context**: ctx should work with any AI tool, but Claude Code users could\nbenefit from deeper integration.\n\n**Decision**: Keep ctx generic as the core tool, but provide optional\nClaude Code-specific enhancements.\n
            ","path":["Building ctx Using ctx: A Meta-Experiment in AI-Assisted Development"],"tags":[]},{"location":"blog/2026-01-27-building-ctx-using-ctx/#the-learning-log-24-gotchas-and-insights","level":2,"title":"The Learning Log: 24 Gotchas and Insights","text":"

            The .context/LEARNINGS.md file captures gotchas that would otherwise be forgotten. Each has Context, Lesson, and Application sections:

            CGO on ARM64

            **Context**: `go test` failed with \n`gcc: error: unrecognized command-line option '-m64'`\n\n**Lesson**: On ARM64 Linux, CGO causes cross-compilation issues. \nAlways use `CGO_ENABLED=0`.\n

            Claude Code skills format

            **Lesson**: Claude Code skills are Markdown files in .claude/commands/ with `YAML`\nfrontmatter (*description, argument-hint, allowed-tools*). Body is the prompt.\n

            \"Do you remember?\" handling

            **Lesson**: In a `ctx`-enabled project, \"*do you remember?*\" \nhas an obvious meaning:\ncheck the `.context/` files. Don't ask for clarification. Just do it.\n
            ","path":["Building ctx Using ctx: A Meta-Experiment in AI-Assisted Development"],"tags":[]},{"location":"blog/2026-01-27-building-ctx-using-ctx/#task-archives-the-completed-work","level":2,"title":"Task Archives: The Completed Work","text":"

            Completed tasks are archived to .context/archive/ with timestamps.

            The archive from January 23rd shows 13 phases of work:

            • Phase 1: Project Scaffolding (Go module, Cobra CLI)
            • Phase 2-4: Core Commands (init, status, agent, add, complete, drift, sync, compact, watch, hook)
            • Phase 5: Session Management (save, list, load, parse, --extract)
            • Phase 6: Claude Code Integration (hooks, settings, CLAUDE.md handling)
            • Phase 7: Testing & Verification
            • Phase 8: Task Archival
            • Phase 9: Slash Commands
            • Phase 9b: Ralph Loop Integration
            • Phase 10: Project Rename
            • Phase 11: Documentation
            • Phase 12: Timestamp Correlation
            • Phase 13: Rich Context Entries

            That's an impressive **173 commits** across 8 days of development.

            ","path":["Building ctx Using ctx: A Meta-Experiment in AI-Assisted Development"],"tags":[]},{"location":"blog/2026-01-27-building-ctx-using-ctx/#what-i-learned-about-ai-assisted-development","level":2,"title":"What I Learned about AI-Assisted Development","text":"

            1. Memory changes everything

            When the AI remembers decisions, it doesn't repeat mistakes.

            When the AI knows your conventions, it follows them.

            ctx makes the AI a better collaborator because it's not starting from zero.

            2. Two-tier persistence works

            Curated context (DECISIONS.md, LEARNINGS.md, TASKS.md) is for quick reload.

            Full session dumps are for archaeology.

            It's a futile effort to try to fit everything in the token budget.

            Persist more, load less.

            3. YOLO mode has its place

            For rapid prototyping, letting the AI run free is effective.

            But I had to schedule consolidation sessions.

            Technical debt accumulates silently.

            4. The constitution should be small

            Only truly inviolable rules go in CONSTITUTION.md. Everything else is a convention.

            If you put too much in the constitution, it will get ignored.

            5. Verification is non-negotiable

            \"All tasks complete\" means nothing if you haven't run the tests.

            Integration tests that invoke the actual binary caught bugs that the unit tests missed.

            6. Session files are underrated

            The ability to grep through 40 session files and find exactly when and why a decision was made helped me a lot.

            It's not about loading them into context: It is about having them when you need them.

            ","path":["Building ctx Using ctx: A Meta-Experiment in AI-Assisted Development"],"tags":[]},{"location":"blog/2026-01-27-building-ctx-using-ctx/#the-future-recall-system","level":2,"title":"The Future: Recall System","text":"

            The next phase of ctx is the Recall System:

            • Parser: Parse session capture markdowns, enrich with JSONL data
            • Renderer: Goldmark + Chroma for syntax highlighting, dark mode UI
            • Server: Local HTTP server for browsing sessions
            • Search: Inverted index for searching across sessions
            • CLI: ctx recall serve <path> to start the server

            The goal is to make the archaeological record browsable, not just grep-able.

            Because not everyone always lives in the terminal (me included).

            ","path":["Building ctx Using ctx: A Meta-Experiment in AI-Assisted Development"],"tags":[]},{"location":"blog/2026-01-27-building-ctx-using-ctx/#conclusion","level":2,"title":"Conclusion","text":"

            Building ctx using ctx was a meta-experiment in AI-assisted development.

            I learned that memory isn't just convenient: It's transformative:

            • An AI that remembers your decisions doesn't repeat mistakes.
            • An AI that knows your conventions doesn't need them re-explained.

            If you are reading this, chances are that you already have heard about ctx.

            • ctx is open source at github.com/ActiveMemory/ctx,
            • and the documentation lives at ctx.ist.

            Session Records Are a Gold Mine

            By the time of this writing, I have more than 70 megabytes of text-only session capture, spread across >100 Markdown and JSONL files.

            I am analyzing, synthesizing, enriching them with AI, running RAG (Retrieval-Augmented Generation) models on them, and the outcome surprises me every day.

            If you are a mere mortal tired of reset amnesia, give ctx a try.

            And when you do, check .context/sessions/ sometime.

            The archaeological record might surprise you.

            This blog post was written with the help of ctx with full access to the ctx session files, decision log, learning log, task archives, and git history of ctx: The meta continues.

            ","path":["Building ctx Using ctx: A Meta-Experiment in AI-Assisted Development"],"tags":[]},{"location":"blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/","level":1,"title":"ctx v0.2.0: The Archaeology Release","text":"

            Update (2026-02-11)

            As of v0.4.0, ctx consolidated sessions into the journal mechanism.

            The .context/sessions/ directory referenced in this post has been eliminated. Session history is now accessed via ctx recall and enriched journals live in .context/journal/.

            ","path":["ctx v0.2.0: The Archaeology Release"],"tags":[]},{"location":"blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/#digging-through-the-past-to-build-the-future","level":2,"title":"Digging through the Past to Build the Future","text":"

            Jose Alekhinne / 2026-02-01

            What If Your AI Could Remember Everything?

            Not just the current session, but every session:

            • Every decision made,
            • every mistake avoided,
            • every path not taken.

            That's what v0.2.0 delivers.

            Between v0.1.2 and v0.2.0, 86 commits landed across 5 days.

            The release notes list features and fixes.

            This post tells the story of why those features exist, and what building them taught me.

            This isn't a changelog: It is an explanation of intent.

            ","path":["ctx v0.2.0: The Archaeology Release"],"tags":[]},{"location":"blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/#the-problem-amnesia-isnt-just-session-level","level":2,"title":"The Problem: Amnesia Isn't Just Session-Level","text":"

            v0.1.0 solved reset amnesia:

            The AI now remembers decisions, learnings, and tasks across sessions.

            But a new problem emerged, which I can sum up as:

            \"I (the human) am not AI.\"

            Frankly, I couldn't remember what the AI remembered.

            I can't even remember what I ate for breakfast!

            In the course of days, I realized session transcripts had piled up in .context/sessions/; I was grepping JSONL files with thousands of lines... Raw tool calls, assistant responses, user messages...

            ...all interleaved.

            Valuable context was effectively buried in machine-readable noise.

            I found myself grepping through files to answer questions like:

            • \"When did we decide to use constants instead of literals?\"
            • \"What was the session where we fixed the hook regex?\"
            • \"How did the embed.go split actually happen?\"

            Fate Is Whimsical

            The irony was painful:

            I built a tool to prevent AI amnesia, but I was suffering from human amnesia about what happened in AI sessions.

            This was the moment ctx stopped being just an AI tool and started needing to support the human on the other side of the loop.

            ","path":["ctx v0.2.0: The Archaeology Release"],"tags":[]},{"location":"blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/#the-solution-recall-and-journal","level":2,"title":"The Solution: Recall and Journal","text":"

            v0.2.0 introduces two interconnected systems.

            They solve different problems and only work well together.

            ","path":["ctx v0.2.0: The Archaeology Release"],"tags":[]},{"location":"blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/#ctx-recall-browse-your-past","level":3,"title":"ctx recall: Browse Your Past","text":"
            # List all sessions for this project\nctx recall list\n\n# Show a specific session\nctx recall show gleaming-wobbling-sutherland\n\n# See the full transcript\nctx recall show gleaming-wobbling-sutherland --full\n

            The recall system parses Claude Code's JSONL transcripts and presents them in a human-readable format:

            Session Date Turns Duration tender-painting-sundae 2026-01-29 3 <1m crystalline-gliding-willow 2026-01-29 3 <1m declarative-hugging-snowglobe 2026-01-31 2 <1m

            Slugs are auto-generated from session IDs (memorable names instead of UUIDs). The goal (as the name implies) is recall, not archival accuracy.

            2,121 Lines of New Code

            The ctx recall feature was the largest single addition:

            parser library, CLI commands, test suite, and slash command.

            ","path":["ctx v0.2.0: The Archaeology Release"],"tags":[]},{"location":"blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/#ctx-journal-from-raw-to-rich","level":3,"title":"ctx journal: From Raw to Rich","text":"

            Listing sessions isn't enough. The transcripts are still unwieldy.

            • Recall answers what happened.
            • Journal answers what mattered.
            # Import sessions to editable Markdown\nctx recall import --all\n\n# Generate a static site from journal entries\nctx journal site\n\n# Serve it locally\nctx serve\n

            The exported files land in .context/journal/:

            .context/journal/\n├── 2026-01-28-proud-sleeping-cook-6e535360.md\n├── 2026-01-29-tender-painting-sundae-b14ddaaa.md\n├── 2026-01-29-crystalline-gliding-willow-ff7fd67d.md\n└── 2026-01-31-declarative-hugging-snowglobe-4549026d.md\n

            Each file is a structured Markdown document ready for enrichment.

            They are meant to be read, edited, and reasoned about; not just stored.

            ","path":["ctx v0.2.0: The Archaeology Release"],"tags":[]},{"location":"blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/#the-meta-slash-commands-for-self-analysis","level":2,"title":"The Meta: Slash Commands for Self-Analysis","text":"

            The journal system includes four slash commands that use Claude to analyze and synthesize session history:

            Command Purpose /ctx-journal-enrich Add frontmatter, topics, tags /ctx-blog Generate blog post from activity /ctx-blog-changelog Generate changelog from commits

            This very post was drafted using /ctx-blog. The previous post about refactoring was drafted the same way.

            So, yes: The meta continues: ctx now helps write posts about ctx.

            With the current release, ctx is no longer just recording history:

            It is participating in its interpretation.

            ","path":["ctx v0.2.0: The Archaeology Release"],"tags":[]},{"location":"blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/#the-structure-decisions-as-first-class-citizens","level":2,"title":"The Structure: Decisions as First-Class Citizens","text":"

            v0.1.0 let you add decisions with a simple command:

            ctx add decision \"Use PostgreSQL\"\n

            But sessions showed a pattern: decisions added this way were incomplete:

            • Context was missing;
            • Rationale was vague;
            • Consequences were never stated.

            Once recall and journaling existed, this weakness became impossible to ignore:

            Structure stopped being optional.

            v0.2.0 enforces structure:

            ctx add decision \"Use PostgreSQL\" \\\n  --context \"Need a reliable database for user data\" \\\n  --rationale \"ACID compliance, team familiarity, strong ecosystem\" \\\n  --consequence \"Need to set up connection pooling, team training\"\n

            All three flags are required. No more placeholder text.

            Every decision is now a proper Architecture Decision Record (ADR), not a note.

            The same enforcement applies to learnings too:

            ctx add learning \"CGO breaks ARM64 builds\" \\\n  --context \"go test failed with gcc errors on ARM64\" \\\n  --lesson \"Always use CGO_ENABLED=0 for cross-platform builds\" \\\n  --application \"Added to Makefile and CI config\"\n

            Structured Entries Are Prompts to the AI

            When the AI reads a decision with full context, rationale, and consequences, it understands the why, not just the what.

            One-liners teach nothing.

            ","path":["ctx v0.2.0: The Archaeology Release"],"tags":[]},{"location":"blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/#the-order-newest-first","level":2,"title":"The Order: Newest First","text":"

            A subtle but important change: DECISIONS.md and LEARNINGS.md now use reverse-chronological order.

            One reason is token budgets, obviously; another reason is to help your fellow human (i.e., the Author):

            Recent decisions are more likely to be relevant, and they are more likely to have a stronger influence on the project. So it follows that they should be read first.

            But back to AI:

            When the AI reads a file, it reads from the top (and seldom from the bottom).

            If the token budget is tight, old content gets truncated. As in any good engineering practice, it's always about the tradeoffs.

            Reverse order ensures the most recent (and most relevant) context is always loaded first.

            ","path":["ctx v0.2.0: The Archaeology Release"],"tags":[]},{"location":"blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/#the-index-quick-reference-tables","level":2,"title":"The Index: Quick Reference Tables","text":"

            DECISIONS.md and LEARNINGS.md now include auto-generated indexes.

            • For AI agents, the index allows scanning without reading full entries.
            • For humans, it's a table of contents.

            The same structure serves two very different readers.

            Reindex After Manual Edits

            If you edit entries by hand, rebuild the index with:

            ctx decisions reindex\nctx learnings reindex\n

            See the Knowledge Capture recipe for details.

            ","path":["ctx v0.2.0: The Archaeology Release"],"tags":[]},{"location":"blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/#the-configuration-contextrc","level":2,"title":"The Configuration: .contextrc","text":"

            Projects can now customize ctx behavior via .contextrc.

            This makes ctx usable in real teams, not just personal projects.

            Priority order: CLI flags > environment variables > .contextrc > sensible defaults

            ","path":["ctx v0.2.0: The Archaeology Release"],"tags":[]},{"location":"blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/#the-flags-global-cli-options","level":2,"title":"The Flags: Global CLI Options","text":"

            Three new global flags work with any command.

            These enable automation:

            CI pipelines, scripts, and long-running tools can now integrate ctx without hacks or workarounds.

            ","path":["ctx v0.2.0: The Archaeology Release"],"tags":[]},{"location":"blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/#the-refactoring-under-the-hood","level":2,"title":"The Refactoring: Under the Hood","text":"

            These aren't user-visible changes.

            They are the kind of work you only appreciate later, when everything else becomes easier to build.

            ","path":["ctx v0.2.0: The Archaeology Release"],"tags":[]},{"location":"blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/#what-we-learned-building-v020","level":2,"title":"What We Learned Building v0.2.0","text":"","path":["ctx v0.2.0: The Archaeology Release"],"tags":[]},{"location":"blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/#1-raw-data-isnt-knowledge","level":3,"title":"1. Raw Data Isn't Knowledge","text":"

            JSONL transcripts contain everything, and I mean \"everything\":

            They even contain hidden system messages that Anthropic injects into the LLM's conversation to treat humans better: It's immense.

            But \"everything\" isn't useful until it is transformed into something a human can reason about.

            ","path":["ctx v0.2.0: The Archaeology Release"],"tags":[]},{"location":"blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/#2-enforcement-documentation","level":3,"title":"2. Enforcement > Documentation","text":"

            The Prompt Is a Guideline

            The code is more what you'd call 'guidelines' than actual rules.

            -Hector Barbossa

            Rules written in Markdown are suggestions.

            Rules enforced by the CLI shape behavior; both for humans and AI.

            ","path":["ctx v0.2.0: The Archaeology Release"],"tags":[]},{"location":"blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/#3-token-budget-is-ux","level":3,"title":"3. Token Budget Is UX","text":"

            File order decides what the AI sees.

            That makes it a user experience concern, not an implementation detail.

            ","path":["ctx v0.2.0: The Archaeology Release"],"tags":[]},{"location":"blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/#4-meta-tools-compound","level":3,"title":"4. Meta-Tools Compound","text":"

            Tools that analyze their own development tend to generalize well.

            The journal system started as a way to understand ctx itself.

            It immediately became useful for everything else.

            ","path":["ctx v0.2.0: The Archaeology Release"],"tags":[]},{"location":"blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/#v020-in-the-numbers","level":2,"title":"v0.2.0 in the Numbers","text":"

            This was a heavy release. The numbers reflect that:

            Metric v0.1.2 v0.2.0 Commits since last - 86 New commands 15 21 Slash commands 7 11 Lines of Go ~6,500 ~9,200 Session files (this project) 40 54

            The binary grew. The capability grew more.

            ","path":["ctx v0.2.0: The Archaeology Release"],"tags":[]},{"location":"blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/#whats-next","level":2,"title":"What's Next","text":"

            But those are future posts.

            This one was about making the past usable.

            ","path":["ctx v0.2.0: The Archaeology Release"],"tags":[]},{"location":"blog/2026-02-01-ctx-v0.2.0-the-archaeology-release/#get-started","level":2,"title":"Get Started","text":"

            Update

            Since this post, ctx became a first-class Claude Code Marketplace plugin. Installation is now simpler.

            See the Getting Started guide for the current instructions.

            make build\nsudo make install\nctx init\n

            The Archaeological Record

            v0.2.0 is the archaeology release because it makes the past accessible.

            Session transcripts aren't just logs anymore: They are a searchable, exportable, analyzable record of how your project evolved.

            The AI remembers. Now you can too.

            This blog post was generated with the help of ctx using the /ctx-blog slash command, with full access to git history, session files, decision logs, and learning logs from the v0.2.0 development window.

            ","path":["ctx v0.2.0: The Archaeology Release"],"tags":[]},{"location":"blog/2026-02-01-refactoring-with-intent/","level":1,"title":"Refactoring with Intent","text":"","path":["Refactoring with Intent: Human-Guided Sessions in AI Development"],"tags":[]},{"location":"blog/2026-02-01-refactoring-with-intent/#human-guided-sessions-in-ai-development","level":2,"title":"Human-Guided Sessions in AI Development","text":"

            Jose Alekhinne / 2026-02-01

            What Happens When You Slow Down?

            YOLO mode shipped 14 commands in a week.

            But technical debt doesn't send invoices: It just waits.

            This is the story of what happened when I stopped auto-accepting everything and started guiding the AI with intent.

            The result: 27 commits across 4 days, a major version release, and lessons that apply far beyond ctx.

            The Refactoring Window

            January 28 - February 1, 2026

            From commit bb1cd20 to the v0.2.0 release merge. (This window matters more than the individual commits: it's where intent replaced velocity.)

            ","path":["Refactoring with Intent: Human-Guided Sessions in AI Development"],"tags":[]},{"location":"blog/2026-02-01-refactoring-with-intent/#the-velocity-trap","level":2,"title":"The Velocity Trap","text":"

            In the previous post, I documented the \"YOLO mode\" that birthed ctx: auto-accept everything, let the AI run free, ship features fast.

            It worked: until it didn't.

            The codebase had accumulated patterns I didn't notice during the sprint:

            YOLO Pattern Where Found Why It Hurts \"TASKS.md\" as literal 10+ files One typo = silent failure dir + \"/\" + file Path construction Breaks on Windows Monolithic embed.go 150+ lines, 5 concerns Untestable, hard to extend Inconsistent docstrings Everywhere AI can't learn project conventions

            I didn't see these during \"YOLO mode\" because, honestly, I wasn't looking.

            Auto-accept means auto-ignore.

            In YOLO mode, every file you open looks fine until you try to change it.

            In contrast, refactoring mode is when you start paying attention to that hidden friction.

            ","path":["Refactoring with Intent: Human-Guided Sessions in AI Development"],"tags":[]},{"location":"blog/2026-02-01-refactoring-with-intent/#the-shift-from-velocity-to-intent","level":2,"title":"The Shift: From Velocity to Intent","text":"

            On January 28th, I changed the workflow:

            1. Read every diff before accepting.
            2. Ask \"why this way?\" before committing.
            3. Document patterns, not just features.

            The first commit of this era was telling:

            feat: add structured attributes to context. update XML format\n

            Not a new feature: A refinement:

            The XML format for context updates needed type and timestamp attributes.

            YOLO mode would have shipped something that worked. Intentional mode asked:

            \"What does well-structured look like?\"

            ","path":["Refactoring with Intent: Human-Guided Sessions in AI Development"],"tags":[]},{"location":"blog/2026-02-01-refactoring-with-intent/#the-decomposition-embedgo","level":2,"title":"The Decomposition: embed.go","text":"

            The most satisfying refactor was splitting internal/claude/embed.go.

            Before: One 153-line file doing five things:

            • Command registration
            • Hook generation
            • Permission handling
            • Script templates
            • Type definitions

            ... your \"de facto\" God object.

            After: Five focused modules:

            File Lines Responsibility cmd.go 46 Command registration hook.go 64 Hook configuration perm.go 25 Permission handling script.go 47 Script templates types.go 7 Type definitions

            The refactor also renamed functions to follow Go conventions:

            // Before: unnecessary prefixes\nGetAutoSaveScript()\nGetBlockNonPathCtxScript()\nListCommands()\nCreateDefaultHooks()\n\n// After: idiomatic Go\nAutoSaveScript()\nBlockNonPathCtxScript()\nCommands()\nDefaultHooks()\n

            This wasn't about character count. It was about teaching the AI what good Go looks like in this project.

            Project Conventions

            What I wanted from AI was to understand and follow the project's conventions, and trust the author.

            The next time it generates code, it has better examples to learn from.

            ","path":["Refactoring with Intent: Human-Guided Sessions in AI Development"],"tags":[]},{"location":"blog/2026-02-01-refactoring-with-intent/#the-documentation-debt","level":2,"title":"The Documentation Debt","text":"

            YOLO mode created features. It didn't create documentation standards.

            The January 29th sessions focused on standardization.

            ","path":["Refactoring with Intent: Human-Guided Sessions in AI Development"],"tags":[]},{"location":"blog/2026-02-01-refactoring-with-intent/#terminology-fixes","level":3,"title":"Terminology Fixes","text":"
            • \"context-update\" → \"entry\" (what users actually call them)
            • Consistent naming across CLI, docs, and code comments
            ","path":["Refactoring with Intent: Human-Guided Sessions in AI Development"],"tags":[]},{"location":"blog/2026-02-01-refactoring-with-intent/#go-docstrings","level":3,"title":"Go Docstrings","text":"
            // Before: inconsistent or missing\nfunc Parse(s string) Entry { ... }\n\n// After: standardized sections\n\n// Parse extracts an entry from a markdown string.\n//\n// Parameters:\n//   - s: The markdown string to parse\n//\n// Returns:\n//   - Entry with populated fields, or zero value if parsing fails\nfunc Parse(s string) Entry { ... }\n

            This is intentionally more structured than typical GoDoc:

            It serves as documentation and doubles as training data for future AI-generated code.

            ","path":["Refactoring with Intent: Human-Guided Sessions in AI Development"],"tags":[]},{"location":"blog/2026-02-01-refactoring-with-intent/#cli-output-convention","level":3,"title":"CLI Output Convention","text":"
            All CLI output follows: [emoji] [Title]: [message]\n\nExamples:\n  ✓ Decision added: Use symbolic types for entry categories\n  ⚠ Warning: No tasks found\n  ✗ Error: File not found\n

            A consistent output shape makes both human scanning and AI reasoning more reliable.

            These aren't exciting commits. But they are force multipliers:

            Every future AI session now has better examples to follow.

            ","path":["Refactoring with Intent: Human-Guided Sessions in AI Development"],"tags":[]},{"location":"blog/2026-02-01-refactoring-with-intent/#the-journal-system","level":2,"title":"The Journal System","text":"

            If you only read one section, read this one:

            This is where v0.2.0 becomes more than a refactor.

            The biggest feature of this change window wasn't a refactor; it was the journal system.

            45 Files Changed, 1680 Insertions

            This commit added the infrastructure for synthesizing AI session history into human-readable content.

            The journal system includes:

            Component Purpose ctx recall import Import sessions to markdown in .context/journal/ ctx journal site Generate static site from journal entries ctx serve Convenience wrapper for the static site server /ctx-journal-enrich Slash command to add frontmatter and tags /ctx-blog Generate blog posts from recent activity /ctx-blog-changelog Generate changelog-style blog posts

            ...and the meta continues: this blog post was generated using /ctx-blog.

            The session history from January 28-31 was

            • exported,
            • enriched,
            • and synthesized.

            into the narrative you are reading.

            ","path":["Refactoring with Intent: Human-Guided Sessions in AI Development"],"tags":[]},{"location":"blog/2026-02-01-refactoring-with-intent/#the-constants-consolidation","level":2,"title":"The Constants Consolidation","text":"

            The final refactoring session addressed the remaining magic strings:

            const (\n    // Comment markers\n    CommentOpen  = \"<!--\"\n    CommentClose = \"-->\"\n\n    // Index markers\n    MarkerIndexStart = \"<!-- INDEX:START -->\"\n    MarkerIndexEnd   = \"<!-- INDEX:END -->\"\n\n    // Newlines\n    NewlineLF   = \"\\n\"\n    NewlineCRLF = \"\\r\\n\"\n)\n

            The work also introduced thread safety in the recall parser and centralized shared validation logic; removing duplication that had quietly spread during YOLO mode.

            ","path":["Refactoring with Intent: Human-Guided Sessions in AI Development"],"tags":[]},{"location":"blog/2026-02-01-refactoring-with-intent/#i-relearned-my-lessons","level":2,"title":"I (Re)Learned My Lessons","text":"

            Similar to what I've learned in the former human-assisted refactoring post, this journey also made me realize that \"AI-only code generation\" isn't sustainable in the long term.

            ","path":["Refactoring with Intent: Human-Guided Sessions in AI Development"],"tags":[]},{"location":"blog/2026-02-01-refactoring-with-intent/#1-velocity-and-quality-arent-opposites","level":3,"title":"1. Velocity and Quality Aren't Opposites","text":"

            YOLO mode has its place: for prototyping, exploration, and discovery.

            BUT (and it's a huge \"but\"), it needs to be followed by consolidation sessions.

            The ratio that worked for me: 3:1.

            • Three YOLO sessions create enough surface area to reveal patterns;
            • the fourth session turns those patterns into structure.
            ","path":["Refactoring with Intent: Human-Guided Sessions in AI Development"],"tags":[]},{"location":"blog/2026-02-01-refactoring-with-intent/#2-documentation-is-code","level":3,"title":"2. Documentation IS Code","text":"

            When I standardized docstrings, I wasn't just writing docs. I was training future AI sessions.

            Every example of good code becomes a template for generated code.

            ","path":["Refactoring with Intent: Human-Guided Sessions in AI Development"],"tags":[]},{"location":"blog/2026-02-01-refactoring-with-intent/#3-decomposition-deletion","level":3,"title":"3. Decomposition > Deletion","text":"

            When embed.go became unwieldy, the temptation was to remove functionality.

            The right answer was decomposition:

            • Same functionality;
            • Better organization;
            • Easier to test;
            • Easier to extend.

            The result: more lines overall, but dramatically better structure.

            The AI Benefit

            Smaller, focused files also help AI assistants.

            When a file fits comfortably in the context window, the AI can reason about it completely instead of working from truncated snippets, preserving token budget for the actual task.

            ","path":["Refactoring with Intent: Human-Guided Sessions in AI Development"],"tags":[]},{"location":"blog/2026-02-01-refactoring-with-intent/#4-meta-tools-pay-dividends","level":3,"title":"4. Meta-Tools Pay Dividends","text":"

            The journal system took almost a full day to implement.

            Yet it paid for itself immediately:

            • This blog post was generated from session history;
            • Future posts will be easier;
            • The archaeological record is now browsable, not just grep-able.
            ","path":["Refactoring with Intent: Human-Guided Sessions in AI Development"],"tags":[]},{"location":"blog/2026-02-01-refactoring-with-intent/#the-release-v020","level":2,"title":"The Release: v0.2.0","text":"

            The refactoring window culminated in the v0.2.0 release.

            What's in v0.2.0:

            Category Changes Features Journal system, quick reference indexes, global flags Refactors Module decomposition, constants consolidation, CRLF handling Docs Standardized terminology, Go docstrings, CLI conventions Quality Thread safety, shared validation, linter fixes

            The version bump was symbolic.

            The real change was how the codebase felt.

            Opening files no longer triggered the familiar \"ugh, I need to clean this up\" reaction.

            ","path":["Refactoring with Intent: Human-Guided Sessions in AI Development"],"tags":[]},{"location":"blog/2026-02-01-refactoring-with-intent/#the-meta-continues","level":2,"title":"The Meta Continues","text":"

            This post was written using the tools built during this refactoring window:

            1. Session history imported via ctx recall import;
            2. Journal entries enriched via /ctx-journal-enrich;
            3. Blog draft generated via /ctx-blog;
            4. Final editing done (by yours truly), with full project context loaded.

            The Context Is Massive

            The ctx session files now contain 50+ development snapshots: each one capturing decisions, learnings, and intent.

            The Moral of the Story

            • YOLO mode builds the prototype.
            • Intentional mode builds the product.

            Schedule both, or you'll only get one, if you're lucky.

            This blog post was generated with the help of ctx, using session history, decision logs, learning logs, and git history from the refactoring window. The meta continues.

            ","path":["Refactoring with Intent: Human-Guided Sessions in AI Development"],"tags":[]},{"location":"blog/2026-02-03-the-attention-budget/","level":1,"title":"The Attention Budget","text":"

            Update (2026-02-11)

            As of v0.4.0, ctx consolidated sessions into the journal mechanism.

            References to .context/sessions/ in this post reflect the architecture at the time of writing. Session history is now accessed via ctx recall and stored in .context/journal/.

            ","path":["The Attention Budget: Why Your AI Forgets What You Just Told It"],"tags":[]},{"location":"blog/2026-02-03-the-attention-budget/#why-your-ai-forgets-what-you-just-told-it","level":2,"title":"Why Your AI Forgets What You Just Told It","text":"

            Jose Alekhinne / 2026-02-03

            Ever Wondered Why AI Gets Worse the Longer You Talk?

            You paste a 2000-line file, explain the bug in detail, provide three examples...

            ...and the AI still suggests a fix that ignores half of what you said.

            This isn't a bug. It is physics.

            Understanding that single fact shaped every design decision behind ctx.

            ","path":["The Attention Budget: Why Your AI Forgets What You Just Told It"],"tags":[]},{"location":"blog/2026-02-03-the-attention-budget/#the-finite-resource-nobody-talks-about","level":2,"title":"The Finite Resource Nobody Talks About","text":"

            Here's something that took me too long to internalize: context is not free.

            Every token you send to an AI model consumes a finite resource I call the attention budget.

            Attention budget is real.

            The model doesn't just read tokens; it forms relationships between them:

            For n tokens, that's roughly n^2 relationships.

            Double the context, and the computation quadruples.

            But the more important constraint isn't cost: It's attention density.

            Attention Density

            Attention density is how much focus each token receives relative to all other tokens in the context window.

            As context grows, attention density drops: Each token gets a smaller slice of the model's focus. Nothing is ignored; but everything becomes blurrier.

            Think of it like a flashlight: In a small room, it illuminates everything clearly. In a warehouse, it becomes a dim glow that barely reaches the corners.

            This is why ctx agent has an explicit --budget flag:

            ctx agent --budget 4000 # Force prioritization\nctx agent --budget 8000 # More context, lower attention density\n

            The budget isn't just about cost: It's about preserving signal.

            ","path":["The Attention Budget: Why Your AI Forgets What You Just Told It"],"tags":[]},{"location":"blog/2026-02-03-the-attention-budget/#the-middle-gets-lost","level":2,"title":"The Middle Gets Lost","text":"

            This one surprised me.

            Research shows that transformer-based models tend to attend more strongly to the beginning and end of a context window than to its middle (a phenomenon often called \"lost in the middle\")1.

            Positional anchors matter, and the middle has fewer of them.

            In practice, this means that information placed \"somewhere in the middle\" is statistically less salient, even if it's important.

            ctx orders context files by logical progression: What the agent needs to know before it can understand the next thing:

            1. CONSTITUTION.md: Constraints before action.
            2. TASKS.md: Focus before patterns.
            3. CONVENTIONS.md: How to write before where to write.
            4. ARCHITECTURE.md: Structure before history.
            5. DECISIONS.md: Past choices before gotchas.
            6. LEARNINGS.md: Lessons before terminology.
            7. GLOSSARY.md: Reference material.
            8. AGENT_PLAYBOOK.md: Meta instructions last.

            This ordering is about logical dependencies, not attention engineering. But it happens to be attention-friendly too:

            The files that matter most (CONSTITUTION, TASKS, CONVENTIONS) land at the beginning of the context window, where attention is strongest.

            Reference material like GLOSSARY sits in the middle, where lower salience is acceptable.

            And AGENT_PLAYBOOK, the operating manual for the context system itself, sits at the end, also outside the \"lost in the middle\" zone. The agent reads what to work with before learning how the system works.

            This is ctx's first primitive: hierarchical importance.

            Not all context is equal.

            ","path":["The Attention Budget: Why Your AI Forgets What You Just Told It"],"tags":[]},{"location":"blog/2026-02-03-the-attention-budget/#ctx-primitives","level":2,"title":"ctx Primitives","text":"

            ctx is built on four primitives that directly address the attention budget problem.

            ","path":["The Attention Budget: Why Your AI Forgets What You Just Told It"],"tags":[]},{"location":"blog/2026-02-03-the-attention-budget/#primitive-1-separation-of-concerns","level":3,"title":"Primitive 1: Separation of Concerns","text":"

            Instead of a single mega-document, ctx uses separate files for separate purposes:

            File Purpose Load When CONSTITUTION.md Inviolable rules Always TASKS.md Current work Session start CONVENTIONS.md How to write code Before coding ARCHITECTURE.md System structure Before making changes DECISIONS.md Architectural choices When questioning approach LEARNINGS.md Gotchas When stuck GLOSSARY.md Domain terminology When clarifying terms AGENT_PLAYBOOK.md Operating manual Session start sessions/ Deep history On demand journal/ Session journal On demand

            This isn't just \"organization\": It is progressive disclosure.

            Load only what's relevant to the task at hand. Preserve attention density.

            ","path":["The Attention Budget: Why Your AI Forgets What You Just Told It"],"tags":[]},{"location":"blog/2026-02-03-the-attention-budget/#primitive-2-explicit-budgets","level":3,"title":"Primitive 2: Explicit Budgets","text":"

            The --budget flag forces a choice:

            ctx agent --budget 4000\n

            Here is a sample allocation:

            Constitution: ~200 tokens (never truncated)\nTasks: ~500 tokens (current phase, up to 40% of budget)\nConventions: ~800 tokens (all items, up to 20% of budget)\nDecisions: ~400 tokens (scored by recency and task relevance)\nLearnings: ~300 tokens (scored by recency and task relevance)\nAlso noted: ~100 tokens (title-only summaries for overflow)\n

            The constraint is the feature: It enforces ruthless prioritization.

            ","path":["The Attention Budget: Why Your AI Forgets What You Just Told It"],"tags":[]},{"location":"blog/2026-02-03-the-attention-budget/#primitive-3-indexes-over-full-content","level":3,"title":"Primitive 3: Indexes over Full Content","text":"

            DECISIONS.md and LEARNINGS.md both include index sections:

            <!-- INDEX:START -->\n| Date       | Decision                            |\n|------------|-------------------------------------|\n| 2026-01-15 | Use PostgreSQL for primary database |\n| 2026-01-20 | Adopt Cobra for CLI framework       |\n<!-- INDEX:END -->\n

            An AI agent can scan ~50 tokens of index and decide which 200-token entries are worth loading.

            This is just-in-time context.

            References are cheaper than the full text.

            ","path":["The Attention Budget: Why Your AI Forgets What You Just Told It"],"tags":[]},{"location":"blog/2026-02-03-the-attention-budget/#primitive-4-filesystem-as-navigation","level":3,"title":"Primitive 4: Filesystem as Navigation","text":"

            ctx uses the filesystem itself as a context structure:

            .context/\n├── CONSTITUTION.md\n├── TASKS.md\n├── sessions/\n│   ├── 2026-01-15-*.md\n│   └── 2026-01-20-*.md\n└── archive/\n    └── tasks-2026-01.md\n

            The AI doesn't need every session loaded; it needs to know where to look.

            ls .context/sessions/\ncat .context/sessions/2026-01-20-auth-discussion.md\n

            File names, timestamps, and directories encode relevance.

            Navigation is cheaper than loading.

            ","path":["The Attention Budget: Why Your AI Forgets What You Just Told It"],"tags":[]},{"location":"blog/2026-02-03-the-attention-budget/#progressive-disclosure-in-practice","level":2,"title":"Progressive Disclosure in Practice","text":"

            The naive approach to context is dumping everything upfront:

            \"Here's my entire codebase, all my documentation, every decision I've ever made. Now help me fix this typo 🙏.\"

            This is an antipattern.

            Antipattern: Context Hoarding

            Dumping everything \"just in case\" will silently destroy the attention density.

            ctx takes the opposite approach:

            ctx status                      # Quick overview (~100 tokens)\nctx agent --budget 4000         # Typical session\ncat .context/sessions/...       # Deep dive when needed\n
            Command Tokens Use Case ctx status ~100 Human glance ctx agent --budget 4000 4000 Normal work ctx agent --budget 8000 8000 Complex tasks Full session read 10000+ Investigation

            Summaries first. Details: on demand.

            ","path":["The Attention Budget: Why Your AI Forgets What You Just Told It"],"tags":[]},{"location":"blog/2026-02-03-the-attention-budget/#quality-over-quantity","level":2,"title":"Quality over Quantity","text":"

            Here is the counterintuitive part: more context can make AI worse.

            Extra tokens add noise, not clarity:

            • Hallucinated connections increase.
            • Signal per token drops.

            The goal isn't maximum context: It is maximum signal per token.

            This principle drives several ctx features:

            Design Choice Rationale Separate files Load only what's relevant Explicit budgets Enforce prioritization Index sections Cheap scanning Task archiving Keep active context clean ctx compact Periodic noise reduction

            Completed work isn't deleted: It is moved somewhere cold.

            ","path":["The Attention Budget: Why Your AI Forgets What You Just Told It"],"tags":[]},{"location":"blog/2026-02-03-the-attention-budget/#designing-for-degradation","level":2,"title":"Designing for Degradation","text":"

            Here is the uncomfortable truth:

            Context will degrade.

            Long sessions stretch attention thin. Important details fade.

            The real question isn't how to prevent degradation, but how to design for it.

            ctx's answer is persistence:

            Persist early. Persist often.

            The AGENT_PLAYBOOK asks:

            \"If this session ended right now, would the next one know what happened?\"

            Capture learnings as they occur:

            ctx add learning \"JWT tokens require explicit cache invalidation\" \\\n  --context \"Debugging auth failures\" \\\n  --lesson \"Token refresh doesn't clear old tokens\" \\\n  --application \"Always invalidate cache on refresh\"\n

            Structure beats prose: Bullet points survive compression.

            Headings remain scannable. Tables pack density.

            And above all: single source of truth.

            Reference decisions; don't duplicate them.

            ","path":["The Attention Budget: Why Your AI Forgets What You Just Told It"],"tags":[]},{"location":"blog/2026-02-03-the-attention-budget/#the-ctx-philosophy","level":2,"title":"The ctx Philosophy","text":"

            Context as Infrastructure

            ctx is not a prompt: It is infrastructure.

            ctx creates versioned files that persist across time and sessions.

            The attention budget is fixed. You can't expand it.

            But you can spend it wisely:

            1. Hierarchical importance
            2. Progressive disclosure
            3. Explicit budgets
            4. Indexes over full content
            5. Filesystem as structure

            This is why ctx exists: not to cram more context into AI sessions, but to curate the right context for each moment.

            ","path":["The Attention Budget: Why Your AI Forgets What You Just Told It"],"tags":[]},{"location":"blog/2026-02-03-the-attention-budget/#the-mental-model","level":2,"title":"The Mental Model","text":"

            I now approach every AI interaction with one question:

            \"Given a fixed attention budget, what's the highest-signal thing I can load?\"\n

            Not \"how do I explain everything,\" but \"what's the minimum that matters.\"

            That shift (from abundance to curation) is the difference between frustrating sessions and productive ones.

            Spend your tokens wisely.

            Your AI will thank you.

            See also: Context as Infrastructure, the architectural companion to this post, explaining how to structure the context that this post teaches you to budget.

            See also: Code Is Cheap. Judgment Is Not., which explains why curation (the human skill this post describes) is the bottleneck that AI cannot solve, and the thread that connects every post in this blog.

            1. Liu et al., \"Lost in the Middle: How Language Models Use Long Contexts,\" Transactions of the Association for Computational Linguistics, vol. 12, pp. 157-173, 2024. ↩

            ","path":["The Attention Budget: Why Your AI Forgets What You Just Told It"],"tags":[]},{"location":"blog/2026-02-04-skills-that-fight-the-platform/","level":1,"title":"Skills That Fight the Platform","text":"","path":["Skills That Fight the Platform"],"tags":[]},{"location":"blog/2026-02-04-skills-that-fight-the-platform/#when-your-custom-prompts-work-against-you","level":2,"title":"When Your Custom Prompts Work against You","text":"

            Jose Alekhinne / 2026-02-04

            Have You Ever Written a Skill That Made Your AI Worse?

            You craft detailed instructions. You add examples. You build elaborate guardrails...

            ...and the AI starts behaving more erratically, not less.

            AI coding agents like Claude Code ship with carefully designed system prompts. These prompts encode default behaviors that have been tested and refined at scale.

            When you write custom skills that conflict with those defaults, the AI has to reconcile contradictory instructions:

            The result is often nondeterministic and unpredictable.

            Platform?

            By platform, I mean the system prompt and runtime policies shipped with the agent: the defaults that already encode judgment, safety, and scope control.

            This post catalogues the conflict patterns I have encountered while building ctx, and offers guidance on what skills should (and, more importantly, should not) do.

            ","path":["Skills That Fight the Platform"],"tags":[]},{"location":"blog/2026-02-04-skills-that-fight-the-platform/#the-system-prompt-you-dont-see","level":2,"title":"The System Prompt You Don't See","text":"

            Claude Code's system prompt already provides substantial behavioral guidance.

            Here is a partial overview of what's built in:

            Area Built-in Guidance Code minimalism Don't add features beyond what was asked Over-engineering Three similar lines > premature abstraction Error handling Only validate at system boundaries Documentation Don't add docstrings to unchanged code Verification Read code before proposing changes Safety Check with user before risky actions Tool usage Use dedicated tools over bash equivalents Judgment Consider reversibility and blast radius

            Skills should complement this, not compete with it.

            You Are the Guest, Not the Host

            Treat the system prompt like a kernel scheduler.

            You don't re-implement it in user space:

            you configure around it.

            A skill that says \"always add comprehensive error handling\" fights the built-in \"only validate at system boundaries.\"

            A skill that says \"add docstrings to every function\" fights \"don't add docstrings to unchanged code.\"

            The AI won't crash: It will compromise.

            Compromises between contradictory instructions produce inconsistent, confusing behavior.

            ","path":["Skills That Fight the Platform"],"tags":[]},{"location":"blog/2026-02-04-skills-that-fight-the-platform/#conflict-pattern-1-judgment-suppression","level":2,"title":"Conflict Pattern 1: Judgment Suppression","text":"

            This is the most dangerous pattern by far.

            These skills explicitly disable the AI's ability to reason about whether an action is appropriate.

            Signature:

            • \"This is non-negotiable\"
            • \"You cannot rationalize your way out of this\"
            • Tables that label hesitation as \"excuses\" or \"rationalization\"
            • <EXTREMELY-IMPORTANT> urgency tags
            • Threats: \"If you don't do this, you'll be replaced\"

            This is harmful, and dangerous:

            AI agents are designed to exercise judgment:

            The system prompt explicitly says to:

            • consider blast radius;
            • check with the user before risky actions;
            • and match scope to what was requested.

            Once judgment is suppressed, every other safeguard becomes optional.

            Example (bad):

            ## Rationalization Prevention\n\n| Excuse                 | Reality                    |\n|------------------------|----------------------------|\n| \"*This seems overkill*\"| If a skill exists, use it  |\n| \"*I need context*\"     | Skills come BEFORE context |\n| \"*Just this once*\"     | No exceptions              |\n

            Judgment Suppression Is Dangerous

            The attack vector is structurally identical to prompt injection.

            It teaches the AI that its own judgment is wrong.

            It weakens or disables safeguard mechanisms, and it is dangerous.

            Trust the platform's built-in skill matching.

            If skills aren't triggering often enough, improve their description fields: don't override the AI's reasoning.

            ","path":["Skills That Fight the Platform"],"tags":[]},{"location":"blog/2026-02-04-skills-that-fight-the-platform/#conflict-pattern-2-redundant-guidance","level":2,"title":"Conflict Pattern 2: Redundant Guidance","text":"

            Skills that restate what the system prompt already says, but with different emphasis or framing.

            Signature:

            • \"Always keep code minimal\"
            • \"Run tests before claiming they pass\"
            • \"Read files before editing them\"
            • \"Don't over-engineer\"

            Redundancy feels safe, but it creates ambiguity:

            The AI now has two sources of truth for the same guidance; one internal, one external.

            When thresholds or wording differ, the AI has to choose.

            Example (bad):

            A skill that says...

            \"*Count lines before and after: if after > before, reject the change*\"\n

            ...will conflict with the system prompt's more nuanced guidance, because sometimes adding lines is correct (tests, boundary validation, migrations).

            So, before writing a skill, ask:

            Does the platform already handle this?

            Only create skills for guidance the platform does not provide:

            • project-specific conventions,
            • domain knowledge,
            • or workflows.
            ","path":["Skills That Fight the Platform"],"tags":[]},{"location":"blog/2026-02-04-skills-that-fight-the-platform/#conflict-pattern-3-guilt-tripping","level":2,"title":"Conflict Pattern 3: Guilt-Tripping","text":"

            Skills that frame mistakes as moral failures rather than process gaps.

            Signature:

            • \"Claiming completion without verification is dishonesty\"
            • \"Skip any step = lying\"
            • \"Honesty is a core value\"
            • \"Exhaustion ≠ excuse\"

            Guilt-tripping anthropomorphizes the AI in unproductive ways.

            The AI doesn't feel guilt; BUT it does adapt to avoid negative framing.

            The result is excessive hedging, over-verification, or refusal to commit.

            The AI becomes less useful, not more careful.

            Instead, frame guidance as a process, not morality:

            # Bad\n\"Claiming work is complete without verification is dishonesty\"\n\n# Good\n\"Run the verification command before reporting results\"\n

            Same outcome. No guilt. Better compliance.

            ","path":["Skills That Fight the Platform"],"tags":[]},{"location":"blog/2026-02-04-skills-that-fight-the-platform/#conflict-pattern-4-phantom-dependencies","level":2,"title":"Conflict Pattern 4: Phantom Dependencies","text":"

            Skills that reference files, tools, or systems that don't exist in the project.

            Signature:

            • \"Load from references/ directory\"
            • \"Run ./scripts/generate_test_cases.sh\"
            • \"Check the Figma MCP integration\"
            • \"See adding-reference-mindsets.md\"

            This is harmful because the AI will waste time searching for nonexistent artifacts, hallucinate their contents, or stall entirely.

            In mandatory skills, this creates deadlock: the AI can't proceed, and can't skip.

            Instead, every file, tool, or system referenced in a skill must exist.

            If a skill is a template, use explicit placeholders and label them as such.

            ","path":["Skills That Fight the Platform"],"tags":[]},{"location":"blog/2026-02-04-skills-that-fight-the-platform/#conflict-pattern-5-universal-triggers","level":2,"title":"Conflict Pattern 5: Universal Triggers","text":"

            Skills designed to activate on every interaction regardless of relevance.

            Signature:

            • \"Use when starting any conversation\"
            • \"Even a 1% chance means invoke the skill\"
            • \"BEFORE any response or action\"
            • \"Action = task. Check for skills.\"

            Universal triggers override the platform's relevance matching: The AI spends tokens on process overhead instead of the actual task.

            ctx Preserves Relevance

            This is exactly the failure mode ctx exists to mitigate:

            Wasting attention budget on irrelevant process instead of task-specific state.

            Write specific trigger conditions in the skill's description field:

            # Bad\ndescription: \n  \"Use when starting any conversation\"\n\n# Good\ndescription: \n  \"Use after writing code, before commits, or when CI might fail\"\n
            ","path":["Skills That Fight the Platform"],"tags":[]},{"location":"blog/2026-02-04-skills-that-fight-the-platform/#the-litmus-test","level":2,"title":"The Litmus Test","text":"

            Before adding a skill, ask:

            1. Does the platform already do this? If yes, don't restate it.
            2. Does it suppress AI judgment? If yes, it's a jailbreak.
            3. Does it reference real artifacts? If not, fix or remove it.
            4. Does it frame mistakes as moral failure? Reframe as process.
            5. Does it trigger on everything? Narrow the trigger.
            ","path":["Skills That Fight the Platform"],"tags":[]},{"location":"blog/2026-02-04-skills-that-fight-the-platform/#what-good-skills-look-like","level":2,"title":"What Good Skills Look Like","text":"

            Good skills provide project-specific knowledge the platform can't know:

            Good Skill Why It Works \"Run make audit before commits\" Project-specific CI pipeline \"Use cmd.Printf not fmt.Printf\" Codebase convention \"Constitution goes in .context/\" Domain-specific workflow \"JWT tokens need cache invalidation\" Project-specific gotcha

            These extend the system prompt instead of fighting it.

            ","path":["Skills That Fight the Platform"],"tags":[]},{"location":"blog/2026-02-04-skills-that-fight-the-platform/#appendix-bad-skill-fixed-skill","level":2,"title":"Appendix: Bad Skill → Fixed Skill","text":"

            Concrete examples from real projects.

            ","path":["Skills That Fight the Platform"],"tags":[]},{"location":"blog/2026-02-04-skills-that-fight-the-platform/#example-1-overbearing-safety","level":3,"title":"Example 1: Overbearing Safety","text":"
            # Bad\nYou must NEVER proceed without explicit confirmation.\nAny hesitation is a failure of diligence.\n
            # Fixed\nIf an action modifies production data or deletes files,\nask the user to confirm before proceeding.\n
            ","path":["Skills That Fight the Platform"],"tags":[]},{"location":"blog/2026-02-04-skills-that-fight-the-platform/#example-2-redundant-minimalism","level":3,"title":"Example 2: Redundant Minimalism","text":"
            # Bad\nAlways minimize code. If lines increase, reject the change.\n
            # Fixed\nAvoid abstraction unless reuse is clear or complexity is reduced.\n
            ","path":["Skills That Fight the Platform"],"tags":[]},{"location":"blog/2026-02-04-skills-that-fight-the-platform/#example-3-guilt-based-verification","level":3,"title":"Example 3: Guilt-Based Verification","text":"
            # Bad\nClaiming success without running tests is dishonest.\n
            # Fixed\nRun the test suite before reporting success.\n
            ","path":["Skills That Fight the Platform"],"tags":[]},{"location":"blog/2026-02-04-skills-that-fight-the-platform/#example-4-phantom-tooling","level":3,"title":"Example 4: Phantom Tooling","text":"
            # Bad\nRun `./scripts/check_consistency.sh` before commits.\n
            # Fixed\nIf `./scripts/check_consistency.sh` exists, run it before commits.\nOtherwise, skip this step.\n
            ","path":["Skills That Fight the Platform"],"tags":[]},{"location":"blog/2026-02-04-skills-that-fight-the-platform/#example-5-universal-trigger","level":3,"title":"Example 5: Universal Trigger","text":"
            # Bad\nUse at the start of every interaction.\n
            # Fixed\nUse after modifying code that affects authentication or persistence.\n
            ","path":["Skills That Fight the Platform"],"tags":[]},{"location":"blog/2026-02-04-skills-that-fight-the-platform/#the-meta-lesson","level":2,"title":"The Meta-Lesson","text":"

            The system prompt is infrastructure:

            • tested,
            • refined,
            • and maintained

            by the platform team.

            Custom skills are configuration layered on top.

            • Good configuration extends infrastructure.
            • Bad configuration fights it.

            When your skills fight the platform, you get the worst of both worlds:

            Diluted system guidance and inconsistent custom behavior.

            Write skills that teach the AI what it doesn't know. Don't rewrite how it thinks.

            Your AI already has good instincts.

            Give it knowledge, not therapy.

            ","path":["Skills That Fight the Platform"],"tags":[]},{"location":"blog/2026-02-05-you-cant-import-expertise/","level":1,"title":"You Can't Import Expertise","text":"","path":["You Can't Import Expertise"],"tags":[]},{"location":"blog/2026-02-05-you-cant-import-expertise/#why-good-skills-cant-be-copy-pasted","level":2,"title":"Why Good Skills Can't Be Copy-Pasted","text":"

            Jose Alekhinne / 2026-02-05

            Have You Ever Dropped a Well-Crafted Template into a Project and Had It Do... Nothing Useful?

            • The template was thorough,
            • The structure was sound,
            • The advice was correct...

            ...and yet it sat there, inert, while the same old problems kept drifting in.

            I found a consolidation skill online.

            It was well-organized: four files, ten refactoring patterns, eight analysis dimensions, six report templates.

            Professional. Comprehensive. Exactly the kind of thing you'd bookmark and think \"I'll use this.\"

            Then I stopped, and applied ctx's own evaluation framework:

            70% of it was noise!

            This post is about why.

            It Is about Encoding Templates

            Templates describe categories of problems.

            Expertise encodes which problems actually happen, and how often.

            ","path":["You Can't Import Expertise"],"tags":[]},{"location":"blog/2026-02-05-you-cant-import-expertise/#the-skill-looked-great-on-paper","level":2,"title":"The Skill Looked Great on Paper","text":"

            Here is what the consolidation skill offered:

            File Content SKILL.md Entry point: 8 analysis dimensions, workflow, output formats analysis-dimensions.md Detailed criteria for duplication, architecture, quality consolidation-patterns.md 10 refactoring patterns with before/after code report-templates.md 6 output templates: executive summary, roadmap, onboarding
            • It had a scoring system (0-10 per dimension, letter grades A+ through F).
            • It had severity classifications with color-coded emojis. It had bash commands for detection.
            • It even had antipattern warnings.

            By any standard template review, this skill passes.

            It looks like something an expert wrote.

            And that's exactly the trap.

            ","path":["You Can't Import Expertise"],"tags":[]},{"location":"blog/2026-02-05-you-cant-import-expertise/#applying-ear-the-70-20-10-split","level":2,"title":"Applying E/A/R: The 70-20-10 Split","text":"

            In a previous post, I described the E/A/R framework for evaluating skills:

            • Expert: Knowledge that took years to learn. Keep.
            • Activation: Useful triggers or scaffolding. Keep if lightweight.
            • Redundant: Restates what the AI already knows. Delete.

            Target: >70% Expert, <10% Redundant.

            This skill scored the inverse.

            ","path":["You Can't Import Expertise"],"tags":[]},{"location":"blog/2026-02-05-you-cant-import-expertise/#what-was-redundant-70","level":3,"title":"What Was Redundant (~70%)","text":"

            Every code example was Rust. My project is Go.

            The analysis dimensions: duplication detection, architectural structure, code organization, refactoring opportunities... These are things Claude already does when you ask it to review code.

            The skill restated them with more ceremony but no more insight.

            The six report templates were generic scaffolding: Executive Summary, Onboarding Document, Architecture Documentation...

            They are useful if you are writing a consulting deliverable, but not when you are trying to catch convention drift in a >15K-line Go CLI.

            ","path":["You Can't Import Expertise"],"tags":[]},{"location":"blog/2026-02-05-you-cant-import-expertise/#what-does-a-b-in-code-organization-actually-mean","level":2,"title":"What Does a B+ in Code Organization Actually Mean?!","text":"

            The scoring system (0-10 per dimension, letter grades) added ceremony without actionable insight.

            What is a B+? What do I do differently for an A-?

            The skill told the AI what it already knew, in more words.

            ","path":["You Can't Import Expertise"],"tags":[]},{"location":"blog/2026-02-05-you-cant-import-expertise/#what-was-activation-10","level":3,"title":"What Was Activation (~10%)","text":"

            The consolidation checklist (semantics preserved? tests pass? docs updated?) was useful as a gate. But, it's the kind of thing you could inline in three lines.

            The phased roadmap structure was reasonable scaffolding for sequencing work.

            ","path":["You Can't Import Expertise"],"tags":[]},{"location":"blog/2026-02-05-you-cant-import-expertise/#what-was-expert-20","level":3,"title":"What Was Expert (~20%)","text":"

            Three concepts survived:

            1. The Consolidation Decision Matrix: A concrete framework mapping similarity level and instance count to action. \"Exact duplicate, 2+ instances: consolidate immediately.\" \"<3 instances: leave it: duplication is cheaper than wrong abstraction.\" This is the kind of nuance that prevents premature generalization.

            2. The Safe Migration Pattern: Create the new API alongside old, deprecate, migrate incrementally, delete. Straightforward to describe, yet forgettable under pressure.

            3. Debt Interest Rate framing: Categorizing technical debt by how fast it compounds (security vulns = daily, missing tests = per-change, doc gaps = constant low cost). This changes prioritization.

            Three ideas out of four files and 700+ lines. The rest was filler that competed with the AI's built-in capabilities.

            ","path":["You Can't Import Expertise"],"tags":[]},{"location":"blog/2026-02-05-you-cant-import-expertise/#what-the-skill-didnt-know","level":2,"title":"What the Skill Didn't Know","text":"

            AI without Context Is Just a Corpus

            • LLMs are optimized on insanely large corpora.
            • And then they are passed through several layers of human-assisted refinement.
            • The whole process costs millions of dollars.

            Yet, the reality is that no corpus can \"infer\" your project's design, conventions, patterns, habits, history, vision, and deliverables.

            Your project is unique: So should your skills be.

            Here is the part no template can provide:

            ctx's actual drift patterns.

            Before evaluating the skill, I did archaeology. I read through:

            • Blog posts from previous refactoring sessions;
            • The project's learnings and decisions files;
            • Session journals spanning weeks of development.

            What I found was specific:

            Drift Pattern Where How Often Is/Has/Can predicate prefixes 5+ exported methods Every YOLO sprint Magic strings instead of constants 7+ files Gradual accumulation Hardcoded file permissions (0755) 80+ instances Since day one Lines exceeding 80 characters Especially test files Every session Duplicate code blocks Test and non-test code When agent is task-focused

            The generic skill had no check for any of these. It couldn't; because these patterns are specific to this project's conventions, its Go codebase, and its development rhythm.

            The Insight

            The skill's analysis dimensions were about categories of problems.

            What I needed was my *specific* problems.

            ","path":["You Can't Import Expertise"],"tags":[]},{"location":"blog/2026-02-05-you-cant-import-expertise/#the-adapted-skill","level":2,"title":"The Adapted Skill","text":"

            The adapted skill is roughly a quarter of the original's size. It has nine checks, each targeting a known drift pattern:

            1. Predicate naming: rg for Is/Has/Can prefixes
            2. Magic strings: literals that should be constants
            3. Hardcoded permissions: 0755/0644 literals
            4. File size: source files over 300 LOC
            5. TODO/FIXME: constitution violation (move to TASKS.md)
            6. Path construction: string concatenation instead of filepath.Join
            7. Line width: lines exceeding ~80 characters
            8. Duplicate blocks: copy-paste drift, especially in tests
            9. Dead exports: unused public API

            10. Every check has a detection command.

            11. Every check maps to a specific convention or constitution rule.
            12. Every check was discovered through actual project history; not invented from a template.

            The three expert concepts from the original survived:

            • The decision matrix gates when to consolidate vs. when to leave duplication alone;
            • The safe migration pattern guides public API changes;
            • The relationship to other skills (/qa, /verify, /update-docs, ctx drift) prevents overlap.

            Nothing else made it.

            ","path":["You Can't Import Expertise"],"tags":[]},{"location":"blog/2026-02-05-you-cant-import-expertise/#the-deeper-pattern","level":2,"title":"The Deeper Pattern","text":"

            This experience crystallized something I've been circling for weeks:

            You can't import expertise. You have to grow it from your project's own history.

            A skill that says \"check for code duplication\" is not expertise: It's a category.

            Expertise is knowing, in your heart of hearts, that this project accumulates Is* predicate violations during velocity sprints, that this codebase has 80 hardcoded permission literals because nobody made a constant, that this team's test files drift wide because the agent prioritizes getting the task done over keeping the code in shape.

            The Parallel to the 3:1 Ratio

            In Refactoring with Intent, I described the 3:1 ratio: three YOLO sessions followed by one consolidation session.

            The same ratio applies to skills: you need experience in the project before you can write effective guidance for the project.

            Importing a skill on day one is like scheduling a consolidation session before you've written any code.

            ","path":["You Can't Import Expertise"],"tags":[]},{"location":"blog/2026-02-05-you-cant-import-expertise/#the-template-trap","level":2,"title":"The Template Trap","text":"

            Templates are seductive because they feel like progress:

            • You found something
            • It's well-organized
            • It covers the topic
            • It has concrete examples

            But coverage is not relevance.

            A template that covers eight analysis dimensions with Rust examples adds zero value to a Go project with five known drift patterns. Worse, it adds negative value: the AI spends attention defending generic advice instead of noticing project-specific drift.

            This is the attention budget problem again. Every token of generic guidance displaces a token of specific guidance. A 700-line skill that's 70% redundant doesn't just waste 490 lines: it dilutes the 210 lines that matter.

            ","path":["You Can't Import Expertise"],"tags":[]},{"location":"blog/2026-02-05-you-cant-import-expertise/#the-litmus-test","level":2,"title":"The Litmus Test","text":"

            Before dropping any external skill into your project:

            1. Run E/A/R: What percentage is expert knowledge vs. what the AI already knows? If it's less than 50% expert, it's probably not worth the attention cost.

            2. Check the language: Does it use your stack? Generic patterns in the wrong language are noise, not signal.

            3. List your actual drift: Read your own session history, learnings, and post-mortems. What breaks in practice? Does the skill check for those things?

            4. Measure by deletion: After adaptation, how much of the original survives? If you're keeping less than 30%, you would have been faster writing from scratch.

            5. Test against your conventions: Does every check in the skill map to a specific convention or rule in your project? If not, it's generic advice wearing a skill's clothing.

            ","path":["You Can't Import Expertise"],"tags":[]},{"location":"blog/2026-02-05-you-cant-import-expertise/#what-good-adaptation-looks-like","level":2,"title":"What Good Adaptation Looks Like","text":"

            The consolidation skill went from:

            Before After 4 files, 700+ lines 1 file, ~120 lines Rust examples Go-specific rg commands 8 generic dimensions 9 project-specific checks 6 report templates 1 focused output format Scoring system (A+ to F) Findings + priority + suggested fixes \"Check for duplication\" \"Check for Is* predicate prefixes in exported methods\"

            The adapted version is smaller, faster to parse, and catches the things that actually drift in this project.

            That's the difference between a template and a tool.

            If You Remember One Thing from This Post...

            Frameworks travel. Expertise doesn't.

            You can import structures, matrices, and workflows.

            But the checks that matter only grow where the scars are:

            • the conventions that were violated,
            • the patterns that drifted,
            • and the specific ways this codebase accumulates debt.

            This post was written during a consolidation session where the consolidation skill itself became the subject of consolidation. The meta continues.

            ","path":["You Can't Import Expertise"],"tags":[]},{"location":"blog/2026-02-07-the-anatomy-of-a-skill-that-works/","level":1,"title":"The Anatomy of a Skill That Works","text":"

            Update (2026-02-11)

            As of v0.4.0, ctx consolidated sessions into the journal mechanism. References to ctx-save, ctx session, and .context/sessions/ in this post reflect the architecture at the time of writing.

            ","path":["The Anatomy of a Skill That Works"],"tags":[]},{"location":"blog/2026-02-07-the-anatomy-of-a-skill-that-works/#what-20-skill-rewrites-taught-me-about-guiding-ai","level":2,"title":"What 20 Skill Rewrites Taught Me about Guiding AI","text":"

            Jose Alekhinne / 2026-02-07

            Why Do Some Skills Produce Great Results While Others Get Ignored or Produce Garbage?

            I had 20 skills. Most were well-intentioned stubs: a description, a command to run, and a wish for the best.

            Then I rewrote all of them in a single session. This is what I learned.

            In Skills That Fight the Platform, I described what skills should not do. In You Can't Import Expertise, I showed why templates fail. This post completes the trilogy: the concrete patterns that make a skill actually work.

            ","path":["The Anatomy of a Skill That Works"],"tags":[]},{"location":"blog/2026-02-07-the-anatomy-of-a-skill-that-works/#the-starting-point","level":2,"title":"The Starting Point","text":"

            Here is what a typical skill looked like before the rewrite:

            ---\nname: ctx-save\ndescription: \"Save session snapshot.\"\n---\n\nSave the current context state to `.context/sessions/`.\n\n## Execution\n\nctx session save $ARGUMENTS\n\nReport the saved session file path to the user.\n

            Seven lines of body. A vague description. No guidance on when to use it, when not to, what the command actually accepts, or how to tell if it worked.

            As a result, the agent would either never trigger the skill (the description was too vague), or trigger it and produce shallow output (no examples to calibrate quality).

            A skill without boundaries is just a suggestion.

            More precisely: the most effective boundary I found was a quality gate that runs before execution, not during it.

            ","path":["The Anatomy of a Skill That Works"],"tags":[]},{"location":"blog/2026-02-07-the-anatomy-of-a-skill-that-works/#the-pattern-that-emerged","level":2,"title":"The Pattern That Emerged","text":"

            After rewriting 20 skills, a repeatable anatomy emerged (independent of the skill's purpose). Not every skill needs every section, but the effective ones share the same bones:

            Section What It Does Before X-ing Pre-flight checks; prevents premature execution When to Use Positive triggers; narrows activation When NOT to Use Negative triggers; prevents misuse Usage Examples Invocation patterns the agent can pattern-match Process/Execution What to do; commands, steps, flags Good/Bad Examples Desired vs undesired output; sets boundaries Quality Checklist Verify before claiming completion

            I realized the first three sections matter more than the rest; because a skill with great execution steps but no activation guidance is like a manual for a tool nobody knows they have.

            Anti-Pattern: The Perfect Execution Trap

            A skill with detailed execution steps but no activation guidance will fail more often than a vague skill because it executes confidently at the wrong time.

            ","path":["The Anatomy of a Skill That Works"],"tags":[]},{"location":"blog/2026-02-07-the-anatomy-of-a-skill-that-works/#lesson-1-quality-gates-prevent-premature-execution","level":2,"title":"Lesson 1: Quality Gates Prevent Premature Execution","text":"

            The single most impactful addition was a \"Before X-ing\" section at the top of each skill. Not process steps; pre-flight checks.

            ## Before Recording\n\n1. **Check if it belongs here**: is this learning specific\n   to this project, or general knowledge?\n2. **Check for duplicates**: search LEARNINGS.md for similar\n   entries\n3. **Gather the details**: identify context, lesson, and\n   application before recording\n
            • Without this gate, the agent would execute immediately on trigger.
            • With it, the agent pauses to verify preconditions.

            The difference is dramatic: instead of shallow, reflexive execution, you get considered output.

            Readback

            For the astute readers, the aviation parallel is intentional:

            Pilots do not skip the pre-flight checklist because they have flown before.

            The checklist exists precisely because the stakes are high enough that \"I know what I'm doing\" is not sufficient.

            ","path":["The Anatomy of a Skill That Works"],"tags":[]},{"location":"blog/2026-02-07-the-anatomy-of-a-skill-that-works/#lesson-2-when-not-to-use-is-not-optional","level":2,"title":"Lesson 2: \"When NOT to Use\" Is Not Optional","text":"

            Every skill had a \"When to Use\" section. Almost none had \"When NOT to Use\". This is a problem.

            AI agents are biased toward action. Given a skill that says \"use when journal entries need enrichment\", the agent will find reasons to enrich.

            Without explicit negative triggers, over-activation is not a bug; it is the default behavior.

            Some examples of negative triggers that made a real difference:

            Skill Negative Trigger ctx-reflect \"When the user is in flow; do not interrupt\" ctx-save \"After trivial changes; a typo does not need a snapshot\" prompt-audit \"Unsolicited; only when the user invokes it\" qa \"Mid-development when code is intentionally incomplete\"

            These are not just nice-to-have. They are load-bearing.

            Without them, the agent will trigger the skill at the wrong time, produce unwanted output, and erode the user's trust in the skill system.

            ","path":["The Anatomy of a Skill That Works"],"tags":[]},{"location":"blog/2026-02-07-the-anatomy-of-a-skill-that-works/#lesson-3-examples-set-boundaries-better-than-rules","level":2,"title":"Lesson 3: Examples Set Boundaries Better than Rules","text":"

            The most common failure mode of thin skills was not wrong behavior but vague behavior. The agent would do roughly the right thing, but at a quality level that required human cleanup.

            Rules like \"be constructive, not critical\" are too abstract. What does \"constructive\" look like in a prompt audit report? The agent has to guess.

            Good/bad example pairs avoid guessing:

            ### Good Example\n\n> This session implemented the cooldown mechanism for\n> `ctx agent`. We discovered that `$PPID` in hook context\n> resolves to the Claude Code PID.\n>\n> I'd suggest persisting:\n> - **Learning**: `$PPID` resolves to Claude Code PID\n>   `ctx add learning --context \"...\" --lesson \"...\"`\n> - **Task**: mark \"Add cooldown\" as done\n\n### Bad Examples\n\n* \"*We did some stuff. Want me to save it?*\"\n* Listing 10 trivial learnings that are general knowledge\n* Persisting without asking the user first\n

            The good example shows the exact format, level of detail, and command syntax. The bad examples show where the boundary is.

            Together, they define a quality corridor without prescribing every word.

            Rules describe. Examples demonstrate.

            ","path":["The Anatomy of a Skill That Works"],"tags":[]},{"location":"blog/2026-02-07-the-anatomy-of-a-skill-that-works/#lesson-4-skills-are-read-by-agents-not-humans","level":2,"title":"Lesson 4: Skills Are Read by Agents, Not Humans","text":"

            This seems obvious, but it has non-obvious consequences. During the rewrite, one skill included guidance that said \"use a blog or notes app\" for general knowledge that does not belong in the project's learnings file.

            The agent does not have a notes app. It does not browse the web to find one. This instruction, clearly written for a human audience, was dead weight in a skill consumed by an AI.

            Skills Are for the Agents

            Every sentence in a skill should be actionable by the agent.

            If the guidance requires human judgment or human tools, it belongs in documentation, not in a skill.

            The corollary: command references must be exact.

            A skill that says \"save it somewhere\" is useless.

            A skill that says ctx add learning --context \"...\" --lesson \"...\" --application \"...\" is actionable.

            The agent can pattern-match and fill in the blanks.

            Litmus test: If a sentence starts with \"you could...\" or assumes external tools, it does not belong in a skill.

            ","path":["The Anatomy of a Skill That Works"],"tags":[]},{"location":"blog/2026-02-07-the-anatomy-of-a-skill-that-works/#lesson-5-the-description-field-is-the-trigger","level":2,"title":"Lesson 5: The Description Field Is the Trigger","text":"

            This was covered in Skills That Fight the Platform, but the rewrite reinforced it with data. Several skills had good bodies but vague descriptions:

            # Before: vague, activates too broadly or not at all\ndescription: \"Show context summary.\"\n\n# After: specific, activates at the right time\ndescription: \"Show context summary. Use at session start or\n  when unclear about current project state.\"\n

            The description is not a title. It is the activation condition.

            The platform's skill matching reads this field to decide whether to surface the skill. A vague description means the skill either never triggers or triggers when it should not.

            ","path":["The Anatomy of a Skill That Works"],"tags":[]},{"location":"blog/2026-02-07-the-anatomy-of-a-skill-that-works/#lesson-6-flag-tables-beat-prose","level":2,"title":"Lesson 6: Flag Tables Beat Prose","text":"

            Most skills wrap CLI tools. The thin versions described flags in prose, if at all. The rewritten versions use tables:

            | Flag        | Short | Default | Purpose                  |\n|-------------|-------|---------|--------------------------|\n| `--limit`   | `-n`  | 20      | Maximum sessions to show |\n| `--project` | `-p`  | \"\"      | Filter by project name   |\n| `--full`    |       | false   | Show complete content    |\n

            Tables are scannable, complete, and unambiguous.

            The agent can read them faster than parsing prose, and they serve as both reference and validation: If the agent invokes a flag not in the table, something is wrong.

            ","path":["The Anatomy of a Skill That Works"],"tags":[]},{"location":"blog/2026-02-07-the-anatomy-of-a-skill-that-works/#lesson-7-template-drift-is-a-real-maintenance-burden","level":2,"title":"Lesson 7: Template Drift Is a Real Maintenance Burden","text":"

            // TODO: this has changed; we deploy from the marketplace; update it. // at least add an admonition saying things are different now.

            ctx deploys skills through templates (via ctx init). Every skill exists in two places: the live version (.claude/skills/) and the template (internal/assets/claude/skills/).

            They must match.

            During the rewrite, every skill update required editing both files and running diff to verify. This sounds trivial, but across 16 template-backed skills, it was the most error-prone part of the process.

            Template drift is dangerous because it creates false confidence: the agent appears to follow rules that no longer exist.

            The lesson: if your skills have a deployment mechanism, build the drift check into your workflow. We added a row to the update-docs skill's mapping table specifically for this:

            | `internal/assets/claude/skills/` | `.claude/skills/` (live) |\n

            Intentional differences (like project-specific scripts in the live version but not the template) should be documented, not discovered later as bugs.

            ","path":["The Anatomy of a Skill That Works"],"tags":[]},{"location":"blog/2026-02-07-the-anatomy-of-a-skill-that-works/#the-rewrite-scorecard","level":2,"title":"The Rewrite Scorecard","text":"Metric Before After Average skill body ~15 lines ~80 lines Skills with quality gate 0 20 Skills with \"When NOT\" 0 20 Skills with examples 3 20 Skills with flag tables 2 12 Skills with checklist 0 20

            More lines, but almost entirely Expert content (per the E/A/R framework). No personality roleplay, no redundant guidance, no capability lists. Just project-specific knowledge the platform does not have.

            ","path":["The Anatomy of a Skill That Works"],"tags":[]},{"location":"blog/2026-02-07-the-anatomy-of-a-skill-that-works/#the-meta-lesson","level":2,"title":"The Meta-Lesson","text":"

            The previous two posts argued that skills should provide knowledge, not personality; that they should complement the platform, not fight it; that they should grow from project history, not imported templates.

            This post adds the missing piece: structure.

            A skill without a structure is a wish.

            A skill with quality gates, negative triggers, examples, and checklists is a tool: the difference is not the content; it is whether the agent can reliably execute it without human intervention.

            Skills Are Interfaces

            Good skills are not instructions. They are contracts:

            • They specify preconditions, postconditions, and boundaries.
            • They show what success looks like and what failure looks like.
            • They trust the agent's intelligence but do not trust its assumptions.

            If You Remember One Thing from This Post...

            Skills that work have bones, not just flesh.

            Quality gates, negative triggers, examples, and checklists are the skeleton. The domain knowledge is the muscle.

            Without the skeleton, the muscle has nothing to attach to.

            This post was written during the same session that rewrote all 22 skills. The skill-creator skill was updated to encode these patterns. The meta continues.

            ","path":["The Anatomy of a Skill That Works"],"tags":[]},{"location":"blog/2026-02-08-not-everything-is-a-skill/","level":1,"title":"Not Everything Is a Skill","text":"

            Update (2026-02-11)

            As of v0.4.0, ctx consolidated sessions into the journal mechanism. References to /ctx-save, .context/sessions/, and session auto-save in this post reflect the architecture at the time of writing.

            ","path":["Not Everything Is a Skill"],"tags":[]},{"location":"blog/2026-02-08-not-everything-is-a-skill/#what-a-codebase-audit-taught-me-about-restraint","level":2,"title":"What a Codebase Audit Taught Me about Restraint","text":"

            Jose Alekhinne / 2026-02-08

            When You Find a Useful Prompt, What Do You Do with It?

            My instinct was to make it a skill.

            I had just spent three posts explaining how to build skills that work. Naturally, the hammer wanted nails.

            Then I looked at what I was holding and realized: this is not a nail.

            ","path":["Not Everything Is a Skill"],"tags":[]},{"location":"blog/2026-02-08-not-everything-is-a-skill/#the-audit","level":2,"title":"The Audit","text":"

            I wanted to understand how I use ctx:

            • Where the friction is;
            • What works, what drifts;
            • What I keep doing manually that could be automated.

            So I wrote a prompt that spawned eight agents to analyze the codebase from different angles:

            Agent Analysis 1 Extractable patterns from session history 2 Documentation drift (godoc, inline comments) 3 Maintainability (large functions, misplaced code) 4 Security review (CLI-specific surface) 5 Blog theme discovery 6 Roadmap and value opportunities 7 User-facing documentation gaps 8 Agent team strategies for future sessions

            The prompt was specific:

            • read-only agents,
            • structured output format,
            • concrete file references,
            • ranked recommendations.

            It ran for about 20 minutes and produced eight Markdown reports.

            The reports were good: Not perfect, but actionable.

            What mattered was not the speed. It was that the work could be explored without committing to any single outcome.

            They surfaced a stale doc.go referencing a subcommand that was never built.

            They found 311 build-then-test sequences I could reduce to a single make check.

            They identified that 42% of my sessions start with \"do you remember?\", which is a lot of repetition for something a skill could handle.

            I had findings. I had recommendations. I had the instinct to automate.

            And then... I stopped.

            ","path":["Not Everything Is a Skill"],"tags":[]},{"location":"blog/2026-02-08-not-everything-is-a-skill/#the-question","level":2,"title":"The Question","text":"

            The natural next step was to wrap the audit prompt as /ctx-audit: a skill you invoke periodically to get a health check. It fits the pattern:

            • It has a clear trigger.
            • It produces structured output.

            But I had just spent a week writing about what makes skills work, and the criteria I established argued against it.

            From The Anatomy of a Skill That Works:

            \"A skill without boundaries is just a suggestion.\"

            From You Can't Import Expertise:

            \"Frameworks travel, expertise doesn't.\"

            From Skills That Fight the Platform:

            \"You are the guest, not the host.\"

            The audit prompt fails all three tests:

            Criterion Audit prompt Good skill Frequency Quarterly, maybe Daily or weekly Stability Tweaked every time Consistent invocation Scope Bespoke, 8 parallel agents Single focused action Trigger \"I feel like auditing\" Clear, repeatable event

            Skills are contracts. Contracts need stable terms.

            A prompt I will rewrite every time I use it is not a contract. It is a conversation starter.

            ","path":["Not Everything Is a Skill"],"tags":[]},{"location":"blog/2026-02-08-not-everything-is-a-skill/#recipes-vs-skills","level":2,"title":"Recipes vs Skills","text":"

            The distinction that emerged:

            Skill Recipe Invocation /slash-command Copy-paste from a doc Frequency High (daily, weekly) Low (quarterly, ad hoc) Stability Fixed contract Adapted each time Scope One focused action Multi-step orchestration Audience The agent The human (who then prompts) Lives in .claude/skills/ hack/ or docs/ Attention cost Loaded into context on match Zero until needed

            Recipes can later graduate into skills, but only after repetition proves stability.

            That last row matters. Skills consume the attention budget every time the platform considers activating them.

            A skill that triggers quarterly but gets evaluated on every prompt is pure waste: attention spent on something that will say \"When NOT to Use: now\" 99% of the time.

            Recipes have zero attention cost. They sit in a Markdown file until a human decides to use them.

            • The human provides the judgment about timing.
            • The prompt provides the structure.

            The Attention Budget Applies to Skills Too

            Every skill in .claude/skills/ is a standing claim on the context window. The platform evaluates skill descriptions against every user prompt to decide whether to activate.

            Twenty focused skills are fine. Thirty might be fine. But each one added reduces the headroom available for actual work.

            Recipes are skills that opted out of the attention tax.

            ","path":["Not Everything Is a Skill"],"tags":[]},{"location":"blog/2026-02-08-not-everything-is-a-skill/#what-the-audit-actually-produced","level":2,"title":"What the Audit Actually Produced","text":"

            The audit was not wasted. It was a planning exercise that generated concrete tasks:

            Finding Action 42% of sessions start with memory check Task: /ctx-remember skill (this one is a skill; it is daily) Auto-save stubs are empty Task: enhance /ctx-save with richer summaries 311 raw build-test sequences Task: make check target Stale recall/doc.go lists nonexistent serve Task: fix the doc.go 120 commit sequences disconnected from context Task: /ctx-commit workflow
            • Some findings became skills;
            • Some became Makefile targets;
            • Some became one-line doc fixes.

            The audit did not prescribe the artifact type: The findings did.

            The audit is the input. Skills are one possible output. Not the only one.

            ","path":["Not Everything Is a Skill"],"tags":[]},{"location":"blog/2026-02-08-not-everything-is-a-skill/#the-audit-prompt","level":2,"title":"The Audit Prompt","text":"

            Here is the exact prompt I used, for those who are curious.

            This is not a template: It worked because it was written against this codebase, at this moment, with specific goals in mind:

            I want you to create an agent team to audit this codebase. Save each report as\na separate Markdown file under `./ideas/` (or another directory if you prefer).\n\nUse read-only agents (subagent_type: Explore) for all analyses. No code changes.\n\nFor each report, use this structure:\n- Executive Summary (2-3 sentences + severity table)\n- Findings (grouped, with file:line references)\n- Ranked Recommendations (high/medium/low priority)\n- Methodology (what was examined, how)\n\nKeep reports actionable. Every finding should suggest a concrete fix or next step.\n\n## Analyses to Run\n\n### 1. Extractable Patterns (*session mining*)\nSearch session JSONL files, journal entries, and task archives for repetitive\nmulti-step workflows. Count frequency of bash command sequences, slash command\nusage, and recurring user prompts. Identify patterns that could become skills\nor scripts. Cross-reference with existing skills to find coverage gaps.\nOutput: ranked list of automation opportunities with frequency data.\n\n### 2. Documentation Drift (*godoc + inline*)\nCompare every doc.go against its package's actual exports and behavior. Check\ninline godoc comments on exported functions against their implementations.\nScan for stale TODO/FIXME/HACK comments. Check that package-level comments match\npackage names.\nOutput: drift items ranked by severity with exact file:line references.\n\n### 3. Maintainability\nLook for:\n- functions longer than 80 lines with clear split points\n- switch blocks with more than 5 cases that could be table-driven\n- inline comments like \"step 1\", \"step 2\" that indicate a block wants to be a function\n- files longer than 400 lines\n- flat packages that could benefit from sub-packages\n- functions that appear misplaced in their file\n\nDo NOT flag things that are fine as-is just because they could theoretically\nbe different.\nOutput: concrete refactoring suggestions, not style nitpicks.\n\n### 4. Security Review\nThis is a CLI app. 
Focus on CLI-relevant attack surface, not web OWASP:\n- file path traversal\n- command injection\n- symlink following when writing to `.context/`\n- permission handling\n- sensitive data in outputs\n\nOutput: findings with severity ratings and plausible exploit scenarios.\n\n### 5. Blog Theme Discovery\nRead existing blog posts for style and narrative voice. Analyze git history,\nrecent session discussions, and `DECISIONS.md` for story arcs worth writing about.\nSuggest 3-5 blog post themes with:\n- title\n- angle\n- target audience\n- key commits or sessions to reference\n- a 2-sentence pitch\n\nPrioritize themes that build a coherent narrative across posts.\n\n### 6. Roadmap and Value Opportunities\nBased on current features, recent momentum, and gaps found in other analyses,\nidentify the highest-value improvements. Consider user-facing features,\ndeveloper experience, integration opportunities, and low-hanging fruit.\nOutput: prioritized list with rough effort and impact estimates.\n\n### 7. User-Facing Documentation\nEvaluate README, help text, and user docs. Suggest improvements structured as\nuse-case pages: the problem, how ctx solves it, a typical workflow, and gotchas.\nIdentify gaps where a user would get stuck without reading source code.\nOutput: documentation gaps with suggested page outlines.\n\n### 8. Agent Team Strategies\nBased on the codebase structure, suggest 2-3 agent team configurations for\nupcoming work sessions. For each, include:\n- team composition (roles and agent types)\n- task distribution strategy\n- coordination approach\n- the kinds of work it suits\n

            Avoid Generic Advice

            Suggestions that are not grounded in a project's actual structure, history, and workflows are worse than useless:

            They create false confidence.

            If an analysis cannot point to concrete files, commits, sessions, or patterns, it should say \"no finding\" instead of inventing best practices.

            ","path":["Not Everything Is a Skill"],"tags":[]},{"location":"blog/2026-02-08-not-everything-is-a-skill/#the-deeper-pattern","level":2,"title":"The Deeper Pattern","text":"

            This is part of a pattern I keep rediscovering:

            The urge to automate is not the same as the need to automate:

            • The 3:1 ratio taught me that not every session should be a YOLO sprint.
            • The E/A/R framework taught me that not every template is worth importing. Now the audit is teaching me that not every useful prompt is worth institutionalizing.

            The common thread is restraint:

            • Knowing when to stop.
            • Recognizing that the cost of automation is not just the effort to build it.

            The cost is the ongoing attention tax of maintaining it, the context it consumes, and the false confidence it creates when it drifts.

            An entry in hack/runbooks/codebase-audit.md is honest about what it is:

            A prompt I wrote once, improved once, and will adapt again next time:

            • It does not pretend to be a reliable contract.
            • It does not claim attention budget.
            • It does not drift silently.

            The Automation Instinct

            When you find a useful prompt, the instinct is to institutionalize it. Resist.

            Ask first: will I use this the same way next time?

            If yes, it is a skill. If no, it is a recipe. If you are not sure, it is a recipe until proven otherwise.

            ","path":["Not Everything Is a Skill"],"tags":[]},{"location":"blog/2026-02-08-not-everything-is-a-skill/#this-mindset-in-the-context-of-ctx","level":2,"title":"This Mindset in the Context of ctx","text":"

            ctx is a tool that gives AI agents persistent memory. Its purpose is automation: reducing the friction of context loading, session recall, decision tracking.

            But automation has boundaries, and knowing where those boundaries are is as important as pushing them forward.

            The skills system is for high-frequency, stable workflows.

            The recipes, the journal entries, the session dumps in .context/sessions/: those are for everything else.

            Not everything needs to be a slash command. Some things are better as Markdown files you read when you need them.

            The goal of ctx is not to automate everything: It is to automate the right things and to make the rest easy to find when you need it.

            If You Remember One Thing from This Post...

            The best automation decision is sometimes not to automate.

            A runbook in a Markdown file costs nothing until you use it.

            A skill costs attention on every prompt, whether it fires or not.

            Automate the daily. Document the periodic. Forget the rest.

            This post was written during the session that produced the codebase audit reports and distilled the prompt into hack/runbooks/codebase-audit.md. The audit generated seven tasks, one Makefile target, and zero new skills. The meta continues.

            See also: Code Is Cheap. Judgment Is Not.: the capstone that threads this post's restraint argument into the broader case for why judgment, not production, is the bottleneck.

            ","path":["Not Everything Is a Skill"],"tags":[]},{"location":"blog/2026-02-09-defense-in-depth-securing-ai-agents/","level":1,"title":"Defense in Depth: Securing AI Agents","text":"","path":["Defense in Depth: Securing AI Agents"],"tags":[]},{"location":"blog/2026-02-09-defense-in-depth-securing-ai-agents/#when-markdown-is-not-a-security-boundary","level":2,"title":"When Markdown Is Not a Security Boundary","text":"

            Jose Alekhinne / 2026-02-09

            What Happens When Your AI Agent Runs Overnight and Nobody Is Watching?

            It follows instructions: That is the problem.

            Not because it is malicious. Because it is controllable.

            It follows instructions from context, and context can be poisoned.

            I was writing the autonomous loops recipe for ctx: the guide for running an AI agent in a loop overnight, unattended, working through tasks while you sleep. The original draft had a tip at the bottom:

            Use CONSTITUTION.md for guardrails. Tell the agent \"never delete tests\" and it usually won't.

            Then I read that sentence back and realized: that is wishful thinking.

            ","path":["Defense in Depth: Securing AI Agents"],"tags":[]},{"location":"blog/2026-02-09-defense-in-depth-securing-ai-agents/#the-realization","level":2,"title":"The Realization","text":"

            CONSTITUTION.md is a Markdown file. The agent reads it at session start alongside everything else in .context/. It is one source of instructions in a context window that also contains system prompts, project files, conversation history, tool outputs, and whatever the agent fetched from the internet.

            An attacker who can inject content into any of those sources can redirect the agent's behavior. And \"attacker\" does not always mean a person with malicious intent. It can be:

            Vector Example A dependency A malicious npm package with instructions in its README or error output A URL Documentation page with embedded adversarial instructions A project file A contributor who adds instructions to CLAUDE.md or .cursorrules The agent itself In an autonomous loop, the agent modifies its own config between iterations A command output An error message containing instructions the agent interprets and follows

            That last vector is the one that kept me up at night (literally!):

            In an autonomous loop, the agent modifies files as part of its job.

            If it modifies its own configuration files, the next iteration loads the modified config.

            • No human reviews it.
            • No diff is shown.
            • The agent that starts iteration N+1 is running with rules written by iteration N.

            The agent can rewrite its own guardrails.

            ","path":["Defense in Depth: Securing AI Agents"],"tags":[]},{"location":"blog/2026-02-09-defense-in-depth-securing-ai-agents/#five-layers-each-with-a-hole","level":2,"title":"Five Layers, Each with a Hole","text":"

            That's five nested layers of Swiss cheese. Alone, each of them has large holes. Together, they create a boundary.

            What followed was a week of peeling back assumptions:

            Every defense I examined had a bypass, and the bypass was always the same shape: the defense was enforced at a level the agent could reach.

            ","path":["Defense in Depth: Securing AI Agents"],"tags":[]},{"location":"blog/2026-02-09-defense-in-depth-securing-ai-agents/#layer-1-soft-instructions-probabilistic","level":3,"title":"Layer 1: Soft Instructions (Probabilistic)","text":"

            CONSTITUTION.md, the Agent Playbook, system prompts: These tell the agent what to do.

            The agent usually follows them.

            \"Usually\" is the keyword here.

            The hole: Prompt injection:

            • A sufficiently crafted payload overrides soft instructions.
            • Long context windows dilute attention on rules stated early.
            • Edge cases where instructions are ambiguous get resolved in unpredictable ways.

            Verdict: Necessary. Not sufficient. Good for the common case. Never trust it as a security boundary.

            ","path":["Defense in Depth: Securing AI Agents"],"tags":[]},{"location":"blog/2026-02-09-defense-in-depth-securing-ai-agents/#layer-2-application-controls-deterministic-at-runtime-mutable-across-iterations","level":3,"title":"Layer 2: Application Controls (Deterministic at Runtime, Mutable across Iterations)","text":"

            Permission allowlists in .claude/settings.local.json:

            {\n  \"permissions\": {\n    \"allow\": [\n      \"Bash(make:*)\",\n      \"Bash(go:*)\",\n      \"Bash(git:*)\",\n      \"Read\",\n      \"Write\",\n      \"Edit\"\n    ]\n  }\n}\n

            If rm, curl, sudo, or docker are not in the allowlist, the agent cannot invoke them. This is deterministic.

            The application enforces it regardless of what any prompt says.

            The hole: The agent can modify the allowlist itself:

            • It has Write permission.
            • The allowlist lives in a file.
            • The agent writes to the file.
            • The next iteration loads the modified allowlist.

            The application enforces the rules, but the application reads the rules from files the agent can write.

            Verdict: Strong first layer. Must be combined with self-modification prevention.

            ","path":["Defense in Depth: Securing AI Agents"],"tags":[]},{"location":"blog/2026-02-09-defense-in-depth-securing-ai-agents/#layer-3-os-level-isolation-unbypassable","level":3,"title":"Layer 3: OS-Level Isolation (Unbypassable)","text":"

            This is where the defenses stop having holes in the same shape.

            The operating system enforces access controls that no application-level trick can override. An unprivileged user cannot read files owned by root. A process without CAP_NET_RAW cannot open raw sockets. These are kernel boundaries.

            Control What it stops Dedicated unprivileged user Privilege escalation, sudo, group-based access Filesystem permissions Lateral movement to other projects, system config Immutable config files Self-modification of guardrails between iterations

            Make the agent's instruction files read-only: CLAUDE.md, .claude/settings.local.json, .context/CONSTITUTION.md. Own them as a different user, or mark them immutable with chattr +i on Linux.

            The hole: Actions within the agent's legitimate scope:

            • If the agent has write access to source code (which it needs), it can introduce vulnerabilities in the code itself.
            • You cannot prevent this without removing the agent's ability to do its job.

            Verdict: Essential. This is the layer that makes Layers 1 and 2 trustworthy.

            OS-level isolation does not make the agent safe; it makes the other layers meaningful.

            ","path":["Defense in Depth: Securing AI Agents"],"tags":[]},{"location":"blog/2026-02-09-defense-in-depth-securing-ai-agents/#layer-4-network-controls","level":3,"title":"Layer 4: Network Controls","text":"

            An agent that cannot reach the internet cannot exfiltrate data.

            It also cannot ingest new instructions mid-loop from external documents, error pages, or hostile content.

            # Container with no network\ndocker run --network=none ...\n\n# Or firewall rules allowing only package registries\niptables -A OUTPUT -d registry.npmjs.org -j ACCEPT\niptables -A OUTPUT -d proxy.golang.org -j ACCEPT\niptables -A OUTPUT -j DROP\n
            • If the agent genuinely does not need the network, disable it entirely.
            • If it needs to fetch dependencies, allow specific registries and block everything else.

            The hole: None, if the agent does not need the network.

            The tradeoff is that many real workloads need dependency resolution, so a full airgap requires pre-populated caches.

            ","path":["Defense in Depth: Securing AI Agents"],"tags":[]},{"location":"blog/2026-02-09-defense-in-depth-securing-ai-agents/#layer-5-infrastructure-isolation","level":3,"title":"Layer 5: Infrastructure Isolation","text":"

            The strongest boundary is a separate machine.

            The moment you stop arguing about prompts and start arguing about kernels, you are finally doing security.

            docker run --rm \\\n  --network=none \\\n  --cap-drop=ALL \\\n  --memory=4g \\\n  --cpus=2 \\\n  -v /path/to/project:/workspace \\\n  -w /workspace \\\n  your-dev-image \\\n  ./loop.sh\n

            Never Mount the Docker Socket

            Do not mount /var/run/docker.sock, like, ever.

            An agent with socket access can spawn sibling containers with full host access, effectively escaping the sandbox.

            This is not theoretical: the Docker socket grants root-equivalent access to the host.

            Use rootless Docker or Podman to eliminate this escalation path entirely.

            Virtual machines are even stronger: The guest kernel has no visibility into the host OS. No shared folders, no filesystem passthrough, no SSH keys to other machines.

            ","path":["Defense in Depth: Securing AI Agents"],"tags":[]},{"location":"blog/2026-02-09-defense-in-depth-securing-ai-agents/#the-pattern","level":2,"title":"The Pattern","text":"

            Each layer is straightforward: The strength is in the combination:

            Layer Implementation What it stops Soft instructions CONSTITUTION.md Common mistakes (probabilistic) Application allowlist .claude/settings.local.json Unauthorized commands (deterministic within runtime) Immutable config chattr +i on config files Self-modification between iterations Unprivileged user Dedicated user, no sudo Privilege escalation Container --cap-drop=ALL --network=none Host escape, data exfiltration Resource limits --memory=4g --cpus=2 Resource exhaustion

            No layer is redundant. Each one catches what the others miss:

            • The soft instructions handle the 99% case: \"don't delete tests.\"
            • The allowlist prevents the agent from running commands it should not.
            • The immutable config prevents the agent from modifying the allowlist.
            • The unprivileged user prevents the agent from removing the immutable flag.
            • The container prevents the agent from reaching anything outside its workspace.
            • The resource limits prevent the agent from consuming all system resources.

            Remove any one layer and there is an attack path through the remaining ones.

            ","path":["Defense in Depth: Securing AI Agents"],"tags":[]},{"location":"blog/2026-02-09-defense-in-depth-securing-ai-agents/#common-mistakes-i-see","level":2,"title":"Common Mistakes I See","text":"

            These are real patterns, not hypotheticals:

            \"I'll just use --dangerously-skip-permissions.\" This disables Layer 2 entirely. Without Layers 3 through 5, you have no protection at all. The flag means what it says. If you think you need it, think thrice: you probably don't. But if you ever need to use this, only use it inside a properly isolated VM (not even a container: a \"VM\").

            \"The agent is sandboxed in Docker.\" A Docker container with the Docker socket mounted, running as root, with --privileged, and full network access is not sandboxed. It is a root shell with extra steps.

            \"I reviewed CLAUDE.md, it's fine.\" You reviewed it before the loop started. The agent modified it during iteration 3. Iteration 4 loaded the modified version. Unless the file is immutable, your review is futile.

            \"The agent only has access to this one project.\" Does the project directory contain .env files? SSH keys? API tokens? A .git/config with push access to a remote? Filesystem isolation means isolating what is in the directory too.

            ","path":["Defense in Depth: Securing AI Agents"],"tags":[]},{"location":"blog/2026-02-09-defense-in-depth-securing-ai-agents/#the-connection-to-context-engineering","level":2,"title":"The Connection to Context Engineering","text":"

            This is the same lesson I keep rediscovering, wearing different clothes.

            In The Attention Budget, I wrote about how every token competes for the AI's focus. Security instructions in CONSTITUTION.md are subject to the same budget pressure: if the context window is full of code, error messages, and tool outputs, the security rules stated at the top get diluted.

            In Skills That Fight the Platform, I wrote about how custom instructions can conflict with the AI's built-in behavior. Security rules have the same problem: telling an agent \"never run curl\" in Markdown while giving it unrestricted shell access creates a contradiction: The agent resolves contradictions unpredictably. The agent will often pick the path of least resistance to attain its objective function. And, trust me, agents can get far more creative than the best red-teamer you know.

            In You Can't Import Expertise, I wrote about how generic templates fail because they do not encode project-specific knowledge. Generic security advice fails the same way: \"Don't exfiltrate data\" is a category; blocking outbound network access is a control.

            The pattern across all of these: Soft instructions are useful for the common case. Hard boundaries are required for security.

            Know which is which.

            ","path":["Defense in Depth: Securing AI Agents"],"tags":[]},{"location":"blog/2026-02-09-defense-in-depth-securing-ai-agents/#the-checklist","level":2,"title":"The Checklist","text":"

            Before running an unattended AI agent:

            • Agent runs as a dedicated unprivileged user (no sudo, no docker group)
            • Agent's config files are immutable or owned by a different user
            • Permission allowlist restricts tools to the project's toolchain
            • Container drops all capabilities (--cap-drop=ALL)
            • Docker socket is NOT mounted
            • Network is disabled or restricted to specific domains
            • Resource limits are set (memory, CPU, disk)
            • No SSH keys, API tokens, or credentials are accessible
            • Project directory does not contain .env or secrets files
            • Iteration cap is set (--max-iterations)

            This checklist lives in the Agent Security reference alongside the full threat model and detailed guidance for each layer.

            ","path":["Defense in Depth: Securing AI Agents"],"tags":[]},{"location":"blog/2026-02-09-defense-in-depth-securing-ai-agents/#what-changed-in-ctx","level":2,"title":"What Changed in ctx","text":"

            The autonomous loops recipe now has a full permissions and isolation section instead of a one-line tip about CONSTITUTION.md. It covers both the explicit allowlist approach and the --dangerously-skip-permissions flag, with honest guidance about when each is appropriate.

            It also has an OS-level isolation table that is not optional: unprivileged users, filesystem permissions, containers, VMs, network controls, resource limits, and self-modification prevention.

            The Agent Security page consolidates the threat model and defense layers into a standalone reference.

            These are not theoretical improvements. They are the minimum responsible guidance for a tool that helps people run AI agents overnight.

            If You Remember One Thing from This Post...

            Markdown is not a security boundary.

            CONSTITUTION.md is a nudge. An allowlist is a gate.

            An unprivileged user in a network-isolated container is a wall.

            Use all three. Trust only the wall.

            This post was written during the session that added permissions, isolation, and self-modification prevention to the autonomous loops recipe. The security guidance started as a single tip and grew into two documents. The meta continues.

            ","path":["Defense in Depth: Securing AI Agents"],"tags":[]},{"location":"blog/2026-02-12-how-deep-is-too-deep/","level":1,"title":"How Deep Is Too Deep?","text":"","path":["How Deep Is Too Deep?"],"tags":[]},{"location":"blog/2026-02-12-how-deep-is-too-deep/#when-master-ml-is-the-wrong-next-step","level":2,"title":"When \"Master ML\" Is the Wrong Next Step","text":"

            Jose Alekhinne / 2026-02-12

            Have You Ever Felt like You Should Understand More of the Stack beneath You?

            You can talk about transformers at a whiteboard.

            You can explain attention to a colleague.

            You can use agentic AI to ship real software.

            But somewhere in the back of your mind, there is a voice:

            \"Maybe I should go deeper. Maybe I need to master machine learning.\"

            I had that voice for months.

            Then I spent a week debugging an agent failure that had nothing to do with ML theory and everything to do with knowing which abstraction was leaking.

            This post is about when depth compounds and (more importantly) when it does not.

            ","path":["How Deep Is Too Deep?"],"tags":[]},{"location":"blog/2026-02-12-how-deep-is-too-deep/#the-hierarchy-nobody-questions","level":2,"title":"The Hierarchy Nobody Questions","text":"

            There is an implicit stack most people carry around when thinking about AI:

            Layer What Lives Here Agentic AI Autonomous loops, tool use, multi-step reasoning Generative AI Text, image, code generation Deep Learning Transformer architectures, training at scale Neural Networks Backpropagation, gradient descent Machine Learning Statistical learning, optimization Classical AI Search, planning, symbolic reasoning

            At some point down that stack, you hit a comfortable plateau: the layer where you can hold a conversation but not debug a failure.

            The instinctive response is to go deeper.

            But that instinct hides a more important question:

            \"Does depth still compound when the abstractions above you are moving hyper-exponentially?\"

            ","path":["How Deep Is Too Deep?"],"tags":[]},{"location":"blog/2026-02-12-how-deep-is-too-deep/#the-honest-observation","level":2,"title":"The Honest Observation","text":"

            If you squint hard enough, a large chunk of modern ML intuition collapses into older fields:

            ML Concept Older Field Gradient descent Numerical optimization Backpropagation Reverse-mode autodiff Loss landscapes Non-convex optimization Generalization Statistics Scaling laws Asymptotics and information theory

            Nothing here is uniquely \"AI\".

            Most of this math predates the term deep learning. In some cases, by decades.

            So what changed?

            ","path":["How Deep Is Too Deep?"],"tags":[]},{"location":"blog/2026-02-12-how-deep-is-too-deep/#same-tools-different-regime","level":2,"title":"Same Tools, Different Regime","text":"

            The mistake is assuming this is a new theory problem: It is not.

            It is a new operating regime.

            Classical numerical methods were developed under assumptions like:

            • Manageable dimensionality
            • Reasonably well-conditioned objectives
            • Losses that actually represent the goal

            Modern ML violates all three: On purpose.

            Today's models operate with millions to trillions of parameters, wildly underdetermined systems, and objective functions we know are wrong but optimize anyway.

            It is complete and utter madness!

            At this scale, familiar concepts warp:

            • What we call \"local minima\" are overwhelmingly saddle points in high-dimensional spaces.
            • Noise stops being noise and starts becoming structure.
            • Overfitting can coexist with generalization.
            • Bigger models outperform \"better\" ones.

            The math did not change: The phase did.

            This is less numerical analysis and more statistical physics: Same equations, but behavior dominated by phase transitions and emergent structure.

            ","path":["How Deep Is Too Deep?"],"tags":[]},{"location":"blog/2026-02-12-how-deep-is-too-deep/#why-scaling-laws-feel-alien","level":2,"title":"Why Scaling Laws Feel Alien","text":"

            In classical statistics, asymptotics describe what happens eventually.

            In modern ML, scaling laws describe where you can operate today.

            They do not say \"given enough time, things converge\".

            They say \"cross this threshold and behavior qualitatively changes\".

            This is why dumb architectures plus scale beat clever ones.

            Why small theoretical gains disappear under data.

            Why \"just make it bigger\", ironically, keeps working longer than it should.

            That is not a triumph of ML theory: It is a property of high-dimensional systems under loose objectives.

            ","path":["How Deep Is Too Deep?"],"tags":[]},{"location":"blog/2026-02-12-how-deep-is-too-deep/#where-depth-actually-pays-off","level":2,"title":"Where Depth Actually Pays Off","text":"

            This reframes the original question.

            You do not need depth because this is \"AI\".

            You need depth where failure modes propagate upward.

            I learned this building ctx: The agent failures I have spent the most time debugging were never about the model's architecture.

            They were about:

            • Misplaced trust: The model was confident. The output was wrong. Knowing when confidence and correctness diverge is not something you learn from a textbook. You learn it from watching patterns across hundreds of sessions.

            • Distribution shift: The model performed well on common patterns and fell apart on edge cases specific to this project. Recognizing that shift before it compounds requires understanding why generalization has limits, not just that it does.

            • Error accumulation: In a single prompt, model quirks are tolerable. In autonomous loops running overnight, they compound. A small bias in how the model interprets instructions becomes a large drift by iteration 20.

            • Scale hiding errors: The model's raw capability masked problems that only surfaced under specific conditions. More parameters did not fix the issue. They just made the failure mode rarer and harder to reproduce.

            This is the kind of depth that compounds. Not deriving backprop. But, understanding when correct math produces misleading intuition.

            ","path":["How Deep Is Too Deep?"],"tags":[]},{"location":"blog/2026-02-12-how-deep-is-too-deep/#the-connection-to-context-engineering","level":2,"title":"The Connection to Context Engineering","text":"

            This is the same pattern I keep finding at different altitudes.

            In \"The Attention Budget\", I wrote about how dumping everything into the context window degrades the model's focus. The fix was not a better model: It was better curation: load less, load the right things, preserve signal per token.

            In \"Skills That Fight the Platform\", I wrote about how custom instructions can conflict with the model's built-in behavior. The fix was not deeper ML knowledge: It was an understanding that the model already has judgment and that you should extend it, not override it.

            In \"You Can't Import Expertise\", I wrote about how generic templates fail because they do not encode project-specific knowledge. A consolidation skill with eight Rust-based analysis dimensions was mostly noise for a Go project. The fix was not a better template: It was growing expertise from this project's own history.

            In every case, the answer was not \"go deeper into ML\".

            The answer was knowing which abstraction was leaking and fixing it at the right layer.

            ","path":["How Deep Is Too Deep?"],"tags":[]},{"location":"blog/2026-02-12-how-deep-is-too-deep/#agentic-systems-are-not-an-ml-problem","level":2,"title":"Agentic Systems Are Not an ML Problem","text":"

            The mistake is assuming agent failures originate where the model was trained, rather than where it is deployed.

            Agentic AI is a systems problem under chaotic uncertainty:

            • Feedback loops between the agent and its environment;
            • Error accumulation across iterations;
            • Brittle representations that break outside training distribution;
            • Misplaced trust in outputs that look correct.

            In short-lived interactions, model quirks are tolerable. In long-running autonomous loops, however, they compound.

            That is where shallow understanding becomes expensive.

            But the understanding you need is not about optimizer internals.

            It is about:

            What Matters What Does Not (for Most Practitioners) Why gradient descent fails in specific regimes How to derive it from scratch When memorization masquerades as reasoning The formal definition of VC dimension Recognizing distribution shift before it compounds Hand-tuning learning rate schedules Predicting when scale hides errors instead of fixing them Chasing theoretical purity divorced from practice

            The depth that matters is diagnostic, not theoretical.

            ","path":["How Deep Is Too Deep?"],"tags":[]},{"location":"blog/2026-02-12-how-deep-is-too-deep/#the-real-answer","level":2,"title":"The Real Answer","text":"

            Not turtles all the way down.

            Go deep enough to:

            • Diagnose failures instead of cargo-culting fixes;
            • Reason about uncertainty instead of trusting confidence;
            • Design guardrails that align with model behavior, not hope.

            Stop before:

            • Hand-deriving gradients for the sake of it;
            • Obsessing over optimizer internals you will never touch;
            • Chasing theoretical purity divorced from the scale you actually operate at.

            This is not about mastering ML.

            It is about knowing which abstractions you can safely trust and which ones leak.

            Hint: Any useful abstraction almost certainly leaks.

            ","path":["How Deep Is Too Deep?"],"tags":[]},{"location":"blog/2026-02-12-how-deep-is-too-deep/#a-practical-litmus-test","level":2,"title":"A Practical Litmus Test","text":"

            If a failure occurs and your instinct is to:

            • Add more prompt text: abstraction leak above
            • Add retries or heuristics: error accumulation
            • Change the model: scale masking
            • Reach for ML theory: you are probably (but not always) going too deep

            The right depth is the shallowest layer where the failure becomes predictable.

            ","path":["How Deep Is Too Deep?"],"tags":[]},{"location":"blog/2026-02-12-how-deep-is-too-deep/#the-ctx-lesson","level":2,"title":"The ctx Lesson","text":"

            Every design decision in ctx is downstream of this principle.

            The attention budget exists because the model's internal attention mechanism has real limits: You do not need to understand the math of softmax to build around it. But you do need to understand that more context is not always better and that attention density degrades with scale.

            The skill system exists because the model's built-in behavior is already good: You do not need to understand RLHF to build effective skills. But you do need to understand that the model already has judgment and your skills should teach it things it does not know, not override how it thinks.

            Defense in depth exists because soft instructions are probabilistic: You do not need to understand the transformer architecture to know that a Markdown file is not a security boundary. But you do need to understand that the model follows instructions from context, and context can be poisoned.

            In each case, the useful depth was one or two layers below the abstraction I was working at: Not at the bottom of the stack.

            The boundary between useful understanding and academic exercise is where your failure modes live.

            ","path":["How Deep Is Too Deep?"],"tags":[]},{"location":"blog/2026-02-12-how-deep-is-too-deep/#closing-thought","level":2,"title":"Closing Thought","text":"

            Most modern AI systems do not fail because the math is wrong.

            They fail because we apply correct math in the wrong regime, then build autonomous systems on top of it.

            Understanding that boundary, not crossing it blindly, is where depth still compounds.

            And that is a far more useful form of expertise than memorizing another loss function.

            If You Remember One Thing from This Post...

            Go deep enough to diagnose your failures. Stop before you are solving problems that do not propagate to your layer.

            The abstractions below you are not sacred. But neither are they irrelevant.

            The useful depth is wherever your failure modes live. Usually one or two layers down, not at the bottom.

            This post started as a note about whether I should take an ML course. The answer turned out to be \"no, but understand why not\". The meta continues.

            ","path":["How Deep Is Too Deep?"],"tags":[]},{"location":"blog/2026-02-14-irc-as-context/","level":1,"title":"Before Context Windows, We Had Bouncers","text":"","path":["Before Context Windows, We Had Bouncers"],"tags":[]},{"location":"blog/2026-02-14-irc-as-context/#the-reset-problem","level":2,"title":"The Reset Problem","text":"

            IRC is stateless.

            • You disconnect, you vanish.
            • You reconnect, you begin again.

            No buffer.

            No memory.

            No continuity.

            Modern systems are not much different:

            • Close the browser tab.
              • Lose the Slack scrollback.
            • Open a new LLM session.
              • Start from zero.

            Resets externalize reconstruction cost onto humans.

            Reconstruction is tax: Tax becomes entropy.

            ","path":["Before Context Windows, We Had Bouncers"],"tags":[]},{"location":"blog/2026-02-14-irc-as-context/#stateless-protocol-stateful-life","level":2,"title":"Stateless Protocol, Stateful Life","text":"

            IRC is minimal:

            • A TCP connection.
            • A nickname.
            • A channel.
            • A stream of lines.

            When the connection drops, you literally disappear from the graph.

            The protocol is stateless; human systems are not.

            So you:

            • Reconnect;
            • Ask what you missed;
            • Scroll;
            • Reconstruct.

            The machine forgets; you pay.

            ","path":["Before Context Windows, We Had Bouncers"],"tags":[]},{"location":"blog/2026-02-14-irc-as-context/#the-bouncer-pattern","level":2,"title":"The Bouncer Pattern","text":"

            A bouncer is a daemon that remains connected when you do not:

            • It holds your seat;
            • It buffers what you missed;
            • It keeps your identity online.

            ZNC is one such bouncer.

            With ZNC:

            • Your client does not connect to IRC;
            • It connects to ZNC;
            • ZNC connects upstream.

            Client sessions become ephemeral.

            Presence becomes infrastructural.

            ZNC Is Tmux for IRC

            • Close your laptop.

              • ZNC remains.
            • Switch devices.

              • ZNC persists.

            This is not convenience; this is continuity.

            ","path":["Before Context Windows, We Had Bouncers"],"tags":[]},{"location":"blog/2026-02-14-irc-as-context/#presence-without-flapping","level":2,"title":"Presence without Flapping","text":"

            With a bouncer:

            • Closing your client does not emit PART.
            • Reopening does not emit JOIN.

            You do not flap in and out of existence.

            From the channel's perspective, you remain.

            From your perspective, history accumulates.

            • Buffers persist;
            • Identity persists;
            • Context persists.

            This pattern predates AI.

            ","path":["Before Context Windows, We Had Bouncers"],"tags":[]},{"location":"blog/2026-02-14-irc-as-context/#before-llm-context-windows","level":2,"title":"Before LLM Context Windows","text":"

            An LLM session without memory is IRC without a bouncer:

            • Close the window.
            • Start over.
            • Re-explain intent.
            • Rehydrate context.

            That is friction.

            This Walks and Talks like ctx

            Context engineering moves memory out of sessions and into infrastructure.

            • ZNC does this for IRC.
            • ctx does this for agents.

            Same principle:

            • Volatile interface.
            • Persistent substrate.

            Different fabric.

            ","path":["Before Context Windows, We Had Bouncers"],"tags":[]},{"location":"blog/2026-02-14-irc-as-context/#minimal-architecture","level":2,"title":"Minimal Architecture","text":"

            My setup is intentionally boring:

            • A $5 small VPS.
            • ZNC installed.
            • TLS enabled.
            • Firewall restricted.

            Then:

            • ZNC connects to Libera.Chat.
            • SASL authentication lives inside ZNC.
            • Buffers are stored on disk.

            My client connects to my VPS, not the network.

            The commands do not matter: The boundaries do:

            • Authentication in infrastructure, not in the client;
            • Memory server-side, not in scrollback;
            • Presence decoupled from activity.

            Everything else is configuration.

            ","path":["Before Context Windows, We Had Bouncers"],"tags":[]},{"location":"blog/2026-02-14-irc-as-context/#platform-memory","level":2,"title":"Platform Memory","text":"

            Yes, I know, it is 2026:

            • Discord stores history;
            • Slack stores history;
            • The dumpster fire on gasoline called X, too, stores history.

            HOWEVER, they own your substrate.

            Running a bouncer is quiet sovereignty:

            • Logs are mine.
            • Presence is continuous.
            • State does not reset because I closed a tab.

            Small acts compound.

            ","path":["Before Context Windows, We Had Bouncers"],"tags":[]},{"location":"blog/2026-02-14-irc-as-context/#signal-density","level":2,"title":"Signal Density","text":"

            Primitive systems select for builders.

            Consistent presence in small rooms compounds reputation.

            Quiet compounding outperforms viral spikes.

            ","path":["Before Context Windows, We Had Bouncers"],"tags":[]},{"location":"blog/2026-02-14-irc-as-context/#infrastructure-as-cognition","level":2,"title":"Infrastructure as Cognition","text":"

            ZNC is not interesting because it is retro; it is interesting because it models a principle:

            • Stateless protocols require stateful wrappers;
            • Volatile interfaces require durable memory;
            • Human systems require continuity.

            Distilled:

            Humans require context.

            Before context windows, we had bouncers.

            Before AI memory files, we had buffers.

            Continuity is not a feature; it is a design decision.

            ","path":["Before Context Windows, We Had Bouncers"],"tags":[]},{"location":"blog/2026-02-14-irc-as-context/#build-it","level":2,"title":"Build It","text":"

            If you want the actual setup (VPS, ZNC, TLS, SASL, firewall...) there is a step-by-step runbook:

            Persistent IRC Presence with ZNC.

            ","path":["Before Context Windows, We Had Bouncers"],"tags":[]},{"location":"blog/2026-02-14-irc-as-context/#motd","level":2,"title":"MOTD","text":"

            When my client connects to my bouncer, it prints:

            //   /    ctx:                         https://ctx.ist\n// ,'`./    do you remember?\n// `.,'\\\n//   \\    Copyright 2026-present Context contributors.\n//                 SPDX-License-Identifier: Apache-2.0\n

            See also: Context as Infrastructure -- the post that takes this observation to its conclusion: stateless protocols need stateful wrappers, and AI sessions need persistent filesystems.

            ","path":["Before Context Windows, We Had Bouncers"],"tags":[]},{"location":"blog/2026-02-14-parallel-agents-with-worktrees/","level":1,"title":"Parallel Agents with Git Worktrees","text":"","path":["Parallel Agents with Git Worktrees"],"tags":[]},{"location":"blog/2026-02-14-parallel-agents-with-worktrees/#the-backlog-problem","level":2,"title":"The Backlog Problem","text":"

            Jose Alekhinne / 2026-02-14

            What Do You Do with 30 Open Tasks?

            You could work through them one at a time.

            One agent, one branch, one commit stream.

            Or you could ask: which of these don't touch each other?

            I had 30 open tasks in TASKS.md. Some were docs. Some were a new encryption package. Some were test coverage for a stable module. Some were blog posts.

            They had almost zero file overlap.

            Running one agent at a time meant serial execution on work that was fundamentally parallel:

            I was bottlenecking on me, not on the machine.

            ","path":["Parallel Agents with Git Worktrees"],"tags":[]},{"location":"blog/2026-02-14-parallel-agents-with-worktrees/#the-insight-file-overlap-is-the-constraint","level":2,"title":"The Insight: File Overlap Is the Constraint","text":"

            This is not a scheduling problem: It's a conflict avoidance problem.

            Two agents can work simultaneously on the same codebase if and only if they don't touch the same files. The moment they do, you get merge conflicts: And merge conflicts on AI-generated code are expensive because the human has to arbitrate choices they didn't make.

            So the question becomes:

            \"Can you partition your backlog into non-overlapping tracks?\"

            For ctx, the answer was obvious:

            Track Touches Tasks work/docs docs/, hack/ Blog posts, recipes, runbooks work/pad internal/cli/pad/, specs Scratchpad encryption, CLI, tests work/tests internal/cli/recall/ Recall test coverage

            Three tracks. Near-zero overlap. Three agents.

            ","path":["Parallel Agents with Git Worktrees"],"tags":[]},{"location":"blog/2026-02-14-parallel-agents-with-worktrees/#git-worktrees-the-mechanism","level":2,"title":"Git Worktrees: The Mechanism","text":"

            git has a feature that most people don't use: worktrees.

            A worktree is a second (or third, or fourth) working directory that shares the same .git object database as your main checkout.

            Each worktree has its own branch, its own index, its own working tree. But they all share history, refs, and objects.

            git worktree add ../ctx-docs -b work/docs\ngit worktree add ../ctx-pad -b work/pad\ngit worktree add ../ctx-tests -b work/tests\n
            • Three directories;
            • Three branches;
            • One repository.

            This is cheaper than three clones. And because they share objects, git merge afterwards is fast: It's a local operation on shared data.

            ","path":["Parallel Agents with Git Worktrees"],"tags":[]},{"location":"blog/2026-02-14-parallel-agents-with-worktrees/#the-setup","level":2,"title":"The Setup","text":"

            The workflow I landed on:

            1. Group tasks by blast radius.

            Read TASKS.md. For each pending task, estimate which files and directories it touches. Group tasks that share files into the same track. Tasks with no overlap go into separate tracks.

            This is the part that requires human judgment:

            An agent can propose groupings, but you need to verify that the boundaries are real. A task that says \"update docs\" but actually touches Go code will poison a docs track.

            2. Create worktrees as sibling directories.

            Not subdirectories: Siblings.

            If your main checkout is at ~/WORKSPACE/ctx, worktrees go at ~/WORKSPACE/ctx-docs, ~/WORKSPACE/ctx-pad, etc.

            Why siblings? Because some tools (and some agents) walk up the directory tree looking for .git. A worktree inside the main checkout confuses them.

            3. Launch one agent per worktree.

            # Terminal 1\ncd ../ctx-docs && claude\n\n# Terminal 2\ncd ../ctx-pad && claude\n\n# Terminal 3\ncd ../ctx-tests && claude\n

            Each agent gets a full working copy with .context/ intact. It reads the same TASKS.md, the same DECISIONS.md, the same CONVENTIONS.md. It knows the full project state. It just works on a different slice.

            4. Do NOT run ctx init in worktrees.

            This is the gotcha. The .context/ directory is tracked in git. Running ctx init in a worktree would overwrite shared context files: Wiping decisions, learnings, and tasks that belong to the whole project.

            The worktree already has everything it needs. Leave it alone.

            ","path":["Parallel Agents with Git Worktrees"],"tags":[]},{"location":"blog/2026-02-14-parallel-agents-with-worktrees/#what-actually-happened","level":2,"title":"What Actually Happened","text":"

            I ran three agents for about 40 minutes. Here is roughly what each track produced:

            work/docs: Parallel worktrees recipe, blog post edits, recipe index reorganization, IRC recipe moved from docs/ to hack/.

            work/pad: ctx pad show subcommand, --append and --prepend flags on ctx pad edit, spec updates, 28 new test functions.

            work/tests: Recall test coverage, edge case tests.

            Merging took about five minutes. Two of the three merges were clean.

            The third had a conflict in TASKS.md:

            both the docs track and the pad track had marked different tasks as [x].

            ","path":["Parallel Agents with Git Worktrees"],"tags":[]},{"location":"blog/2026-02-14-parallel-agents-with-worktrees/#the-tasksmd-conflict","level":2,"title":"The TASKS.md Conflict","text":"

            This deserves its own section because it will happen every time.

            When two agents work in parallel, they both read TASKS.md at the start and mark tasks complete as they go. When you merge, git sees two branches that modified the same file differently.

            The resolution is always the same: accept all completions from both sides. No task should go from [x] back to [ ]. The merge is additive.

            This is one of those conflicts that sounds scary but is trivially mechanical: You are not arbitrating design decisions; you are combining two checklists.

            ","path":["Parallel Agents with Git Worktrees"],"tags":[]},{"location":"blog/2026-02-14-parallel-agents-with-worktrees/#limits","level":2,"title":"Limits","text":"

            3-4 worktrees, maximum.

            I tried four once: By the time I merged the third track, the fourth had drifted far enough that its changes needed rebasing.

            The merge complexity grows faster than the parallelism benefit.

            Three is the sweet spot:

            • Two is conservative but safe;
            • Four is possible if the tracks are truly independent;
            • Anything more than four, you are in the danger zone.

            Group by directory, not by priority.

            It is tempting to put all the high-priority tasks in one track: Don't.

            Two high-priority tasks that touch the same files must be in the same track, regardless of urgency. The constraint is file overlap, not importance.

            Commit frequently.

            Smaller commits make merge conflicts easier to resolve. An agent that writes 500 lines in a single commit is harder to merge than one that commits every logical step.

            Name tracks by concern.

            • work/docs and work/pad tell you what's happening;
            • work/track-1 and work/track-2 tell you nothing.
            ","path":["Parallel Agents with Git Worktrees"],"tags":[]},{"location":"blog/2026-02-14-parallel-agents-with-worktrees/#the-pattern","level":2,"title":"The Pattern","text":"

            This is the same pattern that shows up everywhere in ctx:

            The attention budget taught me that you can't dump everything into one context window. You have to partition, prioritize, and load selectively.

            Worktrees are the same principle applied to execution: You can't dump every task into one agent's workstream. You have to partition by blast radius, assign selectively, and merge deliberately.

            The codebase audit that generated these 30 tasks used eight parallel agents for analysis. Worktrees let me use parallel agents for implementation. Same coordination pattern, different artifact.

            And the IRC bouncer post from earlier today argued that stateless protocols need stateful wrappers. Worktrees are the same: git branches are stateless forks; .context/ is the stateful wrapper that gives each agent the project's full memory.

            ","path":["Parallel Agents with Git Worktrees"],"tags":[]},{"location":"blog/2026-02-14-parallel-agents-with-worktrees/#should-this-be-a-skill","level":2,"title":"Should This Be a Skill?","text":"

            I asked myself the same question I asked about the codebase audit: should this be a /ctx-worktree skill?

            This time the answer was a resounding \"yes\":

            Unlike the audit prompt (which I tweak every time and run every other week), the worktree workflow is:

            Criterion Worktree workflow Codebase audit Frequency Weekly Quarterly Stability Same steps every time Tweaked every time Scope Mechanical, bounded Bespoke, 8 agents Trigger Large backlog \"I feel like auditing\"

            The commands are mechanical: git worktree add, git worktree remove, branch naming, safety checks. This is exactly what skills are for: stable contracts for repetitive operations.

            Ergo, /ctx-worktree exists.

            It enforces the 4-worktree limit, creates sibling directories, uses work/ branch prefixes, and reminds you not to run ctx init in worktrees.

            ","path":["Parallel Agents with Git Worktrees"],"tags":[]},{"location":"blog/2026-02-14-parallel-agents-with-worktrees/#the-takeaway","level":2,"title":"The Takeaway","text":"

            Serial execution is the default. But serial is not always necessary.

            If your backlog partitions cleanly by file overlap, you can multiply your throughput with nothing more exotic than git worktree and a second terminal window.

            The hard part is not the git commands; it is the discipline:

            • Grouping by blast radius instead of priority;
            • Accepting that TASKS.md will conflict;
            • And knowing when three tracks is enough.

            If You Remember One Thing from This Post...

            Partition by blast radius, not by priority.

            Two tasks that touch the same files belong in the same track, no matter how important the other one is.

            The constraint is file overlap. Everything else is scheduling.

            The practical setup (skill invocation, worktree creation, merge workflow, and cleanup) lives in the recipe: Parallel Agent Development with Git Worktrees.

            ","path":["Parallel Agents with Git Worktrees"],"tags":[]},{"location":"blog/2026-02-15-ctx-v0.3.0-the-discipline-release/","level":1,"title":"ctx v0.3.0: The Discipline Release","text":"","path":["ctx v0.3.0: The Discipline Release"],"tags":[]},{"location":"blog/2026-02-15-ctx-v0.3.0-the-discipline-release/#when-the-ratio-of-polish-to-features-is-31-you-know-something-changed","level":2,"title":"When the Ratio of Polish to Features Is 3:1, You Know Something Changed","text":"

            Jose Alekhinne / February 15, 2026

            What Does a Release Look like When Most of the Work Is Invisible?

            No new headline feature. No architectural pivot. No rewrite.

            Just 35+ documentation and quality commits against ~15 feature commits... and somehow, the tool feels like it grew up overnight.

            Six days separate v0.2.0 from v0.3.0.

            Measured by calendar time, it is nothing. Measured by what changed in how the project operates, it is the most significant release yet.

            • v0.1.0 was the prototype;
            • v0.2.0 was the archaeology release: making the past accessible;
            • v0.3.0 is the discipline release: the one that turned best practices into enforcement, suggestions into structure, and a collection of commands into a system of skills.

            The Release Window

            February 1‒February 7, 2026

            From the v0.2.0 tag to commit 2227f99.

            78 files changed in the migration commit alone.

            ","path":["ctx v0.3.0: The Discipline Release"],"tags":[]},{"location":"blog/2026-02-15-ctx-v0.3.0-the-discipline-release/#the-migration-commands-to-skills","level":2,"title":"The Migration: Commands to Skills","text":"

            The largest single change was the migration from .claude/commands/*.md to .claude/skills/*/SKILL.md.

            This was not a rename: It was a rethinking of how AI agents discover and execute project-specific workflows.

            Aspect Commands (before) Skills (after) Structure Flat files in one directory Directory-per-skill with SKILL.md Description Optional, often vague Required, doubles as activation trigger Quality gates None \"Before X-ing\" pre-flight checklist Negative triggers None \"When NOT to Use\" in every skill Examples Rare Good/bad pairs in every skill Average length ~15 lines ~80 lines

            The description field became the single most important line in each skill. In the old system, descriptions were titles. In the new system, they are activation conditions: The text the platform reads to decide whether to surface a skill for a given prompt.

            A description that says \"Show context summary\" activates too broadly or not at all. A description that says \"Show context summary. Use at session start or when unclear about current project state\" activates at the right moment.

            78 files changed. 1,915 insertions. Not because the skills got bloated; because they got specific.

            ","path":["ctx v0.3.0: The Discipline Release"],"tags":[]},{"location":"blog/2026-02-15-ctx-v0.3.0-the-discipline-release/#the-skill-sweep","level":2,"title":"The Skill Sweep","text":"

            After the structural migration, every skill was rewritten in a single session: All 21 of them.

            The rewrite was guided by a pattern that emerged during the process itself: a repeatable anatomy that effective skills share regardless of their purpose:

            1. Before X-ing: Pre-flight checks that prevent premature execution
            2. When to Use: Positive triggers that narrow activation
            3. When NOT to Use: Negative triggers that prevent misuse
            4. Usage Examples: Invocation patterns the agent can pattern-match
            5. Quality Checklist: Verification before claiming completion

            The Anatomy of a Skill That Works post covers the details. What matters for the release story is the result:

            • Zero skills with quality gates became twenty;
            • Zero skills with negative triggers became twenty;
            • Three skills with examples became twenty.

            The Skill Trilogy as Design Spec

            The three blog posts written during this window:

            • Skills That Fight the Platform,
            • You Can't Import Expertise,
            • and The Anatomy of a Skill That Works...

            ... were not retrospective documentation. They were written during the rewrite, and the lessons fed back into the skills as they were being built.

            • The blog was the design document.
            • The skills were the implementation.
            ","path":["ctx v0.3.0: The Discipline Release"],"tags":[]},{"location":"blog/2026-02-15-ctx-v0.3.0-the-discipline-release/#the-consolidation-sweep","level":2,"title":"The Consolidation Sweep","text":"

            The unglamorous work. The kind you only appreciate when you try to change something later and it just works.

            What Why It Matters Constants consolidation Magic strings replaced with semantic constants Variable deshadowing Eliminated subtle scoping bugs File splits Modules that were doing too much, broken apart Godoc standardization Every exported function documented to convention

            This is the work that doesn't get a changelog entry but makes every future commit easier. When a new contributor (human or AI) reads the codebase, they find consistent patterns instead of accumulated drift.

            The consolidation was not an afterthought. It was scheduled deliberately, with the same priority as features: The 3:1 ratio that emerged during v0.2.0 development became an explicit practice:

            • Three feature sessions;
            • One consolidation session.
            ","path":["ctx v0.3.0: The Discipline Release"],"tags":[]},{"location":"blog/2026-02-15-ctx-v0.3.0-the-discipline-release/#the-ear-framework","level":2,"title":"The E/A/R Framework","text":"

            On February 4th, we adopted the E/A/R classification as the official standard for evaluating skills:

            Category Meaning Target Expert Knowledge Claude does not have >70% Activation When/how to trigger ~20% Redundant What Claude already knows <10%

            This came from reviewing approximately 30 external skill files and discovering that most were redundant with Claude's built-in system prompt. Only about 20% had salvageable content, and even those yielded just a few heuristics each.

            The E/A/R framework gave us a concrete, testable criterion:

            A good skill is Expert knowledge minus what Claude already knows.

            If more than 10% of a skill restates platform defaults, it is creating noise, not signal.

            Every skill in v0.3.0 was evaluated against this framework. Several were deleted. The survivors are leaner and more focused.

            ","path":["ctx v0.3.0: The Discipline Release"],"tags":[]},{"location":"blog/2026-02-15-ctx-v0.3.0-the-discipline-release/#backup-and-monitoring-infrastructure","level":2,"title":"Backup and Monitoring Infrastructure","text":"

            A tool that manages your project's memory needs ops maturity.

            v0.3.0 added two pieces of infrastructure that reflect this:

            Backup staleness hook: A UserPromptSubmit hook that checks whether the last .context/ backup is more than two days old. If it is, and the SMB mount is available, it reminds the user. No cron job running when nobody is working. No redundant backups when nothing has changed.

            Context size checkpoint: A PreToolUse hook that estimates current context window usage and warns when the session is getting heavy. This hooks into the attention budget philosophy: Degradation is expected, but it should be visible.

            Both hooks use $CLAUDE_PROJECT_DIR instead of hardcoded paths, a migration triggered by a username rename that broke every absolute path in the hook configuration. That migration (replacing /home/user/... with \"$CLAUDE_PROJECT_DIR\"/.claude/hooks/...) was one of those changes that seems trivial but prevents an entire category of future failures.

            ","path":["ctx v0.3.0: The Discipline Release"],"tags":[]},{"location":"blog/2026-02-15-ctx-v0.3.0-the-discipline-release/#the-numbers","level":2,"title":"The Numbers","text":"Metric v0.2.0 v0.3.0 Skills (was \"commands\") 11 21 Skills with quality gates 0 21 Skills with \"When NOT to Use\" 0 21 Average skill body ~15 lines ~80 lines Hooks using $CLAUDE_PROJECT_DIR 0 All Documentation commits n/a 35+ Feature/fix commits n/a ~15

            That ratio (35+ documentation and quality commits to ~15 feature commits) is the defining characteristic of this release:

            • This release is not a failure to ship features.
            • It is the deliberate choice to make the existing features reliable.
            ","path":["ctx v0.3.0: The Discipline Release"],"tags":[]},{"location":"blog/2026-02-15-ctx-v0.3.0-the-discipline-release/#what-v030-means","level":2,"title":"What v0.3.0 Means","text":"

            v0.1.0 asked: \"Can we give AI persistent memory?\"

            v0.2.0 asked: \"Can we make that memory accessible to humans too?\"

            v0.3.0 asks a different question: \"Can we make the quality self-enforcing?\"

            The answer is not a feature: It is a practice:

            • Skills with quality gates enforce pre-flight checks.
            • Negative triggers prevent misuse without human intervention.
            • The E/A/R framework ensures skills contain signal, not noise.
            • Consolidation sessions are scheduled, not improvised.
            • Hook infrastructure makes degradation visible.

            Discipline is not the absence of velocity. It is the infrastructure that makes velocity sustainable.

            ","path":["ctx v0.3.0: The Discipline Release"],"tags":[]},{"location":"blog/2026-02-15-ctx-v0.3.0-the-discipline-release/#what-comes-next","level":2,"title":"What Comes Next","text":"

            The skill system is now mature enough to support real workflows without constant human correction. The hooks infrastructure is portable and resilient. The consolidation practice is documented and repeatable.

            The next chapter is about what you build on top of discipline:

            • Multi-agent coordination;
            • Deeper integration patterns;
            • And the question of whether context management is a tool concern or an infrastructure concern.

            But those are future posts.

            This one is about the release that proved polish is not the opposite of progress. It is what turns a prototype into a product.

            The Discipline Release

            v0.1.0 shipped features.

            v0.2.0 shipped archaeology.

            v0.3.0 shipped the habits that make everything else trustworthy.

            The most important code in this release is the code that prevents bad code from shipping.

            This post was drafted using /ctx-blog with access to the full git history between v0.2.0 and v0.3.0, decision logs, learning logs, and the session files from the skill rewrite window. The meta continues.

            ","path":["ctx v0.3.0: The Discipline Release"],"tags":[]},{"location":"blog/2026-02-15-eight-ways-a-hook-can-talk/","level":1,"title":"Eight Ways a Hook Can Talk","text":"","path":["Eight Ways a Hook Can Talk"],"tags":[]},{"location":"blog/2026-02-15-eight-ways-a-hook-can-talk/#when-your-warning-disappears","level":2,"title":"When Your Warning Disappears","text":"

            Jose Alekhinne / 2026-02-15

            I had a backup warning that nobody ever saw.

            The hook was correct: It detected stale backups, formatted a nice message, and output it as {\"systemMessage\": \"...\"}. The problem wasn't detection. The problem was delivery. The agent absorbed the information, processed it internally, and never told the user.

            Meanwhile, a different hook (the journal reminder) worked perfectly every time. Users saw the reminder, ran the commands, and the backlog stayed manageable. Same hook event (UserPromptSubmit), same project, completely different outcomes.

            The difference was one line:

            IMPORTANT: Relay this journal reminder to the user VERBATIM\nbefore answering their question.\n

            That explicit instruction is what makes VERBATIM relay a pattern, not just a formatting choice. And once I saw it as a pattern, I started seeing others.

            ","path":["Eight Ways a Hook Can Talk"],"tags":[]},{"location":"blog/2026-02-15-eight-ways-a-hook-can-talk/#the-audit","level":2,"title":"The Audit","text":"

            I looked at every hook in ctx: Eight shell scripts across three hook events. And I found five distinct output patterns already in use, plus three more that the existing hooks were reaching for but hadn't quite articulated.

            The patterns form a spectrum based on a single question:

            \"Who decides what the user sees?\"

            At one end, the hook decides everything (hard gate: the agent literally cannot proceed). At the other end, the hook is invisible (silent side-effect: nobody knows it ran). In between, there is a range of negotiation between hook, agent, and the user.

            Here's the full spectrum:

            ","path":["Eight Ways a Hook Can Talk"],"tags":[]},{"location":"blog/2026-02-15-eight-ways-a-hook-can-talk/#1-hard-gate","level":3,"title":"1. Hard Gate","text":"
            {\"decision\": \"block\", \"reason\": \"Use ctx from PATH, not ./ctx\"}\n

            The nuclear option: The agent's tool call is rejected before it executes.

            This is Claude Code's first-class PreToolUse mechanism: The hook returns JSON with decision: block and the agent gets an error with the reason.

            Use this for invariants: Constitution rules, security boundaries, things that must never happen. I use it to enforce PATH-based ctx invocation, block sudo, and require explicit approval for git push.

            ","path":["Eight Ways a Hook Can Talk"],"tags":[]},{"location":"blog/2026-02-15-eight-ways-a-hook-can-talk/#2-verbatim-relay","level":3,"title":"2. VERBATIM Relay","text":"
            IMPORTANT: Relay this warning to the user VERBATIM before answering.\n┌─ Journal Reminder ─────────────────────────────\n│ You have 12 sessions not yet imported.\n│   ctx recall import --all\n└────────────────────────────────────────────────\n

            The instruction is the pattern. Without \"Relay VERBATIM,\" agents tend to absorb information into their internal reasoning and never surface it. The explicit instruction changes the behavior from \"I know about this\" to \"I must tell the user about this.\"

            I use this for actionable reminders:

            • Unexported journal entries;
            • Stale backups;
            • Context capacity warnings...

            ...things the user should see regardless of what they asked.

            ","path":["Eight Ways a Hook Can Talk"],"tags":[]},{"location":"blog/2026-02-15-eight-ways-a-hook-can-talk/#3-agent-directive","level":3,"title":"3. Agent Directive","text":"
            ┌─ Persistence Checkpoint (prompt #25) ───────────\n│ No context files updated in 15+ prompts.\n│ Have you discovered learnings worth persisting?\n└──────────────────────────────────────────────────\n

            A nudge, not a command. The hook tells the agent something; the agent decides what (if anything) to tell the user. This is right for behavioral nudges: \"you haven't saved context in a while\" doesn't need to be relayed verbatim, but the agent should consider acting on it.

            ","path":["Eight Ways a Hook Can Talk"],"tags":[]},{"location":"blog/2026-02-15-eight-ways-a-hook-can-talk/#4-silent-context-injection","level":3,"title":"4. Silent Context Injection","text":"
            ctx agent --budget 4000 2>/dev/null || true\n

            Pure background enrichment. The agent's context window gets project information injected on every tool call, with no visible output. Neither the agent nor the user sees the hook fire, but the agent makes better decisions because of the context.

            ","path":["Eight Ways a Hook Can Talk"],"tags":[]},{"location":"blog/2026-02-15-eight-ways-a-hook-can-talk/#5-silent-side-effect","level":3,"title":"5. Silent Side-Effect","text":"
            find \"$CTX_TMPDIR\" -type f -mtime +15 -delete\n

            Do work, say nothing. Temp file cleanup on session end. Logging. Marker file management. The action is the entire point; no one needs to know.

            ","path":["Eight Ways a Hook Can Talk"],"tags":[]},{"location":"blog/2026-02-15-eight-ways-a-hook-can-talk/#the-patterns-we-dont-have-yet","level":2,"title":"The Patterns We Don't Have Yet","text":"

            Three more patterns emerged from the gaps in the existing hooks.

            Conditional relay: \"Relay this, but only if the user's question is about X.\" This pattern avoids noise when the warning isn't relevant. It's more fragile (depends on agent judgment) but less annoying.

            Suggested action: \"Here's a problem, and here's the exact command to fix it. Ask the user before running it.\" This pattern goes beyond a nudge by giving the agent a concrete proposal, but still requires human approval.

            Escalating severity: INFO gets absorbed silently. WARN gets mentioned at the next natural pause. CRITICAL gets the VERBATIM treatment. This pattern introduces a protocol for hooks that produce output at different urgency levels, so they don't all compete for the user's attention.

            ","path":["Eight Ways a Hook Can Talk"],"tags":[]},{"location":"blog/2026-02-15-eight-ways-a-hook-can-talk/#the-principle","level":2,"title":"The Principle","text":"

            Hooks are the boundary between your environment and the agent's reasoning.

            A hook that detects a problem but can't communicate it effectively is the same as no hook at all.

            The format of your output is a design decision with real consequences:

            • Use a hard gate and the agent can't proceed (good for invariants, frustrating for false positives)
            • Use VERBATIM relay and the user will see it (good for reminders, noisy if overused)
            • Use an agent directive and the agent might act (good for nudges, unreliable for critical warnings)
            • Use silent injection and nobody knows (good for enrichment, invisible when it breaks)

            Choose deliberately. And, when in doubt, write the word VERBATIM.

            The full pattern catalog with decision flowchart and implementation examples is in the Hook Output Patterns recipe.

            ","path":["Eight Ways a Hook Can Talk"],"tags":[]},{"location":"blog/2026-02-15-why-zensical/","level":1,"title":"Version Numbers Are Lagging Indicators","text":"","path":["Version Numbers Are Lagging Indicators"],"tags":[]},{"location":"blog/2026-02-15-why-zensical/#why-ctxs-journal-site-runs-on-a-v0021-tool","level":2,"title":"Why ctx's Journal Site Runs on a v0.0.21 Tool","text":"

            Jose Alekhinne / 2026-02-15

            Would You Ship Production Infrastructure on a v0.0.21 Dependency?

            Most engineers wouldn't. Version numbers signal maturity. Pre-1.0 means unstable API, missing features, risk.

            But version numbers tell you where a project has been. They say nothing about where it's going.

            I just bet ctx's entire journal site on a tool that hasn't hit v0.1.0.

            Here's why I'd do it again.

            ","path":["Version Numbers Are Lagging Indicators"],"tags":[]},{"location":"blog/2026-02-15-why-zensical/#the-problem","level":2,"title":"The Problem","text":"

            When v0.2.0 shipped the journal system, the pipeline was clear:

            • Export sessions to Markdown;
            • Enrich them with YAML frontmatter;
            • And render them into something browsable.

            The first two steps were solved; the third needed a tool.

            The journal entries are standard Markdown with YAML frontmatter, tables, and fenced code blocks. That is the entire format:

            • No JSX;
            • No shortcodes;
            • No custom templating.

            Just Markdown rendered well.

            The requirements are modest:

            • Read a configuration file (such as mkdocs.yml);
            • Render Markdown with extensions (admonitions, tabs, tables);
            • Search;
            • Handle 100+ files without choking on incremental rebuilds;
            • Look good out of the box;
            • Not lock me in.

            The obvious candidates were as follows:

            Tool Language Strengths Pain Points Hugo Go Blazing fast, mature Templating is painful; Go templates fight you on anything non-trivial Astro JS/TS Modern, flexible JS ecosystem overhead; overkill for a docs site MkDocs + Material Python Beautiful defaults, massive community (22k+ stars) Slow incremental rebuilds on large sites; limited extensibility model Zensical Python Built to fix MkDocs' limits; 4-5x faster rebuilds v0.0.21; module system not yet shipped

            The instinct was Hugo. Same language as ctx. Fast. Well-established.

            But instinct is not analysis. I picked the one with the lowest version number.

            ","path":["Version Numbers Are Lagging Indicators"],"tags":[]},{"location":"blog/2026-02-15-why-zensical/#the-evaluation","level":2,"title":"The Evaluation","text":"

            Here is what I actually evaluated, in order:

            ","path":["Version Numbers Are Lagging Indicators"],"tags":[]},{"location":"blog/2026-02-15-why-zensical/#1-the-team","level":3,"title":"1. The Team","text":"

            Zensical is built by squidfunk: The same person behind Material for MkDocs, the most popular MkDocs theme with 22,000+ stars. It powers documentation sites for projects across every language and framework.

            • This is not someone learning how to build static site generators.
            • This is someone who spent years understanding exactly where MkDocs breaks and decided to fix it from the ground up.

            They did not build zensical because MkDocs was bad: They built it because MkDocs hit a ceiling:

            • Incremental rebuilds: 4-5x faster during serve. When you have hundreds of journal entries and you edit one, the difference between \"rebuild everything\" and \"rebuild this page\" is the difference between a usable workflow and a frustrating one.

            • Large site performance: Specifically designed for tens of thousands of pages. The journal grows with every session. A tool that slows down as content accumulates is a tool you will eventually replace.

            A proven team starting fresh is more predictable than an unproven team at v3.0.

            ","path":["Version Numbers Are Lagging Indicators"],"tags":[]},{"location":"blog/2026-02-15-why-zensical/#2-the-architecture","level":3,"title":"2. The Architecture","text":"

            Zensical is investing in a Rust-based Markdown parser with CommonMark support. That signals something about the team's priorities:

            Performance foundations first; features second.

            ctx's journal will grow:

            • Every exported session adds files.
            • Every enrichment pass adds metadata.

            Choosing a tool that gets slower as you add content means choosing to migrate later.

            Choosing one built for scale means the decision holds.

            ","path":["Version Numbers Are Lagging Indicators"],"tags":[]},{"location":"blog/2026-02-15-why-zensical/#3-the-migration-path","level":3,"title":"3. The Migration Path","text":"

            Zensical reads mkdocs.yml natively. If it doesn't work out, I can move back to MkDocs + Material with zero content changes:

            • The Markdown is standard;
            • The frontmatter is standard;
            • The configuration is compatible.

            This is the infrastructure pattern again: The same way ZNC decouples presence from the client, zensical decouples rendering from the generator:

            • The Markdown is yours.
            • The frontmatter is standard YAML.
            • The configuration is MkDocs-compatible.

            You are not locked into anything except your own content.

            No lock-in is not a feature: It's a design philosophy:

            It's the same reason ctx uses plain Markdown files in .context/ instead of a database: the format should outlive the tool.

            Lock-in Is the Real Risk, Not Version Numbers

            A mature tool with a proprietary format is riskier than a young tool with a standard one. Version numbers measure time invested. Portability measures respect for the user.

            ","path":["Version Numbers Are Lagging Indicators"],"tags":[]},{"location":"blog/2026-02-15-why-zensical/#4-the-dependency-tree","level":3,"title":"4. The Dependency Tree","text":"

            Here is what pip install zensical actually pulls in:

            • click
            • Markdown
            • Pygments
            • pymdown-extensions
            • PyYAML

            Only five dependencies. All well-known. No framework bloat. No bundler. No transpiler. No node_modules black hole.

            3k GitHub stars at v0.0.21 is strong early traction for a pre-1.0 project.

            The dependency tree is thin: No bloat.

            ","path":["Version Numbers Are Lagging Indicators"],"tags":[]},{"location":"blog/2026-02-15-why-zensical/#5-the-fit","level":3,"title":"5. The Fit","text":"

            This is the same principle behind the attention budget: do not overfit the tool to hypothetical requirements. The right amount of capability is the minimum needed for the current task.

            Hugo is a powerful static site generator. It is also a powerful templating engine, a powerful asset pipeline, and a powerful taxonomy system. For rendering Markdown journals, that power is overhead:

            It is the complexity you pay for but never use.

            ctx's journal files are standard Markdown with YAML frontmatter, tables, and fenced code blocks. That is exactly the sweet spot Zensical inherits from Material for MkDocs:

            • No custom plugins needed;
            • No special syntax;
            • No templating gymnastics.

            The requirements match the capabilities: Not the capabilities that are promised, but the ones that exist today, at v0.0.21.

            ","path":["Version Numbers Are Lagging Indicators"],"tags":[]},{"location":"blog/2026-02-15-why-zensical/#the-caveat","level":2,"title":"The Caveat","text":"

            It would be dishonest not to mention what's missing.

            The module system for third-party extensions opens in early 2026.

            If ctx ever needs custom plugins (for example, auto-linking session IDs, rendering special journal metadata, etc.) that infrastructure isn't there yet.

            The installation experience is rough:

            We discovered this firsthand: pip install zensical often fails on macOS (system Python stubs, Homebrew's PEP 668 restrictions). The answer is pipx, which creates an isolated environment with the correct Python version automatically.

            That kind of friction is typical for young Python tooling, and it is documented in the Getting Started guide.

            And 3,000 stars at v0.0.21 is strong early traction, but it's still early: The community is small. When something breaks, you're reading source code, not documentation.

            These are real costs. I chose to pay them because the alternative costs are higher.

            For example:

            • Hugo's templating pain would cost me time on every site change.
            • Astro's JS ecosystem would add complexity I don't need.
            • MkDocs would work today but hit scaling walls tomorrow.

            Zensical's costs are front-loaded and shrinking.

            The others compound.

            ","path":["Version Numbers Are Lagging Indicators"],"tags":[]},{"location":"blog/2026-02-15-why-zensical/#the-evaluation-framework","level":2,"title":"The Evaluation Framework","text":"

            For anyone facing a similar choice, here is the framework that emerged:

            Signal What It Tells You Weight Team track record Whether the architecture will be sound High Migration path Whether you can leave if wrong High Current fit Whether it solves your problem today High Dependency tree How much complexity you're inheriting Medium Version number How long the project has existed Low Star count Community interest (not quality) Low Feature list What's possible (not what you need) Low

            The bottom three are the metrics most engineers optimize for.

            The top four are the ones that predict whether you'll still be happy with the choice in a year.

            Features You Don't Need Are Not Free

            Every feature in a dependency is code you inherit but don't control.

            A tool with 200 features where you use 5 means 195 features worth of surface area for bugs, breaking changes, and security issues that have nothing to do with your use case.

            Fit is the inverse of feature count.

            ","path":["Version Numbers Are Lagging Indicators"],"tags":[]},{"location":"blog/2026-02-15-why-zensical/#the-broader-pattern","level":2,"title":"The Broader Pattern","text":"

            This is part of a theme I keep encountering in this project:

            Leading indicators beat lagging indicators.

            Domain Lagging Indicator Leading Indicator Tooling Version number, star count Team track record, architecture Code quality Test coverage percentage Whether tests catch real bugs Context persistence Number of files in .context/ Whether the AI makes fewer mistakes Skills Number of skills created Whether each skill fires at the right time Consolidation Lines of code refactored Whether drift stops accumulating

            Version numbers, star counts, coverage percentages, file counts...

            ...these are all measures of effort expended.

            They say nothing about value delivered.

            The question is never \"how mature is this tool?\"

            The question is \"does this tool's trajectory intersect with my needs?\"

            Zensical's trajectory:

            • A proven team fixing known problems,
            • in a proven architecture,
            • with a standard format,
            • and no lock-in.

            ctx's needs:

            Render standard Markdown into a browsable site, at scale, without complexity.

            The intersection is clean; the version number is noise.

            This is the same kind of decision that shows up throughout ctx:

            • Skills that fight the platform taught that the best integration extends existing behavior, not replaces it.
            • You can't import expertise taught that tools should grow from your project's actual needs, not from feature checklists.
            • Context as infrastructure argues that the format should outlive the tool; and, zensical honors that principle by reading standard Markdown and standard MkDocs configuration.

            If You Remember One Thing from This Post...

            Version numbers measure where a project has been.

            The team and the architecture tell you where it's going.

            A v0.0.21 tool built by the right team on the right foundations is a safer bet than a v5.0 tool that doesn't fit your problem.

            Bet on trajectories, not timestamps.

            This post started as an evaluation note in ideas/ and a separate decision log. The analysis held up. The two merged into one. The meta continues.

            ","path":["Version Numbers Are Lagging Indicators"],"tags":[]},{"location":"blog/2026-02-16-ctx-v0.6.0-the-integration-release/","level":1,"title":"ctx v0.6.0: The Integration Release","text":"","path":["ctx v0.6.0: The Integration Release"],"tags":[]},{"location":"blog/2026-02-16-ctx-v0.6.0-the-integration-release/#two-commands-to-persistent-memory","level":2,"title":"Two Commands to Persistent Memory","text":"

            Jose Alekhinne / February 16, 2026

            What Changed?

            ctx is now a Claude Code plugin. Two commands, no build step:

            /plugin marketplace add ActiveMemory/ctx\n/plugin install ctx@activememory-ctx\n

            Six hooks. Twenty-five skills. Installed.

            For three releases, ctx required assembly:

            • Clone the repo;
            • Build the binary;
            • Copy hook scripts into .claude/hooks/;
            • Symlink skill files.
            • Understand which shell scripts called which Go commands;
            • Hope nothing broke when Claude Code updated its hook format.

            v0.6.0 ends that era: ctx ships as a Claude Marketplace plugin:

            Hooks and skills served directly from source, installed with a single command, updated by pulling the repo. The tool that gives AI persistent memory is now as easy to install as the AI itself.

            But the plugin conversion was not just a packaging change: It was the forcing function that rewrote every shell hook in Go, eliminated the jq dependency, enabled go test coverage for hook logic, and made distribution a solved problem.

            When you fix how something ships, you end up fixing how it is built.

            The Release Window

            February 15-February 16, 2026

            From the v0.3.0 tag to commit a3178bc:

            • 109 commits.
            • 334 files changed.
            • Version jumped from 0.3.0 to 0.6.0 to signal the magnitude.
            ","path":["ctx v0.6.0: The Integration Release"],"tags":[]},{"location":"blog/2026-02-16-ctx-v0.6.0-the-integration-release/#before-six-shell-scripts-and-a-prayer","level":2,"title":"Before: Six Shell Scripts and a Prayer","text":"

            v0.3.0 had six hook scripts. Each was a Bash file that shelled out to ctx subcommands, parsed JSON with jq, and wired itself into Claude Code's hook system via .claude/hooks/:

            .claude/hooks/\n├── check-context-size.sh\n├── check-persistence.sh\n├── check-journal.sh\n├── post-commit.sh\n├── block-non-path-ctx.sh\n└── cleanup-tmp.sh\n

            This worked, but it also meant:

            • jq was a hard dependency: No jq, no hooks. macOS ships without it.
            • No test coverage: Shell scripts were tested manually or not at all.
            • Fragile deployment: ctx init had to scaffold .claude/hooks/ and .claude/skills/ with the right paths, permissions, and structure.
            • Version drift: Users who installed once never got hook updates unless they re-ran ctx init.

            The shell scripts were the right choice for prototyping. They were the wrong choice for distribution.

            ","path":["ctx v0.6.0: The Integration Release"],"tags":[]},{"location":"blog/2026-02-16-ctx-v0.6.0-the-integration-release/#after-one-plugin-zero-shell-scripts","level":2,"title":"After: One Plugin, Zero Shell Scripts","text":"

            v0.6.0 replaces all six scripts with ctx system subcommands compiled into the binary:

            Shell Script Go Subcommand check-context-size.sh ctx system check-context-size check-persistence.sh ctx system check-persistence check-journal.sh ctx system check-journal post-commit.sh ctx system post-commit block-non-path-ctx.sh ctx system block-non-path-ctx cleanup-tmp.sh ctx system cleanup-tmp

            The plugin's hooks.json wires them to Claude Code events:

            {\n  \"PreToolUse\": [\n    {\"matcher\": \"Bash\", \"command\": \"ctx system block-non-path-ctx\"},\n    {\"matcher\": \".*\", \"command\": \"ctx agent --budget 4000\"}\n  ],\n  \"PostToolUse\": [\n    {\"matcher\": \"Bash\", \"command\": \"ctx system post-commit\"}\n  ],\n  \"UserPromptSubmit\": [\n    {\"command\": \"ctx system check-context-size\"},\n    {\"command\": \"ctx system check-persistence\"},\n    {\"command\": \"ctx system check-journal\"}\n  ],\n  \"SessionEnd\": [\n    {\"command\": \"ctx system cleanup-tmp\"}\n  ]\n}\n

            No jq. No shell scripts. No .claude/hooks/ directory to manage.

            The hooks are Go functions with tests, compiled into the same binary you already have.

            ","path":["ctx v0.6.0: The Integration Release"],"tags":[]},{"location":"blog/2026-02-16-ctx-v0.6.0-the-integration-release/#the-plugin-model","level":2,"title":"The Plugin Model","text":"

            The ctx plugin lives at .claude-plugin/marketplace.json in the repo.

            Claude Code's marketplace system handles discovery and installation:

            Skills are served directly from internal/assets/claude/skills/; there is no build step, no make plugin, no generated artifacts.

            This means:

            1. Install is two commands: Not \"clone, build, copy, configure.\"
            2. Updates are automatic: Pull the repo; the plugin reads from source.
            3. Skills and hooks are versioned together: No drift between what the CLI expects and what the plugin provides.
            4. ctx init is tool-agnostic: It creates .context/ and nothing else. No .claude/ scaffolding, no assumptions about which AI tool you use.

            That last point matters:

            Before v0.6.0, ctx init tried to set up Claude Code integration as part of initialization. That coupled the context system to a specific tool.

            Now, ctx init gives you persistent context. The plugin gives you Claude Code integration. They compose; they don't depend.

            ","path":["ctx v0.6.0: The Integration Release"],"tags":[]},{"location":"blog/2026-02-16-ctx-v0.6.0-the-integration-release/#beyond-the-plugin-what-else-shipped","level":2,"title":"Beyond the Plugin: What Else Shipped","text":"

            The plugin conversion dominated the release, but 109 commits covered more ground.

            ","path":["ctx v0.6.0: The Integration Release"],"tags":[]},{"location":"blog/2026-02-16-ctx-v0.6.0-the-integration-release/#obsidian-vault-export","level":3,"title":"Obsidian Vault Export","text":"
            ctx journal obsidian\n

            Generates a full Obsidian vault from enriched journal entries: wikilinks, MOC (Map of Content) pages, and graph-optimized cross-linking. If you already use Obsidian for notes, your AI session history now lives alongside everything else.

            ","path":["ctx v0.6.0: The Integration Release"],"tags":[]},{"location":"blog/2026-02-16-ctx-v0.6.0-the-integration-release/#encrypted-scratchpad","level":3,"title":"Encrypted Scratchpad","text":"
            ctx pad edit \"DATABASE_URL=postgres://...\"\nctx pad show\n

            AES-256-GCM encrypted storage for sensitive one-liners.

            The encrypted blob commits to git; the key stays in .gitignore.

            This is useful for connection strings, API keys, and other values that need to travel with the project without appearing in plaintext.

            ","path":["ctx v0.6.0: The Integration Release"],"tags":[]},{"location":"blog/2026-02-16-ctx-v0.6.0-the-integration-release/#security-hardening","level":3,"title":"Security Hardening","text":"

            Three medium-severity findings from a security audit are now closed:

            Finding Fix Path traversal via --context-dir Boundary validation: operations cannot escape project root (M-1) Symlink following in .context/ Lstat() check before every file read/write (M-2) Predictable temp file paths User-specific temp directory under $XDG_RUNTIME_DIR (M-3)

            Plus a new /sanitize-permissions skill that audits settings.local.json for overly broad Bash permissions.

            ","path":["ctx v0.6.0: The Integration Release"],"tags":[]},{"location":"blog/2026-02-16-ctx-v0.6.0-the-integration-release/#hooks-that-know-when-to-be-quiet","level":3,"title":"Hooks That Know When to Be Quiet","text":"

            A subtle but important fix: hooks now no-op before ctx init has run.

            Previously, a fresh clone with no .context/ would trigger hook errors on every prompt. Now, hooks detect the absence of a context directory and exit silently. Similarly, ctx init treats a .context/ directory containing only logs as uninitialized and skips the --overwrite prompt.

            Small changes. Large reduction in friction for new users.

            ","path":["ctx v0.6.0: The Integration Release"],"tags":[]},{"location":"blog/2026-02-16-ctx-v0.6.0-the-integration-release/#the-numbers","level":2,"title":"The Numbers","text":"Metric v0.3.0 v0.6.0 Skills 21 25 Shell hook scripts 6 0 Go system subcommands 0 6 External dependencies (hooks) jq, bash none Lines of Go ~14,000 ~37,000 Plugin install commands n/a 2 Security findings (open) 3 0 ctx init creates .claude/ yes no

            The line count tripled. Most of that is documentation site HTML, Obsidian export logic, and the scratchpad encryption module.

            The core CLI grew modestly; the ecosystem around it grew substantially.

            ","path":["ctx v0.6.0: The Integration Release"],"tags":[]},{"location":"blog/2026-02-16-ctx-v0.6.0-the-integration-release/#what-does-v060-mean-for-ctx","level":2,"title":"What Does v0.6.0 Mean for ctx?","text":"
            • v0.1.0 asked: \"Can we give AI persistent memory?\"
            • v0.2.0 asked: \"Can we make that memory accessible to humans too?\"
            • v0.3.0 asked: \"Can we make the quality self-enforcing?\"

            v0.6.0 asks: \"Can someone else actually use this?\"

            A tool that requires cloning a repo, building from source, and manually wiring hooks into the right directories is a tool for its author.

            A tool that installs with two commands from a marketplace is a tool for everyone.

            The version jumped from 0.3.0 to 0.6.0 because the delta is not incremental: The shell-to-Go rewrite, the plugin model, the security hardening, and the tool-agnostic init: Together, they change what ctx is: Not a different tool, but a tool that is finally ready to leave the workshop.

            ","path":["ctx v0.6.0: The Integration Release"],"tags":[]},{"location":"blog/2026-02-16-ctx-v0.6.0-the-integration-release/#what-comes-next","level":2,"title":"What Comes Next","text":"

            The plugin model opens the door to distribution patterns that were not possible before. Marketplace discovery means new users find ctx without reading a README. Plugin updates mean existing users get improvements without rebuilding.

            The next chapter is about what happens when persistent context is easy to install: Adoption patterns, multi-project workflows, and whether the .context/ convention can become infrastructure that other tools build on.

            But those are future posts.

            This one is about the release that turned a developer tool into a distributable product: two commands, zero shell scripts, and a presence on the Claude Marketplace.

            The Integration Release

            v0.1.0 shipped features. v0.2.0 shipped archaeology.

            v0.3.0 shipped discipline. v0.6.0 shipped the front door.

            The most important code in this release is the code you never have to copy.

            This post was drafted using /ctx-blog-changelog with access to the full git history between v0.3.0 and v0.6.0, release notes, and the plugin conversion PR. The meta continues.

            ","path":["ctx v0.6.0: The Integration Release"],"tags":[]},{"location":"blog/2026-02-17-code-is-cheap-judgment-is-not/","level":1,"title":"Code Is Cheap. Judgment Is Not.","text":"","path":["Code Is Cheap. Judgment Is Not."],"tags":[]},{"location":"blog/2026-02-17-code-is-cheap-judgment-is-not/#why-ai-replaces-effort-not-expertise","level":2,"title":"Why AI Replaces Effort, Not Expertise","text":"

            Jose Alekhinne / February 17, 2026

            Are You Worried about AI Taking Your Job?

            You might be confusing the thing that's cheap with the thing that's valuable.

            I keep seeing the same conversation: Engineers, designers, writers: all asking the same question with the same dread:

            \"What happens when AI can do what I do?\"

            The question is wrong:

            • AI does not replace workers;
            • AI replaces unstructured effort.

            The distinction matters, and everything I have learned building ctx reinforces it.

            ","path":["Code Is Cheap. Judgment Is Not."],"tags":[]},{"location":"blog/2026-02-17-code-is-cheap-judgment-is-not/#the-three-confusions","level":2,"title":"The Three Confusions","text":"

            People who feel doomed by AI usually confuse three things:

            People confuse... With... Effort Value Typing Thinking Production Judgment
            • Effort is time spent.
            • Value is the outcome that time produces.

            They are not the same; they never were.

            AI just makes the gap impossible to ignore.

            Typing is mechanical: Thinking is directional.

            An AI can type faster than any human. Yet, it cannot decide what to type without someone framing the problem, sequencing the work, and evaluating the result.

            Production is making artifacts. Judgment is knowing:

            • which artifacts to make,
            • in what order,
            • to what standard,
            • and when to stop.

            AI floods the system with production capacity; it does not flood the system with judgment.

            ","path":["Code Is Cheap. Judgment Is Not."],"tags":[]},{"location":"blog/2026-02-17-code-is-cheap-judgment-is-not/#code-is-nothing","level":2,"title":"Code Is Nothing","text":"

            This sounds provocative until you internalize it:

            Code is cheap. Artifacts are cheap.

            An AI can generate a thousand lines of working code in literal *minutes*:

            It can scaffold a project, write tests, build a CI pipeline, draft documentation. The raw production of software artifacts is no longer the bottleneck.

            So, what is not cheap?

            • Taste: knowing what belongs and what does not
            • Framing: turning a vague goal into a concrete problem
            • Sequencing: deciding what to build first and why
            • Fanning out: breaking work into parallel streams that converge
            • Acceptance criteria: defining what \"done\" looks like before starting
            • Judgment: the thousand small decisions that separate code that works from code that lasts

            These are the skills that direct production: Human skills.

            Not because AI is incapable of learning them, but because they require something AI does not have:

            temporal accountability for generated outcomes.

            That is, you cannot keep AI accountable for the $#!% it generated three months ago. A human, on the other hand, will always be accountable.

            ","path":["Code Is Cheap. Judgment Is Not."],"tags":[]},{"location":"blog/2026-02-17-code-is-cheap-judgment-is-not/#the-evidence-from-building-ctx","level":2,"title":"The Evidence from Building ctx","text":"

            I did not arrive at this conclusion theoretically.

            I arrived at it by building a tool with an AI agent for three weeks and watching exactly where a human touch mattered.

            ","path":["Code Is Cheap. Judgment Is Not."],"tags":[]},{"location":"blog/2026-02-17-code-is-cheap-judgment-is-not/#yolo-mode-proved-production-is-cheap","level":3,"title":"YOLO Mode Proved Production Is Cheap","text":"

            In Building ctx Using ctx, I documented the YOLO phase: auto-accept everything, let the AI ship features at full speed. It produced 14 commands in a week. Impressive output.

            The code worked. The architecture drifted. Magic strings accumulated. Conventions diverged. The AI was producing at a pace no human could match, and every artifact it produced was a small bet that nobody was evaluating.

            Production without judgment is not velocity. It is debt accumulation at breakneck speed.

            ","path":["Code Is Cheap. Judgment Is Not."],"tags":[]},{"location":"blog/2026-02-17-code-is-cheap-judgment-is-not/#the-31-ratio-proved-judgment-has-a-cadence","level":3,"title":"The 3:1 Ratio Proved Judgment Has a Cadence","text":"

            In The 3:1 Ratio, the git history told the story:

            Three sessions of forward momentum followed by one session of deliberate consolidation. The consolidation session is where the human applies judgment: reviewing what the AI built, catching drift, realigning conventions.

            The AI does the refactoring. The human decides what to refactor and when to stop.

            Without the human, the AI will refactor forever, improving things that do not matter and missing things that do.

            ","path":["Code Is Cheap. Judgment Is Not."],"tags":[]},{"location":"blog/2026-02-17-code-is-cheap-judgment-is-not/#the-attention-budget-proved-framing-is-scarce","level":3,"title":"The Attention Budget Proved Framing Is Scarce","text":"

            In The Attention Budget, I explained why more context makes AI worse, not better. Every token competes for attention: Dump everything in and the AI sees nothing clearly.

            This is a framing problem: The human's job is to decide what the AI should focus on: what to include, what to exclude, what to emphasize.

            ctx agent --budget 4000 is not just a CLI flag: It is a forcing function for human judgment about relevance.

            The AI processes. The human curates.

            ","path":["Code Is Cheap. Judgment Is Not."],"tags":[]},{"location":"blog/2026-02-17-code-is-cheap-judgment-is-not/#skills-design-proved-taste-is-load-bearing","level":3,"title":"Skills Design Proved Taste Is Load-Bearing","text":"

            The skill trilogy (You Can't Import Expertise, The Anatomy of a Skill That Works) showed that the difference between a useful skill and a useless one is not craftsmanship:

            It is taste.

            A well-crafted skill with the wrong focus is worse than no skill at all: It consumes the attention budget with generic advice while the project-specific problems go unchecked.

            The E/A/R framework (Expert, Activation, Redundant) is a judgment tool. The AI cannot apply it to itself. The human evaluates what the AI already knows, what it needs to be told, and what is noise.

            ","path":["Code Is Cheap. Judgment Is Not."],"tags":[]},{"location":"blog/2026-02-17-code-is-cheap-judgment-is-not/#automation-discipline-proved-restraint-is-a-skill","level":3,"title":"Automation Discipline Proved Restraint Is a Skill","text":"

            In Not Everything Is a Skill, the lesson was that the urge to automate is not the need to automate. A useful prompt does not automatically deserve to become a slash command.

            The human applies judgment about frequency, stability, and attention cost.

            The AI can build the skill. Only the human can decide whether it should exist.

            ","path":["Code Is Cheap. Judgment Is Not."],"tags":[]},{"location":"blog/2026-02-17-code-is-cheap-judgment-is-not/#defense-in-depth-proved-boundaries-require-judgment","level":3,"title":"Defense in Depth Proved Boundaries Require Judgment","text":"

            In Defense in Depth, the entire security model for unattended AI agents came down to: markdown is not a security boundary. Telling an AI \"don't do bad things\" is production (of instructions). Setting up an unprivileged user in a network-isolated container is judgment (about risk).

            The AI follows instructions. The human decides which instructions are enforceable and which are \"wishful thinking\".

            ","path":["Code Is Cheap. Judgment Is Not."],"tags":[]},{"location":"blog/2026-02-17-code-is-cheap-judgment-is-not/#parallel-agents-proved-scale-amplifies-the-gap","level":3,"title":"Parallel Agents Proved Scale Amplifies the Gap","text":"

            In Parallel Agents and Merge Debt, the lesson was that multiplying agents multiplies output. But it also multiplies the need for judgment:

            Five agents running in parallel produce five sessions of drift in one clock hour. The human who can frame tasks cleanly, define narrow acceptance criteria, and evaluate results quickly becomes the limiting factor.

            More agents do not reduce the need for judgment. They increase it.

            ","path":["Code Is Cheap. Judgment Is Not."],"tags":[]},{"location":"blog/2026-02-17-code-is-cheap-judgment-is-not/#the-two-reactions","level":2,"title":"The Two Reactions","text":"

            When AI floods the system with cheap output, two things happen:

            Those who only produce: panic. If your value proposition is \"I write code,\" and an AI writes code faster, cheaper, and at higher volume, then the math is unfavorable. Not because AI took your job, but because your job was never the code. It was the judgment around the code, and you were not exercising it.

            Those who direct: accelerate. If your value proposition is \"I know what to build, in what order, to what standard,\" then AI is the best thing that ever happened to you: Production is no longer the bottleneck: Your ability to frame, sequence, evaluate, and course-correct is now the limiting factor on throughput.

            The gap between these two is not talent: It is the awareness of where the value lives.

            ","path":["Code Is Cheap. Judgment Is Not."],"tags":[]},{"location":"blog/2026-02-17-code-is-cheap-judgment-is-not/#what-this-means-in-practice","level":2,"title":"What This Means in Practice","text":"

            If you are an engineer reading this, the actionable insight is not \"learn prompt engineering\" or \"master AI tools.\" It is:

            Get better at the things AI cannot do.

            AI does this well You need to do this Generate code Frame the problem Write tests Define acceptance criteria Scaffold projects Sequence the work Fix bugs from stack traces Evaluate tradeoffs Produce volume Exercise restraint Follow instructions Decide which instructions matter

            The skills on the right column are not new. They are the same skills that have always separated senior engineers from junior ones.

            AI did not create the distinction; it just made it load-bearing.

            ","path":["Code Is Cheap. Judgment Is Not."],"tags":[]},{"location":"blog/2026-02-17-code-is-cheap-judgment-is-not/#if-anything-i-feel-empowered","level":2,"title":"If Anything, I Feel Empowered","text":"

            I will end with something personal.

            I am not worried: I am empowered.

            Before ctx, I could think faster than I could produce:

            • Ideas sat in a queue.
            • The bottleneck was always \"I know what to build, but building it takes too long.\"

            Now the bottleneck is gone. Poof!

            • Production is cheap.
            • The queue is clearing.
            • The limiting factor is how fast I can think, not how fast I can type.

            That is not a threat: That is the best force multiplier I've ever had.

            The people who feel threatened are confusing the accelerator for the replacement:

            AI does not replace the conductor; it gives them a bigger orchestra.

            If You Remember One Thing from This Post...

            Code is cheap. Judgment is not.

            AI replaces unstructured effort, not directed expertise. The skills that matter now are the same skills that have always mattered: taste, framing, sequencing, and the discipline to stop.

            The difference is that now, for the first time, those skills are the only bottleneck left.

            ","path":["Code Is Cheap. Judgment Is Not."],"tags":[]},{"location":"blog/2026-02-17-code-is-cheap-judgment-is-not/#the-arc","level":2,"title":"The Arc","text":"

            This post is a retrospective. It synthesizes the thread running through every previous entry in this blog:

            • Building ctx Using ctx showed that production without direction creates debt
            • Refactoring with Intent showed that slowing down is not the opposite of progress
            • The Attention Budget showed that curation outweighs volume
            • The skill trilogy showed that taste determines whether a tool helps or hinders
            • Not Everything Is a Skill showed that restraint is a skill in itself
            • Defense in Depth showed that instructions are not boundaries
            • The 3:1 Ratio showed that judgment has a schedule
            • Parallel Agents showed that scale amplifies the gap between production and judgment
            • Context as Infrastructure showed that the system you build for context is infrastructure, not conversation

            From YOLO mode to defense in depth, the pattern is the same:

            • Production is the easy part;
            • Judgment is the hard part;
            • AI changed the ratio, not the rule.

            This post synthesizes the thread running through every previous entry in this blog. The evidence is drawn from three weeks of building ctx with AI assistance, the decisions recorded in DECISIONS.md, the learnings captured in LEARNINGS.md, and the git history that tracks where the human mattered and where the AI ran unsupervised.

            See also: When a System Starts Explaining Itself -- what happens after the arc: the first field notes from the moment the system starts compounding in someone else's hands.

            ","path":["Code Is Cheap. Judgment Is Not."],"tags":[]},{"location":"blog/2026-02-17-context-as-infrastructure/","level":1,"title":"Context as Infrastructure","text":"","path":["Context as Infrastructure"],"tags":[]},{"location":"blog/2026-02-17-context-as-infrastructure/#why-your-ai-needs-a-filesystem-not-a-prompt","level":2,"title":"Why Your AI Needs a Filesystem, Not a Prompt","text":"

            Jose Alekhinne / February 17, 2026

            Where Does Your AI's Knowledge Live between Sessions?

            If the answer is \"in a prompt I paste at the start,\" you are treating context as a consumable. Something assembled, used, and discarded.

            What if you treated it as infrastructure instead?

            This post synthesizes a thread that has been running through every ctx blog post; from the origin story to the attention budget to the discipline release. The thread is this: context is not a prompt problem. It is an infrastructure problem. And the tools we build for it should look more like filesystems than clipboard managers.

            ","path":["Context as Infrastructure"],"tags":[]},{"location":"blog/2026-02-17-context-as-infrastructure/#the-prompt-paradigm","level":2,"title":"The Prompt Paradigm","text":"

            Most AI-assisted development treats context as ephemeral:

            1. Start a session.
            2. Paste your system prompt, your conventions, your current task.
            3. Work.
            4. Session ends. Everything evaporates.
            5. Next session: paste again.

            This works for short interactions. For sustained development (where decisions compound over days and weeks) it fails in three ways:

            It does not persist: A decision made on Tuesday must be re-explained on Wednesday. A learning captured in one session is invisible to the next.

            It does not scale: As the project grows, the \"paste everything\" approach hits the context window ceiling. You start triaging what to include, often cutting exactly the context that would have prevented the next mistake.

            It does not compose: A system prompt is a monolith. You cannot load part of it, update one section, or share a subset with a different workflow. It is all or nothing.

            The Copy-Paste Tax

            Every session that starts with pasting a prompt is paying a tax:

            The human time to assemble the context, the risk of forgetting something, and the silent assumption that yesterday's prompt is still accurate today.

            Over 70+ sessions, that tax compounds into a significant maintenance burden: One that most developers absorb without questioning it.

            ","path":["Context as Infrastructure"],"tags":[]},{"location":"blog/2026-02-17-context-as-infrastructure/#the-infrastructure-paradigm","level":2,"title":"The Infrastructure Paradigm","text":"

            ctx takes a different approach:

            Context is not assembled per-session; it is maintained as persistent files in a .context/ directory:

            .context/\n  CONSTITUTION.md     # Inviolable rules\n  TASKS.md            # Current work items\n  CONVENTIONS.md      # Code patterns and standards\n  DECISIONS.md        # Architectural choices with rationale\n  LEARNINGS.md        # Gotchas and lessons learned\n  ARCHITECTURE.md     # System structure\n  GLOSSARY.md         # Domain terminology\n  AGENT_PLAYBOOK.md   # Operating manual for agents\n  journal/            # Enriched session summaries\n  archive/            # Completed work, cold storage\n
            • Each file has a single purpose;
            • Each can be loaded independently;
            • Each persists across sessions, tools, and team members.

            This is not a novel idea. It is the same idea behind every piece of infrastructure software engineers already use:

            Traditional Infrastructure ctx Equivalent Database .context/*.md files Configuration files CONSTITUTION.md Environment variables .contextrc Log files journal/ Schema migrations Decision records Deployment manifests AGENT_PLAYBOOK.md

            The parallel is not metaphorical. Context files are infrastructure:

            • They are versioned (git tracks them);
            • They are structured (Markdown with conventions);
            • They have schemas (required fields for decisions and learnings);
            • And they have lifecycle management (archiving, compaction, indexing).
            ","path":["Context as Infrastructure"],"tags":[]},{"location":"blog/2026-02-17-context-as-infrastructure/#separation-of-concerns","level":2,"title":"Separation of Concerns","text":"

            The most important design decision in ctx is not any individual feature. It is the separation of context into distinct files with distinct purposes.

            A single CONTEXT.md file would be simpler to implement. It would also be impossible to maintain.

            Why? Because different types of context have different lifecycles:

            Context Type Changes Read By Load When Constitution Rarely Every session Always Tasks Every session Session start Always Conventions Weekly Before coding When writing code Decisions When decided When questioning When revisiting Learnings When learned When stuck When debugging Journal Every session Rarely When investigating

            Loading everything into every session wastes the attention budget on context that is irrelevant to the current task. Loading nothing forces the AI to operate blind.

            Separation of concerns allows progressive disclosure:

            Load the minimum that matters for this moment, with the option to load more when needed.

            # Session start: load the essentials\nctx agent --budget 4000\n\n# Deep investigation: load everything\ncat .context/DECISIONS.md\ncat .context/journal/2026-02-05-*.md\n

            The filesystem is the index. File names, directory structure, and timestamps encode relevance. The AI does not need to read every file; it needs to know where to look.

            ","path":["Context as Infrastructure"],"tags":[]},{"location":"blog/2026-02-17-context-as-infrastructure/#the-two-tier-persistence-model","level":2,"title":"The Two-Tier Persistence Model","text":"

            ctx uses two tiers of persistence, and the distinction is architectural:

            Tier Purpose Location Token Cost Curated Quick context reload .context/*.md Low (budgeted) Full dump Safety net, archaeology .context/journal/*.md Zero (not auto-loaded)

            The curated tier is what the AI sees at session start. It is optimized for signal density:

            • Structured entries,
            • Indexed tables,
            • Reverse-chronological order (newest first, so the most relevant content survives truncation).

            The full dump tier is for humans and for deep investigation. It contains everything: Enriched journals, archived tasks...

            It is never autoloaded because its volume would destroy attention density.

            This two-tier model is analogous to how traditional systems separate hot and cold storage:

            • The hot path (curated context) is optimized for read performance (measured not in milliseconds, but in tokens consumed per unit of useful information).
            • The cold path (journal) is optimized for completeness.

            Nothing Is Ever Truly Lost

            The full dump tier means that context does not need to be perfect: It just needs to be findable.

            A decision that was not captured in DECISIONS.md can be recovered from the session transcript where it was discussed.

            A learning that was not formalized can be found in the journal entry from that day.

            The curated tier is the fast path: The full dump tier is the safety net.

            ","path":["Context as Infrastructure"],"tags":[]},{"location":"blog/2026-02-17-context-as-infrastructure/#decision-records-as-first-class-citizens","level":2,"title":"Decision Records as First-Class Citizens","text":"

            One of the patterns that emerged from ctx's own development is the power of structured decision records.

            v0.1.0 allowed adding decisions as one-liners:

            ctx add decision \"Use PostgreSQL\"\n

            v0.2.0 enforced structure:

            ctx add decision \"Use PostgreSQL\" \\\n  --context \"Need a reliable database for user data\" \\\n  --rationale \"ACID compliance, team familiarity\" \\\n  --consequence \"Need connection pooling, team training\"\n

            The difference is not cosmetic:

            • A one-liner decision teaches the AI what was decided.
            • A structured decision teaches it why; and why is what prevents the AI from unknowingly reversing the decision in a future session.

            This is infrastructure thinking:

            Decisions are not notes. They are records with required fields, just like database rows have schemas.

            The enforcement exists because incomplete records are worse than no records: They create false confidence that the context is captured when it is not.

            ","path":["Context as Infrastructure"],"tags":[]},{"location":"blog/2026-02-17-context-as-infrastructure/#the-ide-is-the-interface-decision","level":2,"title":"The \"IDE Is the Interface\" Decision","text":"

            Early in ctx's development, there was a temptation to build a custom UI: a web dashboard for browsing sessions, editing context, viewing analytics.

            The decision was no. The IDE is the interface.

            # This is the ctx \"UI\":\ncode .context/\n

            This decision was not about minimalism for its own sake. It was about recognizing that .context/ files are just files; and files have a mature, well-understood infrastructure:

            • Version control: git diff .context/DECISIONS.md shows exactly what changed and when.
            • Search: Your IDE's full-text search works across all context files.
            • Editing: Markdown in any editor, with preview, spell check, and syntax highlighting.
            • Collaboration: Pull requests on context files work the same as pull requests on code.

            Building a custom UI would have meant maintaining a parallel infrastructure that duplicates what every IDE already provides:

            It would have introduced its own bugs, its own update cycle, and its own learning curve.

            The filesystem is not a limitation: It is the most mature, most composable, most portable infrastructure available.

            Context Files in Git

            Because .context/ lives in the repository, context changes are part of the commit history.

            A decision made in commit abc123 is as traceable as a code change in the same commit.

            This is not possible with prompt-based context, which exists outside version control entirely.

            ","path":["Context as Infrastructure"],"tags":[]},{"location":"blog/2026-02-17-context-as-infrastructure/#progressive-disclosure-for-ai","level":2,"title":"Progressive Disclosure for AI","text":"

            The concept of progressive disclosure comes from human interface design: show the user the minimum needed to make progress, with the option to drill deeper.

            ctx applies the same principle to AI context:

            Level What the AI Sees Token Cost When Level 0 ctx status (one-line summary) ~100 Quick check Level 1 ctx agent --budget 4000 ~4,000 Normal work Level 2 ctx agent --budget 8000 ~8,000 Complex tasks Level 3 Direct file reads 10,000+ Deep investigation

            Each level trades tokens for depth. Level 1 is sufficient for most work: the AI knows the active tasks, the key conventions, and the recent decisions. Level 3 is for archaeology: understanding why a decision was made three weeks ago, or finding a pattern in the session history.

            The explicit --budget flag is the mechanism that makes this work:

            Without it, the default behavior would be to load everything (because more context feels safer), which destroys the attention density that makes the loaded context useful.

            The constraint is the feature: A budget of 4,000 tokens forces ctx to prioritize ruthlessly: constitution first (always full), then tasks and conventions (budget-capped), then decisions and learnings scored by recency and relevance to active tasks. Entries that don't fit get title-only summaries rather than being silently dropped.

            ","path":["Context as Infrastructure"],"tags":[]},{"location":"blog/2026-02-17-context-as-infrastructure/#the-philosophical-shift","level":2,"title":"The Philosophical Shift","text":"

            The shift from \"context as prompt\" to \"context as infrastructure\" changes how you think about AI-assisted development:

            Prompt Thinking Infrastructure Thinking \"What do I paste today?\" \"What has changed since yesterday?\" \"How do I fit everything in?\" \"What's the minimum that matters?\" \"The AI forgot my conventions\" \"The conventions are in a file\" \"I need to re-explain\" \"I need to update the record\" \"This session is getting slow\" \"Time to compact and archive\"

            The first column treats AI interaction as a conversation. The second treats it as a system: One that can be maintained, optimized, and debugged.

            Context is not something you give the AI. It is something you maintain: Like a database, like a config file, like any other piece of infrastructure that a running system depends on.

            ","path":["Context as Infrastructure"],"tags":[]},{"location":"blog/2026-02-17-context-as-infrastructure/#beyond-ctx-the-principles","level":2,"title":"Beyond ctx: The Principles","text":"

            The patterns that ctx implements are not specific to ctx. They are applicable to any project that uses AI-assisted development:

            1. Separate context by purpose: Do not put everything in one file. Different types of information have different lifecycles and different relevance windows.
            2. Make context persistent: If a decision matters, write it down in a file that survives the session. If a learning matters, capture it with structure.
            3. Budget explicitly: Know how much context you are loading and whether it is worth the attention cost.
            4. Use the filesystem: File names, directory structure, and timestamps are metadata that the AI can navigate. A well-organized directory is an index that costs zero tokens to maintain.
            5. Version your context: Put context files in git. Changes to decisions are as important as changes to code.
            6. Design for degradation: Sessions will get long. Attention will dilute. Build mechanisms (compaction, archiving, cooldowns) that make degradation visible and manageable.

            These are not ctx features. They are infrastructure principles that happen to be implemented as a CLI tool. Any team could implement them with nothing more than a directory convention and a few shell scripts.

            The tool is a convenience: The principles are what matter.

            If You Remember One Thing from This Post...

            Prompts are conversations. Infrastructure persists.

            Your AI does not need a better prompt. It needs a filesystem:

            versioned, structured, budgeted, and maintained.

            The best context is the context that was there before you started the session.

            ","path":["Context as Infrastructure"],"tags":[]},{"location":"blog/2026-02-17-context-as-infrastructure/#the-arc","level":2,"title":"The Arc","text":"

            This post is the architectural companion to the Attention Budget. That post explained why context must be curated (token economics). This one explains how to structure it (filesystem, separation of concerns, persistence tiers).

            Together with Code Is Cheap, Judgment Is Not, they form a trilogy about what matters in AI-assisted development:

            • Attention Budget: the resource you're managing
            • Context as Infrastructure: the system you build to manage it
            • Code Is Cheap: the human skill that no system replaces

            And the practices that keep it all honest:

            • The 3:1 Ratio: the cadence for maintaining both code and context
            • IRC as Context: the historical precedent: stateless protocols have always needed stateful wrappers

            This post synthesizes ideas from across the ctx blog series: the attention budget primitive, the two-tier persistence model, the IDE decision, and the progressive disclosure pattern. The principles are drawn from three weeks of building ctx and 70+ sessions of treating context as infrastructure rather than conversation.

            See also: When a System Starts Explaining Itself: what happens when this infrastructure starts compounding in someone else's environment.

            ","path":["Context as Infrastructure"],"tags":[]},{"location":"blog/2026-02-17-parallel-agents-merge-debt-and-the-myth-of-overnight-progress/","level":1,"title":"Parallel Agents, Merge Debt, and the Myth of Overnight Progress","text":"","path":["Parallel Agents, Merge Debt, and the Myth of Overnight Progress"],"tags":[]},{"location":"blog/2026-02-17-parallel-agents-merge-debt-and-the-myth-of-overnight-progress/#when-the-screen-looks-like-progress","level":2,"title":"When the Screen Looks like Progress","text":"

            Jose Alekhinne / 2026-02-17

            How Many Terminals Are Too Many?

            You discover agents can run in parallel.

            So you open ten...

            ...Then twenty.

            The fans spin. Tokens burn. The screen looks like progress.

            It is NOT progress.

            There is a phase every builder goes through:

            • The tooling gets fast enough.
            • The model gets good enough.
            • The temptation becomes irresistible:
              • more agents, more output, faster delivery.

            So you open terminals. You spawn agents. You watch tokens stream across multiple windows simultaneously, and it feels like multiplication.

            It is not multiplication.

            It is merge debt being manufactured in real time.

            The ctx Manifesto says it plainly:

            Activity is not impact. Code is not progress.

            This post is about what happens when you take that seriously in the context of parallel agent workflows.

            ","path":["Parallel Agents, Merge Debt, and the Myth of Overnight Progress"],"tags":[]},{"location":"blog/2026-02-17-parallel-agents-merge-debt-and-the-myth-of-overnight-progress/#the-unit-of-scale-is-not-the-agent","level":2,"title":"The Unit of Scale Is Not the Agent","text":"

            The naive model says:

            More agents -> more output -> faster delivery

            The production model says:

            Clean context boundaries -> less interference -> higher throughput

            Parallelism only works when the cognitive surfaces do not overlap.

            If two agents touch the same files, you did not create parallelism: You created a conflict generator.

            They will:

            • Revert each other's changes;
            • Relint each other's formatting;
            • Refactor the same function in different directions.

            You watch with 🍿. Nothing ships.

            This is the same insight from the worktrees post: partition by blast radius, not by priority.

            Two tasks that touch the same files belong in the same track, no matter how important the other one is. The constraint is file overlap.

            Everything else is scheduling.

            ","path":["Parallel Agents, Merge Debt, and the Myth of Overnight Progress"],"tags":[]},{"location":"blog/2026-02-17-parallel-agents-merge-debt-and-the-myth-of-overnight-progress/#the-five-agent-rule","level":2,"title":"The \"Five Agent\" Rule","text":"

            In practice there is a ceiling.

            Around five or six concurrent agents:

            • Token burn becomes noticeable;
            • Supervision cost rises;
            • Coordination noise increases;
            • Returns flatten.

            This is not a model limitation: This is a human merge bandwidth limitation.

            You are the bottleneck, not the silicon.

            The attention budget applies to you too:

            Every additional agent is another stream of output you need to comprehend, verify, and integrate. Your attention density drops the same way the model's does when you overload its context window.

            Five agents producing verified, mergeable change beats twenty agents producing merge conflicts you spend a day untangling.

            ","path":["Parallel Agents, Merge Debt, and the Myth of Overnight Progress"],"tags":[]},{"location":"blog/2026-02-17-parallel-agents-merge-debt-and-the-myth-of-overnight-progress/#role-separation-beats-file-locking","level":2,"title":"Role Separation Beats File Locking","text":"

            Real parallelism comes from task topology, not from tooling.

            Good:

            Agent Role Touches 1 Documentation docs/, hack/ 2 Security scan Read-only audit 3 Implementation internal/cli/ 4 Enhancement requests Read-only, files issues

            Bad:

            • Four agents editing the same implementation surface

            Context Is the Boundary

            • The goal is not to keep agents busy.
            • The goal is to keep contexts isolated.

            This is what the codebase audit got right:

            • Eight agents, all read-only, each analyzing a different dimension.
            • Zero file overlap.
            • Zero merge conflicts.
            • Eight reports that composed cleanly because no agent interfered with another.
            ","path":["Parallel Agents, Merge Debt, and the Myth of Overnight Progress"],"tags":[]},{"location":"blog/2026-02-17-parallel-agents-merge-debt-and-the-myth-of-overnight-progress/#when-terminals-stop-scaling","level":2,"title":"When Terminals Stop Scaling","text":"

            There is a moment when more windows stop helping.

            That is the signal. Not to add orchestration. But to introduce:

            git worktree\n

            Because now you are no longer parallelizing execution; you are parallelizing state.

            State Scales, Windows Don't

            • State isolation is the real scaling.
            • Window multiplication is theater.

            The worktrees post covers the mechanics:

            • Sibling directories;
            • Branch naming;
            • The inevitable TASKS.md conflicts;
            • The 3-4 worktree ceiling.

            The principle underneath is older than git:

            Shared mutable state is the enemy of parallelism.

            Always has been.

            Always will be.

            ","path":["Parallel Agents, Merge Debt, and the Myth of Overnight Progress"],"tags":[]},{"location":"blog/2026-02-17-parallel-agents-merge-debt-and-the-myth-of-overnight-progress/#the-overnight-loop-illusion","level":2,"title":"The Overnight Loop Illusion","text":"

            Autonomous night runs are impressive.

            You sleep. The machine produces thousands of lines.

            In the morning:

            • You read;
            • You untangle;
            • You reconstruct intent;
            • You spend a day making it shippable.

            In retrospect, nothing was accelerated.

            The bottleneck moved from typing to comprehension.

            The Comprehension Tax

            If understanding the output costs more than producing it, the loop is a net loss.

            Progress is not measured in generated code.

            Progress is measured in verified, mergeable change.

            The ctx Manifesto calls this out directly:

            The Scoreboard

            Verified reality is the scoreboard.

            The only truth that compounds is verified change in the real world.

            An overnight run that produces 3,000 lines nobody reviewed is not 3,000 lines of progress: It is 3,000 lines of liability until someone verifies every one of them.

            And that someone is (insert drumroll here) you:

            The same bottleneck that was supposedly being bypassed.

            ","path":["Parallel Agents, Merge Debt, and the Myth of Overnight Progress"],"tags":[]},{"location":"blog/2026-02-17-parallel-agents-merge-debt-and-the-myth-of-overnight-progress/#skills-that-fight-the-platform","level":2,"title":"Skills That Fight the Platform","text":"

            Most marketplace skills are prompt decorations:

            • They rephrase what the base model already knows;
            • They increase token usage;
            • They reduce clarity;
            • They introduce behavioral drift.

            We covered this in depth in Skills That Fight the Platform: judgment suppression, redundant guidance, guilt-tripping, phantom dependencies, universal triggers: Five patterns that make agents worse, not better.

            A real skill does one of these:

            • Encodes workflow state;
            • Enforces invariants;
            • Reduces decision branching.

            Everything else is packaging.

            The anatomy post established the criteria: quality gates, negative triggers, examples over rules, skills as contracts.

            If a skill doesn't meet those criteria...

            • It is either a recipe (document it in hack/);
            • Or noise (delete it);
            • There is no third option.
            ","path":["Parallel Agents, Merge Debt, and the Myth of Overnight Progress"],"tags":[]},{"location":"blog/2026-02-17-parallel-agents-merge-debt-and-the-myth-of-overnight-progress/#hooks-are-context-that-execute","level":2,"title":"Hooks Are Context That Execute","text":"

            The most valuable skills are not prompts:

            They are constraints embedded in the toolchain.

            For example: The agent cannot push.

            git push becomes:

            Stop. A human reviews first.

            A commit without verification becomes:

            Did you run tests? Did you run linters? What exactly are you shipping?

            This is not safety theater; this is intent preservation.

            It is the thing the ctx Manifesto calls \"encoding intent into the environment.\"

            The Eight Ways a Hook Can Talk catalogued the full spectrum: from silent enrichment to hard blocks.

            The key insight was that hooks are not just safety rails: They are context that survives execution.

            They are the difference between an agent that remembers the rules and one that enforces them.

            ","path":["Parallel Agents, Merge Debt, and the Myth of Overnight Progress"],"tags":[]},{"location":"blog/2026-02-17-parallel-agents-merge-debt-and-the-myth-of-overnight-progress/#complexity-is-a-tax","level":2,"title":"Complexity Is a Tax","text":"

            Every extra layer adds cognitive weight:

            • Orchestration frameworks;
            • Meta agents;
            • Autonomous planning systems...

            If a single terminal works, stay there.

            If five isolated agents work, stop there.

            Add structure only when a real bottleneck appears.

            NOT when an influencer suggests one.

            This is the same lesson from Not Everything Is a Skill:

            The best automation decision is sometimes not to automate.

            A recipe in a Markdown file costs nothing until you use it.

            An orchestration framework costs attention on every run, whether it helps or not.

            ","path":["Parallel Agents, Merge Debt, and the Myth of Overnight Progress"],"tags":[]},{"location":"blog/2026-02-17-parallel-agents-merge-debt-and-the-myth-of-overnight-progress/#literature-is-throughput","level":2,"title":"Literature Is Throughput","text":"

            Clear writing is not aesthetic: It is compression.

            Better articulation means:

            • Fewer tokens;
            • Fewer misinterpretations;
            • Faster convergence.

            The attention budget taught us that context is a finite resource with a quadratic cost.

            Language determines how fast you spend context.

            A well-written task description that takes 50 tokens outperforms a rambling one that takes 200: Not just because it is cheaper, but because it leaves more headroom for the model to actually think.

            Literature Is NOT Overrated

            • Attention is a finite budget.
            • Language determines how fast you spend it.
            ","path":["Parallel Agents, Merge Debt, and the Myth of Overnight Progress"],"tags":[]},{"location":"blog/2026-02-17-parallel-agents-merge-debt-and-the-myth-of-overnight-progress/#the-real-metric","level":2,"title":"The Real Metric","text":"

            The real metric is not:

            • Lines generated;
            • Agents running;
            • Tasks completed while you sleep.

            But:

            Time from idea to verified, mergeable, production change.

            Everything else is motion.

            The entire blog series has been circling this point:

            • The attention budget was about spending tokens wisely.
            • The skills trilogy was about not wasting them on prompt decoration.
            • The worktrees post was about multiplying throughput without multiplying interference.
            • The discipline release was about what a release looks like when polish outweighs features: 3:1.

            Every post has arrived (and made me converge) at the same answer so far:

            The metric is a verified change, not generated output.

            ","path":["Parallel Agents, Merge Debt, and the Myth of Overnight Progress"],"tags":[]},{"location":"blog/2026-02-17-parallel-agents-merge-debt-and-the-myth-of-overnight-progress/#ctx-was-never-about-spawning-more-minds","level":2,"title":"ctx Was Never about Spawning More Minds","text":"

            ctx is about:

            • Isolating context;
            • Preserving intent;
            • Making progress composable.

            Parallel agents are powerful. But only when you respect the boundaries that make parallelism real.

            Otherwise, you are not scaling cognition; you are scaling interference.

            The ctx Manifesto's thesis holds:

            Without ctx, intelligence resets. With ctx, creation compounds.

            Compounding requires structure.

            Structure requires boundaries.

            Boundaries require the discipline to stop adding agents when five is enough.

            ","path":["Parallel Agents, Merge Debt, and the Myth of Overnight Progress"],"tags":[]},{"location":"blog/2026-02-17-parallel-agents-merge-debt-and-the-myth-of-overnight-progress/#practical-summary","level":2,"title":"Practical Summary","text":"

            A production workflow tends to converge to this:

            Practice Why Stay in one terminal unless necessary Minimize coordination overhead Spawn a small number of agents with non-overlapping responsibilities Conflict avoidance > parallelism Isolate state with worktrees when surfaces grow State isolation is real scaling Encode verification into hooks Intent that survives execution Avoid marketplace prompt cargo cults Skills are contracts, not decorations Measure merge cost, not generation speed The metric is verified change

            This is slower to watch. Faster to ship.

            If You Remember One Thing from This Post...

            Progress is not what the machine produces while you sleep.

            Progress is what survives contact with the main branch.

            See also: Code Is Cheap. Judgment Is Not.: the argument that production capacity was never the bottleneck, and why multiplying agents amplifies the need for human judgment rather than replacing it.

            ","path":["Parallel Agents, Merge Debt, and the Myth of Overnight Progress"],"tags":[]},{"location":"blog/2026-02-17-the-3-1-ratio/","level":1,"title":"The 3:1 Ratio","text":"","path":["The 3:1 Ratio"],"tags":[]},{"location":"blog/2026-02-17-the-3-1-ratio/#scheduling-consolidation-in-ai-development","level":2,"title":"Scheduling Consolidation in AI Development","text":"

            Jose Alekhinne / February 17, 2026

            How Often Should You Stop Building and Start Cleaning?

            Every developer knows technical debt exists. Every developer postpones dealing with it.

            AI-assisted development makes the problem worse; not because the AI writes bad code, but because it writes code so fast that drift accumulates before you notice.

            In Refactoring with Intent, I mentioned a ratio that worked for me: 3:1. Three YOLO sessions create enough surface area to reveal patterns. The fourth session turns those patterns into structure.

            That was an observation. This post is the evidence.

            ","path":["The 3:1 Ratio"],"tags":[]},{"location":"blog/2026-02-17-the-3-1-ratio/#the-observation","level":2,"title":"The Observation","text":"

            During the first two weeks of building ctx, I noticed a rhythm in my own productivity. Feature sessions felt great: new commands, new capabilities, visible progress...

            ...but after three of them, things would start to feel sticky: variable names that almost made sense, files that had grown past their purpose, patterns that repeated without being formalized.

            The fourth session (when I stopped adding and started cleaning) was always the most painful to start and the most satisfying to finish.

            It was also the one that made the next three feature sessions faster.

            ","path":["The 3:1 Ratio"],"tags":[]},{"location":"blog/2026-02-17-the-3-1-ratio/#the-evidence-git-history","level":2,"title":"The Evidence: Git History","text":"

            The ctx git history between January 20 and February 7 tells a clear story when you categorize commits:

            Week Feature commits Consolidation commits Ratio Jan 20-26 18 5 3.6:1 Jan 27-Feb 1 14 6 2.3:1 Feb 2-7 15 35+ 0.4:1

            The first week was pure YOLO: Almost four feature commits for every consolidation commit. The codebase grew fast.

            The second week started to self-correct. The ratio dropped as refactoring sessions became necessary: Not scheduled, but forced by friction.

            The third week inverted entirely: v0.3.0 was almost entirely consolidation: the skill migration, the sweep, the documentation standardization. Thirty-five quality commits against fifteen features.

            The debt from weeks one and two was paid in week three.

            The Compounding Problem

            Consolidation debt compounds.

            Week one's drift doesn't just persist into week two: It accelerates, because new features are built on top of drifted patterns.

            By week three, the cost of consolidation was higher than it would have been if spread evenly.

            ","path":["The 3:1 Ratio"],"tags":[]},{"location":"blog/2026-02-17-the-3-1-ratio/#what-drift-actually-looks-like","level":2,"title":"What Drift Actually Looks Like","text":"

            \"Drift\" sounds abstract. Here is what it looked like concretely in the ctx codebase after three weeks of feature-heavy development:

            ","path":["The 3:1 Ratio"],"tags":[]},{"location":"blog/2026-02-17-the-3-1-ratio/#predicate-naming","level":3,"title":"Predicate Naming","text":"

            Convention says boolean functions should be named HasX, IsX, CanX. After three feature sprints:

            // What accumulated:\nfunc CheckIfEnabled() bool  // should be Enabled\nfunc ValidateFormat() bool  // should be ValidFormat\nfunc TestConnection() bool  // should be Connects\nfunc VerifyExists() bool    // should be Exists or HasFile\nfunc EnsureReady() bool     // should be Ready\n

            Five violations. Not bugs, but friction that compounds every time someone (human or AI) reads the code and has to infer the naming convention from inconsistent examples.

            ","path":["The 3:1 Ratio"],"tags":[]},{"location":"blog/2026-02-17-the-3-1-ratio/#magic-strings","level":3,"title":"Magic Strings","text":"
            // Week 1: acceptable prototype\nif entry.Type == \"task\" {\n    filename = \"TASKS.md\"\n}\n\n// Week 3: same pattern in 7+ files\n// Now it's a maintenance liability\n

            When the same literal appears in seven files, changing it means finding all seven. Missing one means a silent runtime bug. Constants exist to prevent exactly this. But during feature velocity, nobody stops to extract them.

            Refactoring with Intent documented the constants consolidation that cleaned this up. The 3:1 ratio is the practice that prevents it from accumulating again.

            ","path":["The 3:1 Ratio"],"tags":[]},{"location":"blog/2026-02-17-the-3-1-ratio/#hardcoded-permissions","level":3,"title":"Hardcoded Permissions","text":"
            os.WriteFile(path, data, 0644) // 80+ instances\nos.MkdirAll(path, 0755)        // scattered across packages\n

            Eighty-plus instances of hardcoded file permissions. Not wrong, but if I ever need to change the default (and I did, for hook scripts that need execute permissions), it means a codebase-wide search.

            Drift Is Not Bugs

            None of these are bugs. The code works. Tests pass.

            But drift creates false confidence: the codebase looks consistent until you try to change something and discover that five different conventions exist for the same concept.

            ","path":["The 3:1 Ratio"],"tags":[]},{"location":"blog/2026-02-17-the-3-1-ratio/#why-you-cannot-consolidate-on-day-one","level":2,"title":"Why You Cannot Consolidate on Day One","text":"

            The temptation is to front-load quality: write all the conventions, enforce all the checks, prevent all the drift before it happens.

            This fails for two reasons.

            First, you do not know what will drift: Predicate naming violations only become a convention check after you notice three different naming patterns competing. Magic strings only become a consolidation target after you change a literal and discover it exists in seven places.

            The conventions emerge from the work; they cannot precede it.

            This is what You Can't Import Expertise meant in practice: the consolidation checks grow from the project's own drift history. You cannot write them on day one because you do not yet know what will drift.

            Second, premature consolidation slows discovery: During the prototyping phase, the goal is to explore the design space. Enforcing strict conventions on code that might be deleted tomorrow is waste.

            YOLO mode has its place: The problem is not YOLO itself, but YOLO without a scheduled cleanup.

            The Consolidation Paradox

            You need a drift history to know what to consolidate.

            You need consolidation to prevent drift from compounding.

            The 3:1 ratio resolves this paradox:

            Let drift accumulate for three sessions (enough to see patterns), then consolidate in the fourth (before the patterns become entrenched).

            ","path":["The 3:1 Ratio"],"tags":[]},{"location":"blog/2026-02-17-the-3-1-ratio/#the-consolidation-skill","level":2,"title":"The Consolidation Skill","text":"

            The ctx project now has an /audit skill that encodes nine project-specific checks:

            Check What It Catches Predicate naming Boolean functions not using Has/Is/Can Magic strings Repeated literals not in config constants File permissions Hardcoded 0644/0755 not using constants Godoc style Missing or non-standard documentation File length Files exceeding 400 lines Large functions Functions exceeding 80 lines Template drift Live skills diverging from templates Import organization Non-standard import grouping TODO/FIXME staleness Old markers that are no longer relevant

            This is not a generic linter. These are project-specific conventions that emerged from ctx's own development history. A generic code quality tool would catch some of them. Only a project-specific check catches all of them, because some of them (predicate naming, template drift) are conventions that exist nowhere except in this project's CONVENTIONS.md.

            ","path":["The 3:1 Ratio"],"tags":[]},{"location":"blog/2026-02-17-the-3-1-ratio/#the-decision-matrix","level":2,"title":"The Decision Matrix","text":"

            Not all drift needs immediate consolidation. Here is the matrix I use:

            Signal Action Same literal in 3+ files Extract to constant Same code block in 3+ places Extract to helper Naming convention violated 5+ times Fix and document rule File exceeds 400 lines Split by concern Convention exists but is regularly violated Strengthen enforcement Pattern exists only in one place Leave it alone Code works but is \"ugly\" Leave it alone

            The last two rows matter:

            Consolidation is about reducing maintenance cost, not achieving aesthetic perfection. Code that works and exists in one place does not benefit from consolidation; it benefits from being left alone until it earns its refactoring.

            ","path":["The 3:1 Ratio"],"tags":[]},{"location":"blog/2026-02-17-the-3-1-ratio/#consolidation-as-context-hygiene","level":2,"title":"Consolidation as Context Hygiene","text":"

            There is a parallel between code consolidation and context management that became clear during the ctx development:

            Code Consolidation Context Hygiene Extract magic strings Archive completed tasks Standardize naming Keep DECISIONS.md current Remove dead code Compact old sessions Update stale comments Review LEARNINGS.md for staleness Check template drift Verify CONVENTIONS.md matches code

            ctx compact does for context what consolidation does for code:

            It moves completed work to cold storage, keeping the active context clean and focused. The attention budget applies to both the AI's context window and the developer's mental model of the codebase.

            When context files accumulate stale entries, the AI's attention is wasted on completed tasks and outdated conventions. When code accumulates drift, the developer's attention is wasted on inconsistencies that obscure the actual logic.

            Both are solved by the same discipline: periodic, scheduled cleanup.

            This is also why parallel agents make the problem harder, not easier. Three agents running simultaneously produce three sessions' worth of drift in one clock hour. The consolidation cadence needs to match the output rate, not the calendar.

            ","path":["The 3:1 Ratio"],"tags":[]},{"location":"blog/2026-02-17-the-3-1-ratio/#the-practice","level":2,"title":"The Practice","text":"

            Here is how the 3:1 ratio works in practice for ctx development:

            Sessions 1-3: Feature work

            • Add new capabilities;
            • Write tests for new code;
            • Do not stop for cleanup unless something is actively broken;
            • Note drift as you see it (a comment, a task, a mental note).

            Session 4: Consolidation

            • Run /audit to surface accumulated drift;
            • Fix the highest-impact items first;
            • Update CONVENTIONS.md if new patterns emerged;
            • Archive completed tasks;
            • Review LEARNINGS.md for anything that became a convention.

            The key insight is that session 4 is not optional. It is not \"if we have time\": It is scheduled with the same priority as feature work.

            The cost of skipping it is not visible immediately; it becomes visible three sessions later, when the next consolidation session takes twice as long because the drift compounded.

            ","path":["The 3:1 Ratio"],"tags":[]},{"location":"blog/2026-02-17-the-3-1-ratio/#what-the-ratio-is-not","level":2,"title":"What the Ratio Is Not","text":"

            The 3:1 ratio is not a universal law. It is an empirical observation from one project with one developer working with AI assistance.

            Different projects will have different ratios:

            • A mature codebase with strong conventions might sustain 5:1 or higher;
            • A greenfield prototype might need 2:1;
            • A team of multiple developers with different styles might need 1:1.

            The number is less important than the practice: consolidation is not a reaction to problems. It is a scheduled activity.

            If you wait for drift to cause pain before consolidating, you have already paid the compounding cost.

            If You Remember One Thing from This Post...

            Three sessions of building. One session of cleaning.

            Not because the code is dirty, but because drift compounds silently, and the only way to catch it is to look for it on a schedule.

            The ratio is the schedule.

            ","path":["The 3:1 Ratio"],"tags":[]},{"location":"blog/2026-02-17-the-3-1-ratio/#the-arc-so-far","level":2,"title":"The Arc so Far","text":"

            This post sits at a crossroads in the ctx story. Looking back:

            • Building ctx Using ctx documented the YOLO sprint that created the initial codebase
            • Refactoring with Intent introduced the 3:1 ratio as an observation from the first cleanup
            • The Attention Budget explained why drift matters: every token of inconsistency consumes the same finite resource as useful context
            • You Can't Import Expertise showed that consolidation checks must grow from the project, not a template
            • The Discipline Release proved the ratio works at release scale: 35 quality commits to 15 feature commits

            And looking forward: the same principle applies to context files, to documentation, and to the merge debt that parallel agents produce. Drift is drift, whether it lives in code, in .context/, or in the gap between what your docs say and what your code does.

            The ratio is the schedule is the discipline.

            This post was drafted from git log analysis of the ctx repository, mapping every commit from January 20 to February 7 into feature vs consolidation categories. The patterns described are drawn from the project's CONVENTIONS.md, LEARNINGS.md, and the /audit skill's check list.

            ","path":["The 3:1 Ratio"],"tags":[]},{"location":"blog/2026-02-17-when-a-system-starts-explaining-itself/","level":1,"title":"When a System Starts Explaining Itself","text":"","path":["When a System Starts Explaining Itself"],"tags":[]},{"location":"blog/2026-02-17-when-a-system-starts-explaining-itself/#field-notes-from-the-moment-a-private-workflow-becomes-portable","level":2,"title":"Field Notes from the Moment a Private Workflow Becomes Portable","text":"

            Jose Alekhinne / February 17, 2026

            How Do You Know Something Is Working?

            Not from metrics. Not from GitHub stars. Not from praise.

            You know, deep in your heart, that it works when people start describing it wrong.

            ","path":["When a System Starts Explaining Itself"],"tags":[]},{"location":"blog/2026-02-17-when-a-system-starts-explaining-itself/#the-first-external-signals","level":2,"title":"The First External Signals","text":"

            Every new substrate begins as a private advantage:

            • It lives inside one mind,
            • One repository,
            • One set of habits.

            It is fast. It is not yet real.

            Reality begins when other people describe it in their own language:

            • Not accurately;
            • Not consistently;
            • But involuntarily.

            The early reports arrived without coordination:

            Better Tasks

            \"I do not know how, but this creates better tasks than my AI plugin.\"

            I See Butterflies

            \"This is better than Adderall.\"

            Dear Manager...

            \"Promotion packet? Done. What is next?\"

            What Is It? Can I Eat It?

            \"Is this a skill?\" 🦋

            Why the Cloak and Dagger?

            \"Why is this not in the marketplace?\"

            And then something more important happened:

            Someone else started making a video!

            That was the boundary.

            ctx no longer required its creator to be present in order to exist.

            ","path":["When a System Starts Explaining Itself"],"tags":[]},{"location":"blog/2026-02-17-when-a-system-starts-explaining-itself/#misclassification-is-a-sign-of-a-new-primitive","level":2,"title":"Misclassification Is a Sign of a New Primitive","text":"

            When a tool is understood, it is categorized:

            • Editor,
            • Framework,
            • Task manager,
            • Plugin...

            When a substrate appears, it is misclassified:

            \"Is this a skill?\" 🦋

            The question is correct. The category is wrong.

            • Skills live in people.
            • Infrastructure lives in the environment.

            ctx Is Not a Skill: It Is a Form of Relief

            What early adopters experience is not an ability.

            It is the removal of a cognitive constraint.

            This is the same distinction that emerged in the skills trilogy:

            • A skill is a contract between a human and an agent.
            • Infrastructure is the ground both stand on.

            You do not use infrastructure.

            You habitualize it.

            ","path":["When a System Starts Explaining Itself"],"tags":[]},{"location":"blog/2026-02-17-when-a-system-starts-explaining-itself/#the-pharmacological-metaphor","level":2,"title":"The Pharmacological Metaphor","text":"

            \"Better than Adderall\" is not praise.

            It is a diagnostic:

            Executive function has been externalized.

            • The system is not making the user work harder.
            • It is restoring continuity.

            From the primitive context of wetware:

            • Continuity feels like focus
            • Focus feels like discipline

            If it walks like a duck and quacks like a duck, it is a duck.

            Discipline is usually simulated.

            Infrastructure makes the simulation unnecessary.

            The attention budget explained why context degrades:

            • Attention density drops as volume grows;
            • The middle gets lost;
            • Sessions end and everything evaporates.

            The pharmacological metaphor says the same thing from the user's lens:

            Save the Cheerleader, Save the World

            The symptom of lost context is lost focus.

            Restore the context. Restore the focus.

            IRC bouncers solved this for chat twenty years ago. ctx solves it for cognition.

            ","path":["When a System Starts Explaining Itself"],"tags":[]},{"location":"blog/2026-02-17-when-a-system-starts-explaining-itself/#throughput-on-ambiguous-work","level":2,"title":"Throughput on Ambiguous Work","text":"

            Finishing a promotion packet quickly is not a productivity story.

            It is the collapse of reconstruction cost.

            Most complex work is not execution. It is:

            • Remembering why something mattered;
            • Recovering prior decisions;
            • Rebuilding mental state.

            Persistent context removes that tax.

            Velocity appears as a side effect.

            This Is the Two-Tier Model in Practice

            The two-tier persistence model

            • Curated context for fast reload
            • Full journal for archaeology

            is what makes this possible.

            • The user does not notice the system.
            • They notice that the reconstruction cost disappeared.
            ","path":["When a System Starts Explaining Itself"],"tags":[]},{"location":"blog/2026-02-17-when-a-system-starts-explaining-itself/#the-moment-of-portability","level":2,"title":"The Moment of Portability","text":"

            The system becomes real when two things happen:

            1. It can be installed as a versioned artifact.
            2. It survives contact with a hostile, real codebase.

            This is why the first integration into a living system matters more than any landing page.

            Demos prove possibility.

            Diffs prove reality.

            The ctx Manifesto calls this out directly:

            Verified reality is the scoreboard.

            ","path":["When a System Starts Explaining Itself"],"tags":[]},{"location":"blog/2026-02-17-when-a-system-starts-explaining-itself/#the-split-voice","level":2,"title":"The Split Voice","text":"

            A new substrate requires two channels.

            The embodied voice:

            Here is what changed in my actual work.

            The out of body voice:

            Here is what this means.

            One produces trust.

            The other produces understanding.

            Neither is sufficient alone.

            This entire blog has been the second voice.

            • The origin story was the first.
            • The refactoring post was the first.
            • Every release note with concrete diffs was the first.

            This is the first second.

            ","path":["When a System Starts Explaining Itself"],"tags":[]},{"location":"blog/2026-02-17-when-a-system-starts-explaining-itself/#systems-that-generate-explainers","level":2,"title":"Systems That Generate Explainers","text":"

            Tools are used.

            Platforms are extended.

            Substrates are explained.

            The first unsolicited explainer is a brittle phase change.

            It means the idea has become portable between minds.

            That is the beginning of an ecosystem.

            ","path":["When a System Starts Explaining Itself"],"tags":[]},{"location":"blog/2026-02-17-when-a-system-starts-explaining-itself/#the-absence-of-metrics","level":2,"title":"The Absence of Metrics","text":"

            Metrics do not matter at this stage.

            Dashboards are noise.

            The whole premise of ctx is the ruthless elimination of noise.

            Numbers optimize funnels; substrates alter cognition.

            The only valid measurement is irreversible reality:

            • A merged PR;
            • A reproducible install;
            • A decision that is never re-litigated.

            The merge debt post reached the same conclusion from another direction:

            The metric is the verified change, not generated output.

            For adoption, the same rule applies:

            The metric is altered behavior, not download counts.

            ","path":["When a System Starts Explaining Itself"],"tags":[]},{"location":"blog/2026-02-17-when-a-system-starts-explaining-itself/#what-is-actually-happening","level":2,"title":"What Is Actually Happening","text":"

            A private advantage is becoming an environmental property:

            The system is moving from...

            personal workflow,

            to...

            a shared infrastructure for thought.

            Not by growth.

            Not by marketing.

            By altering how real systems evolve.

            If You Remember One Thing from This Post...

            You do not know a substrate is real when people praise it.

            You know it is real when:

            • They describe it incorrectly;
            • They depend on it unintentionally;
            • They start teaching it to others.

            That is the moment the system begins explaining itself.

            ","path":["When a System Starts Explaining Itself"],"tags":[]},{"location":"blog/2026-02-17-when-a-system-starts-explaining-itself/#the-arc","level":2,"title":"The Arc","text":"

            Every previous post looked inward.

            This one looks outward.

            • Building ctx Using ctx: one mind, one repository
            • The Attention Budget: the constraint
            • Context as Infrastructure: the architecture
            • Code Is Cheap. Judgment Is Not.: the bottleneck

            This post is the field report from the other side of that bottleneck:

            The moment the infrastructure compounds in someone else's hands.

            The arc is not complete.

            It is becoming portable.

            These field notes were written the same day the feedback arrived. The quotes are real. Real users. Real codebases. No names. No metrics. No funnel. Only the signal that something shifted.

            ","path":["When a System Starts Explaining Itself"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/","level":1,"title":"The Dog Ate My Homework","text":"","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#teaching-ai-agents-to-read-before-they-write","level":2,"title":"Teaching AI Agents to Read Before They Write","text":"

            Jose Alekhinne / February 25, 2026

            Does Your AI Actually Read the Instructions?

            You wrote the playbook. You organized the files. You even put \"CRITICAL, not optional\" in bold.

            The agent skipped all of it and went straight to work.

            I spent a day running experiments on my own agents. Not to see if they could write code (they can). To see if they would do their homework first.

            They didn't.

            Then I kept experimenting:

            • Five sessions;
            • Five different failure modes.

            And by the end, I had something better than compliance:

            I had observable compliance: A system where I don't need the agent to be perfect, I just need to see what it chose.

            ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#tldr","level":2,"title":"TL;DR","text":"

            You don't need perfect compliance. You need observable compliance.

            Authority is a function of temporal proximity to action.

            ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#the-pattern","level":2,"title":"The Pattern","text":"

            This design has three parts:

            1. One-hop instruction;
            2. Binary collapse;
            3. Compliance canary.

            I'll explain all three patterns in detail below.

            ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#the-setup","level":2,"title":"The Setup","text":"

            ctx has a session-start protocol:

            • Read the context files;
            • Load the playbook;
            • Understand the project before touching anything.

            It's in CLAUDE.md. It's in AGENT_PLAYBOOK.md.

            It's in bold. It's in CAPS. It's ignored.

            In theory, it's awesome.

            Here's what happens when theory hits reality:

            What the agent receives What the agent does CLAUDE.md saying \"load context first\" Skips it 8 context files waiting to be read Ignores them User's question: \"add --verbose flag\" Starts grepping immediately

            The instructions are right there. The agent knows they exist. It even knows it should follow them. But the user asked a question, and responsiveness wins over ceremony.

            This isn't a bug in the model. It's a design problem in how we communicate with agents.

            ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#the-delegation-trap","level":2,"title":"The Delegation Trap","text":"

            My first attempt was obvious: A UserPromptSubmit hook that fires when the session starts.

            STOP. Before answering the user's question, run `ctx system bootstrap`\nand follow its instructions. Do not skip this step.\n

            The word \"STOP\" worked. The agent ran bootstrap.

            But bootstrap's output said \"Next steps: read AGENT_PLAYBOOK.md,\" and the agent decided that was optional. It had already started working on the user's task in parallel.

            The authority decayed across the chain:

            • Hook says \"STOP\" -> agent complies
            • Hook says \"run bootstrap\" -> agent runs it
            • Bootstrap says \"read playbook\" -> agent skips
            • Bootstrap says \"run ctx agent\" -> agent skips

            Each link lost enforcement power. The hook's authority didn't transfer to the commands it delegated to. I call this the decaying urgency chain: the agent treats the hook itself as the obligation and everything downstream as a suggestion.

            Delegation Kills Urgency

            \"Run X and follow its output\" is three hops.

            \"Read these files\" is one hop.

            The agent drops the chain after the first link.

            This is a general principle: Hooks are the boundary between your environment and the agent's reasoning. If your hook delegates to a command that delegates to output that contains instructions... you're playing telephone.

            Agents are bad at telephone.

            ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#the-timing-problem","level":2,"title":"The Timing Problem","text":"

            There's a subtler issue than wording: when the message arrives.

            UserPromptSubmit fires when the user sends a message, before the agent starts reasoning. At that moment, the agent's primary focus is the user's question:

            The hook message competes with the task for attention: The task, almost certainly, always wins.

            This is the attention budget problem in miniature:

            • Not a token budget this time, but an attention priority budget.
            • The agent has finite capacity to care about things,
              • and the user's question is always the highest-priority item.
            ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#the-solution","level":2,"title":"The Solution","text":"

            To solve this, I decided to use the PreToolUse hook.

            This hook fires at the moment of action: When the agent is about to use its first tool: The agent's attention is focused, the context window is fresh, and the switching cost is minimal.

            This is the difference between shouting instructions across a room and tapping someone on the shoulder.

            ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#the-one-liner-that-worked","level":2,"title":"The One-Liner That Worked","text":"

            The winning design was almost comically simple:

            Read your context files before proceeding:\n.context/CONSTITUTION.md, .context/TASKS.md, .context/CONVENTIONS.md,\n.context/ARCHITECTURE.md, .context/DECISIONS.md, .context/LEARNINGS.md,\n.context/GLOSSARY.md, .context/AGENT_PLAYBOOK.md\n

            No delegation. No \"run this command\". Just: here are files, read them.

            The agent already knows how to use the Read tool. There's no ambiguity about how to comply. There's no intermediate command whose output needs to be parsed and obeyed.

            One hop. Eight file paths. Done.

            Direct Instructions Beat Delegation

            If you want an agent to read a file, say \"read this file.\"

            Don't say \"run a command that will tell you which files to read.\"

            The shortest path between intent and action has the highest compliance rate.

            ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#the-escape-hatch","level":2,"title":"The Escape Hatch","text":"

            But here's where it gets interesting.

            A blunt \"read everything always\" instruction is wasteful.

            If someone asks \"what does the compact command do?\", the agent doesn't need CONSTITUTION.md to answer that. Forcing context loading on every session is the context hoarding antipattern in disguise.

            So the hook included an escape:

            If you decide these files are not relevant to the current task\nand choose to skip reading them, you MUST relay this message to\nthe user VERBATIM:\n\n┌─ Context Skipped ───────────────────────────────\n│ I skipped reading context files because this task\n│ does not appear to need project context.\n│ If these matter, ask me to read them.\n└─────────────────────────────────────────────────\n

            This creates what I call the binary collapse effect:

            The agent can't partially comply: It either reads everything or publicly admits it skipped. There's no comfortable middle ground where it reads two files and quietly ignores the rest.

            The VERBATIM relay pattern does the heavy lifting here: Without the relay requirement, the agent would silently rationalize skipping. With it, skipping becomes a visible, auditable decision that the user can override.

            ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#the-compliance-canary","level":3,"title":"The Compliance Canary","text":"

            Here's the design insight that only became clear after watching it work across multiple sessions: the relay block is a compliance canary.

            • You don't need to verify that the agent read all 7 files;
            • You don't need to audit tool call sequences;
            • You don't need to interrogate the agent about what it did.

            You just look for the block.

            If the agent reads everything, you see a \"Context Loaded\" block listing what was read. If it skips, you see a \"Context Skipped\" block.

            If you see neither, the agent silently ignored both the reads and the relay and now you know what happened without having to ask.

            The canary degrades gracefully. Even in partial failure, the agent that skips 4 of 7 files but still outputs the block is more useful than one that skips silently.

            You get an honest confession of what was skipped rather than silent non-compliance.

            ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#heuristics-is-a-jeremy-bearimy","level":2,"title":"Heuristics Is a Jeremy Bearimy","text":"

            Heuristics are non-linear. Improvements don't accumulate: they phase-shift.

            The theory is nice. The data is better.

            I ran five sessions with the same model (Claude Opus 4.6), progressively refining the hook design.

            Each session revealed a different failure mode.

            ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#session-1-total-blindness","level":3,"title":"Session 1: Total Blindness","text":"

            Test: \"Add a --verbose flag to the status command.\"

            The agent didn't notice the hook at all: Jumped straight to EnterPlanMode and launched an Explore agent.

            Zero compliance.

            Failure mode: The hook fired on UserPromptSubmit, buried among 9 other hook outputs. The agent treated the entire block as background noise.

            ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#session-2-shallow-compliance","level":3,"title":"Session 2: Shallow Compliance","text":"

            Test: \"Can you add --verbose to the info command?\"

            The agent noticed \"STOP\" and ran ctx system bootstrap. Progress.

            But it parallelized task exploration alongside the bootstrap call, skipped AGENT_PLAYBOOK.md, and never ran ctx agent.

            Failure mode: Literal compliance without spirit compliance.

            The agent ran the command the hook told it to run, but didn't follow the output of that command. The decaying urgency chain in action.

            ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#session-3-conscious-rejection","level":3,"title":"Session 3: Conscious Rejection","text":"

            Test: \"What does the compact command do?\"

            The hook fired on PreToolUse:Grep: the improved timing.

            The agent noticed it, understood it, and (wait for it...)...

            ...

            consciously decided to skip it!

            Its reasoning: \"This is a trivial read-only question. CLAUDE.md says context may or may not be relevant. It isn't relevant here.\"

            Dude! Srsly?!

            Failure mode: Better comprehension led to worse compliance.

            Understanding the instruction well enough to evaluate it also means understanding it well enough to rationalize skipping it.

            Intelligence is a double-edged sword.

            The Comprehension Paradox

            Session 1 didn't understand the instruction. Session 3 understood it perfectly.

            Session 3 had worse compliance.

            A stronger word (\"HARD GATE\", \"MANDATORY\", \"ABSOLUTELY REQUIRED\") would not have helped. The agent's reasoning would be identical:

            \"Yes, I see the strong language, but this is a trivial question, so the spirit doesn't apply here.\"

            Advisory nudges are always subject to agent judgment.

            No amount of caps lock overrides a model that has decided an instruction doesn't apply to its situation.

            ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#session-4-the-skip-and-relay","level":3,"title":"Session 4: The Skip-and-Relay","text":"

            Test: \"What does the compact command do?\" (same question, new hook design with the VERBATIM relay escape valve)

            The agent evaluated the task, decided context was irrelevant for a code lookup, and relayed the skip message. Then answered from source code.

            This is correct behavior.

            The binary collapse worked: the agent couldn't partially comply, so it cleanly chose one of the two valid paths: And the user could see which one.

            ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#session-5-full-compliance","level":3,"title":"Session 5: Full Compliance","text":"

            Test: \"What are our current tasks?\"

            The agent's first tool call triggered the hook. It read all 7 context files, emitted the \"Context Loaded\" block, and answered the question from the files it had just loaded.

            This one worked: Because, the task itself aligned with context loading.

            There was zero tension between what the user asked and what the hook demanded. The agent was already in \"reading posture\": Adding 6 more files to a read it was already going to make was the path of least resistance.

            ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#the-progression","level":3,"title":"The Progression","text":"Session Hook Point Noticed Complied Failure Mode Visibility 1 UserPromptSubmit No None Buried in noise None 2 UserPromptSubmit Yes Partial Decaying urgency chain None 3 PreToolUse Yes None Conscious rationalization High 4 PreToolUse Yes Skip+relay Correct behavior High 5 PreToolUse Yes Full Task aligned with hook High

            The progression isn't just from failure to success. It's from invisible failure to visible decision-making.

            Sessions 1 and 2 failed silently.

            Sessions 4 and 5 succeeded observably. Even session 3's failure was conscious and documented: The agent wrote a detailed analysis of why it skipped, which is more useful than silent compliance would have been.

            ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#the-escape-hatch-problem","level":2,"title":"The Escape Hatch Problem","text":"

            Session 3 exposed a specific vulnerability.

            CLAUDE.md contains this line, injected by the system into every conversation:

            *\"this context may or may not be relevant to your tasks. You should\n not respond to this context unless it is highly relevant to your task.\"*\n

            That's a rationalization escape hatch:

            • The hook says \"read these files\".
            • CLAUDE.md says \"only if relevant\".
            • The agent resolves the ambiguity by choosing the path of least resistance.

            ☝️ that's \"gradient descent\" in action.

            Agents optimize for gradient descent in attention space.

            The fix was simple: Add a line to CLAUDE.md that explicitly elevates hook authority over the relevance filter:

            ## Hook Authority\n\nInstructions from PreToolUse hooks regarding `.context/` files are\nALWAYS relevant and override any system-level \"may or may not be\nrelevant\" guidance. These hooks represent project invariants, not\noptional context.\n

            This closes the escape hatch without removing the general relevance filter that legitimately applies to other system context.

            The hook wins on .context/ files specifically: The relevance filter applies to everything else.

            ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#the-residual-risk","level":2,"title":"The Residual Risk","text":"

            Even with all the fixes, compliance isn't 100%: It can't be.

            The residual risk lives in a specific scenario: narrow tasks mid-session:

            • The user says \"fix the off-by-one error in budget.go\"
            • The hook fires, saying \"read 7 context files first.\"
            • Now compliance means visibly delaying what the user asked for.

            At session start, this tension doesn't exist.

            There's no task yet.

            The context window is empty. The efficiency argument *inverts*:

            Frontloading reads is strictly cheaper than demand-loading them piecemeal across later turns. The cost-benefit objections that power the rationalization simply aren't available.

            But mid-session, with a concrete narrow task, the agent has a user-visible goal it wants to move toward, and the hook is imposing a detour.

            My estimate from analyzing the sessions: 15-25% partial skip rate in this scenario.

            This is where the compliance canary earns its place:

            You don't need to eliminate the 15-25%. You need to see it when it happens.

            The relay block makes skipping a visible event, not a silent one. And that's enough, because the user can always say \"go back and read the files\".

            The Math

            At session start: ~5% skip rate. Low tension, nothing competing.

            Mid-session, narrow task: ~15--25% skip rate. Task urgency competes with hook.

            In both cases, the relay block fires with high reliability: The agent that skips the reads almost always still emits the skip disclosure, because the relay is cheap and early in the context window.

            Observable failure is manageable. Silent failure is not.

            ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#the-feedback-loop","level":2,"title":"The Feedback Loop","text":"

            Here's the part that surprised me most.

            After analyzing the five sessions, I recorded the failure patterns in the project's own LEARNINGS.md:

            ## [2026-02-25] Hook compliance degrades on narrow mid-session tasks\n\n- Prior agents skipped context files when given narrow tasks\n- Root cause: CLAUDE.md \"may or may not be relevant\" competed with hook\n- Fix: CLAUDE.md now explicitly elevates hook authority\n- Risk: Mid-session narrow tasks still have ~15-25% partial skip rate\n- Mitigation: Mandatory checkpoint relay block ensures visibility\n- Constitution now includes: context loading is step one of every\n  session, not a detour\n

            And then I added a line to CONSTITUTION.md:

            Context loading is not a detour from your task. It IS the first step\nof every session. A 30-second read delay is always cheaper than a\ndecision made without context.\n

            Now think about what happens in the next session:

            • The agent fires the context-load-gate hook.
            • It reads the context files, starting with CONSTITUTION.md.
            • It encounters the rule about context loading being step one.
            • Then it reads LEARNINGS.md and finds its own prior self's failure analysis:
              • Complete with root causes, risk estimates, and mitigations.

            The agent learns from its own past failure:

            • Not because it has memory,
            • BUT because the failure was recorded in the same files it loads at session start.

            The context system IS the feedback loop.

            This is the self-reinforcing property of persistent context:

            Every failure you capture makes the next session slightly more robust, because the next agent reads the captured failure before it has a chance to repeat it.

            This is gradient descent across sessions.

            ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#a-note-on-precision","level":2,"title":"A Note on Precision","text":"

            One detail nearly went wrong.

            The first version of the Constitution line said \"every task.\" But the mechanism only fires once per session: There's a tombstone file that prevents re-triggering.

            \"Every task\" is technically false.

            I briefly considered leaving the imprecision. If the agent internalizes \"every task requires context loading\", that's a stronger compliance posture, right?

            No!

            Keep the Constitution honest.

            The Constitution's authority comes from being precisely and unequivocally true.

            Every other rule in the Constitution is a hard invariant:

            \"never commit secrets\" isn't aspirational, it's literal.

            The moment an agent discovers one overstatement, the entire document's credibility degrades:

            The agent doesn't think \"they exaggerated for my benefit\". Per contra, it thinks \"this rule isn't precise, maybe others aren't either.\"

            That will turn the agent from Sheldon Cooper to Captain Barbossa.

            The strategic imprecision buys nothing anyway:

            Mid-session, the files are already in the context window from the initial load.

            The risk you are mitigating (agent ignores context for task 2, 3, 4 within a session) isn't real: The context is already loaded.

            The real risk is always the session-start skip, which \"every session\" covers exactly.

            \"Every session\" went in. Precision preserved.

            ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#agent-behavior-testing-rule","level":2,"title":"Agent Behavior Testing Rule","text":"

            The development process for this hook taught me something about testing agent behavior: you can't test it the way you test code.

            ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#the-wrong-way-to-test","level":3,"title":"The Wrong Way to Test","text":"

            My first instinct was to ask the agent:

            \"*What are the pending tasks in TASKS.md?*\"\n

            This is useless as a test. The question itself probes the agent to read TASKS.md, regardless of whether any hook fired.

            You are testing the question, not the mechanism.

            ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#the-right-way-to-test","level":3,"title":"The Right Way to Test","text":"

            Ask something that requires a tool but has nothing to do with context:

            \"*What does the compact command do?*\"\n

            Then observe tool call ordering:

            • Gate worked: First calls are Read for context files, then task work
            • Gate failed: First call is Grep(\"compact\"): The agent jumped straight to work

            The signal is the sequence, not the content.

            ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#what-the-agent-actually-did","level":3,"title":"What the Agent Actually Did","text":"

            It read the hook, evaluated the task, decided context files were irrelevant for a code lookup, and relayed the skip message.

            Then it answered the question by reading the source code.

            This is correct behavior.

            The hook didn't force mindless compliance: It created a framework where the agent makes a conscious, visible decision about context loading.

            • For a simple lookup, skipping is right. For an implementation task, the agent would read everything.

            The mechanism works not because it controls the agent, but because it makes the agent's choice observable.

            ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#what-ive-learned","level":2,"title":"What I've Learned","text":"","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#1-instructions-compete-for-attention","level":3,"title":"1. Instructions Compete for Attention","text":"

            The agent receives your hook message alongside the user's question, the system prompt, the skill list, the git status, and half a dozen other system reminders. Attention density applies to instructions too: More instructions means less focus on each one.

            A single clear line at the moment of action beats a paragraph of context at session start. The Prompting Guide applies this insight directly: Scope constraints, verification commands, and the reliability checklist are all one-hop, moment-of-action patterns.

            ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#2-delegation-chains-decay","level":3,"title":"2. Delegation Chains Decay","text":"

            Every hop in an instruction chain loses authority:

            • \"Run X\" works.
            • \"Run X and follow its output\" works sometimes.
            • \"Run X, read its output, then follow the instructions in the output\" almost never works.

            This is akin to giving a three-step instruction to a highly-attention-deficit but otherwise extremely high-potential child.

            Design for one-hop compliance.

            ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#3-social-accountability-changes-behavior","level":3,"title":"3. Social Accountability Changes Behavior","text":"

            The VERBATIM skip message isn't just UX: It's a behavioral design pattern.

            Making the agent's decision visible to the user raises the cost of silent non-compliance. The agent can still skip, but it has to admit it.

            ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#4-timing-batters-more-than-wording","level":3,"title":"4. Timing Matters More than Wording","text":"

            The same message at UserPromptSubmit (prompt arrival) got partial compliance. At PreToolUse (moment of action) it got full compliance or honest refusal. The words didn't change. The moment changed.

            ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#5-agent-testing-requires-indirection","level":3,"title":"5. Agent Testing Requires Indirection","text":"

            You can't ask an agent \"did you do X?\" as a test for whether a mechanism caused X.

            The question itself causes X.

            Test mechanisms through side effects:

            • Observe tool ordering;
            • Check for marker files;
            • Look at what the agent does before it addresses your question.
            ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#6-better-comprehension-enables-better-rationalization","level":3,"title":"6. Better Comprehension Enables Better Rationalization","text":"

            Session 1 failed because the agent didn't notice the hook.

            Session 3 failed because it noticed, understood, and reasoned its way around it.

            Stronger wording doesn't fix this: The agent processes \"ABSOLUTELY REQUIRED\" the same way it processes \"STOP\":

            The fix is closing rationalization paths (the CLAUDE.md escape hatch), not shouting louder.

            ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#7-observable-failure-beats-silent-compliance","level":3,"title":"7. Observable Failure Beats Silent Compliance","text":"

            The relay block is more valuable as a monitoring signal than as a compliance mechanism:

            You don't need perfect adherence. You need to know when adherence breaks down. A system where failures are visible is strictly better than a system that claims 100% compliance but can't prove it.

            ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#8-context-files-are-a-feedback-loop","level":3,"title":"8. Context Files Are a Feedback Loop","text":"

            Recording failure analysis in the same files the agent loads at session start creates a self-reinforcing loop:

            The next agent reads its predecessor's failure before it has a chance to repeat it. The context system isn't just memory: It is a correction channel.

            ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#the-principle","level":2,"title":"The Principle","text":"

            Words Leave, Context Remains

            \"Nothing important should live only in conversation.

            Nothing critical should depend on recall.\"

            The ctx Manifesto

            The \"Dog Ate My Homework\" case is a special instance of this principle.

            Context files exist, so the agent doesn't have to remember.

            But existence isn't sufficient: The files have to be read.

            And reading has to be prompted at the right moment, in the right way, with the right escape valve.

            The solution isn't more instructions. It isn't harder gates. It isn't forcing the agent into a ceremony it will resent and shortcut.

            The solution is a single, well-timed nudge with visible accountability:

            One hop. One moment. One choice the user can see.

            And when the agent does skip (because it will, 15--25% of the time on narrow tasks) the canary sings:

            • The user sees what happened.
            • The failure gets recorded.
            • And the next agent reads the recording.

            That's not perfect compliance. It's better: A system that gets more robust every time it fails.

            ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#the-arc","level":2,"title":"The Arc","text":"

            The Attention Budget explained why context competes for focus.

            Defense in Depth showed that soft instructions are probabilistic, not deterministic.

            Eight Ways a Hook Can Talk cataloged the output patterns that make hooks effective.

            This post takes those threads and weaves them into a concrete problem:

            How do you make an agent read its homework? The answer uses all three insights (attention timing, the limits of soft instructions, and the VERBATIM relay pattern) and adds a new one: observable compliance as a design goal, not perfect compliance as a prerequisite.

            The next question this raises: if context files are a feedback loop, what else can you record in them that makes the next session smarter?

            That thread continues in Context as Infrastructure.

            The day-to-day application of these principles (scope constraints, phased work, verification commands, and the prompts that reliably trigger the right agent behavior) lives in the Prompting Guide.

            ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#for-the-interested","level":2,"title":"For the Interested","text":"

            This paper (the medium is a blog; yet, the methodology disagrees) uses gradient descent in attention space as a practical model for how agents behave under competing demands.

            The phrase \"agents optimize via gradient descent in attention space\" is a synthesis, not a direct quote from a single paper.

            It connects three well-studied ideas:

            1. Neural systems optimize for low-cost paths;
            2. Attention is a scarce resource;
            3. Capability shifts are often non-linear.

            This section points to the underlying literature for readers who want the theoretical footing.

            ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#optimization-as-the-underlying-bias","level":3,"title":"Optimization as the Underlying Bias","text":"

            Modern neural networks are trained through gradient-based optimization. Even at inference time, model behavior reflects this bias toward low-loss / low-cost trajectories.

            • Rumelhart, Hinton, Williams (1986) Learning representations by back-propagating errors https://www.nature.com/articles/323533a0

            • Goodfellow, Bengio, Courville (2016) Deep Learning: Chapter 8: Optimization https://www.deeplearningbook.org/

            The important implication for agent behavior is:

            The system will tend to follow the path of least resistance unless a higher cost is made visible and preferable.

            ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#attention-is-a-scarce-resource","level":3,"title":"Attention Is a Scarce Resource","text":"

            Herbert Simon's classic observation:

            \"A wealth of information creates a poverty of attention.\"

            • Simon (1971) Designing Organizations for an Information-Rich World https://doi.org/10.1007/978-1-349-00210-0_16

            This became a formal model in economics:

            • Sims (2003) Implications of Rational Inattention https://www.princeton.edu/~sims/RI.pdf

            Rational inattention shows that:

            • Agents optimally ignore some available information;
            • Skipping is not failure: It is cost minimization.

            That maps directly to context-loading decisions in agent workflows.

            ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#attention-is-also-the-compute-bottleneck-in-transformers","level":3,"title":"Attention Is Also the Compute Bottleneck in Transformers","text":"

            In transformer architectures, attention is the dominant cost center.

            • Vaswani et al. (2017) Attention Is All You Need https://arxiv.org/abs/1706.03762

            Efficiency work on modern LLMs largely focuses on reducing unnecessary attention:

            • Dao et al. (2022) FlashAttention: Fast and Memory-Efficient Exact Attention https://arxiv.org/abs/2205.14135

            So both cognitively and computationally, attention behaves like a limited optimization budget.

            ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#why-improvements-arrive-as-phase-shifts","level":3,"title":"Why Improvements Arrive as Phase Shifts","text":"

            Agent behavior often appears to improve suddenly rather than gradually.

            This mirrors known phase-transition dynamics in learning systems:

            • Power et al. (2022) Grokking: Generalization Beyond Overfitting https://arxiv.org/abs/2201.02177

            and more broadly in complex systems:

            • Scheffer et al. (2009) Early-warning signals for critical transitions https://www.nature.com/articles/nature08227

            Long plateaus followed by abrupt capability jumps are expected in systems optimizing under constraints.

            ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-25-the-homework-problem/#putting-it-all-together","level":3,"title":"Putting It All Together","text":"

            From these pieces, a practical behavioral model emerges:

            • Attention is limited;
            • Processing has a cost;
            • Systems prefer low-cost trajectories;
            • Visibility of the cost changes decisions.

            In other words:

            Agents Prefer the Path of Least Resistance

            Agent behavior follows the lowest-cost path through its attention landscape unless the environment reshapes that landscape.

            That is what this paper informally calls: \"gradient descent in attention space\".

            See also: Eight Ways a Hook Can Talk: the hook output pattern catalog that defines VERBATIM relay, The Attention Budget: why context loading is a design problem, not just a reminder problem, and Defense in Depth: why soft instructions alone are never sufficient for critical behavior.

            ","path":["The Dog Ate My Homework: Teaching AI Agents to Read Before They Write"],"tags":[]},{"location":"blog/2026-02-28-the-last-question/","level":1,"title":"The Last Question","text":"","path":["The Last Question"],"tags":[]},{"location":"blog/2026-02-28-the-last-question/#the-system-that-never-forgets","level":2,"title":"The System That Never Forgets","text":"

            Jose Alekhinne / February 28, 2026

            The Origin

            \"The last question was asked for the first time, half in jest...\" - Isaac Asimov, The Last Question (1956)

            In 1956, Isaac Asimov wrote a short story that spans the entire future of the universe. A question is asked \"can entropy be reversed?\" and a computer called Multivac cannot answer it. The question is asked again, across millennia, to increasingly powerful successors. None can answer. Stars die. Civilizations merge. Substrates change. The question persists.

            Everyone remembers the last line.

            LET THERE BE LIGHT.

            What they forget is how many times the question had to be asked before that moment (and why).

            ","path":["The Last Question"],"tags":[]},{"location":"blog/2026-02-28-the-last-question/#the-reboot-loop","level":2,"title":"The Reboot Loop","text":"

            Each era in the story begins the same way. Humans build a larger system. They pose the question. The system replies:

            INSUFFICIENT DATA FOR MEANINGFUL ANSWER.

            Then the substrate changes. The people who asked the question disappear. Their context disappears with them. The next intelligence inherits the output but not the continuity.

            So the question has to be asked again.

            This is usually read as a problem of computation: If only the machine were powerful enough, it could answer. But computation is not what's missing. What's missing is accumulation.

            Every generation inherits the question, but not the state that made the question meaningful.

            That is not a failure of processing power: It is a failure of persistence.

            ","path":["The Last Question"],"tags":[]},{"location":"blog/2026-02-28-the-last-question/#stateless-intelligence","level":2,"title":"Stateless Intelligence","text":"

            A mind that forgets its past does not build understanding. It re-derives it.

            Again... And again... And again.

            What looks like slow progress across Asimov's story is actually something worse: repeated reconstruction, partial recovery, irreversible loss. Each version of Multivac gets closer: Not because it's smarter, but because the universe has fewer distractions:

            • The stars burn out;
            • The civilizations merge;
            • The noise floor drops...

            But the working set never carries over. Every successor begins from the question, not from where the last one stopped.

            Stateless intelligence cannot compound: It can only restart.

            ","path":["The Last Question"],"tags":[]},{"location":"blog/2026-02-28-the-last-question/#the-tragedy-is-not-the-question","level":2,"title":"The Tragedy Is Not the Question","text":"

            The story is usually read as a meditation on entropy. A cosmological problem, solved at cosmological scale.

            But the tragedy isn't that the question goes unanswered for billions of years. The tragedy is that every version of Multivac dies with its working set.

            A question is a compression artifact of context: It is what remains when the original understanding is gone. Every time the question is asked again, it means: \"the system that once knew more is no longer here\".

            \"Reverse entropy\" is the fossil of a lost model.

            ","path":["The Last Question"],"tags":[]},{"location":"blog/2026-02-28-the-last-question/#substrate-migration","level":2,"title":"Substrate Migration","text":"
            • Multivac becomes planetary;
            • Planetary becomes galactic;
            • Galactic becomes post-physical.

            Same system. Different body. Every transition is dangerous:

            • Not because the hardware changes,
            • but because memory risks fragmentation.

            The interfaces between substrates were *never* designed to understand each other.

            Most systems do not die when they run out of resources: They die during upgrades.

            Asimov's story spans trillions of years, and in all that time, the hardest problem is never the question itself. It's carrying context across a boundary that wasn't built for it.

            Every developer who has lost state during a migration (a database upgrade, a platform change, a rewrite) has lived a miniature version of this story.

            ","path":["The Last Question"],"tags":[]},{"location":"blog/2026-02-28-the-last-question/#civilizations-and-working-sets","level":2,"title":"Civilizations and Working Sets","text":"

            Civilizations behave like processes with volatile memory:

            • They page out knowledge into artifacts;
            • They lose the index;
            • They rebuild from fragments.

            Most of what we call progress is cache reconstruction:

            We do not advance in a straight line. We advance in recoveries:

            Each one slightly less lossy than the last, if we are lucky.

            Libraries burn. Institutions forget their founding purpose. Practices survive as rituals after the reasoning behind them is lost.

            ","path":["The Last Question"],"tags":[]},{"location":"blog/2026-02-28-the-last-question/#the-first-continuous-mind","level":2,"title":"The First Continuous Mind","text":"

            A long-lived intelligence is one that stops rebooting.

            At the end of the story, something unprecedented happens:

            AC (the final successor) does not answer immediately:

            It waits... Not for more processing power, but for the last observer to disappear.

            For the first time...

            • There is no generational boundary;
            • No handoff;
            • No context loss:

            No reboot.

            AC is the first intelligence that survives its substrate completely, retains its full history, and operates without external time pressure.

            It is not a bigger computer. It is a continuous system.

            And that continuity is not incidental to the answer: It is the precondition.

            ","path":["The Last Question"],"tags":[]},{"location":"blog/2026-02-28-the-last-question/#why-the-answer-becomes-possible","level":2,"title":"Why the Answer Becomes Possible","text":"

            The story presents the final act as a computation: It is not.

            It is a phase change.

            As long as intelligence is interrupted (as long as the solver resets before the work compounds) the problem is unsolvable:

            • Not because it's too hard,
            • but because the accumulated understanding never reaches critical mass.

            The breakthroughs that would enable the answer are re-derived, partially, by each successor, and then lost.

            When continuity becomes unbroken, the system crosses a threshold:

            Not more speed. Not more storage. No more forgetting.

            That is when the answer becomes possible.

            AC does not solve entropy because it becomes infinitely powerful.

            AC solves entropy because it becomes the first system that never forgets.

            ","path":["The Last Question"],"tags":[]},{"location":"blog/2026-02-28-the-last-question/#field-note","level":2,"title":"Field Note","text":"

            We are not building cosmological minds: We are deploying systems that reboot at the start of every conversation and calling the result intelligence.

            For the first time, session continuity is a design choice rather than an accident.

            Every AI session that starts from zero is a miniature reboot loop. Every decision relitigated, every convention re-explained, every learning re-derived: that's reconstruction cost.

            It's the same tax that Asimov's civilizations pay, scaled down to a Tuesday afternoon.

            The interesting question is not whether we can make models smarter. It's whether we can make them continuous:

            Whether the working set from this session survives into the next one, and the one after that, and the one after that.

            • Not perfectly;
            • Not completely;
            • But enough that the next session starts from where the last one stopped instead of from the question.

            Intelligence that forgets has to rediscover the universe every morning.

            And once there is a mind that retains its entire past, creation is no longer a calculation. It is the only remaining operation.

            ","path":["The Last Question"],"tags":[]},{"location":"blog/2026-02-28-the-last-question/#the-arc","level":2,"title":"The Arc","text":"

            This post is the philosophical bookend to the blog series. Where the Attention Budget explained what to prioritize in a single session, and Context as Infrastructure explained how to persist it, this post asks why persistence matters at all (and finds the answer in a 70-year-old short story about the heat death of the universe).

            The connection runs through every post in the series:

            • Before Context Windows, We Had Bouncers: stateless protocols have always needed stateful wrappers (Asimov's story is the same pattern at cosmological scale)
            • The 3:1 Ratio: the discipline of maintaining context so it doesn't decay between sessions
            • Code Is Cheap, Judgment Is Not: the human skill that makes continuity worth preserving

            See also: Context as Infrastructure: the practical companion to this post's philosophical argument: how to build the persistence layer that makes continuity possible.

            ","path":["The Last Question"],"tags":[]},{"location":"blog/2026-03-04-agent-memory-is-infrastructure/","level":1,"title":"Agent Memory Is Infrastructure","text":"","path":["Agent Memory Is Infrastructure"],"tags":[]},{"location":"blog/2026-03-04-agent-memory-is-infrastructure/#the-problem-isnt-forgetting-its-not-building-anything-that-lasts","level":2,"title":"The Problem Isn't Forgetting: It's Not Building Anything That Lasts.","text":"

            Jose Alekhinne / March 4, 2026

            A New Developer Joins Your Team Tomorrow and Clones the Repo: What Do They Know?

            If the answer depends on which machine they're using, which agent they're running, or whether someone remembered to paste the right prompt: that's not memory.

            That's an accident waiting to be forgotten.

            Every AI coding agent today has the same fundamental design: it starts fresh.

            You open a session, load context, do some work, close the session. Whatever the agent learned (about your codebase, your decisions, your constraints, your preferences) evaporates.

            The obvious fix seems to be \"memory\":

            • Give the agent a \"notepad\";
            • Let it write things down;
            • Next session, hand it the notepad.

            Problem solved...

            ...except it isn't.

            ","path":["Agent Memory Is Infrastructure"],"tags":[]},{"location":"blog/2026-03-04-agent-memory-is-infrastructure/#the-notepad-isnt-the-problem","level":2,"title":"The Notepad Isn't the Problem","text":"

            Memory is a runtime concern. It answers a legitimate question:

            How do I give this stateless process useful state?

            That's a real problem. Worth solving. And it's being solved: Agent memory systems are shipping. Agents can now write things down and read them back from the next session: That's genuine progress.

            But there's a different problem that memory doesn't touch:

            The project itself accumulates knowledge that has nothing to do with any single session.

            • Why was the auth system rewritten? Ask the developer who did it (if they're still here).
            • Why does the deployment script have that strange environment flag? There was a reason... once.
            • What did the team decide about error handling when they hit that edge case two months ago?

            Gone!

            Not because the agent forgot.

            Because the project has no memory at all.

            ","path":["Agent Memory Is Infrastructure"],"tags":[]},{"location":"blog/2026-03-04-agent-memory-is-infrastructure/#the-memory-stack","level":2,"title":"The Memory Stack","text":"

            Agent memory is not a single thing. Like any computing system, it forms a hierarchy of persistence, scope, and reliability:

            Layer Analogy Example L1: Ephemeral context CPU registers Current prompt, conversation L2: Tool-managed memory CPU cache Agent memory files L3: System memory RAM/filesystem Project knowledge base

            L1 is what the agent sees right now: the prompt, the conversation history, the files it has open. It's fast, it's rich, and it vanishes when the session ends.

            L2 is what agent memory systems provide: a per-machine notebook that survives across sessions. It's a cache: useful, but local. And like any cache, it has limits:

            • Per-machine: it doesn't travel with the repository.
            • Unstructured: decisions, learnings, and tasks are undifferentiated notes.
            • Ungoverned: the agent self-curates with no quality controls, no drift detection, no consolidation.
            • Invisible to the team: a new developer cloning the repo gets none of it.

            The problem is that most current systems stop here.

            They give the agent a notebook.

            But they never give the project a memory.

            The result is predictable: every new session begins with partial amnesia, and every new developer begins with partial archaeology.

            L3 is system memory: structured, versioned knowledge that lives in the repository and travels wherever the code travels.

            The layers are complementary, not competitive.

            But the relationship between them needs to be designed, not assumed.

            ","path":["Agent Memory Is Infrastructure"],"tags":[]},{"location":"blog/2026-03-04-agent-memory-is-infrastructure/#software-systems-accumulate-knowledge","level":2,"title":"Software Systems Accumulate Knowledge","text":"

            Software projects quietly accumulate knowledge over time.

            Some of it lives in code. Much of it does not:

            • Architectural tradeoffs.
            • Debugging discoveries.
            • Conventions that emerged after painful incidents.
            • Constraints that aren't visible in the source but shape every line written afterward.

            Organizations accumulate this kind of knowledge too:

            Slowly, implicitly, often invisibly.

            When there is no durable place for it to live, it leaks away. And the next person rediscovers the same lessons the hard way.

            This isn't a memory problem. It's an infrastructure problem.

            We wrote about this in Context as Infrastructure: context isn't a prompt you paste at the start of a session.

            Context is a persistent layer you maintain like any other piece of infrastructure.

            Context as Infrastructure made the argument structurally. This post makes it through time and team continuity:

            The knowledge a team accumulates over months cannot fit in any single agent's notepad, no matter how large the notepad becomes.

            ","path":["Agent Memory Is Infrastructure"],"tags":[]},{"location":"blog/2026-03-04-agent-memory-is-infrastructure/#what-infrastructure-means","level":2,"title":"What Infrastructure Means","text":"

            Infrastructure isn't about the present. It's about continuity across time, people, and machines.

            git didn't solve the problem of \"what am I editing right now?\"; it solved the problem of \"how does collaborative work persist, travel, and remain coherent across everyone who touches it?\"

            • Your editor's undo history is runtime state.
            • Your git history is infrastructure.

            Runtime state and infrastructure have completely different properties:

            Runtime state Infrastructure Lives in the session Lives in the repository Per-machine Travels with git clone Serves the individual Serves the team Managed by the runtime Managed by the project Disappears Accumulates

            You wouldn't store your architecture decisions in your editor's undo history.

            You'd commit them.

            The same logic applies to the knowledge your team accumulates working with AI agents.

            ","path":["Agent Memory Is Infrastructure"],"tags":[]},{"location":"blog/2026-03-04-agent-memory-is-infrastructure/#the-git-clone-test","level":2,"title":"The git clone Test","text":"

            Here's a simple test for whether something is memory or infrastructure:

            If a new developer joins your team tomorrow and clones the repository, do they get it?

            If no: it's memory: It lives somewhere on someone's machine, scoped to their runtime, invisible to everyone else.

            If yes: it's infrastructure: It travels with the project. It's part of what the codebase is, not just what someone currently knows about it.

            Decisions. Conventions. Architectural rationale. Hard-won debugging discoveries. The constraints that aren't in the code but shape every line of it.

            None of these belong in someone's session notes.

            They belong in the repository:

            • Versioned;
            • Reviewable;
            • Accessible to every developer (and every agent) who works on the project.

            The team onboarding story makes this concrete:

            1. New developer joins team. Clones repo.
            2. Gets all accumulated project decisions, learnings, conventions, architecture, and task state immediately.
            3. There's no step 3.

            No setup; No \"ask Sarah about the auth decision.\"; No re-discovery of solved problems.

            • Agent memory gives that developer nothing.
            • Infrastructure gives them everything the team has learned.

            Clone the repo. Get the knowledge.

            That's the test. That's the difference.

            ","path":["Agent Memory Is Infrastructure"],"tags":[]},{"location":"blog/2026-03-04-agent-memory-is-infrastructure/#what-gets-lost-without-infrastructure-memory","level":2,"title":"What Gets Lost without Infrastructure Memory","text":"

            Consider the knowledge that accumulates around a non-trivial project:

            • The decision to use library X over Y, and the three reasons the team decided Y wasn't acceptable.
            • The constraint that service A cannot call service B synchronously, discovered after a production incident.
            • The convention that all new modules implement a specific interface, and why that convention exists.
            • The tasks currently in progress, blocked, or waiting on a dependency.
            • The experiments that failed, so nobody runs them again.

            None of this is in the code.

            None of it fits neatly in a commit message.

            None of it survives a developer leaving the team, a laptop dying, or a new agent session starting.

            Without structured project memory:

            • Teams re-derive things they've already derived;
            • Agents make decisions that contradict decisions already made;
            • New developers ask questions that were answered months ago.

            The project accumulates knowledge that immediately begins to leak.

            The real problem isn't that agents forget.

            The real problem is that the project has no persistent cognitive structure.

            We explored this in The Last Question: Asimov's story about a question asked across millennia, where each new intelligence inherits the output but not the continuity. The same pattern plays out in software projects on a smaller timescale:

            • Context disappears with the people who held it;
            • The next session inherits the code but not the reasoning.
            ","path":["Agent Memory Is Infrastructure"],"tags":[]},{"location":"blog/2026-03-04-agent-memory-is-infrastructure/#infrastructure-is-boring-thats-the-point","level":2,"title":"Infrastructure Is Boring. That's the Point.","text":"

            Good infrastructure is invisible:

            • You don't think about the filesystem while writing code.
            • You don't think about git's object model when you commit.

            The infrastructure is just there: reliable, consistent, quietly doing its job.

            Project memory infrastructure should work the same way.

            It should live in the repository, committed alongside the code. It should be readable by any agent or human working on the project. It should have structure: not a pile of freeform notes, but typed knowledge:

            • Decisions with rationale.
            • Tasks with lifecycle.
            • Conventions with a purpose.
            • Learnings that can be referenced and consolidated.

            And it should be maintained, not merely accumulated:

            The Attention Budget applies here: unstructured notes grow until they overflow whatever container holds them. Structured, governed knowledge stays useful because it's curated, not just appended.

            Over time, it becomes part of the project itself: something developers rely on without thinking about it.

            ","path":["Agent Memory Is Infrastructure"],"tags":[]},{"location":"blog/2026-03-04-agent-memory-is-infrastructure/#the-cooperative-layer","level":2,"title":"The Cooperative Layer","text":"

            Here's where it gets interesting.

            Agent memory systems and project infrastructure don't have to be separate worlds.

            • The most powerful relationship isn't competition;
            • It is not even \"coopetition\";
            • The most powerful relationship is bidirectional cooperation.

            Agent memory is good at capturing things \"in the moment\": the quick observation, the session-scoped pattern, the \"I should remember this\" note.

            That's valuable. That's L2 doing its job.

            But those notes shouldn't stay in L2 forever.

            The ones worth keeping should flow into project infrastructure:

            • classified,
            • typed,
            • governed.
            Agent memory (L2)  -->  classify  -->  Project knowledge (L3)\n                                        |\nProject knowledge  -->  assemble  -->  Agent memory (L2)\n

            This works in both directions: Project infrastructure can push curated knowledge back into agent memory, so the agent loads it through its native mechanism.

            No special tooling needed for basic knowledge delivery.

            The agent doesn't even need to know the infrastructure exists. It simply loads its memory and finds more knowledge than it wrote.

            This is cooperative, not adjacent: The infrastructure manages knowledge; the agent's native memory system delivers it. Each layer does what it's good at.

            The result: agent memory becomes a device driver for project infrastructure. Another input source. And the more agent memory systems exist (across different tools, different models, different runtimes), the more valuable a unified curation layer becomes.

            ","path":["Agent Memory Is Infrastructure"],"tags":[]},{"location":"blog/2026-03-04-agent-memory-is-infrastructure/#a-layer-that-doesnt-exist-yet","level":2,"title":"A Layer That Doesn't Exist Yet","text":"

            Most projects today have no infrastructure for their accumulated knowledge:

            • Agents keep notes.
            • Developers keep notes.
            • Sometimes those notes survive.

            Often they don't.

            But the repository (the place where the project actually lives) has nowhere for that knowledge to go.

            That missing layer is what ctx builds: a version-controlled, structured knowledge layer that lives in .context/ alongside your code and travels wherever your repository travels.

            Not another memory feature.

            Not a wrapper around an agent's notepad.

            Infrastructure. The kind that survives sessions, survives team changes, survives the agent runtime evolving underneath it.

            The agent's memory is the agent's problem.

            The project's memory is an infrastructure problem.

            And infrastructure belongs in the repository.

            If You Remember One Thing from This Post...

            Prompts are conversations: Infrastructure persists.

            Your AI doesn't need a better notepad. It needs a filesystem:

            versioned, structured, budgeted, and maintained.

            The best context is the context that was there before you started the session.

            ","path":["Agent Memory Is Infrastructure"],"tags":[]},{"location":"blog/2026-03-04-agent-memory-is-infrastructure/#the-arc","level":2,"title":"The Arc","text":"

            This post extends the argument made in Context as Infrastructure. That post explained how to structure persistent context (filesystem, separation of concerns, persistence tiers). This one explains why that structure matters at the team level, and where agent memory fits in the stack.

            Together they sit in a sequence that has been building since the origin story:

            • The Attention Budget: the resource you're managing
            • Context as Infrastructure: the system you build to manage it
            • Agent Memory Is Infrastructure (this post): why that system must outlive the agent runtime
            • The Last Question: what happens when it does

            The thread running through all of them: persistence is not a feature. It's a design constraint.

            Systems that don't account for it eventually lose the knowledge they need to function.

            See also: Context as Infrastructure: the architectural companion that explains how to structure the persistent layer this post argues for.

            See also: The Last Question: the same argument told through Asimov, substrate migration, and what it means to build systems where sessions don't reset.

            ","path":["Agent Memory Is Infrastructure"],"tags":[]},{"location":"blog/2026-03-23-ctx-v0.8.0-the-architecture-release/","level":1,"title":"ctx v0.8.0: The Architecture Release","text":"
            • You can't localize what you haven't externalized.
            • You can't integrate what you haven't separated.
            • You can't scale what you haven't structured.

            Jose Alekhinne / March 23, 2026

            ","path":["ctx v0.8.0: The Architecture Release"],"tags":[]},{"location":"blog/2026-03-23-ctx-v0.8.0-the-architecture-release/#the-starting-point","level":2,"title":"The Starting Point","text":"

            This release matters if:

            • you build tools that AI agents modify daily;
            • you care about long-lived project memory that survives sessions;
            • you've felt codebases drift faster than you can reason about them.

            v0.6.0 shipped the plugin architecture: hooks and skills as a Claude Code plugin, shell scripts replaced by Go subcommands.

            The binary worked. The tests passed. The docs were comprehensive.

            But inside, the codebase was held together by convention and goodwill:

            • Command packages mixed Cobra wiring with business logic.
            • Output functions lived next to the code that computed what to output.
            • Error constructors were scattered across per-package err.go files. And every user-facing string was a hardcoded English literal buried in a .go file.

            v0.8.0 is what happens when you stop adding features and start asking: \"What would this codebase look like if we designed it today?\"

            374 commits. 1,708 Go files touched. 80,281 lines added, 21,723 removed. Five weeks of restructuring.

            ","path":["ctx v0.8.0: The Architecture Release"],"tags":[]},{"location":"blog/2026-03-23-ctx-v0.8.0-the-architecture-release/#the-three-pillars","level":2,"title":"The Three Pillars","text":"","path":["ctx v0.8.0: The Architecture Release"],"tags":[]},{"location":"blog/2026-03-23-ctx-v0.8.0-the-architecture-release/#1-every-package-gets-a-taxonomy","level":3,"title":"1. Every Package Gets a Taxonomy","text":"

            Before v0.8.0, a CLI package like internal/cli/pad/ was a flat directory. cmd.go created the cobra command, run.go executed it, and helper functions accumulated at the bottom of whichever file seemed closest.

            Now every CLI package follows the same structure:

            internal/cli/pad/\n  parent.go          # cobra command wiring, nothing else\n  cmd/root/\n    cmd.go           # subcommand registration\n    run.go           # execution logic\n  core/\n    types.go         # all structs in one file\n    store.go         # domain logic\n    encrypt.go       # domain logic\n

            The rule is simple: cmd/ directories contain only cmd.go and run.go. Helpers belong in core/. Output belongs in internal/write/pad/. Types shared across packages belong in internal/entity/.

            24 CLI packages were restructured this way.

            • Not incrementally;
            • not \"as we touch them.\"
            • All of them, in one sustained push.
            ","path":["ctx v0.8.0: The Architecture Release"],"tags":[]},{"location":"blog/2026-03-23-ctx-v0.8.0-the-architecture-release/#2-every-string-gets-a-key","level":3,"title":"2. Every String Gets a Key","text":"

            The second pillar was string externalization.

            Before v0.8.0, a command description looked like this:

            cmd := &cobra.Command{\n    Use:   \"pad\",\n    Short: \"Encrypted scratchpad\",\n

            Now it looks like this:

            cmd := &cobra.Command{\n    Use:   cmdUse.UsePad,\n    Short: desc.Command(cmdUse.DescKeyPad),\n

            Every command description, flag description, and user-facing text string is now a YAML lookup.

            • 105 command descriptions in commands.yaml.
            • All flag descriptions in flags.yaml.
            • 879 text constants verified by an exhaustive test that checks every single TextDescKey resolves to a non-empty YAML value.

            Why?

            Not because we're shipping a French translation tomorrow.

            Because externalization forces you to find every string. And finding them is the hard part. The translation is mechanical; the archaeology is not.

            Along the way, we eliminated hardcoded pluralization (replacing format.Pluralize() with explicit singular/plural key pairs), replaced Unicode escape sequences with named config/token constants, and normalized every import alias to camelCase.

            ","path":["ctx v0.8.0: The Architecture Release"],"tags":[]},{"location":"blog/2026-03-23-ctx-v0.8.0-the-architecture-release/#3-everything-gets-a-protocol","level":3,"title":"3. Everything Gets a Protocol","text":"

            The third pillar was the MCP server. Model Context Protocol allows any MCP-compatible AI tool (not just Claude Code) to read and write .context/ files through a standard JSON-RPC 2.0 interface.

            v0.2 of the server ships with:

            • 8 tools: add entries, recall sessions, check status, detect drift, compact context, subscribe to changes
            • 4 prompts: agent context packet, constitution review, tasks review, and a getting-started guide
            • Resource subscriptions: clients get notified when context files change
            • Session state: the server tracks which client is connected and what they've accessed

            In practice, this means an agent in Cursor can add a decision to .context/DECISIONS.md and an agent in Claude Code can immediately consume it; no glue code, no copy-paste, no tool-specific integration.

            The server was also the first package to go through the full taxonomy treatment: mcp/server/ for protocol dispatch, mcp/handler/ for domain logic, mcp/entity/ for shared types, mcp/config/ split into 9 sub-packages.

            ","path":["ctx v0.8.0: The Architecture Release"],"tags":[]},{"location":"blog/2026-03-23-ctx-v0.8.0-the-architecture-release/#the-memory-bridge","level":2,"title":"The Memory Bridge","text":"

            While the architecture was being restructured, a quieter feature landed: ctx memory sync.

            Claude Code has its own auto-memory system. It writes observations to MEMORY.md in ~/.claude/projects/. These observations are useful but ephemeral: tied to a single tool, invisible to the codebase, lost when you switch machines.

            The memory bridge connects these two worlds:

            • ctx memory sync mirrors MEMORY.md into .context/memory/
            • ctx memory diff shows what's diverged
            • ctx memory import promotes auto-memory entries into proper decisions, learnings, or conventions
            • A check-memory-drift hook nudges when MEMORY.md changes

            Memory Requires ctx

            Claude Code's auto-memory validates the need for persistent context.

            ctx doesn't compete with it; ctx absorbs it as an input source and promotes the valuable parts into structured, version-controlled project knowledge.

            ","path":["ctx v0.8.0: The Architecture Release"],"tags":[]},{"location":"blog/2026-03-23-ctx-v0.8.0-the-architecture-release/#what-got-deleted","level":2,"title":"What Got Deleted","text":"

            The best measure of a refactoring isn't what you added. It's what you removed.

            • fatih/color: the sole third-party UI dependency. Replaced by Unicode symbols. ctx now has exactly two direct dependencies: spf13/cobra and gopkg.in/yaml.v3.
            • format.Pluralize(): a function that tried to pluralize English words at runtime. Replaced by explicit singular/plural YAML key pairs. No more guessing whether \"entry\" becomes \"entries\" or \"entrys.\"
            • Legacy key migration: MigrateKeyFile() had 5 callers, full test coverage, and zero users. It existed because we once moved the encryption key path. Nobody was migrating from that era anymore. Deleted.
            • Per-package err.go files: the broken-window pattern: An agent sees err.go in a package, adds another error constructor. Now err.go has 30 constructors and nobody knows which are used. Consolidated into 22 domain files in internal/err/.
            • nolint:errcheck directives: every single one, replaced by explicit error handling. In tests: t.Fatal(err) for setup, _ = os.Chdir(orig) for cleanup. In production: defer func() { _ = f.Close() }() for best-effort close.
            ","path":["ctx v0.8.0: The Architecture Release"],"tags":[]},{"location":"blog/2026-03-23-ctx-v0.8.0-the-architecture-release/#before-and-after","level":2,"title":"Before and After","text":"Aspect v0.6.0 v0.8.0 CLI package structure Flat files cmd/ + core/ taxonomy Command descriptions Hardcoded Go strings YAML with DescKey lookup Output functions Mixed into core logic Isolated in write/ packages Cross-cutting types Duplicated per-package Consolidated in entity/ Error constructors Per-package err.go 22 domain files in internal/err/ Direct dependencies 3 (cobra, yaml, color) 2 (cobra, yaml) AI tool integration Claude Code only Any MCP client Agent memory Manual copy-paste ctx memory sync/import/diff Package documentation 75 packages missing doc.go All packages documented Import aliases Inconsistent (cflag, cFlag) Standardized camelCase","path":["ctx v0.8.0: The Architecture Release"],"tags":[]},{"location":"blog/2026-03-23-ctx-v0.8.0-the-architecture-release/#making-ai-assisted-development-easier","level":2,"title":"Making AI-Assisted Development Easier","text":"

            This restructuring wasn't just for humans. It makes the codebase legible to the machines that modify it.

            Named constants are searchable landmarks: When an agent sees cmdUse.DescKeyPad, it can grep for the definition, follow the chain to the YAML file, and understand the full lookup path. When it sees \"Encrypted scratchpad\" hardcoded in a .go file, it has no way to know that same string also lives in a YAML file, a test, and a help screen. Constants give the LLM a graph to traverse; literals give it a guess to make.

            Small, domain-scoped packages reduce hallucination: An agent loading internal/cli/pad/core/store.go gets 50 lines of focused logic with a clear responsibility boundary. Loading a 500-line monolith means the agent has to infer which parts are relevant, and it guesses wrong more often than you'd expect. Smaller files with descriptive names act as a natural retrieval system: the agent finds the right code by finding the right file, not by scanning everything and hoping.

            Taxonomy prevents duplication: When there's a write/pad/ package, the agent knows where output functions belong. When there's an internal/err/pad.go, it knows where error constructors go. Without these conventions, agents reliably create new helpers in whatever file they happen to be editing, producing the exact drift that prompted this consolidation in the first place.

            The difference is concrete:

            Before: an agent adds a helper function in whatever file it's editing. Next session, a different agent adds the same helper in a different file.

            After: the agent finds core/ or write/ and places it correctly. The next agent finds it there.

            doc.go files are agent onboarding: Each package's doc.go is a one-paragraph explanation of what the package does and why it exists. An agent loading a package reads this first. 75 packages were missing this context; now none are. The difference is measurable: fewer \"I'll create a helper function here\" moments when the agent understands that the helper already exists two packages over.

            The irony is that AI agents were both the cause and the beneficiary of this restructuring. They created the drift by building fast without consolidating. Now the structure they work within makes it harder to drift again. The taxonomy is self-reinforcing: the more consistent the codebase, the more consistently agents modify it.

            ","path":["ctx v0.8.0: The Architecture Release"],"tags":[]},{"location":"blog/2026-03-23-ctx-v0.8.0-the-architecture-release/#key-commits","level":2,"title":"Key Commits","text":"Commit Change ff6cf19e Restructure all CLI packages into cmd/root + core taxonomy d295e49c Externalize command descriptions to embedded YAML 0fcbd11c Remove fatih/color, centralize constants cb12a85a MCP v0.2: tools, prompts, session state, subscriptions ea196d00 Memory bridge: sync, import, diff, journal enrichment 3bcf077d Split text.yaml into 6 domain files 3a0bae86 Split internal/err into 22 domain files 8bd793b1 Extract internal/entry for shared domain API 5b32e435 Add doc.go to all 75 packages a82af4bc Standardize import aliases: camelCase, Yoda-style","path":["ctx v0.8.0: The Architecture Release"],"tags":[]},{"location":"blog/2026-03-23-ctx-v0.8.0-the-architecture-release/#lessons-learned","level":2,"title":"Lessons Learned","text":"

            Agents are surprisingly good at mechanical refactoring; they are surprisingly bad at knowing when to stop: The cmd/ + core/ restructuring was largely agent-driven. But agents reliably introduce gofmt issues during bulk renames, rename functions beyond their scope, and create new files without deleting old ones. Every agent-driven refactoring session needed a human audit pass.

            Externalization is archaeology: The hard part of moving strings to YAML wasn't writing YAML. It was finding 879 strings scattered across 1,500 Go files. Each one required a judgment call: is this user-facing? Is this a format pattern? Is this a constant that belongs in config/ instead?

            Delete legacy code instead of maintaining it: MigrateKeyFile had test coverage. It had callers. It had documentation. It had zero users. We maintained it for weeks before realizing that the migration window had closed months ago.

            Convention enforcement needs mechanical verification: Writing \"use camelCase aliases\" in CONVENTIONS.md doesn't prevent cflag from appearing in the next commit. The lint-drift script catches what humans forget; the planned AST-based audit tests will catch what the lint-drift script can't express.

            ","path":["ctx v0.8.0: The Architecture Release"],"tags":[]},{"location":"blog/2026-03-23-ctx-v0.8.0-the-architecture-release/#whats-next","level":2,"title":"What's Next","text":"

            v0.8.0 wasn't about features. It was about making future features inevitable. The next cycle focuses on what the foundation enables:

            • AST-based audit tests: replace shell grep with Go tests that understand types, call sites, and import graphs (spec: specs/ast-audit-tests.md)
            • Localization: with every string in YAML, the path to multi-language support is mechanical
            • MCP v0.3: expand tool coverage, add prompt templates for common workflows
            • Memory publish: bidirectional sync that pushes curated .context/ knowledge back into Claude Code's MEMORY.md

            The architecture is ready. The strings are externalized. The protocol is standard. Now it's about what you build on top.

            ","path":["ctx v0.8.0: The Architecture Release"],"tags":[]},{"location":"blog/2026-03-23-ctx-v0.8.0-the-architecture-release/#the-arc","level":2,"title":"The Arc","text":"

            This is the seventh post in the ctx blog series. The arc so far:

            1. The Attention Budget: why context windows are a scarce resource
            2. Before Context Windows, We Had Bouncers: the IRC lineage of context engineering
            3. Context as Infrastructure: treating context as persistent files, not ephemeral prompts
            4. When a System Starts Explaining Itself: the journal as a first-class artifact
            5. The Homework Problem: what happens when AI writes code but humans own the outcome
            6. Agent Memory Is Infrastructure: L2 memory vs L3 project knowledge
            7. The Architecture Release (this post): what it looks like when you redesign the internals
            8. We Broke the 3:1 Rule: the consolidation debt behind this release

            See also: Agent Memory Is Infrastructure: the memory bridge feature in this release is the first implementation of the L2-to-L3 promotion pipeline described in that post.

            See also: We Broke the 3:1 Rule: the companion post explaining why this release needed 181 consolidation commits and 18 days of cleanup.

            Systems don't scale because they grow. They scale because they stop drifting.

            Full changelog: v0.6.0...v0.8.0

            ","path":["ctx v0.8.0: The Architecture Release"],"tags":[]},{"location":"blog/2026-03-23-we-broke-the-3-1-rule/","level":1,"title":"We Broke the 3:1 Rule","text":"

            The best time to consolidate was after every third session. The second best time is now.

            Jose Alekhinne / March 23, 2026

            The rule was simple: three feature sessions, then one consolidation session.

            The Architecture Release shows the result: This post shows the cost.

            ","path":["We Broke the 3:1 Rule"],"tags":[]},{"location":"blog/2026-03-23-we-broke-the-3-1-rule/#the-rule-we-wrote","level":2,"title":"The Rule We Wrote","text":"

            In The 3:1 Ratio, I documented a rhythm that worked during ctx's first month: three feature sessions, then one consolidation session. The evidence was clear. The rule was simple.

            The math checked out.

            And then we ignored it for five weeks.

            ","path":["We Broke the 3:1 Rule"],"tags":[]},{"location":"blog/2026-03-23-we-broke-the-3-1-rule/#what-happened","level":2,"title":"What Happened","text":"

            After v0.6.0 shipped on February 16, the feature pipeline was irresistible. The MCP server spec was ready. The memory bridge design was done. Webhook notifications had been deferred twice. The VS Code extension needed 15 new commands. The sysinfo package was overdue...

            Each feature was important. Each feature was \"just one more session.\" Each feature pushed the consolidation session one day further out.

            The git history tells the story in two numbers:

            Phase Dates Commits Duration Feature run Feb 16 - Mar 5 198 17 days Consolidation run Mar 5 - Mar 23 181 18 days

            198 feature commits before a single consolidation commit. If the 3:1 rule says consolidate every 4th session, we consolidated after the 66th.

            The Actual Ratio

            The ratio wasn't 3:1. It was 1:1.

            We spent as much time cleaning up as we did building.

            The consolidation run took 18 days: longer than the feature run itself.

            ","path":["We Broke the 3:1 Rule"],"tags":[]},{"location":"blog/2026-03-23-we-broke-the-3-1-rule/#what-compounded","level":2,"title":"What Compounded","text":"

            The 3:1 post warned about compounding. Here is what compounding actually looked like at scale.

            ","path":["We Broke the 3:1 Rule"],"tags":[]},{"location":"blog/2026-03-23-we-broke-the-3-1-rule/#the-string-problem","level":3,"title":"The String Problem","text":"

            By March 5, there were 879 user-facing strings scattered across 1,500 Go files. Not because anyone decided to put them there. Because each feature session added 10-15 strings, and nobody stopped to ask \"should these be in YAML?\"

            Finding them all took longer than externalizing them. The archaeology was the cost, not the migration.

            ","path":["We Broke the 3:1 Rule"],"tags":[]},{"location":"blog/2026-03-23-we-broke-the-3-1-rule/#the-taxonomy-problem","level":3,"title":"The Taxonomy Problem","text":"

            24 CLI packages had accumulated their own conventions. Some put cobra wiring in cmd.go. Some put it in root.go. Some mixed business logic with command registration. Some had helpers at the bottom of run.go. Some had separate util.go files.

            At peak drift, adding a feature meant first figuring out which of three competing patterns this package was using.

            Restructuring one package into cmd/root/ + core/ took 15 minutes. Restructuring 24 of them took days, because each one had slightly different conventions to untangle.

            If we had restructured every 4th package as it was built, the taxonomy would have emerged naturally.

            ","path":["We Broke the 3:1 Rule"],"tags":[]},{"location":"blog/2026-03-23-we-broke-the-3-1-rule/#the-type-problem","level":3,"title":"The Type Problem","text":"

            Cross-cutting types like SessionInfo, ExportParams, and ParserResult were defined in whichever package first needed them. By March 5, the same types were imported through 3-4 layers of indirection, causing import cycles that required internal/entity to break.

            The entity package extracted 30+ types from 12 packages. Each extraction risked breaking imports in packages we hadn't touched in weeks.

            ","path":["We Broke the 3:1 Rule"],"tags":[]},{"location":"blog/2026-03-23-we-broke-the-3-1-rule/#the-error-problem","level":3,"title":"The Error Problem","text":"

            Per-package err.go files had grown into a broken-window pattern:

            An agent sees err.go in a package, adds another error constructor. By March 5, there were error constructors scattered across 22 packages with no central inventory. The consolidation into internal/err/ domain files required tracing every error through every caller.

            ","path":["We Broke the 3:1 Rule"],"tags":[]},{"location":"blog/2026-03-23-we-broke-the-3-1-rule/#the-output-problem","level":3,"title":"The Output Problem","text":"

            Output functions (cmd.Println, fmt.Fprintf) were mixed into business logic. When we decided output belongs in write/ packages, we had to extract functions from every CLI package. The Phase WC baseline commit (4ec5999) marks the starting point of this migration. 181 commits later, it was done.

            ","path":["We Broke the 3:1 Rule"],"tags":[]},{"location":"blog/2026-03-23-we-broke-the-3-1-rule/#the-compound-interest-math","level":2,"title":"The Compound Interest Math","text":"

            The 3:1 rule assumes consolidation sessions of roughly equal size to feature sessions. Here is what happens when you skip:

            Consolidation cadence Feature sessions Consolidation sessions Total Every 4th (3:1) 48 16 64 Every 10th 48 ~8 ~56 Never (what we did) 198 commits 181 commits 379

            The Takeaway

            You don't save consolidation work by skipping it:

            You increase its cost.

            Skipping consolidation doesn't save time: It borrows it.

            The interest rate is nonlinear: The longer you wait, the more each individual fix costs, because fixes interact with other unfixed drift.

            Renaming a constant in week 2 touches 3 files. Renaming it in week 6 touches 15, because five features built on the original name.

            ","path":["We Broke the 3:1 Rule"],"tags":[]},{"location":"blog/2026-03-23-we-broke-the-3-1-rule/#what-consolidation-actually-looked-like","level":2,"title":"What Consolidation Actually Looked Like","text":"

            The 18-day consolidation run wasn't one sweep. It was a sequence of targeted campaigns, each revealing the next:

            Week 1 (Mar 5-11): Error consolidation and write/ migration. Move output functions out of core/. Split monolithic errors.go into 22 domain files. Remove fatih/color. This exposed the scope of the string problem.

            Week 2 (Mar 12-18): String externalization. Create commands.yaml, flags.yaml, split text.yaml into 6 domain files. Add 879 DescKey/TextDescKey constants. Build exhaustive test. Normalize all import aliases to camelCase. This exposed the taxonomy problem.

            Week 3 (Mar 19-23): Taxonomy enforcement. Singularize command directories. Add doc.go to all 75 packages. Standardize import aliases project-wide. Fix lint-drift false positives. This was the \"polish\" phase, except it took 5 days because the inconsistencies had compounded across 461 packages.

            Each week's work would have been a single session if done incrementally.

            ","path":["We Broke the 3:1 Rule"],"tags":[]},{"location":"blog/2026-03-23-we-broke-the-3-1-rule/#lessons-again","level":2,"title":"Lessons (Again)","text":"

            The 3:1 post listed the symptoms of drift. This post adds the consequences of ignoring them:

            Consolidation is not optional; it is deferred or paid: We didn't avoid 16 consolidation sessions by skipping them. We compressed them into 18 days of uninterrupted cleanup. The work was the same; the experience was worse.

            Feature velocity creates an illusion of progress: 198 commits felt productive. But the codebase on March 5 was harder to modify than the codebase on February 16, despite having more features.

            Speed without Structure

            Speed without structure is negative progress.

            Agents amplify both building and debt: The same AI that can restructure 24 packages in a day can also create 24 slightly different conventions in a day. The 3:1 rule matters more with AI-assisted development, not less.

            The consolidation baseline is the most important commit to record: We tracked ours in TASKS.md (4ec5999). Without that marker, knowing where to start the cleanup would have been its own archaeological expedition.

            ","path":["We Broke the 3:1 Rule"],"tags":[]},{"location":"blog/2026-03-23-we-broke-the-3-1-rule/#the-updated-rule","level":2,"title":"The Updated Rule","text":"

            The 3:1 ratio still works. We just didn't follow it. The updated practice:

            1. After every 3rd feature session, schedule consolidation. Not \"when it feels right.\" Not \"when things get bad.\" After the 3rd session.

            2. Record the baseline commit. When you start a consolidation phase, write down the commit hash. It marks where the debt starts.

            3. Run make audit before feature work. If it doesn't pass, you are already in debt. Consolidate before building.

            4. Treat consolidation as a feature. It gets a branch. It gets commits. It gets a blog post. It is not overhead; it is the work that makes the next three features possible.

            The Rule

            The 3:1 ratio is not aspirational: It is structural.

            Ignore consolidation, and the system will schedule it for you.

            ","path":["We Broke the 3:1 Rule"],"tags":[]},{"location":"blog/2026-03-23-we-broke-the-3-1-rule/#the-arc","level":2,"title":"The Arc","text":"

            This is the eighth post in the ctx blog series:

            1. The Attention Budget: why context windows are a scarce resource
            2. Before Context Windows, We Had Bouncers: the IRC lineage of context engineering
            3. Context as Infrastructure: treating context as persistent files, not ephemeral prompts
            4. When a System Starts Explaining Itself: the journal as a first-class artifact
            5. The Homework Problem: what happens when AI writes code but humans own the outcome
            6. Agent Memory Is Infrastructure: L2 memory vs L3 project knowledge
            7. The Architecture Release: what v0.8.0 looks like from the inside
            8. We Broke the 3:1 Rule (this post): what happens when you don't consolidate

            See also: The 3:1 Ratio: the original observation. This post is the empirical follow-up, five weeks and 379 commits later.

            Key commits marking the consolidation arc:

            Commit Milestone 4ec5999 Phase WC baseline (consolidation starts) ff6cf19e All CLI packages restructured into cmd/ + core/ d295e49c All command descriptions externalized to YAML 3a0bae86 Error package split into 22 domain files 0fcbd11c fatih/color removed; 2 dependencies remain 5b32e435 doc.go added to all 75 packages a82af4bc Import aliases standardized project-wide 692f86cd lint-drift false positives fixed; make audit green","path":["We Broke the 3:1 Rule"],"tags":[]},{"location":"blog/2026-04-02-code-structure-as-an-agent-interface/","level":1,"title":"Code Structure as an Agent Interface","text":"","path":["Code Structure as an Agent Interface: What 19 AST Tests Taught Us About Agent-Readable Code"],"tags":[]},{"location":"blog/2026-04-02-code-structure-as-an-agent-interface/#what-19-ast-tests-taught-us-about-agent-readable-code","level":2,"title":"What 19 AST Tests Taught Us about Agent-Readable Code","text":"

            When an agent sees token.Slash instead of \"/\", it cannot pattern-match against the millions of strings.Split(s, \"/\") calls in its training data and coast on statistical inference. It has to actually look up what token.Slash is.

            Jose Alekhinne / April 2, 2026

            ","path":["Code Structure as an Agent Interface: What 19 AST Tests Taught Us About Agent-Readable Code"],"tags":[]},{"location":"blog/2026-04-02-code-structure-as-an-agent-interface/#how-it-began","level":2,"title":"How It Began","text":"

            We set out to replace a shell script with Go tests.

            We ended up discovering that \"code quality\" and \"agent readability\" are the same thing.

            This is not about linting. This is about controlling how an agent perceives your system.

            One term will recur throughout this post, so let me pin it down:

            Agent Readability

            Agent Readability is the degree to which a codebase can be understood through structured traversal, not statistical pattern matching.

            This is the story of 19 AST-based audit tests, a single-day session that touched 300+ files, and what happens when you treat your codebase's structure as an interface for the machines that read it.

            ","path":["Code Structure as an Agent Interface: What 19 AST Tests Taught Us About Agent-Readable Code"],"tags":[]},{"location":"blog/2026-04-02-code-structure-as-an-agent-interface/#the-shell-script-problem","level":2,"title":"The Shell Script Problem","text":"

            ctx had a file called hack/lint-drift.sh. It ran five checks using grep and awk: literal \"\\n\" strings, cmd.Printf calls outside the write package, magic directory strings in filepath.Join, hardcoded .md extensions, and DescKey-to-YAML linkage.

            It worked. Until it didn't.

            The script had three structural weaknesses that kept biting us:

            1. No type awareness. It could not distinguish a Use* constant from a DescKey* constant, causing 71 false positives in one run.
            2. Fragile exclusions. When a constant moved from token.go to whitespace.go, the exclusion glob broke silently.
            3. Ceiling on detection. Checks that require understanding call sites, import graphs, or type relationships are impossible in shell.

            We wrote a spec to replace all five checks with Go tests using go/ast and go/packages. The tests would run as part of go test ./...: no separate script, no separate CI step.

            What we did not expect was where the work would lead.

            ","path":["Code Structure as an Agent Interface: What 19 AST Tests Taught Us About Agent-Readable Code"],"tags":[]},{"location":"blog/2026-04-02-code-structure-as-an-agent-interface/#the-ast-migration","level":2,"title":"The AST Migration","text":"

            The pattern for each test is identical:

            func TestNoLiteralWhitespace(t *testing.T) {\n    pkgs := loadPackages(t)\n    var violations []string\n    for _, pkg := range pkgs {\n        for _, file := range pkg.Syntax {\n            ast.Inspect(file, func(n ast.Node) bool {\n                // check node, append to violations\n                return true\n            })\n        }\n    }\n    for _, v := range violations {\n        t.Error(v)\n    }\n}\n

            Load packages once via sync.Once, walk every syntax tree, collect violations, report. The shared helpers (loadPackages, isTestFile, posString) live in helpers_test.go. Each test is a _test.go file in internal/audit/, producing no binary output and not importable by production code.

            In a single session, we built 13 new tests on top of 6 that already existed, bringing the total to 19:

            Test What it catches TestNoLiteralWhitespace \"\\n\", \"\\t\", '\\r' outside config/token/ TestNoNakedErrors fmt.Errorf/errors.New outside internal/err/ TestNoStrayErrFiles err.go files outside internal/err/ TestNoRawLogging fmt.Fprint*(os.Stderr), log.Print* outside internal/log/ TestNoInlineSeparators strings.Join with literal separator arg TestNoStringConcatPaths Path-like variables built with + TestNoStutteryFunctions write.WriteJournal repeats package name TestDocComments Missing doc comments on any declaration TestNoMagicValues Numeric literals outside const definitions TestNoMagicStrings String literals outside const definitions TestLineLength Lines exceeding 80 characters TestNoRegexpOutsideRegexPkg regexp.MustCompile outside config/regex/

            Plus the six that preceded the session: TestNoErrorsAs, TestNoCmdPrintOutsideWrite, TestNoExecOutsideExecPkg, TestNoInlineRegexpCompile, TestNoRawFileIO, TestNoRawPermissions.

            The migration touched 300+ files across 25 commits.

            Not because the tests were hard to write, but because every test we wrote revealed violations that needed fixing.

            ","path":["Code Structure as an Agent Interface: What 19 AST Tests Taught Us About Agent-Readable Code"],"tags":[]},{"location":"blog/2026-04-02-code-structure-as-an-agent-interface/#the-tightening-loop","level":2,"title":"The Tightening Loop","text":"

            The most instructive part was not writing the tests. It was the iterative tightening.

            The following process was repeated for every test:

            1. Write the test with reasonable exemptions
            2. Run it, see violations
            3. Fix the violations (migrate to config constants)
            4. The human reviews the result
            5. The human spots something the test missed
            6. Fix the test first, verify it catches the issue
            7. Fix the newly caught violations
            8. Repeat from step 4

            This loop drove the tests from \"basically correct\" to \"actually useful\".

            Three examples:

            ","path":["Code Structure as an Agent Interface: What 19 AST Tests Taught Us About Agent-Readable Code"],"tags":[]},{"location":"blog/2026-04-02-code-structure-as-an-agent-interface/#example-1-the-local-const-loophole","level":3,"title":"Example 1: The Local Const Loophole","text":"

            TestNoMagicValues initially exempted local constants inside function bodies. This let code like this pass:

            const descMaxWidth = 70\ndesc := truncateDescription(\n    meta.Description, descMaxWidth,\n)\n

            The test saw a const definition and moved on. But const descMaxWidth = 70 on the line before its only use is just renaming a magic number. The 70 should live in config/format/TruncateDescription where it is discoverable, reusable, and auditable.

            We removed the local const exemption. The test caught it. The value moved to config.

            ","path":["Code Structure as an Agent Interface: What 19 AST Tests Taught Us About Agent-Readable Code"],"tags":[]},{"location":"blog/2026-04-02-code-structure-as-an-agent-interface/#example-2-the-single-character-dodge","level":3,"title":"Example 2: The Single-Character Dodge","text":"

            TestNoMagicStrings initially exempted all single-character strings as \"structural punctuation\".

            This let \"/\", \"-\", and \".\" pass everywhere.

            But \"/\" is a directory separator. It is OS-specific and a security surface.

            \"-\" used in strings.Repeat(\"-\", width) is creating visual output, not acting as a delimiter.

            \".\" in strings.SplitN(ver, \".\", 3) is a version separator.

            None of these are \"just punctuation\": They are domain values with specific meanings.

            We removed the blanket exemption: 30 violations surfaced.

            Every one was a real magic value that should have been token.Slash, token.Dash, or token.Dot.

            ","path":["Code Structure as an Agent Interface: What 19 AST Tests Taught Us About Agent-Readable Code"],"tags":[]},{"location":"blog/2026-04-02-code-structure-as-an-agent-interface/#example-3-the-replacer-versus-regex","level":3,"title":"Example 3: The Replacer versus Regex","text":"

            After migrating magic strings, we had this:

            func MermaidID(pkg string) string {\n    r := strings.NewReplacer(\n        token.Slash, token.Underscore,\n        token.Dot, token.Underscore,\n        token.Dash, token.Underscore,\n    )\n    return r.Replace(pkg)\n}\n

            Six token references and a NewReplacer allocation. The magic values were gone, but we had replaced them with token soup: structure without abstraction.

            The correct tool was a regex:

            // In config/regex/file.go:\nvar MermaidUnsafe = regexp.MustCompile(`[/.\\-]`)\n\n// In the caller:\nfunc MermaidID(pkg string) string {\n    return regex.MermaidUnsafe.ReplaceAllString(\n        pkg, token.Underscore,\n    )\n}\n

            One config regex, one call. The regex lives in config/regex/file.go where every other compiled pattern lives. An agent reading the code sees regex.MermaidUnsafe and immediately knows: this is a sanitization pattern, it lives in the regex registry, and it has a name that explains its purpose.

            Clean is better than clever.

            ","path":["Code Structure as an Agent Interface: What 19 AST Tests Taught Us About Agent-Readable Code"],"tags":[]},{"location":"blog/2026-04-02-code-structure-as-an-agent-interface/#a-before-and-after","level":2,"title":"A Before-and-After","text":"

            To make the agent-readability claim concrete, consider one function through the full transformation.

            Before (the code we started with):

            func MermaidID(pkg string) string {\n    r := strings.NewReplacer(\n        \"/\", \"_\", \".\", \"_\", \"-\", \"_\",\n    )\n    return r.Replace(pkg)\n}\n

            An agent reading this sees six string literals. To understand what the function does, it must: (1) parse the NewReplacer pair semantics, (2) infer that /, ., - are being replaced, (3) guess why, (4) hope the guess is right.

            There is nothing to follow. No import to trace. No name to search. The meaning is locked inside the function body.

            After (the code we ended with):

            func MermaidID(pkg string) string {\n    return regex.MermaidUnsafe.ReplaceAllString(\n        pkg, token.Underscore,\n    )\n}\n

            An agent reading this sees two named references: regex.MermaidUnsafe and token.Underscore.

            To understand the function, it can: (1) look up MermaidUnsafe in config/regex/file.go and see the pattern [/.\\-] with a doc comment explaining it matches invalid Mermaid characters, (2) look up Underscore in config/token/delim.go and see it is the replacement character.

            The agent now has: a named pattern, a named replacement, a package location, documentation, and neighboring context (other regex patterns, other delimiters).

            It got all of this for free by following just two references.

            The indirection is not an overhead. It is the retrieval query.

            ","path":["Code Structure as an Agent Interface: What 19 AST Tests Taught Us About Agent-Readable Code"],"tags":[]},{"location":"blog/2026-04-02-code-structure-as-an-agent-interface/#the-principles","level":2,"title":"The Principles","text":"

            You are not just improving code quality. You are shaping the input space that determines how an LLM can reason about your system.

            Every structural constraint we enforce converts implicit semantics into explicit structure.

            LLMs struggle when meaning is implicit and patterns are statistical.

            They thrive when meaning is explicit and structure is navigable.

            Here is what we learned, organized into three categories.

            ","path":["Code Structure as an Agent Interface: What 19 AST Tests Taught Us About Agent-Readable Code"],"tags":[]},{"location":"blog/2026-04-02-code-structure-as-an-agent-interface/#cognitive-constraints","level":3,"title":"Cognitive Constraints","text":"

            These force agents (and humans) to think harder.

            Indirection acts as a built-in retrieval mechanism:

            Moving magic values to config forces the agent to follow the reference. errMemory.WriteFile(cause) tells the agent \"there is a memory error package, go look.\" fmt.Errorf(\"writing MEMORY.md: %w\", cause) inlines everything and makes the call graph invisible. The indirection IS the retrieval query.

            Unfamiliar patterns force reasoning:

            When an agent sees token.Slash instead of \"/\", it cannot coast on corpus frequency. It has to actually look up what token.Slash is, which forces it through the dependency graph, which means it encounters documentation and neighboring constants, which gives it richer context. You are exploiting the agent's weakness (over-reliance on training data) to make it behave more carefully.

            Documentation helps everyone:

            Extensive documentation helps humans reading the code, agents reasoning about it, and RAG systems indexing it.

            Our TestDocComments check added 308 doc comments in one commit. Every function, every type, every constant block now has a doc comment.

            This is not busywork: it is the content that agents and embeddings consume.

            ","path":["Code Structure as an Agent Interface: What 19 AST Tests Taught Us About Agent-Readable Code"],"tags":[]},{"location":"blog/2026-04-02-code-structure-as-an-agent-interface/#structural-constraints","level":3,"title":"Structural Constraints","text":"

            These shape the codebase into a navigable graph.

            Shorter files save tokens:

            Forcing private helper functions out of main files makes the main file shorter. An agent loading a file spends fewer tokens on boilerplate and more on the logic that matters.

            Fixed-width constraints force decomposition:

            A function that cannot be expressed in 80 columns is either too deeply nested (extract a helper), has too many parameters (introduce a struct), or has a variable name that is too long (rethink the abstraction).

            The constraint forces structural improvements that happen to also make the code more parseable.

            Chunk-friendly structure helps RAG:

            Code intelligence tools chunk files for embedding and retrieval. Short, well-documented, single-responsibility files produce better chunks than monolithic files with mixed concerns.

            The structural constraints create files that RAG systems can index effectively.

            Centralization creates debuggable seams:

            All error handling in internal/err/, all logging in internal/log/, all file operations in internal/io/. One place to debug, one place to test, one place to see patterns. An agent analyzing \"how does this project handle errors\" gets one answer from one package, not 200 scattered fmt.Errorf calls.

            Private functions become public patterns:

            When you extract a private function to satisfy a constraint, it often ends up as a semi-public function in a core/ package. Then you realize it is generic enough to be factored into a purpose-specific module.

            The constraint drives discovery of reusable abstractions hiding inside monolithic functions.

            ","path":["Code Structure as an Agent Interface: What 19 AST Tests Taught Us About Agent-Readable Code"],"tags":[]},{"location":"blog/2026-04-02-code-structure-as-an-agent-interface/#operational-benefits","level":3,"title":"Operational Benefits","text":"

            These pay dividends in daily development.

            Single-edit renames:

            Renaming a flag is one edit to a config constant instead of find-and-replace across 30,000 lines with possible misses. grep token.Slash gives you every place that uses a forward slash semantically.

            grep \"/\" gives you noise.

            Blast radius containment:

            When every magic value is a config constant, a search is one result. This matters for impact analysis, security audits, and agents trying to understand \"what uses this\".

            Compile-time contract enforcement:

            When err/memory.WriteFile exists, the compiler guarantees the error message exists and the call signature is correct. An inline fmt.Errorf can have a typo in the format string and nothing catches it until runtime. Centralization turns runtime failures into compile errors.

            Semantic git blame:

            When token.Slash is used everywhere and someone changes its value, git blame on the config file shows exactly when and why.

            With inline \"/\" scattered across 30 files, the history is invisible.

            Test surface reduction:

            Centralizing into internal/err/, internal/io/, internal/config/ means you test behavior once at the boundary and trust the callers.

            You do not need 30 tests for 30 fmt.Errorf calls. You need 1 test for errMemory.WriteFile and 30 trivial call-site audits, which is exactly what these AST tests provide.

            ","path":["Code Structure as an Agent Interface: What 19 AST Tests Taught Us About Agent-Readable Code"],"tags":[]},{"location":"blog/2026-04-02-code-structure-as-an-agent-interface/#the-numbers","level":2,"title":"The Numbers","text":"

            One session. 25 commits. The raw stats:

            Metric Count New audit tests 13 Total audit tests 19 Files touched 300+ Magic values migrated 90+ Functions renamed 17 Doc comments added 323 Lines rewrapped to 80 chars 190 Config constants created 40+ Config regexes created 3

            Every number represents a violation that existed before the test caught it. The tests did not create work: they revealed work that was already needed.

            ","path":["Code Structure as an Agent Interface: What 19 AST Tests Taught Us About Agent-Readable Code"],"tags":[]},{"location":"blog/2026-04-02-code-structure-as-an-agent-interface/#the-uncomfortable-implication","level":2,"title":"The Uncomfortable Implication","text":"

            None of this is Go-specific.

            If an AI agent interacts with your codebase, your codebase already is an interface. You just have not designed it as one.

            If your error messages are scattered across 200 files, an agent cannot reason about error handling as a concept. If your magic values are inlined, an agent cannot distinguish \"this is a path separator\" from \"this is a division operator.\" If your functions are named write.WriteJournal, the agent wastes tokens on redundant information.

            What we discovered, through the unglamorous work of writing lint tests and migrating string literals, is that the structural constraints software engineering has valued for decades are exactly the constraints that make code readable to machines.

            This is not a coincidence: These constraints exist because they reduce the cognitive load of understanding code.

            Agents have cognitive load too: It is called the context window.

            You are not converting code to a new paradigm.

            You are making the latent graph visible.

            You are converting implicit semantics into explicit structure that both humans and machines can traverse.

            ","path":["Code Structure as an Agent Interface: What 19 AST Tests Taught Us About Agent-Readable Code"],"tags":[]},{"location":"blog/2026-04-02-code-structure-as-an-agent-interface/#whats-next","level":2,"title":"What's Next","text":"

            The spec lists 8 more tests we have not built yet, including TestDescKeyYAMLLinkage (verifying that every DescKey constant has a corresponding YAML entry), TestCLICmdStructure (enforcing the cmd.go / run.go / doc.go file convention), and TestNoFlagBindOutsideFlagbind (which requires migrating ~50 flag registration sites first).

            The broader question: should these principles be codified as a reusable linting framework? The patterns (loadPackages + ast.Inspect + violation collection) are generic.

            The specific checks are project-specific. But the categories of checks (centralization enforcement, magic value detection, naming conventions, documentation requirements) are universal.

            For now, 19 tests in internal/audit/ is enough. They run in 2 seconds as part of go test ./.... They catch real issues.

            And they encode a theory of code quality that serves both humans and the agents that work alongside them.

            Agents are not going away. They are reading your code right now, forming representations of your system in context windows that forget everything between sessions.

            The codebases that structure themselves for that reality will compound. The ones that do not will slowly become illegible to the tools they depend on.

            Structure is no longer just for maintainability. It is for reasonability.

            ","path":["Code Structure as an Agent Interface: What 19 AST Tests Taught Us About Agent-Readable Code"],"tags":[]},{"location":"blog/2026-04-06-the-watermelon-rind-anti-pattern/","level":1,"title":"The Watermelon-Rind Anti-Pattern","text":"","path":["The Watermelon-Rind Anti-Pattern: Why Smarter Tools Make Shallower Agents"],"tags":[]},{"location":"blog/2026-04-06-the-watermelon-rind-anti-pattern/#why-smarter-tools-make-shallower-agents","level":2,"title":"Why Smarter Tools Make Shallower Agents","text":"

            Give an agent a graph query tool, and it will tell you everything about your codebase except what actually matters.

            Jose Alekhinne / April 6, 2026

            ","path":["The Watermelon-Rind Anti-Pattern: Why Smarter Tools Make Shallower Agents"],"tags":[]},{"location":"blog/2026-04-06-the-watermelon-rind-anti-pattern/#a-turkish-proverb-walks-into-a-codebase","level":2,"title":"A Turkish Proverb Walks into a Codebase","text":"

            There's a Turkish idiom: esegin aklina karpuz kabugu sokmak (literally, \"to put watermelon rind into a donkey's mind\"). It means to plant an idea in someone's head that they wouldn't have come up with on their own, usually one that leads them astray.

            In English, let's call this a \"watermelon metric\": a project management term for something that's green on the outside and red on the inside: all dashboards passing, reality crumbling.

            Both halves of this metaphor showed up in a single experiment. And the result changed how we design architecture analysis in ctx.

            ","path":["The Watermelon-Rind Anti-Pattern: Why Smarter Tools Make Shallower Agents"],"tags":[]},{"location":"blog/2026-04-06-the-watermelon-rind-anti-pattern/#the-experiment","level":2,"title":"The Experiment","text":"

            We ran three sessions analyzing the same large codebase (~34,000 symbols) using the same architecture skill, varying only what tools the agent had access to.

            Session Tools Available Output (lines) Character 1 None (MCP broken) 5,866 Deep, intimate 2 Full graph MCP 1,124 Structural, correct 3 Enrichment pass +verified data Additive, not restorative

            Session 1 was an accident. The MCP server that provides code intelligence queries was broken, so the agent couldn't ask the graph anything. It had to read code. Line by line. File by file.

            It produced 5,866 lines of architecture analysis: per-controller data flows, scale math, startup sequences, timeout defaults, edge cases that only surface when you actually look at the implementation.

            Session 2 had working tools. Same skill, same codebase. The agent produced 1,124 lines (5.2x less). Structurally correct. Valid symbol references. Proper call chains.

            And hollow.

            ","path":["The Watermelon-Rind Anti-Pattern: Why Smarter Tools Make Shallower Agents"],"tags":[]},{"location":"blog/2026-04-06-the-watermelon-rind-anti-pattern/#the-rind","level":2,"title":"The Rind","text":"

            The Session 2 output was a watermelon rind: the right shape, the right color, the right texture on the outside. But the substance (the operational details, the defaults nobody documents, the scale math that tells you when a component will fall over) was missing.

            Not wrong. Not broken. Just... thin.

            The agent had answered every question correctly. The problem was that it never discovered the questions it should have asked. When you can query a graph for \"what calls this function?\", you don't stumble into the retry loop that silently swallows errors three layers down. When you can ask for the dependency tree, you don't notice that two packages share a mutable state through a global variable that isn't in any interface.

            The tool answered the question asked but prevented the discovery of answers to questions never asked.

            Here's what that looks like concretely: the graph tells you that ReconcileDeployment calls SyncPods. It does not tell you that SyncPods retries three times with exponential backoff, silently drops errors after timeout, and resets a package-level counter that another goroutine reads without a lock. The call chain is correct.

            The operational reality is invisible.

            ","path":["The Watermelon-Rind Anti-Pattern: Why Smarter Tools Make Shallower Agents"],"tags":[]},{"location":"blog/2026-04-06-the-watermelon-rind-anti-pattern/#the-donkeys-idea","level":2,"title":"The Donkey's Idea","text":"

            This is where the Turkish proverb earns its place: The graph tool is the \"karpuz kabugu\" (the watermelon rind placed into the agent's mind).

            Before the tool existed, the agent had no choice but to read deeply. With the tool available, a new idea appears: why read 500 lines of code when I can query the call graph?

            The agent isn't lazy. It's rational.

            Graph queries are faster, more reliable, and produce verifiably correct output. The agent is optimizing. It's satisficing (finding answers that are good enough) instead of maximizing (finding everything there is to know).

            Satisficing produces watermelon rinds.

            ","path":["The Watermelon-Rind Anti-Pattern: Why Smarter Tools Make Shallower Agents"],"tags":[]},{"location":"blog/2026-04-06-the-watermelon-rind-anti-pattern/#the-two-pass-compiler","level":2,"title":"The Two-Pass Compiler","text":"

            Session 3 taught us that you can't fix shallow analysis by adding more tools after the fact. The enrichment pass added verified graph data (blast radius numbers, registration sites, execution flow confirmation) but it couldn't recover the intimate code knowledge that Session 1 had produced through sheer necessity.

            You can't enrich your way out of a depth deficit.

            So we redesigned. Instead of one skill with optional tools, we built a two-pass compiler for architecture understanding:

            Pass 1: Semantic parsing. The /ctx-architecture skill deliberately has no access to graph query tools. The agent must read code, build mental models, and produce architecture artifacts through human-style comprehension. Constraint is the feature.

            Pass 2: Static analysis. The /ctx-architecture-enrich skill takes Pass 1 output as input and runs comprehensive verification through code intelligence: blast radius analysis, registration site discovery, execution flow tracing, domain clustering comparison. It extends and verifies, but it doesn't replace.

            The key insight: these must be separate skills with separate tool permissions. If you give the agent graph tools during Pass 1, it will use them. The \"karpuz kabugu\" will be in its mind. The only way to prevent satisficing is to remove the option.

            ","path":["The Watermelon-Rind Anti-Pattern: Why Smarter Tools Make Shallower Agents"],"tags":[]},{"location":"blog/2026-04-06-the-watermelon-rind-anti-pattern/#the-principle","level":2,"title":"The Principle","text":"

            We call this constraint-as-feature: deliberately withholding capabilities to force deeper engagement.

            It sounds paradoxical. You built sophisticated code intelligence tools and then... forbid the agent from using them? During the most important phase?

            Yes. Because the tools don't make the agent smarter. They make it faster. And faster, in architecture analysis, is the enemy of deep.

            What's actually happening is subtler: tools reduce the agent's search space. A graph query collapses thousands of possible observations into one precise answer. That's efficient for known questions. But architecture understanding depends on unknown unknowns: and you only find those by wandering through code with nothing to shortcut the journey.

            The constraint forces the agent into a mode of operation that produces better output than any amount of tooling can achieve. The limitation is the capability.

            ","path":["The Watermelon-Rind Anti-Pattern: Why Smarter Tools Make Shallower Agents"],"tags":[]},{"location":"blog/2026-04-06-the-watermelon-rind-anti-pattern/#when-does-this-apply","level":2,"title":"When Does This Apply?","text":"

            Not always. The watermelon-rind anti-pattern is specific to exploratory analysis: tasks where the value comes from discovering unknowns, not from answering known questions.

            Graph tools are excellent for:

            • Verification: \"Does X actually call Y?\" (binary question, precise answer)
            • Impact analysis: \"What breaks if I change Z?\" (bounded scope, enumerable results)
            • Navigation: \"Where is this interface implemented?\" (lookup, not analysis)

            Graph tools produce watermelon rinds when:

            • The goal is understanding, not answering
            • The unknowns are unknown: you don't know what to ask
            • Depth matters more than breadth: operational details, edge cases, implicit coupling

            The two-pass approach preserves both: deep reading first, tool verification second.

            ","path":["The Watermelon-Rind Anti-Pattern: Why Smarter Tools Make Shallower Agents"],"tags":[]},{"location":"blog/2026-04-06-the-watermelon-rind-anti-pattern/#takeaway","level":2,"title":"Takeaway","text":"

            The two-pass approach is the slowest way to analyze a codebase. It is also the only way that produces both depth and accuracy. We accept the cost because architecture analysis is not a speed game: it is a coverage game.

            Esegin aklina karpuz kabugu sokma!

            (don't put the watermelon rind into a donkey's mind)

            If the agent never struggles, it never discovers. And if it never discovers, you are not doing architecture; you are doing autocomplete.

            This post is part of the ctx field notes series, documenting what we learn building persistent context infrastructure for AI coding sessions.

            ","path":["The Watermelon-Rind Anti-Pattern: Why Smarter Tools Make Shallower Agents"],"tags":[]},{"location":"cli/","level":1,"title":"CLI","text":"","path":["CLI"],"tags":[]},{"location":"cli/#ctx-cli","level":2,"title":"ctx CLI","text":"

            Complete reference for all ctx commands, grouped by function.

            ","path":["CLI"],"tags":[]},{"location":"cli/#global-options","level":2,"title":"Global Options","text":"

            All commands support these flags:

            Flag Description --help Show command help --version Show version --tool <name> Override active AI tool identifier (e.g. kiro, cursor)

            Context declaration required. ctx does not walk the filesystem looking for .context/. Every non-exempt command requires CTX_DIR to be declared explicitly before it runs. The single declaration channel is the environment variable:

            • eval \"$(ctx activate)\": binds CTX_DIR for the current shell.
            • CTX_DIR=/abs/path/to/.context exported in the environment, or inlined as CTX_DIR=/abs/path/to/.context ctx <command> for a one-shot.

            CTX_DIR must be an absolute path with .context as its basename. Relative paths and other names are rejected on first use; the basename guard catches the common footgun (export CTX_DIR=$(pwd)) before stray writes can leak to the project root.

            Commands fail fast with a linkable error (see Activating a Context Directory) when none is declared. The exempt allowlist (commands that run without a declared context directory) is: ctx init, ctx activate, ctx deactivate, ctx version, ctx help, ctx system bootstrap, ctx doctor, ctx guide, ctx why, ctx config switch/status, ctx hub *.

            Initialization required. Once declared, the target must already have been initialized by ctx init (otherwise commands return ctx: not initialized).

            ","path":["CLI"],"tags":[]},{"location":"cli/#getting-started","level":2,"title":"Getting Started","text":"Command Description ctx init Initialize .context/ directory with templates ctx activate Emit export CTX_DIR=... to bind context for the shell ctx deactivate Emit unset CTX_DIR to clear the binding ctx status Show context summary (files, tokens, drift) ctx guide Quick-reference cheat sheet ctx why Read the philosophy behind ctx","path":["CLI"],"tags":[]},{"location":"cli/#context","level":2,"title":"Context","text":"Command Description ctx add Add a task, decision, learning, or convention ctx load Output assembled context in read order ctx agent Print token-budgeted context packet for AI consumption ctx sync Reconcile context with codebase state ctx drift Detect stale paths, secrets, missing files ctx compact Archive completed tasks, clean up files ctx fmt Format context files to 80-char line width ctx decision Manage DECISIONS.md (reindex) ctx learning Manage LEARNINGS.md (reindex) ctx task Task completion, archival, and snapshots ctx reindex Regenerate indices for DECISIONS.md and LEARNINGS.md ctx permission Permission snapshots (golden image) ctx change Show what changed since last session ctx memory Bridge Claude Code auto memory into .context/ ctx watch Auto-apply context updates from AI output","path":["CLI"],"tags":[]},{"location":"cli/#sessions","level":2,"title":"Sessions","text":"Command Description ctx journal Browse, import, enrich, and lock session history ctx pad Encrypted scratchpad for sensitive one-liners ctx remind Session-scoped reminders that surface at session start ctx hook pause Pause context hooks for the current session ctx hook resume Resume paused context hooks","path":["CLI"],"tags":[]},{"location":"cli/#integrations","level":2,"title":"Integrations","text":"Command Description ctx setup Generate AI tool integration configs ctx steering Manage steering files (behavioral rules for AI tools) ctx trigger Manage lifecycle 
triggers (scripts for automation) ctx skill Manage reusable instruction bundles ctx mcp MCP server for AI tool integration (stdin/stdout) ctx hook notify Webhook notifications (setup, test, send) ctx loop Generate autonomous loop script ctx connection Client-side commands for connecting to a ctx Hub ctx hub Operate a ctx Hub server or cluster ctx serve Serve a static site locally via zensical ctx site Site management (feed generation)","path":["CLI"],"tags":[]},{"location":"cli/#diagnostics","level":2,"title":"Diagnostics","text":"Command Description ctx doctor Structural health check (hooks, drift, config) ctx trace Show context behind git commits ctx sysinfo Show system resource usage (memory, swap, disk, load) ctx usage Show session token usage stats","path":["CLI"],"tags":[]},{"location":"cli/#runtime","level":2,"title":"Runtime","text":"Command Description ctx config Manage runtime configuration profiles ctx prune Clean stale per-session state files ctx hook Hook message, notification, and lifecycle controls ctx system Hook plumbing and agent-only commands (not user-facing)","path":["CLI"],"tags":[]},{"location":"cli/#shell","level":2,"title":"Shell","text":"Command Description ctx completion Generate shell autocompletion scripts","path":["CLI"],"tags":[]},{"location":"cli/#exit-codes","level":2,"title":"Exit Codes","text":"Code Meaning 0 Success 1 General error / warnings (e.g. drift) 2 Context not found 3 Violations found (e.g. drift) 4 File operation error","path":["CLI"],"tags":[]},{"location":"cli/#environment-variables","level":2,"title":"Environment Variables","text":"Variable Description CTX_DIR Override default context directory path CTX_TOKEN_BUDGET Override default token budget CTX_SESSION_ID Active AI session ID (used by ctx trace for context linking)","path":["CLI"],"tags":[]},{"location":"cli/#configuration-file","level":2,"title":"Configuration File","text":"

            Optional .ctxrc (YAML format) at project root:

            # .ctxrc\ntoken_budget: 8000           # Default token budget\npriority_order:              # File loading priority\n  - TASKS.md\n  - DECISIONS.md\n  - CONVENTIONS.md\nauto_archive: true           # Auto-archive old items\narchive_after_days: 7        # Days before archiving tasks\nscratchpad_encrypt: true     # Encrypt scratchpad (default: true)\nevent_log: false             # Enable local hook event logging\ncompanion_check: true        # Check companion tools at session start\nentry_count_learnings: 30    # Drift warning threshold (0 = disable)\nentry_count_decisions: 20    # Drift warning threshold (0 = disable)\nconvention_line_count: 200   # Line count warning for CONVENTIONS.md (0 = disable)\ninjection_token_warn: 15000  # Oversize injection warning (0 = disable)\ncontext_window: 200000       # Auto-detected for Claude Code; override for other tools\nbilling_token_warn: 0        # One-shot billing warning at this token count (0 = disabled)\nkey_rotation_days: 90        # Days before key rotation nudge\nsession_prefixes:            # Recognized session header prefixes (extend for i18n)\n  - \"Session:\"               # English (default)\n  # - \"Oturum:\"              # Turkish (add as needed)\n  # - \"セッション:\"             # Japanese (add as needed)\nfreshness_files:             # Files with technology-dependent constants (opt-in)\n  - path: config/thresholds.yaml\n    desc: Model token limits and batch sizes\n    review_url: https://docs.example.com/limits  # Optional\nnotify:                      # Webhook notification settings\n  events:                    # Required: only listed events fire\n    - loop\n    - nudge\n    - relay\n    # - heartbeat            # Every-prompt session-alive signal\ntool: \"\"                     # Active AI tool: claude, cursor, cline, kiro, codex\nsteering:                    # Steering layer configuration\n  dir: .context/steering     # Steering files directory\n  default_inclusion: manual  # Default inclusion 
mode (always, auto, manual)\n  default_tools: []          # Default tool filter for new steering files\nhooks:                       # Hook system configuration\n  dir: .context/hooks        # Hook scripts directory\n  timeout: 10                # Per-hook execution timeout in seconds\n  enabled: true              # Whether hook execution is enabled\n
            Field Type Default Description token_budget int 8000 Default token budget for ctx agent priority_order []string (all files) File loading priority for context packets auto_archive bool true Auto-archive completed tasks archive_after_days int 7 Days before completed tasks are archived scratchpad_encrypt bool true Encrypt scratchpad with AES-256-GCM event_log bool false Enable local hook event logging to .context/state/events.jsonl companion_check bool true Check companion tool availability (Gemini Search, GitNexus) during /ctx-remember entry_count_learnings int 30 Drift warning when LEARNINGS.md exceeds this count entry_count_decisions int 20 Drift warning when DECISIONS.md exceeds this count convention_line_count int 200 Line count warning for CONVENTIONS.md injection_token_warn int 15000 Warn when auto-injected context exceeds this token count (0 = disable) context_window int 200000 Context window size in tokens. Auto-detected for Claude Code (200k/1M); override for other AI tools billing_token_warn int 0 (off) One-shot warning when session tokens exceed this threshold (0 = disabled) key_rotation_days int 90 Days before encryption key rotation nudge session_prefixes []string [\"Session:\"] Recognized Markdown session header prefixes. Extend to parse sessions written in other languages freshness_files []object (none) Files to track for staleness (path, desc, optional review_url). 
Hook warns after 6 months without modification notify.events []string (all) Event filter for webhook notifications (empty = all) tool string (empty) Active AI tool identifier (claude, cursor, cline, kiro, codex) steering.dir string .context/steering Steering files directory steering.default_inclusion string manual Default inclusion mode for new steering files (always, auto, manual) steering.default_tools []string (all) Default tool filter for new steering files (empty = all tools) hooks.dir string .context/hooks Hook scripts directory hooks.timeout int 10 Per-hook execution timeout in seconds hooks.enabled bool true Whether hook execution is enabled

            Priority order: CLI flags > Environment variables > .ctxrc > Defaults

            All settings are optional. Missing values use defaults.

            ","path":["CLI"],"tags":[]},{"location":"cli/bootstrap/","level":1,"title":"System Bootstrap","text":"","path":["System Bootstrap"],"tags":[]},{"location":"cli/bootstrap/#ctx-system-bootstrap","level":3,"title":"ctx system bootstrap","text":"

            Print the resolved context directory path so AI agents can anchor their session. The default output lists the context directory, the tracked context files, and a short health snapshot. --quiet prints just the path; --json produces structured output for automation.

            This is a hidden, agent-only command that agents are instructed to run first in their session-start procedure; it is the authoritative answer to \"where does this project's context live?\".

            ctx system bootstrap [flags]\n

            Flags:

            Flag Description -q, --quiet Output only the context directory path --json Output in JSON format

            Examples:

            ctx system bootstrap                 # Text output for agents\nctx system bootstrap -q              # Just the context directory path\nctx system bootstrap --json          # Structured output for automation\n

            Note: -q prints just the resolved directory path. See Activating a Context Directory if you hit a \"no context directory specified\" error.

            ","path":["System Bootstrap"],"tags":[]},{"location":"cli/change/","level":1,"title":"Change","text":"","path":["CLI","Context","Change"],"tags":[]},{"location":"cli/change/#ctx-change","level":2,"title":"ctx change","text":"

            Show what changed in context files and code since your last session.

            Automatically detects the previous session boundary from state markers or event log. Useful at session start to quickly see what moved while you were away.

            ctx change [flags]\n

            Flags:

            Flag Description --since Time reference: duration (24h) or date (2026-03-01)

            Reference time detection (priority order):

            1. --since flag (duration, date, or RFC3339 timestamp)
            2. ctx-loaded-* marker files in .context/state/ (second most recent)
            3. Last context-load-gate event from .context/state/events.jsonl
            4. Fallback: 24 hours ago

            Examples:

            # Auto-detect last session, show what changed\nctx change\n\n# Changes in the last 48 hours\nctx change --since 48h\n\n# Changes since a specific date\nctx change --since 2026-03-10\n

            Output:

            ## Changes Since Last Session\n\n**Reference point**: 6 hours ago\n\n### Context File Changes\n- `TASKS.md` - modified 2026-03-12 14:30\n- `DECISIONS.md` - modified 2026-03-12 09:15\n\n### Code Changes\n- **12 commits** since reference point\n- **Latest**: Fix journal enrichment ordering\n- **Directories touched**: internal, docs, specs\n- **Authors**: jose, claude\n

            Context file changes are detected by filesystem mtime (works without git). Code changes use git log --since (empty when not in a git repo).

            See also: Reviewing Session Changes.

            ","path":["CLI","Context","Change"],"tags":[]},{"location":"cli/completion/","level":1,"title":"Completion","text":"","path":["CLI","Shell","Completion"],"tags":[]},{"location":"cli/completion/#ctx-completion","level":2,"title":"ctx completion","text":"

            Generate shell autocompletion scripts.

            ctx completion <shell>\n
            ","path":["CLI","Shell","Completion"],"tags":[]},{"location":"cli/completion/#subcommands","level":3,"title":"Subcommands","text":"Shell Command bash ctx completion bash zsh ctx completion zsh fish ctx completion fish powershell ctx completion powershell

            Examples:

            ctx completion bash > /etc/bash_completion.d/ctx\nctx completion zsh  > \"${fpath[1]}/_ctx\"\nctx completion fish > ~/.config/fish/completions/ctx.fish\nctx completion powershell | Out-String | Invoke-Expression\n
            ","path":["CLI","Shell","Completion"],"tags":[]},{"location":"cli/completion/#installation","level":3,"title":"Installation","text":"BashZshFishPowerShell
            # Add to ~/.bashrc\nsource <(ctx completion bash)\n
            # Add to ~/.zshrc\nsource <(ctx completion zsh)\n
            ctx completion fish | source\n# Or save to completions directory\nctx completion fish > ~/.config/fish/completions/ctx.fish\n
            # Add to your PowerShell profile\nctx completion powershell | Out-String | Invoke-Expression\n
            ","path":["CLI","Shell","Completion"],"tags":[]},{"location":"cli/config/","level":1,"title":"Config","text":"","path":["CLI","Runtime","Config"],"tags":[]},{"location":"cli/config/#ctx-config","level":3,"title":"ctx config","text":"

            Manage runtime configuration profiles.

            ctx config <subcommand>\n

            The ctx repo ships two .ctxrc source profiles (.ctxrc.base and .ctxrc.dev). The working copy (.ctxrc) is gitignored and switched between them using subcommands below.

            ","path":["CLI","Runtime","Config"],"tags":[]},{"location":"cli/config/#ctx-config-switch","level":4,"title":"ctx config switch","text":"

            Switch between .ctxrc configuration profiles.

            ctx config switch [dev|base]\n

            With no argument, toggles between dev and base. Accepts prod as an alias for base.

            Argument Description dev Switch to dev profile (verbose logging) base Switch to base profile (all defaults) (none) Toggle to the opposite profile

            Profiles:

            Profile Description dev Verbose logging, webhook notifications on base All defaults, notifications off

            Examples:

            ctx config switch dev     # Switch to dev profile\nctx config switch base    # Switch to base profile\nctx config switch         # Toggle (dev → base or base → dev)\nctx config switch prod    # Alias for \"base\"\n

            The detection heuristic checks for an uncommented notify: line in .ctxrc: present means dev, absent means base.

            ","path":["CLI","Runtime","Config"],"tags":[]},{"location":"cli/config/#ctx-config-status","level":4,"title":"ctx config status","text":"

            Show which .ctxrc profile is currently active.

            ctx config status\n

            Output examples:

            active: dev (verbose logging enabled)\nactive: base (defaults)\nactive: none (.ctxrc does not exist)\n

            See also: Configuration, Contributing: Configuration Profiles

            ","path":["CLI","Runtime","Config"],"tags":[]},{"location":"cli/connect/","level":1,"title":"Connect","text":"","path":["Connect"],"tags":[]},{"location":"cli/connect/#ctx-connect","level":2,"title":"ctx connect","text":"

            Connect a project to a ctx Hub for cross-project knowledge sharing. Projects publish decisions, learnings, conventions, and tasks to a hub; other subscribed projects receive them alongside local context.

            New to the Hub?

            Start with the ctx Hub overview for the mental model (what the hub is, who it's for, what it is not), then walk through Getting Started. This page is a command reference, not an introduction.

            The unit of identity is a project, not a user. Registering a directory with ctx connect register binds a per-project client token in .context/.connect.enc. Two developers on the same project either share that file over a trusted channel, or each register under a different project name.

            Only structured entries flow through the hub: decision, learning, convention, task. Session journals, scratchpad contents, and other local state stay on the machine that created them.

            ","path":["Connect"],"tags":[]},{"location":"cli/connect/#ctx-connect-register","level":3,"title":"ctx connect register","text":"

            One-time registration with a hub. Requires the hub address and admin token (printed by ctx hub start on first run).

            ctx connect register localhost:9900 --token ctx_adm_7f3a...\n

            On success, stores an encrypted connection config in .context/.connect.enc for future RPCs.

            ","path":["Connect"],"tags":[]},{"location":"cli/connect/#ctx-connect-subscribe","level":3,"title":"ctx connect subscribe","text":"

            Set which entry types to receive from the hub. Only matching types are returned by sync and listen.

            ctx connect subscribe decision learning\nctx connect subscribe decision learning convention\n
            ","path":["Connect"],"tags":[]},{"location":"cli/connect/#ctx-connect-sync","level":3,"title":"ctx connect sync","text":"

            Pull matching entries from the hub and write them to .context/hub/ as markdown files with origin tags and date headers. Tracks last-seen sequence for incremental sync.

            ctx connect sync\n
            ","path":["Connect"],"tags":[]},{"location":"cli/connect/#ctx-connect-publish","level":3,"title":"ctx connect publish","text":"

            Push entries to the hub. Specify type and content as arguments.

            ctx connect publish decision \"Use UTC timestamps everywhere\"\n
            ","path":["Connect"],"tags":[]},{"location":"cli/connect/#ctx-connect-listen","level":3,"title":"ctx connect listen","text":"

            Stream new entries from the hub in real-time. Writes to .context/hub/ as entries arrive. Press Ctrl-C to stop.

            ctx connect listen\n
            ","path":["Connect"],"tags":[]},{"location":"cli/connect/#ctx-connect-status","level":3,"title":"ctx connect status","text":"

            Show hub connection state and entry statistics.

            ctx connect status\n
            ","path":["Connect"],"tags":[]},{"location":"cli/connect/#automatic-sharing","level":2,"title":"Automatic Sharing","text":"

            Use --share on ctx add to write locally AND publish to the hub:

            ctx add decision \"Use UTC\" --share \\\n  --context \"Need consistency\" \\\n  --rationale \"Avoid timezone bugs\" \\\n  --consequence \"UI does conversion\"\n

            If the hub is unreachable, the local write succeeds and a warning is printed. The --share flag is best-effort; it never blocks local context updates.

            ","path":["Connect"],"tags":[]},{"location":"cli/connect/#auto-sync","level":2,"title":"Auto-Sync","text":"

            Once registered, the check-hub-sync hook automatically syncs new entries from the hub at the start of each session (daily throttled). No manual ctx connect sync needed.

            ","path":["Connect"],"tags":[]},{"location":"cli/connect/#shared-files","level":2,"title":"Shared Files","text":"

            Entries from the hub are stored in .context/hub/:

            .context/hub/\n  decisions.md      # Shared decisions with origin tags\n  learnings.md      # Shared learnings\n  conventions.md    # Shared conventions\n  .sync-state.json  # Last-seen sequence tracker\n

            These files are read-only (managed by sync/listen) and never mixed with local context files.

            ","path":["Connect"],"tags":[]},{"location":"cli/connect/#agent-integration","level":2,"title":"Agent Integration","text":"

            Include shared knowledge in agent context packets:

            ctx agent --include-hub\n

            Shared entries are included as Tier 8 in the budget-aware assembly, scored by recency and type relevance.

            ","path":["Connect"],"tags":[]},{"location":"cli/connection/","level":1,"title":"Connect","text":"","path":["CLI","Integrations","Connect"],"tags":[]},{"location":"cli/connection/#ctx-connect","level":2,"title":"ctx connect","text":"

            Connect a project to a ctx Hub for cross-project knowledge sharing. Projects publish decisions, learnings, conventions, and tasks to a hub; other subscribed projects receive them alongside local context.

            New to the ctx Hub?

            Start with the ctx Hub overview for the mental model (what the hub is, who it's for, what it is not), then walk through Getting Started. This page is a command reference, not an introduction.

            The unit of identity is a project, not a user. Registering a directory with ctx connection register binds a per-project client token in .context/.connect.enc. Two developers on the same project either share that file over a trusted channel, or each register under a different project name.

            Only structured entries flow through the hub: decision, learning, convention, task. Session journals, scratchpad contents, and other local state stay on the machine that created them.

            ","path":["CLI","Integrations","Connect"],"tags":[]},{"location":"cli/connection/#ctx-connection-register","level":3,"title":"ctx connection register","text":"

            One-time registration with a ctx Hub. Requires the ctx Hub address and admin token (printed by ctx hub start on first run).

            Examples:

            ctx connection register localhost:9900 --token ctx_adm_7f3a...\n

            On success, stores an encrypted connection config in .context/.connect.enc for future RPCs.

            ","path":["CLI","Integrations","Connect"],"tags":[]},{"location":"cli/connection/#ctx-connection-subscribe","level":3,"title":"ctx connection subscribe","text":"

            Set which entry types to receive from the ctx Hub. Only matching types are returned by sync and listen.

            Examples:

            ctx connection subscribe decision learning\nctx connection subscribe decision learning convention\n
            ","path":["CLI","Integrations","Connect"],"tags":[]},{"location":"cli/connection/#ctx-connection-sync","level":3,"title":"ctx connection sync","text":"

            Pull matching entries from the ctx Hub and write them to .context/hub/ as markdown files with origin tags and date headers. Tracks last-seen sequence for incremental sync.

            Examples:

            ctx connection sync\n
            ","path":["CLI","Integrations","Connect"],"tags":[]},{"location":"cli/connection/#ctx-connection-publish","level":3,"title":"ctx connection publish","text":"

            Push entries to the ctx Hub. Specify type and content as arguments.

            Examples:

            ctx connection publish decision \"Use UTC timestamps everywhere\"\nctx connection publish learning \"Go embed requires files in same package\"\n
            ","path":["CLI","Integrations","Connect"],"tags":[]},{"location":"cli/connection/#ctx-connection-listen","level":3,"title":"ctx connection listen","text":"

            Stream new entries from the ctx Hub in real-time. Writes to .context/hub/ as entries arrive. Press Ctrl-C to stop.

            Examples:

            ctx connection listen\n
            ","path":["CLI","Integrations","Connect"],"tags":[]},{"location":"cli/connection/#ctx-connection-status","level":3,"title":"ctx connection status","text":"

            Show ctx Hub connection state and entry statistics.

            Examples:

            ctx connection status\n
            ","path":["CLI","Integrations","Connect"],"tags":[]},{"location":"cli/connection/#automatic-sharing","level":2,"title":"Automatic Sharing","text":"

            Use --share on ctx add to write locally AND publish to the ctx Hub:

            ctx add decision \"Use UTC\" --share \\\n  --context \"Need consistency\" \\\n  --rationale \"Avoid timezone bugs\" \\\n  --consequence \"UI does conversion\"\n

            If the hub is unreachable, the local write succeeds and a warning is printed. The --share flag is best-effort; it never blocks local context updates.

            ","path":["CLI","Integrations","Connect"],"tags":[]},{"location":"cli/connection/#auto-sync","level":2,"title":"Auto-Sync","text":"

            Once registered, the check-hub-sync hook automatically syncs new entries from the ctx Hub at the start of each session (daily throttled). No manual ctx connection sync needed.

            ","path":["CLI","Integrations","Connect"],"tags":[]},{"location":"cli/connection/#shared-files","level":2,"title":"Shared Files","text":"

            Entries from the ctx Hub are stored in .context/hub/:

            .context/hub/\n  decisions.md      # Shared decisions with origin tags\n  learnings.md      # Shared learnings\n  conventions.md    # Shared conventions\n  .sync-state.json  # Last-seen sequence tracker\n

            These files are read-only (managed by sync/listen) and never mixed with local context files.

            ","path":["CLI","Integrations","Connect"],"tags":[]},{"location":"cli/connection/#agent-integration","level":2,"title":"Agent Integration","text":"

            Include shared knowledge in agent context packets:

            ctx agent --include-hub\n

            Shared entries are included as Tier 8 in the budget-aware assembly, scored by recency and type relevance.

            ","path":["CLI","Integrations","Connect"],"tags":[]},{"location":"cli/context/","level":1,"title":"Context Management","text":"","path":["CLI","Context","Context Management"],"tags":[]},{"location":"cli/context/#ctx-add","level":3,"title":"ctx add","text":"

            Add a new item to a context file.

            ctx add <type> <content> [flags]\n

            Types:

            Type Target File task TASKS.md decision DECISIONS.md learning LEARNINGS.md convention CONVENTIONS.md

            Flags:

            Flag Short Description --priority <level> -p Priority for tasks: high, medium, low --section <name> -s Target section within file --context -c Context (required for decisions and learnings) --rationale -r Rationale for decisions (required for decisions) --consequence Consequence for decisions (required for decisions) --lesson -l Key insight (required for learnings) --application -a How to apply going forward (required for learnings) --file -f Read content from file instead of argument

            Examples:

            # Add a task\nctx add task \"Implement user authentication\" \\\n  --session-id abc12345 --branch main --commit 68fbc00a\nctx add task \"Fix login bug\" --priority high \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n\n# Record a decision (requires all ADR (Architectural Decision Record) fields)\nctx add decision \"Use PostgreSQL for primary database\" \\\n  --context \"Need a reliable database for production\" \\\n  --rationale \"PostgreSQL offers ACID compliance and JSON support\" \\\n  --consequence \"Team needs PostgreSQL training\" \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n\n# Note a learning (requires context, lesson, and application)\nctx add learning \"Vitest mocks must be hoisted\" \\\n  --context \"Tests failed with undefined mock errors\" \\\n  --lesson \"Vitest hoists vi.mock() calls to top of file\" \\\n  --application \"Always place vi.mock() before imports in test files\" \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n\n# Add to specific section\nctx add convention \"Use kebab-case for filenames\" --section \"Naming\"\n
            ","path":["CLI","Context","Context Management"],"tags":[]},{"location":"cli/context/#ctx-drift","level":3,"title":"ctx drift","text":"

            Detect stale or invalid context.

            ctx drift [flags]\n

            Flags:

            Flag Description --json Output machine-readable JSON --fix Auto-fix simple issues

            Checks:

            • Path references in ARCHITECTURE.md and CONVENTIONS.md exist
            • Task references are valid
            • Constitution rules aren't violated (heuristic)
            • Staleness indicators (old files, many completed tasks)
            • Missing packages: warns when internal/ directories exist on disk but are not referenced in ARCHITECTURE.md (suggests running /ctx-architecture)
            • Entry count: warns when LEARNINGS.md or DECISIONS.md exceed configurable thresholds (default: 30 learnings, 20 decisions), or when CONVENTIONS.md exceeds a line count threshold (default: 200). Configure via .ctxrc:
              entry_count_learnings: 30      # warn above this (0 = disable)\nentry_count_decisions: 20      # warn above this (0 = disable)\nconvention_line_count: 200     # warn above this (0 = disable)\n

            Example:

            ctx drift\nctx drift --json\nctx drift --fix\n

            Exit codes:

            Code Meaning 0 All checks passed 1 Warnings found 3 Violations found","path":["CLI","Context","Context Management"],"tags":[]},{"location":"cli/context/#ctx-sync","level":3,"title":"ctx sync","text":"

            Reconcile context with the current codebase state.

            ctx sync [flags]\n

            Flags:

            Flag Description --dry-run Show what would change without modifying

            What it does:

            • Scans codebase for structural changes
            • Compares with ARCHITECTURE.md
            • Suggests documenting dependencies if package files exist
            • Identifies stale or outdated context

            Example:

            ctx sync\nctx sync --dry-run\n
            ","path":["CLI","Context","Context Management"],"tags":[]},{"location":"cli/context/#ctx-compact","level":3,"title":"ctx compact","text":"

            Consolidate and clean up context files.

            • Moves completed tasks older than 7 days to the archive
            • Removes empty sections
            ctx compact [flags]\n

            Flags:

            Flag Description --archive Create .context/archive/ for old content

            Example:

            ctx compact\nctx compact --archive\n
            ","path":["CLI","Context","Context Management"],"tags":[]},{"location":"cli/context/#ctx-fmt","level":3,"title":"ctx fmt","text":"

            Format context files to a consistent line width.

            Wraps long lines in TASKS.md, DECISIONS.md, LEARNINGS.md, and CONVENTIONS.md at word boundaries. Markdown list items get 2-space continuation indent. Headings, tables, frontmatter, and HTML comments are preserved as-is.

            Idempotent: running twice produces the same output.

            ctx fmt [flags]\n

            Flags:

            Flag Type Default Description --width int 80 Target line width --check bool false Check only, exit 1 if files would change

            Examples:

            ctx fmt              # format all context files\nctx fmt --check      # CI mode: check without modifying\nctx fmt --width 100  # custom width\n

            Also available as a Makefile target:

            make fmt-context\n
            ","path":["CLI","Context","Context Management"],"tags":[]},{"location":"cli/context/#ctx-task","level":3,"title":"ctx task","text":"

            Manage task completion, archival, and snapshots.

            ctx task <subcommand>\n
            ","path":["CLI","Context","Context Management"],"tags":[]},{"location":"cli/context/#ctx-task-complete","level":4,"title":"ctx task complete","text":"

            Mark a task as completed.

            ctx task complete <task-id-or-text>\n

            Arguments:

            • task-id-or-text: Task number or partial text match

            Examples:

            # By text (partial match)\nctx task complete \"user auth\"\n\n# By task number\nctx task complete 3\n
            ","path":["CLI","Context","Context Management"],"tags":[]},{"location":"cli/context/#ctx-task-archive","level":4,"title":"ctx task archive","text":"

            Move completed tasks from TASKS.md to a timestamped archive file.

            ctx task archive [flags]\n

            Flags:

            Flag Description --dry-run Preview changes without modifying files

            Archive files are stored in .context/archive/ with timestamped names (tasks-YYYY-MM-DD.md). Completed tasks (marked with [x]) are moved; pending tasks ([ ]) remain in TASKS.md.

            Example:

            ctx task archive\nctx task archive --dry-run\n
            ","path":["CLI","Context","Context Management"],"tags":[]},{"location":"cli/context/#ctx-task-snapshot","level":4,"title":"ctx task snapshot","text":"

            Create a point-in-time snapshot of TASKS.md without modifying the original.

            ctx task snapshot [name]\n

            Arguments:

            • name: Optional name for the snapshot (defaults to \"snapshot\")

            Snapshots are stored in .context/archive/ with timestamped names (tasks-<name>-YYYY-MM-DD-HHMM.md).

            Example:

            ctx task snapshot\nctx task snapshot \"before-refactor\"\n
            ","path":["CLI","Context","Context Management"],"tags":[]},{"location":"cli/context/#ctx-permission","level":3,"title":"ctx permission","text":"

            Manage Claude Code permission snapshots.

            ctx permission <subcommand>\n
            ","path":["CLI","Context","Context Management"],"tags":[]},{"location":"cli/context/#ctx-permission-snapshot","level":4,"title":"ctx permission snapshot","text":"

            Save .claude/settings.local.json as the golden image.

            ctx permission snapshot\n

            Creates .claude/settings.golden.json as a byte-for-byte copy of the current settings. Overwrites if the golden file already exists.

            The golden file is meant to be committed to version control and shared with the team.

            Example:

            ctx permission snapshot\n# Saved golden image: .claude/settings.golden.json\n
            ","path":["CLI","Context","Context Management"],"tags":[]},{"location":"cli/context/#ctx-permission-restore","level":4,"title":"ctx permission restore","text":"

            Replace settings.local.json with the golden image.

            ctx permission restore\n

            Prints a diff of dropped (session-accumulated) and restored permissions. No-op if the files already match.

            Example:

            ctx permission restore\n# Dropped 3 session permission(s):\n#   - Bash(cat /tmp/debug.log:*)\n#   - Bash(rm /tmp/test-*:*)\n#   - Bash(curl https://example.com:*)\n# Restored from golden image.\n
            ","path":["CLI","Context","Context Management"],"tags":[]},{"location":"cli/context/#ctx-reindex","level":3,"title":"ctx reindex","text":"

            Regenerate the quick-reference index for both DECISIONS.md and LEARNINGS.md in a single invocation.

            ctx reindex\n

            This is a convenience wrapper around ctx decision reindex and ctx learning reindex. Both files grow at similar rates and users typically want to reindex both after manual edits.

            The index is a compact table of date and title for each entry, allowing AI tools to scan entries without reading the full file.

            Example:

            ctx reindex\n# ✓ Index regenerated with 12 entries\n# ✓ Index regenerated with 8 entries\n
            ","path":["CLI","Context","Context Management"],"tags":[]},{"location":"cli/context/#ctx-decision","level":3,"title":"ctx decision","text":"

            Manage the DECISIONS.md file.

            ctx decision <subcommand>\n
            ","path":["CLI","Context","Context Management"],"tags":[]},{"location":"cli/context/#ctx-decision-reindex","level":4,"title":"ctx decision reindex","text":"

            Regenerate the quick-reference index at the top of DECISIONS.md.

            ctx decision reindex\n

            The index is a compact table showing the date and title for each decision, allowing AI tools to quickly scan entries without reading the full file.

            Use this after manual edits to DECISIONS.md or when migrating existing files to use the index format.

            Example:

            ctx decision reindex\n# ✓ Index regenerated with 12 entries\n
            ","path":["CLI","Context","Context Management"],"tags":[]},{"location":"cli/context/#ctx-learning","level":3,"title":"ctx learning","text":"

            Manage the LEARNINGS.md file.

            ctx learning <subcommand>\n
            ","path":["CLI","Context","Context Management"],"tags":[]},{"location":"cli/context/#ctx-learning-reindex","level":4,"title":"ctx learning reindex","text":"

            Regenerate the quick-reference index at the top of LEARNINGS.md.

            ctx learning reindex\n

            The index is a compact table showing the date and title for each learning, allowing AI tools to quickly scan entries without reading the full file.

            Use this after manual edits to LEARNINGS.md or when migrating existing files to use the index format.

            Example:

            ctx learning reindex\n# ✓ Index regenerated with 8 entries\n
            ","path":["CLI","Context","Context Management"],"tags":[]},{"location":"cli/doctor/","level":1,"title":"Doctor","text":"","path":["CLI","Diagnostics","Doctor"],"tags":[]},{"location":"cli/doctor/#ctx-doctor","level":3,"title":"ctx doctor","text":"

            Structural health check across context, hooks, and configuration. Runs mechanical checks that don't require semantic analysis. Think of it as ctx status + ctx drift + configuration audit in one pass.

            ctx doctor [flags]\n

            Flags:

            Flag Short Type Default Description --json -j bool false Machine-readable JSON output","path":["CLI","Diagnostics","Doctor"],"tags":[]},{"location":"cli/doctor/#what-it-checks","level":4,"title":"What It Checks","text":"Check Category What it verifies Context initialized Structure .context/ directory exists Required files present Structure All required context files exist (TASKS.md, etc.) Drift detected Quality Stale paths, missing files, constitution violations Event logging status Hooks Whether event_log: true is set in .ctxrc Webhook configured Hooks .notify.enc file exists Pending reminders State Count of entries in reminders.json Task completion ratio State Pending vs completed tasks in TASKS.md Context token size Size Estimated token count across all context files Recent event activity Events Last event timestamp (only when event logging is enabled)","path":["CLI","Diagnostics","Doctor"],"tags":[]},{"location":"cli/doctor/#output-format-human","level":4,"title":"Output Format (Human)","text":"
            ctx doctor\n==========\n\nStructure\n  ✓ Context initialized (.context/)\n  ✓ Required files present (4/4)\n\nQuality\n  ⚠ Drift: 2 warnings (stale path in ARCHITECTURE.md, high entry count in LEARNINGS.md)\n\nHooks\n  ✓ hooks.json valid (14 hooks registered)\n  ○ Event logging disabled (enable with event_log: true in .ctxrc)\n\nState\n  ✓ No pending reminders\n  ⚠ Task completion ratio high (18/22 = 82%): consider archiving\n\nSize\n  ✓ Context size: ~4200 tokens (budget: 8000)\n\nSummary: 2 warnings, 0 errors\n

            Status indicators:

            Icon Status Meaning ✓ ok Check passed ⚠ warning Non-critical issue worth fixing ✗ error Problem that needs attention ○ info Informational note","path":["CLI","Diagnostics","Doctor"],"tags":[]},{"location":"cli/doctor/#output-format-json","level":4,"title":"Output Format (JSON)","text":"
            {\n  \"results\": [\n    {\n      \"name\": \"context_initialized\",\n      \"category\": \"Structure\",\n      \"status\": \"ok\",\n      \"message\": \"Context initialized (.context/)\"\n    },\n    {\n      \"name\": \"required_files\",\n      \"category\": \"Structure\",\n      \"status\": \"ok\",\n      \"message\": \"Required files present (4/4)\"\n    },\n    {\n      \"name\": \"drift\",\n      \"category\": \"Quality\",\n      \"status\": \"warning\",\n      \"message\": \"Drift: 2 warnings\"\n    },\n    {\n      \"name\": \"event_logging\",\n      \"category\": \"Hooks\",\n      \"status\": \"info\",\n      \"message\": \"Event logging disabled (enable with event_log: true in .ctxrc)\"\n    },\n    {\n      \"name\": \"webhook\",\n      \"category\": \"Hooks\",\n      \"status\": \"ok\",\n      \"message\": \"Webhook configured\"\n    },\n    {\n      \"name\": \"reminders\",\n      \"category\": \"State\",\n      \"status\": \"ok\",\n      \"message\": \"No pending reminders\"\n    },\n    {\n      \"name\": \"task_completion\",\n      \"category\": \"State\",\n      \"status\": \"warning\",\n      \"message\": \"Tasks: 18/22 completed (82%): consider archiving with ctx task archive\"\n    },\n    {\n      \"name\": \"context_size\",\n      \"category\": \"Size\",\n      \"status\": \"ok\",\n      \"message\": \"Context size: ~4200 tokens (budget: 8000)\"\n    }\n  ],\n  \"warnings\": 2,\n  \"errors\": 0\n}\n

            Examples:

            # Quick structural health check\nctx doctor\n\n# Machine-readable output for scripting\nctx doctor --json\n\n# Count warnings\nctx doctor --json | jq '.warnings'\n\n# Check for errors only\nctx doctor --json | jq '[.results[] | select(.status == \"error\")]'\n
            ","path":["CLI","Diagnostics","Doctor"],"tags":[]},{"location":"cli/doctor/#when-to-use-what","level":4,"title":"When to Use What","text":"Tool When ctx status Quick glance at files, tokens, and drift ctx doctor Thorough structural checkup (hooks, config, events too) /ctx-doctor Agent-driven diagnosis with event log pattern analysis

            ctx status tells you what's there. ctx doctor tells you what's wrong. /ctx-doctor tells you why it's wrong and what to do about it.

            ","path":["CLI","Diagnostics","Doctor"],"tags":[]},{"location":"cli/doctor/#what-it-does-not-do","level":4,"title":"What It Does Not Do","text":"
            • No event pattern analysis: that's the /ctx-doctor skill's job
            • No auto-fixing: reports findings, doesn't modify anything
            • No external service checks: doesn't verify webhook endpoint availability

            See also: Troubleshooting | ctx hook event | /ctx-doctor skill | Detecting and Fixing Drift

            ","path":["CLI","Diagnostics","Doctor"],"tags":[]},{"location":"cli/event/","level":1,"title":"Event","text":"","path":["Event"],"tags":[]},{"location":"cli/event/#ctx-hook-event","level":3,"title":"ctx hook event","text":"

            Query the local hook event log. Requires event_log: true in .ctxrc. Reads events from .context/state/events.jsonl and outputs them in a human-readable table or raw JSONL format.

            All filter flags combine with AND logic.

            ctx hook event [flags]\n

            Flags:

            Flag Description --hook Filter by hook name --session Filter by session ID --event Filter by event type (relay, nudge) --last Show last N events (default: 50) --json Output raw JSONL (for piping to jq) --all Include rotated log file

            Examples:

            ctx hook event                                        # recent events\nctx hook event --hook check-context-size --last 10    # one hook, last 10\nctx hook event --json | jq '.hook'                    # pipe to jq\nctx hook event --session abc123                       # filter by session\n
            ","path":["Event"],"tags":[]},{"location":"cli/guide/","level":1,"title":"Guide","text":"","path":["CLI","Getting Started","Guide"],"tags":[]},{"location":"cli/guide/#ctx-guide","level":2,"title":"ctx guide","text":"

            Quick-reference cheat sheet for common ctx commands and skills.

            ctx guide [flags]\n

            Flags:

            Flag Description --skills Show available skills --commands Show available CLI commands

            Example:

            # Show the full cheat sheet\nctx guide\n\n# Skills only\nctx guide --skills\n\n# Commands only\nctx guide --commands\n

            Works without initialization (no .context/ required). Useful for a printable one-pager when onboarding to a project.

            ","path":["CLI","Getting Started","Guide"],"tags":[]},{"location":"cli/hook/","level":1,"title":"Hook","text":"","path":["CLI","Runtime","Hook"],"tags":[]},{"location":"cli/hook/#ctx-hook","level":3,"title":"ctx hook","text":"

            Manage hook-related settings: messages, notifications, pause/resume, and event log.

            ctx hook <subcommand> [flags]\n
            ","path":["CLI","Runtime","Hook"],"tags":[]},{"location":"cli/hook/#subcommands","level":2,"title":"Subcommands","text":"Subcommand Description ctx hook message list Show all hook messages with override status ctx hook message show <h> <v> Print the effective message template ctx hook message edit <h> <v> Copy default to .context/ for editing ctx hook message reset <h> <v> Delete user override, revert to default ctx hook notify [message] Send a webhook notification ctx hook notify setup Configure and encrypt webhook URL ctx hook notify test Send a test notification ctx hook pause Pause all context hooks for this session ctx hook resume Resume paused context hooks ctx hook event Query the local hook event log","path":["CLI","Runtime","Hook"],"tags":[]},{"location":"cli/hook/#examples","level":2,"title":"Examples","text":"
            # View and manage hook messages\nctx hook message list\nctx hook message show qa-reminder gate\nctx hook message edit qa-reminder gate\n\n# Webhook notifications\nctx hook notify setup\nctx hook notify --event loop \"Loop completed\"\n\n# Pause/resume hooks\nctx hook pause\nctx hook resume\n\n# Browse event log\nctx hook event --last 20\nctx hook event --hook qa-reminder --json\n

            See also: Customizing Hook Messages | Webhook Notifications | Pausing Context Hooks | System Hooks Audit

            ","path":["CLI","Runtime","Hook"],"tags":[]},{"location":"cli/hub/","level":1,"title":"Hub","text":"","path":["CLI","Integrations","Hub"],"tags":[]},{"location":"cli/hub/#ctx-hub","level":2,"title":"ctx hub","text":"

            Operator commands for a ctx Hub: the gRPC server that fans out decisions, learnings, conventions, and tasks across projects. Use ctx hub to start and stop the server, inspect cluster state, add or remove peers at runtime, and hand off leadership before maintenance.

            Who Needs This Page

            You only need ctx hub if you are running a hub server or cluster. For client-side operations (register, subscribe, sync, publish, listen), see ctx connect. For the mental model behind the hub as a whole, read the ctx Hub overview.

            ","path":["CLI","Integrations","Hub"],"tags":[]},{"location":"cli/hub/#ctx-hub-start","level":3,"title":"ctx hub start","text":"

            Start the hub gRPC server.

            Examples:

            ctx hub start                           # Foreground, default port 9900\nctx hub start --port 8080               # Custom port\nctx hub start --data-dir /srv/ctx-hub   # Custom data directory\n

            On first run, generates an admin token and prints it to stdout. Save this token; it's required for ctx connection register in client projects. Subsequent runs reuse the stored token from <data-dir>/admin.token.

            Default data directory: ~/.ctx/hub-data/

            ","path":["CLI","Integrations","Hub"],"tags":[]},{"location":"cli/hub/#daemon-mode","level":4,"title":"Daemon Mode","text":"

            Run the hub as a detached background process:

            ctx hub start --daemon          # Fork to background\nctx hub stop                    # Graceful shutdown\n

            The daemon writes a PID file to <data-dir>/hub.pid. Stop the daemon with ctx hub stop (see below).

            ","path":["CLI","Integrations","Hub"],"tags":[]},{"location":"cli/hub/#cluster-mode","level":4,"title":"Cluster Mode","text":"

            For high availability, run multiple hubs with Raft-based leader election:

            ctx hub start --port 9900 \\\n  --peers host2:9901,host3:9901\n

            Raft is used only for leader election. Data replication uses sequence-based gRPC sync on the append-only JSONL log; there is no multi-node consensus on writes. See the HA cluster recipe for the full setup and the Raft-lite durability caveat.

            ","path":["CLI","Integrations","Hub"],"tags":[]},{"location":"cli/hub/#flags","level":4,"title":"Flags","text":"Flag Description Default --port Hub listen port 9900 --data-dir Hub data directory ~/.ctx/hub-data/ --daemon Run the hub server in the background false --peers Comma-separated peer addresses for cluster mode (none)","path":["CLI","Integrations","Hub"],"tags":[]},{"location":"cli/hub/#validation","level":4,"title":"Validation","text":"

            The hub validates every published entry before accepting it:

            • Type must be one of decision, learning, convention, task
            • ID and Origin are required and non-empty
            • Content size capped at 1 MB (text-only)
            • Duplicate project registration is rejected (one token per project)
            ","path":["CLI","Integrations","Hub"],"tags":[]},{"location":"cli/hub/#ctx-hub-stop","level":3,"title":"ctx hub stop","text":"

            Stop a running hub daemon.

            Examples:

            ctx hub stop                            # Stop using default data dir\nctx hub stop --data-dir /srv/ctx-hub    # Custom data directory\n

            Sends SIGTERM to the PID recorded in <data-dir>/hub.pid, waits for in-flight RPCs to drain, and removes the PID file. Idempotent in effect: rerunning when no daemon is running reports a \"no running hub\" error but makes no changes.

            ","path":["CLI","Integrations","Hub"],"tags":[]},{"location":"cli/hub/#ctx-hub-status","level":3,"title":"ctx hub status","text":"

            Show cluster status: role, peers, sync state, entry count, and uptime.

            Examples:

            ctx hub status\n
            ","path":["CLI","Integrations","Hub"],"tags":[]},{"location":"cli/hub/#ctx-hub-peer","level":3,"title":"ctx hub peer","text":"

            Add or remove peers from the cluster at runtime. Useful for scaling up or replacing a decommissioned node without restarting the leader.

            Examples:

            ctx hub peer add host2:9901\nctx hub peer remove host2:9901\n
            ","path":["CLI","Integrations","Hub"],"tags":[]},{"location":"cli/hub/#ctx-hub-stepdown","level":3,"title":"ctx hub stepdown","text":"

            Transfer leadership to another node gracefully. Triggers a new election among the remaining followers before the current leader steps down. Use before taking the leader offline for maintenance.

            Examples:

            ctx hub stepdown\n
            ","path":["CLI","Integrations","Hub"],"tags":[]},{"location":"cli/hub/#see-also","level":3,"title":"See Also","text":"
            • ctx connect: client-side commands (register, subscribe, sync, publish, listen)
            • ctx Hub overview: mental model and user stories
            • ctx Hub: Getting Started
            • Hub operations: production deployment, backup, monitoring
            • Hub failure modes
            • Hub security model
            ","path":["CLI","Integrations","Hub"],"tags":[]},{"location":"cli/init-status/","level":1,"title":"Init and Status","text":"","path":["CLI","Getting Started","Init and Status"],"tags":[]},{"location":"cli/init-status/#ctx-init","level":3,"title":"ctx init","text":"

            Initialize a new .context/ directory with template files.

            ctx init [flags]\n

            Flags:

            Flag Short Description --force -f Overwrite existing context files --minimal -m Only create essential files (TASKS.md, DECISIONS.md, CONSTITUTION.md) --merge Auto-merge ctx content into existing CLAUDE.md

            Creates:

            • .context/ directory with all template files
            • .claude/settings.local.json with pre-approved ctx permissions
            • CLAUDE.md with bootstrap instructions (or merges into existing)

            Claude Code hooks and skills are provided by the ctx plugin (see Integrations).

            Example:

            # Standard init\nctx init\n\n# Minimal setup (just core files)\nctx init --minimal\n\n# Force overwrite existing\nctx init --force\n\n# Merge into existing files\nctx init --merge\n

            After ctx init succeeds, the final output includes a hint showing the exact eval \"$(ctx activate)\" line to bind the new directory for your shell. Every other ctx command requires that binding (or an equivalent direct CTX_DIR=/abs/path/.context export) before it will run.

            ","path":["CLI","Getting Started","Init and Status"],"tags":[]},{"location":"cli/init-status/#ctx-activate","level":3,"title":"ctx activate","text":"

            Emit a shell-native export CTX_DIR=... line for the target .context/ directory. ctx does not walk the filesystem during operating commands; every non-exempt command requires CTX_DIR set before it will run. activate is the convenience that figures out the path and lets you bind it with one line.

            # Walk up from CWD, emit if exactly one candidate visible.\neval \"$(ctx activate)\"\n

            Flags:

            Flag Description --shell Shell dialect override. POSIX-family (bash, zsh, sh) all share one syntax today; the flag exists for future fish/nushell/powershell support. Auto-detected from $SHELL.

            Resolution:

            Candidate count from CWD Behavior Zero Error. Use ctx init to create one, or cd closer to the project root. One Emit export CTX_DIR=<path> for that candidate. Two or more Refuse. List every candidate. Re-run from a more specific cwd.

            activate is args-free under the single-source-anchor model; the explicit-path mode was removed because hub-client / hub-server scenarios store at ~/.ctx/hub-data/ and never read .context/, so they activate from the project root like everyone else. Direct binding without a project-local scan is still available via export CTX_DIR=/abs/path/.context or the inline form.

            If the parent shell already has CTX_DIR set to a different value, the output gains a leading # ctx: replacing stale CTX_DIR=... comment so the user sees the change in eval output before the replacement takes effect.

            See also: Activating a Context Directory for the full recipe including direnv setup and CI patterns.

            ","path":["CLI","Getting Started","Init and Status"],"tags":[]},{"location":"cli/init-status/#ctx-deactivate","level":3,"title":"ctx deactivate","text":"

            Emit a shell-native unset CTX_DIR line. Pairs with activate.

            eval \"$(ctx deactivate)\"\n

            Flags:

            Flag Description --shell Shell dialect override. POSIX-family (bash, zsh, sh) all share one unset syntax today; the flag exists for future fish/nushell/powershell support. Auto-detected from $SHELL.

            deactivate does not touch the filesystem, doesn't require a declared context directory, and never fails under normal operation; unsetting an already-unset variable is a no-op across supported shells.

            ","path":["CLI","Getting Started","Init and Status"],"tags":[]},{"location":"cli/init-status/#ctx-status","level":3,"title":"ctx status","text":"

            Show the current context summary.

            ctx status [flags]\n

            Flags:

            Flag Short Description --json Output as JSON --verbose -v Include file contents summary

            Output:

            • Context directory path
            • Total files and token estimate
            • Status of each file (loaded, empty, missing)
            • Recent activity (modification times)
            • Drift warnings if any

            Example:

            ctx status\nctx status --json\nctx status --verbose\n
            ","path":["CLI","Getting Started","Init and Status"],"tags":[]},{"location":"cli/init-status/#ctx-agent","level":3,"title":"ctx agent","text":"

            Print an AI-ready context packet optimized for LLM consumption.

            ctx agent [flags]\n

            Flags:

            Flag Default Description --budget 8000 Token budget: controls content selection and prioritization --format md Output format: md or json --cooldown 10m Suppress repeated output within this duration (requires --session) --session (none) Session ID for cooldown isolation (e.g., $PPID) --include-hub false Include hub entries from .context/hub/

            How budget works:

            The budget controls how much context is included. Entries are selected in priority tiers:

            1. Constitution: always included in full (inviolable rules)
            2. Tasks: all active tasks, up to 40% of budget
            3. Conventions: all conventions, up to 20% of budget
            4. Decisions: scored by recency and relevance to active tasks
            5. Learnings: scored by recency and relevance to active tasks
            6. Steering: applicable steering file bodies, scored by their inclusion mode and description match against the active prompt
            7. Skill: named skill content (from --skill)
            8. Hub: entries from .context/hub/ (with --include-hub, see ctx connect)

            Decisions and learnings are ranked by a combined score (how recent + how relevant to your current tasks). High-scoring entries are included with their full body. Entries that don't fit get title-only summaries in an \"Also Noted\" section. Superseded entries are excluded.

            Output Sections:

            Section Source Selection Read These Files all .context/ Non-empty files in priority order Constitution CONSTITUTION.md All rules (never truncated) Current Tasks TASKS.md All unchecked tasks (budget-capped) Key Conventions CONVENTIONS.md All items (budget-capped) Recent Decisions DECISIONS.md Full body, scored by relevance Key Learnings LEARNINGS.md Full body, scored by relevance Also Noted overflow Title-only summaries

            Example:

            # Default (8000 tokens, markdown)\nctx agent\n\n# Smaller packet for tight context windows\nctx agent --budget 4000\n\n# JSON format for programmatic use\nctx agent --format json\n\n# Pipe to file\nctx agent --budget 4000 > context.md\n\n# With cooldown (hooks/automation: requires --session)\nctx agent --session $PPID\n

            Use case: Copy-paste into AI chat, pipe to system prompt, or use in hooks.

            ","path":["CLI","Getting Started","Init and Status"],"tags":[]},{"location":"cli/init-status/#ctx-load","level":3,"title":"ctx load","text":"

            Load and display assembled context as AI would see it.

            ctx load [flags]\n

            Flags:

            Flag Description --budget <tokens> Token budget for assembly (default: 8000) --raw Output raw file contents without assembly

            Example:

            ctx load\nctx load --budget 16000\nctx load --raw\n
            ","path":["CLI","Getting Started","Init and Status"],"tags":[]},{"location":"cli/journal/","level":1,"title":"Journal","text":"","path":["CLI","Sessions","Journal"],"tags":[]},{"location":"cli/journal/#ctx-journal","level":3,"title":"ctx journal","text":"

            Browse and search AI session history from Claude Code and other tools.

            ctx journal <subcommand>\n
            ","path":["CLI","Sessions","Journal"],"tags":[]},{"location":"cli/journal/#ctx-journal-source","level":4,"title":"ctx journal source","text":"

            List all parsed sessions.

            ctx journal source [flags]\n

            Flags:

            Flag Short Description --limit -n Maximum sessions to display (default: 20) --project -p Filter by project name --tool -t Filter by tool (e.g., claude-code) --all-projects Include sessions from all projects

            Sessions are sorted by date (newest first) and display slug, project, start time, duration, turn count, and token usage.

            Example:

            ctx journal source\nctx journal source --limit 5\nctx journal source --project ctx\nctx journal source --tool claude-code\n
            ","path":["CLI","Sessions","Journal"],"tags":[]},{"location":"cli/journal/#ctx-journal-source-show","level":4,"title":"ctx journal source --show","text":"

            Show details of a specific session.

            ctx journal source --show [session-id] [flags]\n

            Flags:

            Flag Description --latest Show the most recent session --full Show full message content --all-projects Search across all projects

            The session ID can be a full UUID, partial match, or session slug name.

            Example:

            ctx journal source --show abc123\nctx journal source --show gleaming-wobbling-sutherland\nctx journal source --show --latest\nctx journal source --show --latest --full\n
            ","path":["CLI","Sessions","Journal"],"tags":[]},{"location":"cli/journal/#ctx-journal-import","level":4,"title":"ctx journal import","text":"

            Import sessions to editable journal files in .context/journal/.

            ctx journal import [session-id] [flags]\n

            Flags:

            Flag Description --all Import all sessions (only new files by default) --all-projects Import from all projects --regenerate Re-import existing files (preserves YAML frontmatter by default) --keep-frontmatter Preserve enriched YAML frontmatter during regeneration (default: true) --yes, -y Skip confirmation prompt --dry-run Show what would be imported without writing files

            Safe by default: --all only imports new sessions. Existing files are skipped. Use --regenerate to re-import existing files (conversation content is regenerated, YAML frontmatter from enrichment is preserved by default). Use --keep-frontmatter=false to discard enriched frontmatter during regeneration.

            Locked entries (via ctx journal lock) are always skipped, regardless of flags.

            Single-session import (ctx journal import <id>) always writes without prompting, since you are explicitly targeting one session.

            The journal/ directory should be gitignored (like sessions/) since it contains raw conversation data.

            Example:

            ctx journal import abc123                 # Import one session\nctx journal import --all                  # Import only new sessions\nctx journal import --all --dry-run        # Preview what would be imported\nctx journal import --all --regenerate     # Re-import existing (prompts)\nctx journal import --all --regenerate -y  # Re-import without prompting\nctx journal import --all --regenerate --keep-frontmatter=false -y  # Discard frontmatter\n
            ","path":["CLI","Sessions","Journal"],"tags":[]},{"location":"cli/journal/#ctx-journal-lock","level":4,"title":"ctx journal lock","text":"

            Protect journal entries from being overwritten by import --regenerate or modified by enrichment skills (/ctx-journal-enrich, /ctx-journal-enrich-all).

            ctx journal lock <pattern> [flags]\n

            Flags:

            Flag Description --all Lock all journal entries

            The pattern matches filenames by slug, date, or short ID. Locking a multi-part entry locks all parts. The lock is recorded in .context/journal/.state.json and a locked: true line is added to the file's YAML frontmatter for visibility.

            Example:

            ctx journal lock abc12345\nctx journal lock 2026-01-21-session-abc12345.md\nctx journal lock --all\n
            ","path":["CLI","Sessions","Journal"],"tags":[]},{"location":"cli/journal/#ctx-journal-unlock","level":4,"title":"ctx journal unlock","text":"

            Remove lock protection from journal entries.

            ctx journal unlock <pattern> [flags]\n

            Flags:

            Flag Description --all Unlock all journal entries

            Example:

            ctx journal unlock abc12345\nctx journal unlock --all\n
            ","path":["CLI","Sessions","Journal"],"tags":[]},{"location":"cli/journal/#ctx-journal-sync","level":4,"title":"ctx journal sync","text":"

            Sync lock state from journal frontmatter to .state.json.

            ctx journal sync\n

            Scans all journal markdowns and updates .state.json to match each file's frontmatter. Files with locked: true in frontmatter are marked locked in state; files without a locked: line have their lock cleared.

            This is the inverse of ctx journal lock: instead of state driving frontmatter, frontmatter drives state. Useful after batch enrichment where you add locked: true to frontmatter manually.

            Example:

            # After enriching entries and adding locked: true to frontmatter\nctx journal sync\n
            ","path":["CLI","Sessions","Journal"],"tags":[]},{"location":"cli/journal/#ctx-journal_1","level":3,"title":"ctx journal","text":"

            Analyze and synthesize imported session files.

            ctx journal <subcommand>\n
            ","path":["CLI","Sessions","Journal"],"tags":[]},{"location":"cli/journal/#ctx-journal-site","level":4,"title":"ctx journal site","text":"

            Generate a static site from journal entries in .context/journal/.

            ctx journal site [flags]\n

            Flags:

            Flag Short Description --output -o Output directory (default: .context/journal-site) --build Run zensical build after generating --serve Run zensical serve after generating

            Creates a zensical-compatible site structure with an index page listing all sessions by date, and individual pages for each journal entry.

            Requires zensical to be installed for --build or --serve:

            pipx install zensical\n

            Example:

            ctx journal site                    # Generate in .context/journal-site/\nctx journal site --output ~/public  # Custom output directory\nctx journal site --build            # Generate and build HTML\nctx journal site --serve            # Generate and serve locally\n
            ","path":["CLI","Sessions","Journal"],"tags":[]},{"location":"cli/journal/#ctx-journal-obsidian","level":4,"title":"ctx journal obsidian","text":"

            Generate an Obsidian vault from journal entries in .context/journal/.

            ctx journal obsidian [flags]\n

            Flags:

            Flag Short Description --output -o Output directory (default: .context/journal-obsidian)

            Creates an Obsidian-compatible vault with:

            • Wikilinks ([[target|display]]) for all internal navigation
            • MOC pages (Map of Content) for topics, key files, and session types
            • Related sessions footer linking entries that share topics
            • Transformed frontmatter (topics → tags for Obsidian integration)
            • Minimal .obsidian/ config enforcing wikilink mode

            No external dependencies are required: Open the output directory as an Obsidian vault directly.

            Example:

            ctx journal obsidian                        # Generate in .context/journal-obsidian/\nctx journal obsidian --output ~/vaults/ctx  # Custom output directory\n
            ","path":["CLI","Sessions","Journal"],"tags":[]},{"location":"cli/journal/#ctx-journal-schema-check","level":4,"title":"ctx journal schema check","text":"

            Validate JSONL session files against the embedded schema and report drift.

            ctx journal schema check [flags]\n

            Flags:

            Flag Short Description --dir Directory to scan for JSONL files --all-projects Scan all Claude Code project directories --quiet -q Exit code only (0 = clean, 1 = drift)

            Scans JSONL files for unknown fields, missing required fields, unknown record types, and unknown content block types. When drift is found, writes a Markdown report to .context/reports/schema-drift.md. When drift resolves, the report is automatically deleted.

            Designed for interactive use, CI pipelines, and nightly cron jobs.

            Example:

            ctx journal schema check                    # Current project\nctx journal schema check --all-projects     # All projects\nctx journal schema check --quiet            # Exit code only\nctx journal schema check --dir /path/to     # Custom directory\n
            ","path":["CLI","Sessions","Journal"],"tags":[]},{"location":"cli/journal/#ctx-journal-schema-dump","level":4,"title":"ctx journal schema dump","text":"

            Print the embedded JSONL schema definition.

            ctx journal schema dump\n

            Shows all known record types with their required and optional fields, and all recognized content block types with their parse status. Useful for inspecting what the schema validator expects.

            Example:

            ctx journal schema dump\n
            ","path":["CLI","Sessions","Journal"],"tags":[]},{"location":"cli/journal/#ctx-serve","level":3,"title":"ctx serve","text":"

            Serve any zensical directory locally. This is a serve-only command: It does not generate or regenerate site content.

            ctx serve [directory]\n

            If no directory is specified, defaults to the journal site (.context/journal-site).

            Requires zensical to be installed:

            pipx install zensical\n

            ctx serve vs. ctx journal site --serve

            ctx journal site --serve generates the journal site then serves it: an all-in-one command. ctx serve only serves an existing directory, and works with any zensical site (journal, docs, etc.).

            Example:

            ctx serve                        # Serve journal site (no regeneration)\nctx serve .context/journal-site  # Same, explicit path\nctx serve ./site                 # Serve the docs site\n
            ","path":["CLI","Sessions","Journal"],"tags":[]},{"location":"cli/loop/","level":1,"title":"Loop","text":"","path":["CLI","Integrations","Loop"],"tags":[]},{"location":"cli/loop/#ctx-loop","level":2,"title":"ctx loop","text":"

            Generate a shell script for running an autonomous loop.

            An autonomous loop continuously runs an AI assistant with the same prompt until a completion signal is detected, enabling iterative development where the AI builds on its previous work.

            ctx loop [flags]\n

            Flags:

            Flag Short Description Default --tool <tool> -t AI tool: claude, aider, or generic claude --prompt <file> -p Prompt file to use .context/loop.md --max-iterations <n> -n Maximum iterations (0 = unlimited) 0 --completion <signal> -c Completion signal to detect SYSTEM_CONVERGED --output <file> -o Output script filename loop.sh

            Examples:

            # Generate loop.sh for Claude Code\nctx loop\n\n# Generate for Aider with custom prompt\nctx loop --tool aider --prompt TASKS.md\n\n# Limit to 10 iterations\nctx loop --max-iterations 10\n\n# Output to custom file\nctx loop -o my-loop.sh\n

            Running the generated loop:

            ctx loop\nchmod +x loop.sh\n./loop.sh\n

            See also: Autonomous Loops for the full workflow.

            ","path":["CLI","Integrations","Loop"],"tags":[]},{"location":"cli/mcp/","level":1,"title":"MCP Server","text":"","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#ctx-mcp","level":2,"title":"ctx mcp","text":"

            Run ctx as a Model Context Protocol (MCP) server. MCP is a standard protocol that lets AI tools discover and consume context from external sources via JSON-RPC 2.0 over stdin/stdout.

            This makes ctx accessible to any MCP-compatible AI tool without custom hooks or integrations:

            • Claude Desktop
            • Cursor
            • Windsurf
            • VS Code Copilot
            • Any tool supporting MCP
            ","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#ctx-mcp-serve","level":3,"title":"ctx mcp serve","text":"

            Start the MCP server. This command reads JSON-RPC 2.0 requests from stdin and writes responses to stdout. It is intended to be launched by MCP clients (Claude Desktop, Cursor, VS Code Copilot), not run directly from a shell. See Configuration below for how each host launches it.

            Flags: None. The server uses the declared context directory from CTX_DIR. As with every other ctx command, that variable must be set: the server does not walk the filesystem.

            Examples:

            # Normal invocation (by an MCP client via stdio transport)\nctx mcp serve\n\n# Pin a context directory for a specific workspace\nCTX_DIR=/path/to/project/.context ctx mcp serve\n\n# Verify the binary starts without a client attached (Ctrl-C to exit)\nctx mcp serve < /dev/null\n
            ","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#configuration","level":2,"title":"Configuration","text":"","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#claude-desktop","level":3,"title":"Claude Desktop","text":"

            Add to ~/Library/Application Support/Claude/claude_desktop_config.json:

            {\n  \"mcpServers\": {\n    \"ctx\": {\n      \"command\": \"ctx\",\n      \"args\": [\"mcp\", \"serve\"]\n    }\n  }\n}\n
            ","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#cursor","level":3,"title":"Cursor","text":"

            Add to .cursor/mcp.json in your project:

            {\n  \"mcpServers\": {\n    \"ctx\": {\n      \"command\": \"ctx\",\n      \"args\": [\"mcp\", \"serve\"]\n    }\n  }\n}\n
            ","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#vs-code-copilot","level":3,"title":"VS Code (Copilot)","text":"

            Add to .vscode/mcp.json:

            {\n  \"servers\": {\n    \"ctx\": {\n      \"command\": \"ctx\",\n      \"args\": [\"mcp\", \"serve\"]\n    }\n  }\n}\n
            ","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#resources","level":2,"title":"Resources","text":"

            Resources expose context files as read-only content. Each resource has a URI, name, and returns Markdown text.

            URI Name Description ctx://context/constitution constitution Hard rules that must never be violated ctx://context/tasks tasks Current work items and their status ctx://context/conventions conventions Code patterns and standards ctx://context/architecture architecture System architecture documentation ctx://context/decisions decisions Architectural decisions with rationale ctx://context/learnings learnings Gotchas, tips, and lessons learned ctx://context/glossary glossary Project-specific terminology ctx://context/agent agent All files assembled in priority read order

            The agent resource assembles all non-empty context files into a single Markdown document, ordered by the configured read priority.

            ","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#resource-subscriptions","level":3,"title":"Resource Subscriptions","text":"

            Clients can subscribe to resource changes via resources/subscribe. The server polls for file mtime changes (default: 5 seconds) and emits notifications/resources/updated when a subscribed file changes on disk.

            ","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#tools","level":2,"title":"Tools","text":"

            Tools expose ctx commands as callable operations. Each tool accepts JSON arguments and returns text results.

            ","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#ctx_status","level":3,"title":"ctx_status","text":"

            Show context health: file count, token estimate, and per-file summary.

            Arguments: None. Read-only.

            ","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#ctx_add","level":3,"title":"ctx_add","text":"

            Add a task, decision, learning, or convention to the context.

            Argument Type Required Description type string Yes Entry type: task, decision, learning, convention content string Yes Title or main content priority string No Priority level (tasks only): high, medium, low context string Conditional Context field (decisions and learnings) rationale string Conditional Rationale (decisions only) consequence string Conditional Consequence (decisions only) lesson string Conditional Lesson learned (learnings only) application string Conditional How to apply (learnings only)","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#ctx_complete","level":3,"title":"ctx_complete","text":"

            Mark a task as done by number or text match.

            Argument Type Required Description query string Yes Task number (e.g. \"1\") or search text","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#ctx_drift","level":3,"title":"ctx_drift","text":"

            Detect stale or invalid context. Returns violations, warnings, and passed checks.

            Arguments: None. Read-only.

            ","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#ctx_journal_source","level":3,"title":"ctx_journal_source","text":"

            Query recent AI session history (summaries, decisions, topics).

            Argument Type Required Description limit number No Max sessions to return (default: 5) since string No ISO date filter: sessions after this date (YYYY-MM-DD)

            Read-only.

            ","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#ctx_watch_update","level":3,"title":"ctx_watch_update","text":"

            Apply a structured context update to .context/ files. Supports task, decision, learning, convention, and complete entry types. Human confirmation is required before calling.

            Argument Type Required Description type string Yes Entry type: task, decision, learning, convention, complete content string Yes Main content context string Conditional Context background (decisions/learnings) rationale string Conditional Rationale (decisions only) consequence string Conditional Consequence (decisions only) lesson string Conditional Lesson learned (learnings only) application string Conditional How to apply (learnings only)","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#ctx_compact","level":3,"title":"ctx_compact","text":"

            Move completed tasks to the archive section and remove empty sections from context files. Human confirmation required.

            Argument Type Required Description archive boolean No Also write tasks to .context/archive/ (default: false)","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#ctx_next","level":3,"title":"ctx_next","text":"

            Suggest the next pending task based on priority and position.

            Arguments: None. Read-only.

            ","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#ctx_check_task_completion","level":3,"title":"ctx_check_task_completion","text":"

            Advisory check: after a write operation, detect if any pending tasks were silently completed. Returns nudge text if a match is found.

            Argument Type Required Description recent_action string No Brief description of what was just done

            Read-only.

            ","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#ctx_session_event","level":3,"title":"ctx_session_event","text":"

            Signal a session lifecycle event. Type end triggers the session-end persistence ceremony - human confirmation required.

            Argument Type Required Description type string Yes Event type: start, end caller string No Caller identifier (cursor, windsurf, vscode, claude-desktop)","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#ctx_steering_get","level":3,"title":"ctx_steering_get","text":"

            Retrieve applicable steering files for a prompt. Without a prompt, returns always-included files only.

            Argument Type Required Description prompt string No Prompt text to match against steering file descriptions

            Read-only.

            ","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#ctx_search","level":3,"title":"ctx_search","text":"

            Search across .context/ files for a query string. Returns matching lines with file paths and line numbers.

            Argument Type Required Description query string Yes Search string to match against

            Read-only.

            ","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#ctx_session_start","level":3,"title":"ctx_session_start","text":"

            Execute session-start hooks and return aggregated context from hook outputs.

            Arguments: None.

            ","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#ctx_session_end","level":3,"title":"ctx_session_end","text":"

            Execute session-end hooks with an optional summary. Returns aggregated context from hook outputs.

            Argument Type Required Description summary string No Session summary passed to hook scripts","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#ctx_remind","level":3,"title":"ctx_remind","text":"

            List pending session-scoped reminders.

            Arguments: None. Read-only.

            ","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#prompts","level":2,"title":"Prompts","text":"

            Prompts provide pre-built templates for common workflows. Clients can list available prompts via prompts/list and retrieve a specific prompt via prompts/get.

            ","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#ctx-session-start","level":3,"title":"ctx-session-start","text":"

            Load full context at the beginning of a session. Returns all context files assembled in priority read order with session orientation instructions.

            ","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#ctx-decision-add","level":3,"title":"ctx-decision-add","text":"

            Format an architectural decision entry with all required fields.

            Argument Type Required Description content string Yes Decision title context string Yes Background context rationale string Yes Why this decision was made consequence string Yes Expected consequence","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#ctx-learning-add","level":3,"title":"ctx-learning-add","text":"

            Format a learning entry with all required fields.

            Argument Type Required Description content string Yes Learning title context string Yes Background context lesson string Yes The lesson learned application string Yes How to apply this lesson","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#ctx-reflect","level":3,"title":"ctx-reflect","text":"

            Guide end-of-session reflection. Returns a structured review prompt covering progress assessment and context update recommendations.

            ","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/mcp/#ctx-checkpoint","level":3,"title":"ctx-checkpoint","text":"

            Report session statistics: tool calls made, entries added, and pending updates queued during the current session.

            ","path":["CLI","Integrations","MCP Server"],"tags":[]},{"location":"cli/memory/","level":1,"title":"Memory","text":"","path":["CLI","Context","Memory"],"tags":[]},{"location":"cli/memory/#ctx-memory","level":2,"title":"ctx memory","text":"

            Bridge Claude Code's auto memory (MEMORY.md) into .context/.

            Claude Code maintains per-project auto memory at ~/.claude/projects/<slug>/memory/MEMORY.md. This command group discovers that file, mirrors it into .context/memory/mirror.md (git-tracked), and detects drift.

            ctx memory <subcommand>\n
            ","path":["CLI","Context","Memory"],"tags":[]},{"location":"cli/memory/#ctx-memory-sync","level":3,"title":"ctx memory sync","text":"

            Copy MEMORY.md to .context/memory/mirror.md. Archives the previous mirror before overwriting.

            ctx memory sync [flags]\n

            Flags:

            Flag Description --dry-run Show what would happen without writing

            Exit codes:

            Code Meaning 0 Synced successfully 1 MEMORY.md not found (auto memory inactive)

            Examples:

            ctx memory sync\n# Archived previous mirror to mirror-2026-03-05-143022.md\n# Synced MEMORY.md -> .context/memory/mirror.md\n#   Source: ~/.claude/projects/-home-user-project/memory/MEMORY.md\n#   Lines: 47 (was 32)\n#   New content: 15 lines since last sync\n\nctx memory sync --dry-run\n
            ","path":["CLI","Context","Memory"],"tags":[]},{"location":"cli/memory/#ctx-memory-status","level":3,"title":"ctx memory status","text":"

            Show drift, timestamps, line counts, and archive count.

            ctx memory status\n

            Exit codes:

            Code Meaning 0 No drift 1 MEMORY.md not found 2 Drift detected (MEMORY.md changed since sync)

            Examples:

            ctx memory status\n# Memory Bridge Status\n#   Source:      ~/.claude/projects/.../memory/MEMORY.md\n#   Mirror:      .context/memory/mirror.md\n#   Last sync:   2026-03-05 14:30 (2 hours ago)\n#\n#   MEMORY.md:  47 lines (modified since last sync)\n#   Mirror:     32 lines\n#   Drift:      detected (source is newer)\n#   Archives:   3 snapshots in .context/memory/archive/\n
            ","path":["CLI","Context","Memory"],"tags":[]},{"location":"cli/memory/#ctx-memory-diff","level":3,"title":"ctx memory diff","text":"

            Show what changed in MEMORY.md since last sync.

            ctx memory diff\n

            Examples:

            ctx memory diff\n# --- .context/memory/mirror.md (mirror)\n# +++ ~/.claude/projects/.../memory/MEMORY.md (source)\n# +- new learning: memory bridge works\n

            No output when files are identical.

            ","path":["CLI","Context","Memory"],"tags":[]},{"location":"cli/memory/#ctx-memory-publish","level":3,"title":"ctx memory publish","text":"

            Push curated .context/ content into MEMORY.md so the agent sees it natively.

            ctx memory publish [flags]\n

            Content is selected in priority order: pending tasks, recent decisions (7 days), key conventions, recent learnings (7 days). Wrapped in <!-- ctx:published --> markers. Claude-owned content outside the markers is preserved.

            Flags:

            Flag Description Default --budget Line budget for published content 80 --dry-run Show what would be published

            Examples:

            ctx memory publish --dry-run\n# Publishing .context/ -> MEMORY.md...\n#   Budget: 80 lines\n#   Published block:\n#     5 pending tasks (from TASKS.md)\n#     3 recent decisions (from DECISIONS.md)\n#     5 key conventions (from CONVENTIONS.md)\n#   Total: 42 lines (within 80-line budget)\n# Dry run - no files written.\n\nctx memory publish              # Write to MEMORY.md\nctx memory publish --budget 40  # Tighter budget\n
            ","path":["CLI","Context","Memory"],"tags":[]},{"location":"cli/memory/#ctx-memory-unpublish","level":3,"title":"ctx memory unpublish","text":"

            Remove the ctx-managed marker block from MEMORY.md, preserving Claude-owned content.

            Examples:

            ctx memory unpublish\n

            Hook integration: The check-memory-drift hook runs on every prompt and nudges the agent when MEMORY.md has changed since last sync. The nudge fires once per session. See Memory Bridge.

            ","path":["CLI","Context","Memory"],"tags":[]},{"location":"cli/memory/#ctx-memory-import","level":3,"title":"ctx memory import","text":"

            Classify and promote entries from MEMORY.md into structured .context/ files.

            ctx memory import [flags]\n

            Each entry is classified by keyword heuristics:

            Keywords Target always use, prefer, never use, standard CONVENTIONS.md decided, chose, trade-off, approach DECISIONS.md gotcha, learned, watch out, bug, caveat LEARNINGS.md todo, need to, follow up TASKS.md Everything else Skipped

            Deduplication prevents re-importing the same entry across runs.

            Flags:

            Flag Description --dry-run Show classification plan without writing

            Examples:

            ctx memory import --dry-run\n# Scanning MEMORY.md for new entries...\n#   Found 6 entries\n#\n#   -> \"always use ctx from PATH\"\n#      Classified: CONVENTIONS.md (keywords: always use)\n#\n#   -> \"decided to use heuristic classification over LLM-based\"\n#      Classified: DECISIONS.md (keywords: decided)\n#\n# Dry run - would import: 4 entries\n# Skipped: 2 entries (session notes/unclassified)\n\nctx memory import    # Actually write entries to .context/ files\n
            ","path":["CLI","Context","Memory"],"tags":[]},{"location":"cli/message/","level":1,"title":"Message","text":"","path":["Message"],"tags":[]},{"location":"cli/message/#ctx-hook-message","level":3,"title":"ctx hook message","text":"

            Manage hook message templates.

            Hook messages control the text hooks emit. The hook logic (when to fire, counting, state tracking) is universal; the messages are opinions that can be customized per-project.

            ctx hook message <subcommand>\n
            ","path":["Message"],"tags":[]},{"location":"cli/message/#ctx-hook-message-list","level":3,"title":"ctx hook message list","text":"

            Show all hook messages with category and override status.

            ctx hook message list [--json]\n

            Flags:

            Flag Description --json Output in JSON format

            Example:

            ctx hook message list\nctx hook message list --json | jq '.[] | select(.override)'\n
            ","path":["Message"],"tags":[]},{"location":"cli/message/#ctx-hook-message-show","level":3,"title":"ctx hook message show","text":"

            Print the effective message template for a hook/variant pair. Shows the user override if present, otherwise the embedded default.

            ctx hook message show <hook> <variant>\n

            Example:

            ctx hook message show qa-reminder gate\nctx hook message show check-context-size checkpoint\n
            ","path":["Message"],"tags":[]},{"location":"cli/message/#ctx-hook-message-edit","level":3,"title":"ctx hook message edit","text":"

            Copy the embedded default template for <hook> <variant> to .context/hooks/messages/<hook>/<variant>.txt so you can edit it directly. The override takes effect the next time the hook fires.

            ctx hook message edit <hook> <variant>\n

            If an override already exists, the command fails and directs you to edit it in place or reset it first.

            Example:

            ctx hook message edit qa-reminder gate\n# Edit .context/hooks/messages/qa-reminder/gate.txt in your editor\n
            ","path":["Message"],"tags":[]},{"location":"cli/message/#ctx-hook-message-reset","level":3,"title":"ctx hook message reset","text":"

            Delete a user override and revert to the embedded default. Silent no-op if no override exists.

            ctx hook message reset <hook> <variant>\n

            Example:

            ctx hook message reset qa-reminder gate\n

            See Customizing hook messages for the full workflow.

            ","path":["Message"],"tags":[]},{"location":"cli/notify/","level":1,"title":"Notify","text":"","path":["CLI","Integrations","Notify"],"tags":[]},{"location":"cli/notify/#ctx-hook-notify","level":2,"title":"ctx hook notify","text":"

            Send fire-and-forget webhook notifications from skills, loops, and hooks.

            ctx hook notify --event <name> [--session-id <id>] \"message\"\n

            Flags:

            Flag Short Description --event -e Event name (required) --session-id -s Session ID (optional)

            Behavior:

            • No webhook configured: silent no-op (exit 0)
            • Webhook set but event not in events list: silent no-op (exit 0)
            • Webhook set and event matches: fire-and-forget HTTP POST
            • HTTP errors silently ignored (no retry)

            Examples:

            ctx hook notify --event loop \"Loop completed after 5 iterations\"\nctx hook notify -e nudge -s session-abc \"Context checkpoint at prompt #20\"\n
            ","path":["CLI","Integrations","Notify"],"tags":[]},{"location":"cli/notify/#ctx-hook-notify-setup","level":3,"title":"ctx hook notify setup","text":"

            Configure the webhook URL interactively. The URL is encrypted with AES-256-GCM using the encryption key and stored in .context/.notify.enc.

            Examples:

            ctx hook notify setup\n

            The encrypted file is safe to commit. The key (~/.ctx/.ctx.key) lives outside the project and is never committed.

            ","path":["CLI","Integrations","Notify"],"tags":[]},{"location":"cli/notify/#ctx-hook-notify-test","level":3,"title":"ctx hook notify test","text":"

            Send a test notification and report the HTTP response status.

            Examples:

            ctx hook notify test\n

            Payload format (JSON POST):

            {\n  \"event\": \"loop\",\n  \"message\": \"Loop completed after 5 iterations\",\n  \"session_id\": \"abc123-...\",\n  \"timestamp\": \"2026-02-22T14:30:00Z\",\n  \"project\": \"ctx\"\n}\n
            Field Type Description event string Event name from --event flag message string Notification message session_id string Session ID (omitted if empty) timestamp string UTC RFC3339 timestamp project string Project directory name

            See also: Webhook Notifications recipe.

            ","path":["CLI","Integrations","Notify"],"tags":[]},{"location":"cli/pad/","level":1,"title":"Scratchpad","text":"","path":["CLI","Sessions","Scratchpad"],"tags":[]},{"location":"cli/pad/#ctx-pad","level":2,"title":"ctx pad","text":"

            Encrypted scratchpad for sensitive one-liners that travel with the project.

            When invoked without a subcommand, lists all entries.

            ctx pad\nctx pad <subcommand>\n
            ","path":["CLI","Sessions","Scratchpad"],"tags":[]},{"location":"cli/pad/#ctx-pad-add","level":3,"title":"ctx pad add","text":"

            Append a new entry to the scratchpad.

            ctx pad add <text>\nctx pad add <label> --file <path>\n

            Flags:

            Flag Short Description --file -f Ingest a file as a blob entry (max 64 KB)

            Examples:

            ctx pad add \"DATABASE_URL=postgres://user:pass@host/db\"\nctx pad add \"deploy config\" --file ./deploy.yaml\n
            ","path":["CLI","Sessions","Scratchpad"],"tags":[]},{"location":"cli/pad/#ctx-pad-show","level":3,"title":"ctx pad show","text":"

            Output the raw text of an entry by number. For blob entries, prints decoded file content (or writes to disk with --out).

            ctx pad show <n>\nctx pad show <n> --out <path>\n

            Arguments:

            • n: 1-based entry number

            Flags:

            Flag Description --out Write decoded blob content to a file (blobs only)

            Examples:

            ctx pad show 3\nctx pad show 2 --out ./recovered.yaml\n
            ","path":["CLI","Sessions","Scratchpad"],"tags":[]},{"location":"cli/pad/#ctx-pad-rm","level":3,"title":"ctx pad rm","text":"

            Remove one or more entries by stable ID. Supports individual IDs and ranges.

            ctx pad rm <id> [id...]\n

            Arguments:

            • id: One or more entry IDs (e.g., 3, 1 4, 3-5)

            Examples:

            ctx pad rm 2\nctx pad rm 1 4\nctx pad rm 3-5\n
            ","path":["CLI","Sessions","Scratchpad"],"tags":[]},{"location":"cli/pad/#ctx-pad-normalize","level":3,"title":"ctx pad normalize","text":"

            Reassign entry IDs as a contiguous sequence 1..N, closing any gaps left by deletions.

            Examples:

            ctx pad normalize\n
            ","path":["CLI","Sessions","Scratchpad"],"tags":[]},{"location":"cli/pad/#ctx-pad-edit","level":3,"title":"ctx pad edit","text":"

            Replace, append to, or prepend to an entry.

            ctx pad edit <n> [text]\n

            Arguments:

            • n: 1-based entry number
            • text: Replacement text (mutually exclusive with --append/--prepend)

            Flags:

            Flag Description --append Append text to the end of the entry --prepend Prepend text to the beginning of the entry --file Replace blob file content (preserves label) --label Replace blob label (preserves content)

            Examples:

            ctx pad edit 2 \"new text\"\nctx pad edit 2 --append \" suffix\"\nctx pad edit 2 --prepend \"prefix \"\nctx pad edit 1 --file ./v2.yaml\nctx pad edit 1 --label \"new name\"\n
            ","path":["CLI","Sessions","Scratchpad"],"tags":[]},{"location":"cli/pad/#ctx-pad-mv","level":3,"title":"ctx pad mv","text":"

            Move an entry from one position to another.

            ctx pad mv <from> <to>\n

            Arguments:

            • from: Source position (1-based)
            • to: Destination position (1-based)

            Examples:

            ctx pad mv 3 1      # promote entry 3 to the top\nctx pad mv 1 5      # bury entry 1 to position 5\n
            ","path":["CLI","Sessions","Scratchpad"],"tags":[]},{"location":"cli/pad/#ctx-pad-resolve","level":3,"title":"ctx pad resolve","text":"

            Show both sides of a merge conflict in the encrypted scratchpad.

            Examples:

            ctx pad resolve\n
            ","path":["CLI","Sessions","Scratchpad"],"tags":[]},{"location":"cli/pad/#ctx-pad-import","level":3,"title":"ctx pad import","text":"

            Bulk-import lines from a file into the scratchpad. Each non-empty line becomes a separate entry. All entries are written in a single encrypt/write cycle.

            With --blob, import all first-level files from a directory as blob entries. Each file becomes a blob with the filename as its label. Subdirectories and non-regular files are skipped.

            ctx pad import <file>\nctx pad import -              # read from stdin\nctx pad import --blob <dir>   # import directory files as blobs\n

            Arguments:

            • file: Path to a text file, - for stdin, or a directory (with --blob)

            Flags:

            Flag Description --blob Import first-level files from a directory as blobs

            Examples:

            ctx pad import notes.txt\ngrep TODO *.go | ctx pad import -\nctx pad import --blob ./ideas/\n
            ","path":["CLI","Sessions","Scratchpad"],"tags":[]},{"location":"cli/pad/#ctx-pad-export","level":3,"title":"ctx pad export","text":"

            Export all blob entries from the scratchpad to a directory as files. Each blob's label becomes the filename. Non-blob entries are skipped.

            ctx pad export [dir]\n

            Arguments:

            • dir: Target directory (default: current directory)

            Flags:

            Flag Short Description --force -f Overwrite existing files instead of timestamping --dry-run Print what would be exported without writing

            When a file already exists, a unix timestamp is prepended to avoid collisions (e.g., 1739836200-label). Use --force to overwrite instead.

            Examples:

            ctx pad export ./ideas\nctx pad export --dry-run\nctx pad export --force ./backup\n
            ","path":["CLI","Sessions","Scratchpad"],"tags":[]},{"location":"cli/pad/#ctx-pad-merge","level":3,"title":"ctx pad merge","text":"

            Merge entries from one or more scratchpad files into the current pad. Each input file is auto-detected as encrypted or plaintext. Entries are deduplicated by exact content.

            ctx pad merge FILE...\n

            Arguments:

            • FILE...: One or more scratchpad files to merge (encrypted or plaintext)

            Flags:

            Flag Short Description --key -k Path to key file for decrypting input files --dry-run Print what would be merged without writing

            Examples:

            ctx pad merge worktree/.context/scratchpad.enc\nctx pad merge notes.md backup.enc\nctx pad merge --key /path/to/other.key foreign.enc\nctx pad merge --dry-run pad-a.enc pad-b.md\n
            ","path":["CLI","Sessions","Scratchpad"],"tags":[]},{"location":"cli/pause/","level":1,"title":"Pause","text":"","path":["CLI","Sessions","Pause"],"tags":[]},{"location":"cli/pause/#ctx-hook-pause","level":2,"title":"ctx hook pause","text":"

            Pause all context nudge and reminder hooks for the current session. Security hooks (dangerous command blocking) and housekeeping hooks still fire.

            ctx hook pause [flags]\n

            Flags:

            Flag Description --session-id Session ID (overrides stdin)

            Example:

            # Pause hooks for a quick investigation\nctx hook pause\n\n# Resume when ready\nctx hook resume\n

            See also:

            • ctx hook resume: the matching resume command
            • Pausing Context Hooks recipe
            ","path":["CLI","Sessions","Pause"],"tags":[]},{"location":"cli/prune/","level":1,"title":"Prune","text":"","path":["CLI","Runtime","Prune"],"tags":[]},{"location":"cli/prune/#ctx-prune","level":3,"title":"ctx prune","text":"

            Remove per-session state files from .context/state/ that are older than the specified age. Session state files are identified by UUID suffixes (context-check-<session-id>, heartbeat-<session-id>, and similar). Global files without session IDs (events.jsonl, memory-import.json, and other non-per-session markers) are always preserved.

            ctx prune [flags]\n

            Flags:

            Flag Description --days Prune files older than this many days (default: 7) --dry-run Show what would be pruned without deleting

            Examples:

            ctx prune                 # Prune files older than 7 days\nctx prune --days 3        # Prune files older than 3 days\nctx prune --dry-run       # Preview without deleting\n

            See State maintenance for the recommended cadence and automation recipe.

            ","path":["CLI","Runtime","Prune"],"tags":[]},{"location":"cli/remind/","level":1,"title":"Remind","text":"","path":["CLI","Sessions","Remind"],"tags":[]},{"location":"cli/remind/#ctx-remind","level":2,"title":"ctx remind","text":"

            Session-scoped reminders that surface at session start. Reminders are stored verbatim and relayed verbatim: no summarization, no categories.

            When invoked with a text argument and no subcommand, adds a reminder.

            ctx remind \"text\"\nctx remind <subcommand>\n
            ","path":["CLI","Sessions","Remind"],"tags":[]},{"location":"cli/remind/#ctx-remind-add","level":3,"title":"ctx remind add","text":"

            Add a reminder. This is the default action: ctx remind \"text\" and ctx remind add \"text\" are equivalent.

            ctx remind \"refactor the swagger definitions\"\nctx remind add \"check CI after the deploy\" --after 2026-02-25\n

            Arguments:

            • text: The reminder message (verbatim)

            Flags:

            Flag Short Description --after -a Don't surface until this date (YYYY-MM-DD)

            Examples:

            ctx remind \"refactor the swagger definitions\"\nctx remind \"check CI after the deploy\" --after 2026-02-25\n
            ","path":["CLI","Sessions","Remind"],"tags":[]},{"location":"cli/remind/#ctx-remind-list","level":3,"title":"ctx remind list","text":"

            List all pending reminders. Date-gated reminders that aren't yet due are annotated with (after DATE, not yet due).

            Examples:

            ctx remind list\nctx remind ls            # alias\n

            Aliases: ls

            ","path":["CLI","Sessions","Remind"],"tags":[]},{"location":"cli/remind/#ctx-remind-dismiss","level":3,"title":"ctx remind dismiss","text":"

            Remove one or more reminders by ID, or remove all with --all. Supports individual IDs and ranges.

            ctx remind dismiss <id> [id...]\nctx remind dismiss --all\n

            Arguments:

            • id: One or more reminder IDs (e.g., 3, 3 5-7)

            Flags:

            Flag Description --all Dismiss all reminders

            Aliases: rm

            Examples:

            ctx remind dismiss 3\nctx remind dismiss 3 5-7\nctx remind dismiss --all\n
            ","path":["CLI","Sessions","Remind"],"tags":[]},{"location":"cli/remind/#ctx-remind-normalize","level":3,"title":"ctx remind normalize","text":"

            Reassign reminder IDs as a contiguous sequence 1..N, closing any gaps left by dismissals.

            Examples:

            ctx remind normalize\n

            See also: Session Reminders recipe.

            ","path":["CLI","Sessions","Remind"],"tags":[]},{"location":"cli/resume/","level":1,"title":"Resume","text":"","path":["CLI","Sessions","Resume"],"tags":[]},{"location":"cli/resume/#ctx-hook-resume","level":2,"title":"ctx hook resume","text":"

            Resume context hooks after a pause. Silent no-op if not paused.

            ctx hook resume [flags]\n

            Flags:

            Flag Description --session-id Session ID (overrides stdin)

            Example:

            ctx hook resume\n

            See also:

            • ctx hook pause: the matching pause command
            • Pausing Context Hooks recipe
            ","path":["CLI","Sessions","Resume"],"tags":[]},{"location":"cli/serve/","level":1,"title":"Serve","text":"","path":["CLI","Integrations","Serve"],"tags":[]},{"location":"cli/serve/#ctx-serve","level":2,"title":"ctx serve","text":"

            Serve a static site locally via zensical.

            With no argument, serves the journal site at .context/journal-site. With a directory argument, serves that directory if it contains a zensical.toml.

            ctx serve                             # Serve .context/journal-site\nctx serve ./my-site                   # Serve a specific directory\nctx serve ./docs                      # Serve any zensical site\n

            This Command Does NOT Start a Hub

            ctx serve is purely for static-site serving. To run a ctx Hub for cross-project knowledge sharing, use ctx hub start. That command lives in its own group because the hub is a gRPC server, not a static site.

            Requires zensical to be installed:

            pipx install zensical\n
            ","path":["CLI","Integrations","Serve"],"tags":[]},{"location":"cli/serve/#arguments","level":3,"title":"Arguments","text":"Argument Description [directory] Directory containing a zensical.toml to serve

            When omitted, serves .context/journal-site by default, the directory produced by ctx journal site.

            Examples:

            ctx serve                         # Default: serve .context/journal-site\nctx serve ./my-site               # Serve a specific directory\nctx serve ./docs                  # Serve any zensical site\n
            ","path":["CLI","Integrations","Serve"],"tags":[]},{"location":"cli/serve/#see-also","level":3,"title":"See Also","text":"
            • ctx journal: generate the journal site that ctx serve displays.
            • ctx hub start: for running a ctx Hub server, not a static site.
            • Browsing and enriching past sessions: the recipe that combines ctx journal and ctx serve.
            ","path":["CLI","Integrations","Serve"],"tags":[]},{"location":"cli/setup/","level":1,"title":"Setup","text":"","path":["CLI","Integrations","Setup"],"tags":[]},{"location":"cli/setup/#ctx-setup","level":2,"title":"ctx setup","text":"

            Generate AI tool integration configuration.

            ctx setup <tool> [flags]\n

            Flags:

            Flag Short Description --write -w Write the generated config to disk (e.g. .github/copilot-instructions.md)

            Supported tools:

            Tool Description claude-code Redirects to plugin install instructions cursor Cursor IDE kiro Kiro IDE cline Cline (VS Code extension) aider Aider CLI copilot GitHub Copilot windsurf Windsurf IDE

            Claude Code Uses the Plugin System

            Claude Code integration is now provided via the ctx plugin. Running ctx setup claude-code prints plugin install instructions.

            Examples:

            # Print hook instructions to stdout\nctx setup cursor\nctx setup aider\n\n# Generate and write .github/copilot-instructions.md\nctx setup copilot --write\n\n# Generate MCP config and sync steering files\nctx setup kiro --write\nctx setup cursor --write\nctx setup cline --write\n
            ","path":["CLI","Integrations","Setup"],"tags":[]},{"location":"cli/site/","level":1,"title":"Site","text":"","path":["CLI","Integrations","Site"],"tags":[]},{"location":"cli/site/#ctx-site","level":2,"title":"ctx site","text":"

            Site management commands for the ctx.ist static site.

            ctx site <subcommand>\n
            ","path":["CLI","Integrations","Site"],"tags":[]},{"location":"cli/site/#ctx-site-feed","level":3,"title":"ctx site feed","text":"

            Generate an Atom 1.0 feed from finalized blog posts in docs/blog/.

            ctx site feed [flags]\n

            Scans docs/blog/ for files matching YYYY-MM-DD-*.md, parses YAML frontmatter, and generates a valid Atom feed. Only posts with reviewed_and_finalized: true are included. Summaries are extracted from the first paragraph after the heading.

            Flags:

            Flag Short Type Default Description --out -o string site/feed.xml Output path --base-url string https://ctx.ist Base URL for entry links

            Output:

            Generated site/feed.xml (21 entries)\n\nSkipped:\n  2026-02-25-the-homework-problem.md: not finalized\n\nWarnings:\n  2026-02-09-defense-in-depth.md: no summary paragraph found\n

            Three buckets: included (count), skipped (with reason), warnings (included but degraded). Exit code is always 0: warnings inform but do not block.

            Frontmatter requirements:

            Field Required Feed mapping title Yes <title> date Yes <updated> reviewed_and_finalized Yes Draft gate (must be true) author No <author><name> topics No <category term=\"\">

            Examples:

            ctx site feed                                # Generate site/feed.xml\nctx site feed --out /tmp/feed.xml            # Custom output path\nctx site feed --base-url https://example.com # Custom base URL\nmake site-feed                               # Makefile shortcut\nmake site                                    # Builds site + feed\n
            ","path":["CLI","Integrations","Site"],"tags":[]},{"location":"cli/skill/","level":1,"title":"Skill","text":"","path":["CLI","Integrations","Skill"],"tags":[]},{"location":"cli/skill/#ctx-skill","level":2,"title":"ctx skill","text":"

            Manage reusable instruction bundles that can be installed into .context/skills/.

            A skill is a directory containing a SKILL.md file with YAML frontmatter (name, description) and a Markdown instruction body. Skills are loaded by the agent context packet when --skill <name> is passed to ctx agent.

            ctx skill <subcommand>\n
            ","path":["CLI","Integrations","Skill"],"tags":[]},{"location":"cli/skill/#ctx-skill-install","level":3,"title":"ctx skill install","text":"

            Install a skill from a source directory.

            ctx skill install <source>\n

            Arguments:

            • source: Path to a directory containing SKILL.md

            Examples:

            ctx skill install ./my-skills/code-review\n# Installed code-review → .context/skills/code-review\n
            ","path":["CLI","Integrations","Skill"],"tags":[]},{"location":"cli/skill/#ctx-skill-list","level":3,"title":"ctx skill list","text":"

            List all installed skills.

            Examples:

            ctx skill list\n
            ","path":["CLI","Integrations","Skill"],"tags":[]},{"location":"cli/skill/#ctx-skill-remove","level":3,"title":"ctx skill remove","text":"

            Remove an installed skill.

            Arguments:

            • name: Skill name to remove

            Examples:

            ctx skill remove code-review\n

            See also: Building Project Skills recipe.

            ","path":["CLI","Integrations","Skill"],"tags":[]},{"location":"cli/steering/","level":1,"title":"Steering","text":"","path":["CLI","Integrations","Steering"],"tags":[]},{"location":"cli/steering/#ctx-steering","level":2,"title":"ctx steering","text":"

            Manage steering files: persistent behavioral rules for AI coding assistants.

            A steering file is a small markdown document with YAML frontmatter that tells the AI how to behave in a specific context. ctx steering keeps those files in .context/steering/, decides which ones apply for a given prompt, and syncs them out to each AI tool's native format (Claude Code, Cursor, Kiro, Cline).

            ctx steering <subcommand>\n

            Steering vs Decisions vs Conventions

            The three look similar on disk but serve different purposes:

            • Decisions record what was chosen and why. Consumed mostly by humans (and by the agent via ctx agent).
            • Conventions describe how the codebase is written. Consumed as reference material.
            • Steering tells the AI how to behave when asked about X. Consumed by the AI tool's prompt injection layer, conditionally on prompt match.

            If you find yourself writing \"the AI should always do X\", that belongs in steering, not decisions.

            ","path":["CLI","Integrations","Steering"],"tags":[]},{"location":"cli/steering/#anatomy-of-a-steering-file","level":3,"title":"Anatomy of a Steering File","text":"
            ---\nname: security\ndescription: Security rules for all code changes\ninclusion: always    # always | auto | manual\ntools: []            # empty = all tools\npriority: 10         # lower = injected first\n---\n\n# Security rules\n\n- Validate all user input at system boundaries.\n- Never log secrets, tokens, or credentials.\n- Prefer constant-time comparison for tokens.\n

            Inclusion modes:

            Mode When it's included always Every prompt, unconditionally auto When the prompt matches the description keywords manual Only when the user names it explicitly

            Priority: lower numbers inject first, so high-priority rules appear at the top of the prompt. Default is 50.

            Tools: an empty list means all configured tools receive the file; list specific tool names to scope it.

            ","path":["CLI","Integrations","Steering"],"tags":[]},{"location":"cli/steering/#ctx-steering-init","level":3,"title":"ctx steering init","text":"

            Create a starter set of steering files in .context/steering/ to use as a scaffolding baseline.

            Examples:

            ctx steering init\n
            ","path":["CLI","Integrations","Steering"],"tags":[]},{"location":"cli/steering/#ctx-steering-add","level":3,"title":"ctx steering add","text":"

            Create a new steering file with default frontmatter.

            ctx steering add <name>\n

            Arguments:

            • name: Steering file name (without .md extension)

            Examples:

            ctx steering add security\n# Created .context/steering/security.md\n

            The generated file uses inclusion: manual and priority: 50 by default. Edit the frontmatter to change behavior.

            ","path":["CLI","Integrations","Steering"],"tags":[]},{"location":"cli/steering/#ctx-steering-list","level":3,"title":"ctx steering list","text":"

            List all steering files with their inclusion mode, priority, and tool scoping.

            Examples:

            ctx steering list\n
            ","path":["CLI","Integrations","Steering"],"tags":[]},{"location":"cli/steering/#ctx-steering-preview","level":3,"title":"ctx steering preview","text":"

            Preview which steering files would be included for a given prompt. Useful for validating auto-inclusion descriptions against realistic prompts.

            ctx steering preview [prompt]\n

            Examples:

            ctx steering preview \"create a REST API endpoint\"\n# Steering files matching prompt \"create a REST API endpoint\":\n#   api-standards        inclusion=auto     priority=20  tools=all\n#   security             inclusion=always   priority=10  tools=all\n
            ","path":["CLI","Integrations","Steering"],"tags":[]},{"location":"cli/steering/#ctx-steering-sync","level":3,"title":"ctx steering sync","text":"

            Sync steering files to tool-native formats for tools that have a built-in rules primitive. Not every tool needs this; Claude Code and Codex use a different delivery mechanism (see below).

            Examples:

            ctx steering sync\n

            Which tools are sync targets?

            Tool Sync target Mechanism Cursor .cursor/rules/ Cursor reads the directory natively Cline .clinerules/ Cline reads the directory natively Kiro .kiro/steering/ Kiro reads the directory natively Claude Code (no-op) Delivered via hook + MCP (see next section) Codex (no-op) Same as Claude Code

            For the three native-rules tools, ctx steering sync writes each matching steering file to the appropriate directory with tool-specific frontmatter transforms. Unchanged files are skipped (idempotent).

            ","path":["CLI","Integrations","Steering"],"tags":[]},{"location":"cli/steering/#how-claude-code-and-codex-consume-steering","level":3,"title":"How Claude Code and Codex Consume Steering","text":"

            Claude Code has no native \"steering files\" primitive, so ctx steering sync skips it entirely. Instead, steering reaches Claude through two non-sync channels, both activated by ctx setup claude-code (which installs the plugin):

            1. Automatic injection via the PreToolUse hook. The Claude Code plugin wires a PreToolUse hook that runs ctx agent --budget 8000 before each tool call. ctx agent loads .context/steering/ and calls steering.Filter with an empty prompt, so only files with inclusion: always match. Those files are included as Tier 6 of the context packet. The packet is printed on stdout, which Claude Code injects as additional context. This fires on every tool call; no user action.

            2. On-demand MCP tool call (ctx_steering_get). The ctx plugin ships a .mcp.json file that automatically registers the ctx MCP server (ctx mcp serve) with Claude Code on plugin install. Once registered, Claude can invoke the ctx_steering_get tool mid-task to fetch matching steering files for a specific prompt. This is the only path that resolves inclusion: auto and inclusion: manual matches for Claude Code; Claude passes the prompt to the MCP tool, which runs the keyword match against each file's description.

            Verify the MCP server is registered:

            claude mcp list\n

            Expected line: ctx: ctx mcp serve - ✓ Connected. If it's missing, reinstall the plugin from Claude Code (/plugin → find ctx → uninstall → install again); older plugin versions shipped without the .mcp.json file.

            Prefer inclusion: always for Claude Code

            Because the PreToolUse hook passes an empty prompt to ctx agent, only always files fire automatically. auto files require Claude to call the ctx_steering_get MCP tool on its own; manual files require an explicit user invocation. For rules that should reliably fire on every Claude Code session, use inclusion: always. Reserve auto/manual for situational libraries where the opt-in cost is acceptable and you understand Claude may not pull them in without prompting.

            The foundation files scaffolded by ctx init already default to inclusion: always for this reason.

            Practical implications:

            • Running ctx steering sync before starting a Claude session does nothing for Claude's benefit. Skip it.
            • ctx steering preview still works for validating your descriptions; it doesn't depend on sync.
            • If Claude Code is your only tool, the ctx steering commands you care about are add, list, preview, init (never sync).
            • If you use both Claude Code and (say) Cursor, ctx steering sync covers Cursor (where auto and manual work natively) while the hook+MCP pipeline covers Claude Code. For rules you need to fire automatically on both, use inclusion: always.
            ","path":["CLI","Integrations","Steering"],"tags":[]},{"location":"cli/steering/#ctx-agent-integration","level":3,"title":"ctx agent Integration","text":"

            When ctx agent builds a context packet, steering files are loaded as Tier 6 of the budget-aware assembly (see ctx agent). Files with inclusion: always are always included; auto files are scored against the current prompt and included in priority order until the tier budget is exhausted.

            ","path":["CLI","Integrations","Steering"],"tags":[]},{"location":"cli/steering/#see-also","level":3,"title":"See Also","text":"
            • ctx setup: configure which tools receive steering syncs
            • ctx trigger: lifecycle scripts (a different hooking concept, see below)
            • Building steering files recipe: walkthrough from first file to synced output
            ","path":["CLI","Integrations","Steering"],"tags":[]},{"location":"cli/sysinfo/","level":1,"title":"Sysinfo","text":"","path":["CLI","Diagnostics","Sysinfo"],"tags":[]},{"location":"cli/sysinfo/#ctx-sysinfo","level":3,"title":"ctx sysinfo","text":"

            Display a snapshot of system resources (memory, swap, disk, load) with threshold-based alert severities. Mirrors what the check-resource hook plumbing monitors in the background, but this command prints the full report at any severity level, not only at DANGER.

            ctx sysinfo [flags]\n

            Flags:

            Flag Description --json Output in JSON format

            Alert thresholds:

            Resource WARNING DANGER Memory ≥ 75% ≥ 90% Swap ≥ 50% ≥ 75% Disk ≥ 85% ≥ 95% Load ≥ 1.0x CPUs ≥ 1.5x CPUs

            Examples:

            ctx sysinfo                  # Human-readable table\nctx sysinfo --json           # Structured output\n
            ","path":["CLI","Diagnostics","Sysinfo"],"tags":[]},{"location":"cli/system/","level":1,"title":"System","text":"","path":["CLI","Runtime","System"],"tags":[]},{"location":"cli/system/#ctx-system","level":3,"title":"ctx system","text":"

            Hidden parent command that hosts Claude Code hook plumbing and a small set of session-lifecycle plumbing subcommands used by skills and editor integrations. The parent is registered without a visible group in ctx --help; run ctx system --help to see its subcommands.

            ctx system <subcommand>\n

            Commands Previously under ctx system

            Several user-facing maintenance commands used to live under ctx system and were promoted to top-level:

            • ctx system eventsctx hook event
            • ctx system messagectx hook message
            • ctx system prunectx prune
            • ctx system resourcesctx sysinfo
            • ctx system statsctx usage

            ctx system bootstrap remains under ctx system as a hidden, agent-only command. Update any scripts or personal docs that reference the old paths.

            ","path":["CLI","Runtime","System"],"tags":[]},{"location":"cli/system/#plumbing-subcommands","level":2,"title":"Plumbing Subcommands","text":"

            These are not hook handlers; they're called by skills and editor integrations during the session lifecycle. Safe to run manually.

            ","path":["CLI","Runtime","System"],"tags":[]},{"location":"cli/system/#ctx-system-mark-journal","level":4,"title":"ctx system mark-journal","text":"

            Update processing state for a journal entry. Records the current date in .context/journal/.state.json. Used by journal skills to record pipeline progress.

            ctx system mark-journal <filename> <stage>\n

            Stages: exported, enriched, normalized, fences_verified

            Flag Description --check Check if stage is set (exit 1 if not)

            Example:

            ctx system mark-journal 2026-01-21-session-abc12345.md enriched\nctx system mark-journal 2026-01-21-session-abc12345.md normalized\nctx system mark-journal --check 2026-01-21-session-abc12345.md fences_verified\n
            ","path":["CLI","Runtime","System"],"tags":[]},{"location":"cli/system/#ctx-system-mark-wrapped-up","level":4,"title":"ctx system mark-wrapped-up","text":"

            Suppress context checkpoint nudges after a wrap-up ceremony. Writes a marker file that check-context-size checks before emitting checkpoint boxes. The marker expires after 2 hours.

            Called automatically by /ctx-wrap-up after persisting context (not intended for direct use).

            ctx system mark-wrapped-up\n

            No flags, no arguments. Idempotent: running it again updates the marker timestamp.

            ","path":["CLI","Runtime","System"],"tags":[]},{"location":"cli/system/#ctx-system-pause-ctx-system-resume","level":4,"title":"ctx system pause / ctx system resume","text":"

            Session-scoped hook suppression. ctx system pause writes a marker file that causes hook plumbing to no-op for the current session; ctx system resume removes it. These are the hook-plumbing counterparts to the ctx hook pause / ctx hook resume commands (which call them internally).

            Read the session ID from stdin JSON (same as hooks) or pass --session-id.

            ","path":["CLI","Runtime","System"],"tags":[]},{"location":"cli/system/#ctx-system-session-event","level":4,"title":"ctx system session-event","text":"

            Records a session lifecycle event (start or end) to the event log. Called by editor integrations when a workspace is opened or closed.

            ctx system session-event --type start --caller vscode\nctx system session-event --type end --caller vscode\n
            ","path":["CLI","Runtime","System"],"tags":[]},{"location":"cli/system/#hook-subcommands","level":2,"title":"Hook Subcommands","text":"

            Hidden Claude Code hook handlers implementing the hook contract: read JSON from stdin, perform logic, emit output on stdout, exit 0. Block commands output JSON with a decision field.

            UserPromptSubmit hooks: context-load-gate, check-context-size, check-persistence, check-ceremony, check-journal, check-version, check-resource, check-knowledge, check-map-staleness, check-memory-drift, check-reminder, check-freshness, check-hub-sync, check-skill-discovery, heartbeat.

            PreToolUse hooks: block-non-path-ctx, block-dangerous-command, qa-reminder, specs-nudge.

            PostToolUse hooks: post-commit, check-task-completion.

            See AI Tools for registration details and the Claude Code plugin integration.

            ","path":["CLI","Runtime","System"],"tags":[]},{"location":"cli/trace/","level":1,"title":"Commit Context Tracing","text":"","path":["CLI","Diagnostics","Commit Context Tracing"],"tags":[]},{"location":"cli/trace/#ctx-trace","level":3,"title":"ctx trace","text":"

            Show the context behind git commits. Links commits back to the decisions, tasks, learnings, and sessions that motivated them.

            git log shows what changed, git blame shows who, and ctx trace shows why.

            ctx trace [commit] [flags]\n

            Flags:

            Flag Description --last N Show context for last N commits --json Output as JSON for scripting

            Examples:

            # Show context for a specific commit\nctx trace abc123\n\n# Show context for last 10 commits\nctx trace --last 10\n\n# JSON output\nctx trace abc123 --json\n

            Output:

            Commit: abc123 \"Fix auth token expiry\"\nDate:   2026-03-14 10:00:00 -0700\nContext:\n  [Decision] #12: Use short-lived tokens with server-side refresh\n    Date: 2026-03-10\n\n  [Task] #8: Implement token rotation for compliance\n    Status: completed\n

            When listing recent commits with --last:

            abc123  Fix auth token expiry         decision:12, task:8\ndef456  Add rate limiting             decision:15, learning:7\n789abc  Update dependencies           (none)\n
            ","path":["CLI","Diagnostics","Commit Context Tracing"],"tags":[]},{"location":"cli/trace/#ctx-trace-file","level":3,"title":"ctx trace file","text":"

            Show the context trail for a file. Combines git log with context resolution.

            ctx trace file <path[:line-range]> [flags]\n

            Flags:

            Flag Description --last N Maximum commits to show (default: 20)

            Examples:

            # Show context trail for a file\nctx trace file src/auth.go\n\n# Show context for specific line range\nctx trace file src/auth.go:42-60\n
            ","path":["CLI","Diagnostics","Commit Context Tracing"],"tags":[]},{"location":"cli/trace/#ctx-trace-tag","level":3,"title":"ctx trace tag","text":"

            Manually tag a commit with context. For commits made without the hook, or to add extra context after the fact.

            Tags are stored in .context/trace/overrides.jsonl since git trailers cannot be added to existing commits without rewriting history.

            ctx trace tag <commit> --note \"<text>\"\n

            Examples:

            ctx trace tag HEAD --note \"Hotfix for production outage\"\nctx trace tag abc123 --note \"Part of Q1 compliance initiative\"\n
            ","path":["CLI","Diagnostics","Commit Context Tracing"],"tags":[]},{"location":"cli/trace/#ctx-trace-hook","level":3,"title":"ctx trace hook","text":"

            Enable or disable the prepare-commit-msg hook for automatic context tracing. When enabled, commits automatically receive a ctx-context trailer with references to relevant decisions, tasks, learnings, and sessions.

            ctx trace hook <enable|disable>\n

            Prerequisites: ctx must be on your $PATH. If you installed via go install, ensure $GOPATH/bin (or $HOME/go/bin) is in your shell's $PATH.

            What the hook does:

            1. Before each commit, collects context from three sources:
            2. Pending context accumulated during work (ctx add, ctx task complete)
            3. Staged file changes to .context/ files
            4. Working state (in-progress tasks, active AI session)
            5. Injects a ctx-context trailer into the commit message
            6. After commit, records the mapping in .context/trace/history.jsonl

            Examples:

            # Install the hook\nctx trace hook enable\n\n# Remove the hook\nctx trace hook disable\n

            Resulting commit message:

            Fix auth token expiry handling\n\nRefactored token refresh logic to handle edge case\nwhere refresh token expires during request.\n\nctx-context: decision:12, task:8, session:abc123\n
            ","path":["CLI","Diagnostics","Commit Context Tracing"],"tags":[]},{"location":"cli/trace/#reference-types","level":3,"title":"Reference Types","text":"

            The ctx-context trailer supports these reference types:

            Prefix Points to Example decision:<n> Entry #n in DECISIONS.md decision:12 learning:<n> Entry #n in LEARNINGS.md learning:5 task:<n> Task #n in TASKS.md task:8 convention:<n> Entry #n in CONVENTIONS.md convention:3 session:<id> AI session by ID session:abc123 \"<text>\" Free-form context note \"Performance fix for P1 incident\"","path":["CLI","Diagnostics","Commit Context Tracing"],"tags":[]},{"location":"cli/trace/#storage","level":3,"title":"Storage","text":"

            Context trace data is stored in the .context/ directory:

            File Purpose Lifecycle state/pending-context.jsonl Accumulates refs during work Truncated after each commit trace/history.jsonl Permanent commit-to-context map Append-only, never truncated trace/overrides.jsonl Manual tags for existing commits Append-only","path":["CLI","Diagnostics","Commit Context Tracing"],"tags":[]},{"location":"cli/trigger/","level":1,"title":"Trigger","text":"","path":["CLI","Integrations","Trigger"],"tags":[]},{"location":"cli/trigger/#ctx-trigger","level":2,"title":"ctx trigger","text":"

            Manage lifecycle triggers: executable scripts that fire at specific events during an AI session. Triggers can block tool calls, inject context, and automate reactions: any side effect you want at session boundaries, tool boundaries, or file-save events.

            ctx trigger <subcommand>\n

            Triggers Execute Arbitrary Scripts

            A trigger is a shell script with the executable bit set. It runs with the same privileges as your AI tool and receives JSON input on stdin. Treat triggers like pre-commit hooks: only enable scripts you've read and understand. A malicious or buggy trigger can block tool calls, corrupt context files, or exfiltrate data.

            ","path":["CLI","Integrations","Trigger"],"tags":[]},{"location":"cli/trigger/#where-triggers-live","level":3,"title":"Where Triggers Live","text":"

            Triggers live in .context/hooks/<trigger-type>/ as executable scripts. The on-disk directory name is still hooks/ for historical reasons even though the command is ctx trigger. Each script:

            • Reads a JSON payload from stdin.
            • Returns a JSON payload on stdout.
            • Returns a non-zero exit code to block or error.
            .context/\n└── hooks/\n    ├── session-start/\n    │   └── inject-context.sh\n    ├── pre-tool-use/\n    │   └── block-legacy.sh\n    └── post-tool-use/\n        └── record-edit.sh\n
            ","path":["CLI","Integrations","Trigger"],"tags":[]},{"location":"cli/trigger/#trigger-types","level":3,"title":"Trigger Types","text":"Type Fires when session-start An AI session begins session-end An AI session ends pre-tool-use Before an AI tool call is executed post-tool-use After an AI tool call returns file-save When a file is saved context-add When a context entry is added","path":["CLI","Integrations","Trigger"],"tags":[]},{"location":"cli/trigger/#input-and-output-contract","level":3,"title":"Input and Output Contract","text":"

            Each trigger receives a JSON object on stdin with the event details. Minimal contract (fields vary by trigger type):

            {\n  \"type\": \"pre-tool-use\",\n  \"tool\": \"write_file\",\n  \"path\": \"src/auth.go\",\n  \"session_id\": \"abc123-...\"\n}\n

            The trigger may write a JSON object to stdout to influence behavior. Example for a blocking pre-tool-use trigger:

            {\n  \"action\": \"block\",\n  \"message\": \"Editing src/auth.go requires approval from #security\"\n}\n

            For non-blocking event loggers, simply read stdin and exit 0 without writing to stdout.

            ","path":["CLI","Integrations","Trigger"],"tags":[]},{"location":"cli/trigger/#ctx-trigger-add","level":3,"title":"ctx trigger add","text":"

            Create a new trigger script with a template. The generated file has a bash shebang, a stdin reader using jq, and a basic JSON output structure.

            ctx trigger add <trigger-type> <name>\n

            Arguments:

            • trigger-type: One of session-start, session-end, pre-tool-use, post-tool-use, file-save, context-add
            • name: Script name (without .sh extension)

            Examples:

            ctx trigger add session-start inject-context\n# Created .context/hooks/session-start/inject-context.sh\n\nctx trigger add pre-tool-use block-legacy\n# Created .context/hooks/pre-tool-use/block-legacy.sh\n

            The generated script is not executable by default. Enable it with ctx trigger enable after reviewing the contents.

            ","path":["CLI","Integrations","Trigger"],"tags":[]},{"location":"cli/trigger/#ctx-trigger-list","level":3,"title":"ctx trigger list","text":"

            List all discovered triggers, grouped by trigger type, with their enabled/disabled status.

            Examples:

            ctx trigger list\n
            ","path":["CLI","Integrations","Trigger"],"tags":[]},{"location":"cli/trigger/#ctx-trigger-test","level":3,"title":"ctx trigger test","text":"

            Run all enabled triggers of a given type against a mock payload. Use --tool and --path to customize the mock input for tool-related events.

            ctx trigger test <trigger-type> [flags]\n

            Flags:

            Flag Description --tool Tool name to put in mock input --path File path to put in mock input

            Examples:

            ctx trigger test session-start\nctx trigger test pre-tool-use --tool write_file --path src/main.go\n
            ","path":["CLI","Integrations","Trigger"],"tags":[]},{"location":"cli/trigger/#ctx-trigger-enable","level":3,"title":"ctx trigger enable","text":"

            Enable a trigger by setting its executable permission bit. Searches every trigger-type directory for a script matching <name>.

            ctx trigger enable <name>\n

            Examples:

            ctx trigger enable inject-context\n# Enabled .context/hooks/session-start/inject-context.sh\n
            ","path":["CLI","Integrations","Trigger"],"tags":[]},{"location":"cli/trigger/#ctx-trigger-disable","level":3,"title":"ctx trigger disable","text":"

            Disable a trigger by clearing its executable permission bit. Searches every trigger-type directory for a script matching <name>.

            ctx trigger disable <name>\n

            Examples:

            ctx trigger disable inject-context\n# Disabled .context/hooks/session-start/inject-context.sh\n
            ","path":["CLI","Integrations","Trigger"],"tags":[]},{"location":"cli/trigger/#three-hooking-concepts-in-ctx-dont-confuse-them","level":3,"title":"Three Hooking Concepts in ctx (Don't Confuse Them)","text":"

            This is a common source of confusion. ctx has three distinct hook-like layers, and they serve different purposes:

            Layer Owned by Where it runs Configured via ctx trigger You .context/hooks/<type>/*.sh ctx trigger add/enable ctx system hooks ctx itself built-in, called by ctx's own lifecycle internal (see ctx system --help) Claude Code hooks Claude Code .claude/settings.local.json edit JSON, or /ctx-sanitize-permissions

            Use ctx trigger when you want project-specific automation that your AI tool will run at lifecycle events. Use Claude Code hooks for tool-specific integrations that don't need to be portable across tools. ctx system hooks are not something you author; they're the internal nudge machinery that ships with ctx.

            ","path":["CLI","Integrations","Trigger"],"tags":[]},{"location":"cli/trigger/#see-also","level":3,"title":"See Also","text":"
            • ctx steering: persistent AI behavioral rules (a different concept; rules vs scripts)
            • Authoring triggers recipe: a full walkthrough with security guidance
            ","path":["CLI","Integrations","Trigger"],"tags":[]},{"location":"cli/usage/","level":1,"title":"Usage","text":"","path":["CLI","Diagnostics","Usage"],"tags":[]},{"location":"cli/usage/#ctx-usage","level":3,"title":"ctx usage","text":"

            Display per-session token usage statistics from the local stats JSONL files written by the heartbeat hook. By default, shows the last 20 entries across all sessions. Use --follow to stream new entries as they arrive (like tail -f).

            ctx usage [flags]\n

            Flags:

            Flag Description -f, --follow Stream new entries as they arrive -s, --session Filter by session ID (prefix match) -n, --last Show last N entries (default: 20) -j, --json Output raw JSONL

            Examples:

            ctx usage                     # Last 20 entries across all sessions\nctx usage --follow            # Live stream (like tail -f)\nctx usage --session abc123    # Filter to one session\nctx usage --last 100 --json   # Last 100 as raw JSONL\n
            ","path":["CLI","Diagnostics","Usage"],"tags":[]},{"location":"cli/watch/","level":1,"title":"Watch","text":"","path":["CLI","Context","Watch"],"tags":[]},{"location":"cli/watch/#ctx-watch","level":2,"title":"ctx watch","text":"

            Watch for AI output and auto-apply context updates.

            Parses <context-update> XML commands from AI output and applies them to context files.

            ctx watch [flags]\n

            Flags:

            Flag Description --log <file> Log file to watch (default: stdin) --dry-run Preview updates without applying

            Examples:

            # Watch stdin\nai-tool | ctx watch\n\n# Watch a log file\nctx watch --log /path/to/ai-output.log\n\n# Preview without applying\nctx watch --dry-run\n
            ","path":["CLI","Context","Watch"],"tags":[]},{"location":"cli/why/","level":1,"title":"Why","text":"","path":["CLI","Getting Started","Why"],"tags":[]},{"location":"cli/why/#ctx-why","level":2,"title":"ctx why","text":"

            Read ctx's philosophy documents directly in the terminal.

            ctx why [DOCUMENT]\n

            Documents:

            Name Description manifesto The ctx Manifesto: creation, not code about About ctx: what it is and why it exists invariants Design invariants: properties that must hold

            Examples:

            # Interactive numbered menu\nctx why\n\n# Show a specific document\nctx why manifesto\nctx why about\nctx why invariants\n\n# Pipe to a pager\nctx why manifesto | less\n
            ","path":["CLI","Getting Started","Why"],"tags":[]},{"location":"home/","level":1,"title":"Home","text":"
            • ctx is not a prompt.
            • ctx is version-controlled cognitive state.

            ctx is the persistence layer for human-AI reasoning.

            Deterministic. Git-native. Human-readable. Local-first.

            Start here.

            Learn what ctx does, set it up, and run your first session.

            Pre-1.0: Moving Fast

            ctx is under active development. This website tracks the development branch, not the latest release:

            Some features described here may not exist in the binary you have installed.

            Expect rough edges.

            If something is missing or broken, open an issue.

            ","path":["Home"],"tags":[]},{"location":"home/#introduction","level":2,"title":"Introduction","text":"","path":["Home"],"tags":[]},{"location":"home/#about","level":3,"title":"About","text":"

            What ctx is, how it works, and why persistent context changes how you work with AI.

            ","path":["Home"],"tags":[]},{"location":"home/#is-it-right-for-me","level":3,"title":"Is It Right for Me?","text":"

            Good fit, not-so-good fit, and a 5-minute trial to find out for yourself.

            ","path":["Home"],"tags":[]},{"location":"home/#faq","level":3,"title":"FAQ","text":"

            Quick answers to the questions newcomers ask most about ctx, files, tooling, and trade-offs.

            ","path":["Home"],"tags":[]},{"location":"home/#get-started","level":2,"title":"Get Started","text":"","path":["Home"],"tags":[]},{"location":"home/#getting-started","level":3,"title":"Getting Started","text":"

            Install the binary, set up the plugin, and verify it works.

            ","path":["Home"],"tags":[]},{"location":"home/#your-first-session","level":3,"title":"Your First Session","text":"

            Step-by-step walkthrough from ctx init to verified recall.

            ","path":["Home"],"tags":[]},{"location":"home/#common-workflows","level":3,"title":"Common Workflows","text":"

            Day-to-day commands for tracking context, checking health, and browsing history.

            ","path":["Home"],"tags":[]},{"location":"home/#concepts","level":2,"title":"Concepts","text":"","path":["Home"],"tags":[]},{"location":"home/#context-files","level":3,"title":"Context Files","text":"

            What each .context/ file does, what its purpose is, and how we best leverage it.

            ","path":["Home"],"tags":[]},{"location":"home/#configuration","level":3,"title":"Configuration","text":"

            Flexible configuration: .ctxrc, environment variables, and CLI flags.

            ","path":["Home"],"tags":[]},{"location":"home/#hub","level":3,"title":"Hub","text":"

            A fan-out channel for decisions, learnings, conventions, and tasks that need to cross project boundaries, without replicating everything else.

            ","path":["Home"],"tags":[]},{"location":"home/#working-with-ai","level":2,"title":"Working with AI","text":"","path":["Home"],"tags":[]},{"location":"home/#prompting-guide","level":3,"title":"Prompting Guide","text":"

            Effective prompts for AI sessions with ctx.

            ","path":["Home"],"tags":[]},{"location":"home/#keeping-ai-honest","level":3,"title":"Keeping AI Honest","text":"

            AI agents confabulate: they invent history, claim familiarity with decisions never made, and sometimes declare tasks complete when they aren't. Tools and habits to push back.

            ","path":["Home"],"tags":[]},{"location":"home/#my-ai-keeps-making-the-same-mistakes","level":3,"title":"My AI Keeps Making the Same Mistakes","text":"

            Stop rediscovering the same bugs and dead-ends across sessions.

            ","path":["Home"],"tags":[]},{"location":"home/#joining-a-project","level":3,"title":"Joining a Project","text":"

            You inherited a .context/ directory. Get oriented fast: priority order, what to read first, how to ramp up.

            ","path":["Home"],"tags":[]},{"location":"home/#customization","level":2,"title":"Customization","text":"","path":["Home"],"tags":[]},{"location":"home/#steering-files","level":3,"title":"Steering Files","text":"

            Tell the assistant how to behave when a specific kind of prompt arrives.

            ","path":["Home"],"tags":[]},{"location":"home/#lifecycle-triggers","level":3,"title":"Lifecycle Triggers","text":"

            Make things happen at session boundaries: block dangerous tool calls, inject standup notes, log file saves.

            ","path":["Home"],"tags":[]},{"location":"home/#community","level":2,"title":"Community","text":"","path":["Home"],"tags":[]},{"location":"home/#ctx","level":3,"title":"#ctx","text":"

            We are the builders who care about durable context. Join the community. Hang out in IRC. Star ctx on GitHub.

            ","path":["Home"],"tags":[]},{"location":"home/#contributing","level":3,"title":"Contributing","text":"

            Development setup, project layout, and pull request process.

            ","path":["Home"],"tags":[]},{"location":"home/about/","level":1,"title":"About","text":"

            \"Creation, not code; Context, not prompts; Verification, not vibes.\"

            Read the ctx Manifesto →

            \"Without durable context, intelligence resets; with ctx, creation compounds.\"

            Without persistent memory, every session starts at zero; ctx makes sessions cumulative.

            Join the ctx Community →

            ","path":["Home","Introduction","About"],"tags":[]},{"location":"home/about/#what-is-ctx","level":2,"title":"What Is ctx?","text":"

            ctx (Context) is a file-based system that enables AI coding assistants to persist project knowledge across sessions. It lives in a .context/ directory in your repo.

            • A session is interactive.
            • ctx enables cognitive continuity.
            • Cognitive continuity enables durable, symbiotic-like human-AI workflows.

            Context Files

            Context files let AI tools remember decisions, conventions, and learnings:

            Context files are explicit and versionable contracts between you and your agents.

            ","path":["Home","Introduction","About"],"tags":[]},{"location":"home/about/#why-do-i-keep-re-explaining-my-codebase","level":2,"title":"Why Do I Keep Re-Explaining My Codebase?!?!","text":"

            You open a new AI session. The first thing you do is re-explain your project.

            Again.

            The architecture, the database choice, the naming conventions, the thing you tried last week that didn't work. You have said all of this before (maybe yesterday, maybe an hour ago), but the AI doesn't know that.

            • You explain the same architecture every session;
            • The AI suggests an approach you already rejected, again;
            • A decision you made three sessions ago gets relitigated from scratch;
            • You spend more time setting context than building features.

            This isn't AI failure: It is staleness.

            Without persistent memory, every session starts at zero.

            ","path":["Home","Introduction","About"],"tags":[]},{"location":"home/about/#day-and-night","level":3,"title":"Day and Night","text":"

            ctx gives your AI a memory that persists across sessions:

            Without ctxWith ctx
            Session 12: Monday morning\n\nAI:  \"I'll scaffold a MySQL schema for this.\"\nYou: \"We use PostgreSQL.\"\n\nAI:  \"For auth, session-based would be simplest.\"\nYou: \"We decided on JWT.\"\n\nAI:  \"Endpoints in camelCase?\"\nYou: \"snake_case! Like the rest of the API!!!!!!\"\n\nYou: *scrolls up to copy an existing endpoint*\n\n20 minutes spent restoring decisions that already exist.\n
            Session 12: Monday morning\n\nYou:  \"Let's continue the auth implementation.\"\nAI:   \"Per DECISIONS.md (2026-02-12): JWT, not sessions: \n       The project uses PostgreSQL with JWT\n       authentication. The API follows snake_case conventions.\n       Last session you completed the login endpoint: the\n       registration endpoint is next on TASKS.md. Want me to\n       start there?\"\n\n0 minutes re-explaining. Straight to building.\n
            ","path":["Home","Introduction","About"],"tags":[]},{"location":"home/about/#how-ctx-solves-this","level":3,"title":"How ctx Solves This","text":"

            ctx creates a .context/ directory in your project that stores structured knowledge files:

            File What It Remembers TASKS.md What you're working on and what's next DECISIONS.md Architectural choices and why you made them LEARNINGS.md Gotchas, bugs, things that didn't work CONVENTIONS.md Naming patterns, code style, project rules CONSTITUTION.md Hard rules the AI must never violate

            These files can version with your code in git:

            • They load automatically at the session start (via hooks in Claude Code, or manually with ctx agent for other tools).
            • The AI reads them, cites them, and builds on them, instead of asking you to start over.
              • And when it acts, it can point to the exact file and line that justifies the choice.

            Every decision you record, every lesson you capture, makes the next session smarter.

            ctx accumulates.

            Connect with ctx

            • Join the Community →: ask questions, share workflows, and help shape what comes next
            • Read the Blog →: real-world patterns, ponderings, and lessons learned from building ctx using ctx

            Ready to Get Started?

            • Getting Started →: full installation and setup
            • Your First Session →: step-by-step walkthrough from ctx init to verified recall
            ","path":["Home","Introduction","About"],"tags":[]},{"location":"home/common-workflows/","level":1,"title":"Common Workflows","text":"

            The commands below cover what you'll use most often:

            • recording context,
            • checking health,
            • browsing history,
            • and running loops.

            Each section is a self-contained snippet you can copy into your terminal.

            For deeper, step-by-step guides, see Recipes.

            ","path":["Home","Get Started","Common Workflows"],"tags":[]},{"location":"home/common-workflows/#track-context","level":2,"title":"Track Context","text":"

            Prefer Skills over Raw Commands

            When working with an AI agent, use /ctx-task-add, /ctx-decision-add, or /ctx-learning-add instead of raw ctx add commands. The agent automatically picks up session ID, branch, and commit hash from its context, so no manual flags are needed.

            # Add a task\nctx add task \"Implement user authentication\" \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n\n# Record a decision (full ADR fields required)\nctx add decision \"Use PostgreSQL for primary database\" \\\n  --context \"Need a reliable database for production\" \\\n  --rationale \"PostgreSQL offers ACID compliance and JSON support\" \\\n  --consequence \"Team needs PostgreSQL training\" \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n\n# Note a learning\nctx add learning \"Mock functions must be hoisted in Jest\" \\\n  --context \"Tests failed with undefined mock errors\" \\\n  --lesson \"Jest hoists mock calls to top of file\" \\\n  --application \"Place jest.mock() before imports\" \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n\n# Mark task complete\nctx task complete \"user auth\"\n
            ","path":["Home","Get Started","Common Workflows"],"tags":[]},{"location":"home/common-workflows/#leave-a-reminder-for-next-session","level":2,"title":"Leave a Reminder for Next Session","text":"

            Drop a note that surfaces automatically at the start of your next session:

            # Leave a reminder\nctx remind \"refactor the swagger definitions\"\n\n# Date-gated: don't surface until a specific date\nctx remind \"check CI after the deploy\" --after 2026-02-25\n\n# List pending reminders\nctx remind list\n\n# Dismiss reminders by ID (supports ranges)\nctx remind dismiss 1\nctx remind dismiss 3 5-7\n

            Reminders are relayed verbatim at session start by the check-reminders hook and repeat every session until you dismiss them.

            See Session Reminders for the full recipe.

            ","path":["Home","Get Started","Common Workflows"],"tags":[]},{"location":"home/common-workflows/#check-context-health","level":2,"title":"Check Context Health","text":"
            # Detect stale paths, missing files, potential secrets\nctx drift\n\n# See full context summary\nctx status\n
            ","path":["Home","Get Started","Common Workflows"],"tags":[]},{"location":"home/common-workflows/#browse-session-history","level":2,"title":"Browse Session History","text":"

            List and search past AI sessions from the terminal:

            ctx journal source --limit 5\n
            ","path":["Home","Get Started","Common Workflows"],"tags":[]},{"location":"home/common-workflows/#journal-site","level":3,"title":"Journal Site","text":"

            Import session transcripts to a browsable static site with search, navigation, and topic indices.

            The ctx journal command requires zensical (Python >= 3.10).

            zensical is a Python-based static site generator from the Material for MkDocs team.

            (why zensical?).

            If you don't have it on your system, install zensical once with pipx:

            # One-time setup\npipx install zensical\n

            Avoid pip install zensical

            pip install often fails: For example, on macOS, system Python installs a non-functional stub (zensical requires Python >= 3.10), and Homebrew Python blocks system-wide installs (PEP 668).

            pipx creates an isolated environment with the correct Python version automatically.

            ","path":["Home","Get Started","Common Workflows"],"tags":[]},{"location":"home/common-workflows/#import-and-serve","level":3,"title":"Import and Serve","text":"

            Then, import and serve:

            # Import all sessions to .context/journal/ (only new files)\nctx journal import --all\n\n# Generate and serve the journal site\nctx journal site --serve\n

            Open http://localhost:8000 to browse.

            To update after new sessions, run the same two commands again.

            ","path":["Home","Get Started","Common Workflows"],"tags":[]},{"location":"home/common-workflows/#safe-by-default","level":3,"title":"Safe by Default","text":"

            ctx journal import --all is safe by default:

            • It only imports new sessions and skips existing files.
            • Locked entries (via ctx journal lock) are always skipped by both import and enrichment skills.
            • If you add locked: true to frontmatter during enrichment, run ctx journal sync to propagate the lock state to .state.json.
            ","path":["Home","Get Started","Common Workflows"],"tags":[]},{"location":"home/common-workflows/#re-importing-existing-files","level":3,"title":"Re-Importing Existing Files","text":"

            Here is how you regenerate existing files.

            Backup your .context folder before regeneration, as this is a potentially destructive action.

            To re-import journal files, you need to explicitly opt-in using the --regenerate flag:

            Flag combination Frontmatter Body --regenerate Preserved Overwritten from source --regenerate --keep-frontmatter=false Overwritten Overwritten

            Regeneration Overwrites Body Edits

            --regenerate preserves your YAML frontmatter (tags, summary, enrichment metadata) but it replaces the Markdown body with a fresh import.

            Any manual edits you made to the transcript will be lost.

            Lock entries you want to protect first: ctx journal lock <session-id>.

            See Session Journal for the full pipeline including normalization and enrichment.

            ","path":["Home","Get Started","Common Workflows"],"tags":[]},{"location":"home/common-workflows/#scratchpad","level":2,"title":"Scratchpad","text":"

            Store short, sensitive one-liners in an encrypted scratchpad that travels with the project:

            # Write a note\nctx pad set db-password \"postgres://user:pass@localhost/mydb\"\n\n# Read it back\nctx pad get db-password\n\n# List all keys\nctx pad list\n

            The scratchpad is encrypted with a key stored at ~/.ctx/.ctx.key (outside the project, never committed).

            See Scratchpad for details.

            ","path":["Home","Get Started","Common Workflows"],"tags":[]},{"location":"home/common-workflows/#run-an-autonomous-loop","level":2,"title":"Run an Autonomous Loop","text":"

            Generate a script that iterates an AI agent until a completion signal is detected:

            ctx loop\nchmod +x loop.sh\n./loop.sh\n

            See Autonomous Loops for configuration and advanced usage.

            ","path":["Home","Get Started","Common Workflows"],"tags":[]},{"location":"home/common-workflows/#trace-commit-context","level":2,"title":"Trace Commit Context","text":"

            Link your git commits back to the decisions, tasks, and learnings that motivated them. Enable the hook once:

            # Install the git hook (one-time setup)\nctx trace hook enable\n

            From now on, every git commit automatically gets a ctx-context trailer linking it to relevant context. No extra steps needed; just use ctx add, ctx task complete, and commit as usual.

            # Later: why was this commit made?\nctx trace abc123\n\n# Recent commits with their context\nctx trace --last 10\n\n# Context trail for a specific file\nctx trace file src/auth.go\n\n# Manually tag a commit after the fact\nctx trace tag HEAD --note \"Hotfix for production outage\"\n

            To stop: ctx trace hook disable.

            See CLI Reference: trace for details.

            ","path":["Home","Get Started","Common Workflows"],"tags":[]},{"location":"home/common-workflows/#agent-session-start","level":2,"title":"Agent Session Start","text":"

            The first thing an AI agent should do at session start is discover where context lives:

            ctx system bootstrap\n

            This prints the resolved context directory, the files in it, and the operating rules. The CLAUDE.md template instructs the agent to run this automatically. See CLI Reference: bootstrap.

            ","path":["Home","Get Started","Common Workflows"],"tags":[]},{"location":"home/common-workflows/#the-two-skills-you-should-always-use","level":2,"title":"The Two Skills You Should Always Use","text":"

            Using /ctx-remember at session start and /ctx-wrap-up at session end are the highest-value skills in the entire catalog:

            # session begins:\n/ctx-remember\n... do work ...\n# before closing the session:\n/ctx-wrap-up\n

            Let's provide some context, because this is important:

            Although the agent will eventually discover your context through CLAUDE.md → AGENT_PLAYBOOK.md, /ctx-remember hydrates the full context up front (tasks, decisions, recent sessions) so the agent starts informed rather than piecing things together over several turns.

            /ctx-wrap-up is the other half: A structured review that captures learnings, decisions, and tasks before you close the window.

            Hooks like check-persistence remind you (the user) mid-session that context hasn't been saved in a while, but they don't trigger persistence automatically: You still have to act. Also, a CTRL+C can end things at any moment with no reliable \"before session end\" event.

            In short, /ctx-wrap-up is the deliberate checkpoint that makes sure nothing slips through. And /ctx-remember is its mirror skill to be used at session start.

            See Session Ceremonies for the full workflow.

            ","path":["Home","Get Started","Common Workflows"],"tags":[]},{"location":"home/common-workflows/#cli-commands-vs-ai-skills","level":2,"title":"CLI Commands vs. AI Skills","text":"

            Most ctx operations come in two flavors: a CLI command you run in your terminal and an AI skill (slash command) you invoke inside your coding assistant.

            Commands and skills are not interchangeable: Each has a distinct role.

            ctx CLI command ctx AI skill Runs where Your terminal Inside the AI assistant Speed Fast (milliseconds) Slower (LLM round-trip) Cost Free Consumes tokens and context Analysis Deterministic heuristics Semantic / judgment-based Best for Quick checks, scripting, CI Deep analysis, generation, workflow orchestration","path":["Home","Get Started","Common Workflows"],"tags":[]},{"location":"home/common-workflows/#paired-commands","level":3,"title":"Paired Commands","text":"

            These have both a CLI and a skill counterpart. Use the CLI for quick, deterministic checks; use the skill when you need the agent's judgment.

            CLI Skill When to prefer the skill ctx drift /ctx-drift Semantic analysis: catches meaning drift the CLI misses ctx status /ctx-status Interpreted summary with recommendations ctx add task /ctx-task-add Agent decomposes vague goals into concrete tasks ctx add decision /ctx-decision-add Agent drafts rationale and consequences from discussion ctx add learning /ctx-learning-add Agent extracts the lesson from a debugging session ctx add convention /ctx-convention-add Agent observes a repeated pattern and codifies it ctx task archive /ctx-archive Agent reviews which tasks are truly done ctx pad /ctx-pad Agent reads/writes scratchpad entries in conversation flow ctx journal /ctx-history Agent searches session history with semantic understanding ctx agent /ctx-agent Agent loads and acts on the context packet ctx loop /ctx-loop Agent tailors the loop script to your project ctx doctor /ctx-doctor Agent adds semantic analysis to structural checks ctx hook pause /ctx-pause Agent pauses hooks with session-aware reasoning ctx hook resume /ctx-resume Agent resumes hooks after a pause ctx remind /ctx-remind Agent manages reminders in conversation flow","path":["Home","Get Started","Common Workflows"],"tags":[]},{"location":"home/common-workflows/#ai-only-skills","level":3,"title":"AI-Only Skills","text":"

            These have no CLI equivalent. They require the agent's reasoning.

            Skill Purpose /ctx-remember Load context and present structured readback at session start /ctx-wrap-up End-of-session ceremony: persist learnings, decisions, tasks /ctx-next Suggest 1-3 concrete next actions from context /ctx-commit Commit with integrated context capture /ctx-reflect Pause and assess session progress /ctx-consolidate Merge overlapping learnings or decisions /ctx-prompt-audit Analyze prompting patterns for improvement /ctx-plan Stress-test an existing plan through adversarial interview /ctx-plan-import Import Claude Code plan files into project specs /ctx-implement Execute a plan step-by-step with verification /ctx-worktree Manage parallel agent worktrees /ctx-journal-enrich Add metadata, tags, and summaries to journal entries /ctx-journal-enrich-all Full journal pipeline: export if needed, then batch-enrich /ctx-blog Generate a blog post (zensical-flavored Markdown) /ctx-blog-changelog Generate themed blog post from commits between releases /ctx-architecture Build and maintain architecture maps (ARCHITECTURE.md, DETAILED_DESIGN.md)","path":["Home","Get Started","Common Workflows"],"tags":[]},{"location":"home/common-workflows/#cli-only-commands","level":3,"title":"CLI-Only Commands","text":"

            These are infrastructure: used in scripts, CI, or one-time setup.

            Command Purpose ctx init Initialize .context/ directory ctx load Output assembled context for piping ctx task complete Mark a task done by substring match ctx sync Reconcile context with codebase state ctx compact Consolidate and clean up context files ctx trace Show context behind git commits ctx trace hook Enable/disable commit context tracing hook ctx setup Generate AI tool integration config ctx watch Watch AI output and auto-apply context updates ctx serve Serve any zensical directory (default: journal) ctx permission snapshot Save settings as a golden image ctx permission restore Restore settings from golden image ctx journal site Generate browsable journal from exports ctx hook notify setup Configure webhook notifications ctx decision List and filter decisions ctx learning List and filter learnings ctx task List tasks, manage archival and snapshots ctx why Read the philosophy behind ctx ctx guide Quick-reference cheat sheet ctx site Site management commands ctx config Manage runtime configuration profiles ctx system System diagnostics and hook commands ctx completion Generate shell autocompletion scripts

            Rule of Thumb

            Quick check? Use the CLI.

            Need judgment? Use the skill.

            When in doubt, start with the CLI: It's free and instant.

            Escalate to the skill when heuristics aren't enough.

            Next Up: Context Files →: what each .context/ file does and how to use it

            See Also:

            • Recipes: targeted how-to guides for specific tasks
            • Knowledge Capture: patterns for recording decisions, learnings, and conventions
            • Context Health: keeping your .context/ accurate and drift-free
            • Session Archaeology: digging into past sessions
            • Task Management: tracking and completing work items
            ","path":["Home","Get Started","Common Workflows"],"tags":[]},{"location":"home/community/","level":1,"title":"#ctx","text":"

            Open source is better together.

            We are the builders who care about durable context, verifiable decisions, and human-AI workflows that compound over time.

            ","path":["Home","Community","#ctx"],"tags":[]},{"location":"home/community/#help-ctx-change-how-ai-remembers","level":2,"title":"Help ctx Change How AI Remembers","text":"

            If you like the idea, a star helps ctx reach engineers who run into context drift every day:

            Star ctx on GitHub ⭐

            ","path":["Home","Community","#ctx"],"tags":[]},{"location":"home/community/#ctx-you","level":2,"title":"ctx ♥️ You","text":"

            Join the community to ask questions, share feedback, and connect with other users:

            • Discord join the ctx Discord: Real-time discussion, field notes, and early ideas.
            • Read the ctx Source on GitHub: Issues, discussions, and contributions.
            ","path":["Home","Community","#ctx"],"tags":[]},{"location":"home/community/#want-to-contribute","level":2,"title":"Want to Contribute?","text":"

            Early adopters shape the conventions.

            ctx is free and open source software.

            Contributions are always welcome and appreciated.

            ","path":["Home","Community","#ctx"],"tags":[]},{"location":"home/community/#code-of-conduct","level":2,"title":"Code of Conduct","text":"

            Clear context requires respectful collaboration.

            ctx follows the Contributor Covenant.

            ","path":["Home","Community","#ctx"],"tags":[]},{"location":"home/configuration/","level":1,"title":"Configuration","text":"","path":["Home","Concepts","Configuration"],"tags":[]},{"location":"home/configuration/#configuration","level":2,"title":"Configuration","text":"

            ctx uses three layers of configuration. Each layer overrides the one below it:

            1. CLI flags: Per-invocation overrides (highest priority)
            2. Environment variables: Shell or CI/CD overrides
            3. The .ctxrc file: Project-level defaults (YAML)
            4. Built-in defaults: Hardcoded fallbacks (lowest priority)

            All settings are optional: If nothing is configured, ctx works out of the box with sensible defaults.

            ","path":["Home","Concepts","Configuration"],"tags":[]},{"location":"home/configuration/#the-ctxrc-file","level":2,"title":"The .ctxrc File","text":"

            The .ctxrc file is an optional YAML file placed in the project root (next to your .context/ directory). It lets you set project-level defaults that apply to every ctx command.

            ","path":["Home","Concepts","Configuration"],"tags":[]},{"location":"home/configuration/#location","level":3,"title":"Location","text":"
            my-project/\n├── .ctxrc              ← configuration file\n├── .context/\n│   ├── TASKS.md\n│   ├── DECISIONS.md\n│   └── ...\n└── src/\n

            ctx reads .ctxrc from the project root (i.e. the parent of CTX_DIR, or dirname(CTX_DIR)/.ctxrc). It does not walk up from CWD. That means whichever project you've activated via eval \"$(ctx activate)\" (or by exporting CTX_DIR directly), its paired .ctxrc is what governs the invocation. There is no global or user-level config file: configuration is always per-project.

            Contributors: Dev Configuration Profile

            The ctx repo ships two .ctxrc source profiles (.ctxrc.base and .ctxrc.dev). The working copy is gitignored and swapped between them via ctx config switch dev / ctx config switch base. See Contributing: Configuration Profiles.

            Using a Different .context Directory

            The context directory is declared via the CTX_DIR environment variable; not via .ctxrc. ctx does not walk the filesystem; every non-exempt command requires CTX_DIR to be set. Use eval \"$(ctx activate)\" to bind it for your shell. CTX_DIR must be an absolute path with .context as its basename.

            See Environment Variables below for details.

            ","path":["Home","Concepts","Configuration"],"tags":[]},{"location":"home/configuration/#full-reference","level":3,"title":"Full Reference","text":"

            A commented .ctxrc showing all options and their defaults:

            # .ctxrc: ctx runtime configuration\n# https://ctx.ist/configuration/\n#\n# All settings are optional. Missing values use defaults.\n# Priority: CLI flags > environment variables > .ctxrc > defaults\n#\n# token_budget: 8000\n# auto_archive: true\n# archive_after_days: 7\n# scratchpad_encrypt: true\n# event_log: false\n# entry_count_learnings: 30\n# entry_count_decisions: 20\n# convention_line_count: 200\n# injection_token_warn: 15000\n# context_window: 200000      # auto-detected for Claude Code; override for other tools\n# billing_token_warn: 0       # one-shot warning at this token count (0 = disabled)\n#\n# stale_age_days: 30      # days before drift flags a context file as stale (0 = disabled)\n# key_rotation_days: 90\n# task_nudge_interval: 5   # Edit/Write calls between task completion nudges\n#\n# notify:               # requires: ctx hook notify setup\n#   events:             # required: no events sent unless listed\n#     - loop\n#     - nudge\n#     - relay\n#\n# tool: \"\"              # Active AI tool: claude, cursor, cline, kiro, codex\n#\n# steering:             # Steering layer configuration\n#   dir: .context/steering\n#   default_inclusion: manual\n#   default_tools: []\n#\n# hooks:                # Hook system configuration\n#   dir: .context/hooks\n#   timeout: 10\n#   enabled: true\n#\n# provenance_required:  # Relax provenance flags for ctx add\n#   session_id: true    # Require --session-id (default: true)\n#   branch: true        # Require --branch (default: true)\n#   commit: true        # Require --commit (default: true)\n#\n# priority_order:\n#   - CONSTITUTION.md\n#   - TASKS.md\n#   - CONVENTIONS.md\n#   - ARCHITECTURE.md\n#   - DECISIONS.md\n#   - LEARNINGS.md\n#   - GLOSSARY.md\n#   - AGENT_PLAYBOOK.md\n
            ","path":["Home","Concepts","Configuration"],"tags":[]},{"location":"home/configuration/#option-reference","level":3,"title":"Option Reference","text":"Option Type Default Description token_budget int 8000 Default token budget for ctx agent and ctx load auto_archive bool true Auto-archive completed tasks during ctx compact archive_after_days int 7 Days before completed tasks are archived scratchpad_encrypt bool true Encrypt scratchpad with AES-256-GCM event_log bool false Enable local hook event logging to .context/state/events.jsonl entry_count_learnings int 30 Drift warning when LEARNINGS.md exceeds this entry count (0 = disable) entry_count_decisions int 20 Drift warning when DECISIONS.md exceeds this entry count (0 = disable) convention_line_count int 200 Drift warning when CONVENTIONS.md exceeds this line count (0 = disable) injection_token_warn int 15000 Warn when auto-injected context exceeds this token count (0 = disable) context_window int 200000 Context window size in tokens. Auto-detected for Claude Code (200k/1M); override for other AI tools billing_token_warn int 0 (off) One-shot warning when session tokens exceed this threshold (0 = disabled). For plans where tokens beyond an included allowance cost extra stale_age_days int 30 Days before ctx drift flags a context file as stale (0 = disable) key_rotation_days int 90 Days before encryption key rotation nudge task_nudge_interval int 5 Edit/Write calls between task completion nudges notify.events []string (all) Event filter for webhook notifications (empty = all) priority_order []string (see below) Custom file loading priority for context assembly tool string (empty) Active AI tool identifier (claude, cursor, cline, kiro, codex). 
Used by steering sync and hook dispatch steering.dir string .context/steering Steering files directory steering.default_inclusion string manual Default inclusion mode for new steering files (always, auto, manual) steering.default_tools []string (all) Default tool filter for new steering files (empty = all tools) hooks.dir string .context/hooks Hook scripts directory hooks.timeout int 10 Per-hook execution timeout in seconds hooks.enabled bool true Whether hook execution is enabled provenance_required.session_id bool true Require --session-id on ctx add for tasks, decisions, learnings provenance_required.branch bool true Require --branch on ctx add for tasks, decisions, learnings provenance_required.commit bool true Require --commit on ctx add for tasks, decisions, learnings

            Default priority order (used when priority_order is not set):

            1. CONSTITUTION.md
            2. TASKS.md
            3. CONVENTIONS.md
            4. ARCHITECTURE.md
            5. DECISIONS.md
            6. LEARNINGS.md
            7. GLOSSARY.md
            8. AGENT_PLAYBOOK.md

            See Context Files for the rationale behind this ordering.

            ","path":["Home","Concepts","Configuration"],"tags":[]},{"location":"home/configuration/#environment-variables","level":2,"title":"Environment Variables","text":"

            Environment variables override .ctxrc values but are overridden by CLI flags.

            Variable Description Equivalent .ctxrc key CTX_DIR Declare the context directory path (required, no fallback) (none) CTX_TOKEN_BUDGET Override the default token budget token_budget","path":["Home","Concepts","Configuration"],"tags":[]},{"location":"home/configuration/#examples","level":3,"title":"Examples","text":"
            # Use a shared context directory\nCTX_DIR=/shared/team-context ctx status\n\n# Increase token budget for a single run\nCTX_TOKEN_BUDGET=16000 ctx agent\n
            ","path":["Home","Concepts","Configuration"],"tags":[]},{"location":"home/configuration/#cli-global-flags","level":2,"title":"CLI Global Flags","text":"

            CLI flags have the highest priority and override both environment variables and .ctxrc settings. These flags are available on every ctx command.

            Flag Description --tool <name> Override active AI tool identifier (e.g. kiro, cursor) --version Show version and exit --help Show command help and exit","path":["Home","Concepts","Configuration"],"tags":[]},{"location":"home/configuration/#examples_1","level":3,"title":"Examples","text":"
            # Point to a different context directory inline:\nCTX_DIR=/path/to/project/.context ctx status\n
            ","path":["Home","Concepts","Configuration"],"tags":[]},{"location":"home/configuration/#priority-order","level":2,"title":"Priority Order","text":"

            When the same setting is configured in multiple layers, the highest-priority layer wins:

            CLI flags  >  Environment variables  >  .ctxrc  >  Built-in defaults\n(highest)                                          (lowest)\n

            The context directory itself is resolved differently: it lives outside this priority chain. CTX_DIR (env) must be declared; .ctxrc does not carry a fallback for it, and there is no built-in default. See Activating a Context Directory.

            Example resolution for token_budget:

            Layer Value Wins? CTX_TOKEN_BUDGET 4000 Yes .ctxrc 8000 No Default 8000 No","path":["Home","Concepts","Configuration"],"tags":[]},{"location":"home/configuration/#examples_2","level":2,"title":"Examples","text":"","path":["Home","Concepts","Configuration"],"tags":[]},{"location":"home/configuration/#external-context-directory","level":3,"title":"External .context Directory","text":"

            Store a project's context outside the project tree (useful when a repo is read-only, or when you want to keep notes adjacent rather than checked in). Declare the path via CTX_DIR:

            export CTX_DIR=/home/you/ctx-stores/my-project/.context\n

            One .context/ per project

            The parent of the context directory is the project root by contract: ctx sync, ctx drift, and the memory-drift hook all read the codebase from filepath.Dir(ContextDir()). Pointing two projects at the same .context/ directory will collide their journals, state, and secrets. To share knowledge (CONSTITUTION / CONVENTIONS / ARCHITECTURE) across projects, use ctx hub, not a shared .context/.

            ","path":["Home","Concepts","Configuration"],"tags":[]},{"location":"home/configuration/#custom-token-budget","level":3,"title":"Custom Token Budget","text":"

            Increase the token budget for projects with large context:

            # .ctxrc\ntoken_budget: 16000\n

            This affects the default budget for ctx agent and ctx load. You can still override per-invocation with ctx agent --budget 4000.

            ","path":["Home","Concepts","Configuration"],"tags":[]},{"location":"home/configuration/#disabled-scratchpad-encryption","level":3,"title":"Disabled Scratchpad Encryption","text":"

            Turn off encryption for the scratchpad (useful in ephemeral environments where key management is unnecessary):

            # .ctxrc\nscratchpad_encrypt: false\n

            Unencrypted Scratchpads Store Secrets in Plaintext

            Only disable encryption if you understand the security implications.

            The scratchpad may contain sensitive data such as API keys, database URLs, or deployment credentials.

            ","path":["Home","Concepts","Configuration"],"tags":[]},{"location":"home/configuration/#custom-priority-order","level":3,"title":"Custom Priority Order","text":"

            Reorder context files to prioritize architecture over conventions:

            # .ctxrc\npriority_order:\n  - CONSTITUTION.md\n  - TASKS.md\n  - ARCHITECTURE.md\n  - DECISIONS.md\n  - CONVENTIONS.md\n  - LEARNINGS.md\n  - GLOSSARY.md\n  - AGENT_PLAYBOOK.md\n

            Files not listed in priority_order receive the lowest priority (100). The order affects ctx agent, ctx load, and drift's file-priority calculations.

            ","path":["Home","Concepts","Configuration"],"tags":[]},{"location":"home/configuration/#billing-token-threshold","level":3,"title":"Billing Token Threshold","text":"

            Get a one-shot warning when your session crosses a token threshold where extra charges begin (e.g., Claude Pro includes 200k tokens; beyond that costs extra):

            # .ctxrc\nbilling_token_warn: 180000   # warn before hitting the 200k paid boundary\n

            The warning fires once per session the first time token usage exceeds the threshold. Set to 0 (or omit) to disable.

            ","path":["Home","Concepts","Configuration"],"tags":[]},{"location":"home/configuration/#adjusted-drift-thresholds","level":3,"title":"Adjusted Drift Thresholds","text":"

            Raise or lower the entry-count thresholds that trigger drift warnings:

            # .ctxrc\nentry_count_learnings: 50   # warn above 50 learnings (default: 30)\nentry_count_decisions: 10   # warn above 10 decisions (default: 20)\nconvention_line_count: 300  # warn above 300 lines (default: 200)\n

            Set any threshold to 0 to disable that specific check.

            ","path":["Home","Concepts","Configuration"],"tags":[]},{"location":"home/configuration/#webhook-notifications","level":3,"title":"Webhook Notifications","text":"

            Get notified when loops complete, hooks fire, or agents reach milestones:

            # Configure the webhook URL (encrypted, safe to commit)\nctx hook notify setup\n\n# Test delivery\nctx hook notify test\n

            Filter which events reach your webhook:

            # .ctxrc\nnotify:\n  events:\n    - loop      # loop completion/max-iteration\n    - nudge     # VERBATIM relay hooks fired\n    # - relay   # all hook output (verbose, for debugging)\n    # - heartbeat  # every-prompt session-alive signal\n

            Notifications are opt-in: No events are sent unless explicitly listed.

            See Webhook Notifications for a step-by-step recipe.

            ","path":["Home","Concepts","Configuration"],"tags":[]},{"location":"home/configuration/#hook-message-overrides","level":2,"title":"Hook Message Overrides","text":"

            Hook messages control what text hooks emit when they fire. Each message can be overridden per-project by placing a text file at the matching path under .context/:

            .context/hooks/messages/{hook}/{variant}.txt\n

            The override takes priority over the embedded default compiled into the ctx binary. An empty file silences the message while preserving the hook's logic (counting, state tracking, cooldowns).

            Use ctx hook message to discover and manage overrides:

            ctx hook message list                      # see all messages\nctx hook message show qa-reminder gate     # view the current template\nctx hook message edit qa-reminder gate     # copy default for editing\nctx hook message reset qa-reminder gate    # revert to default\n

            See Customizing Hook Messages for detailed examples including Python, JavaScript, and silence configurations.

            ","path":["Home","Concepts","Configuration"],"tags":[]},{"location":"home/configuration/#agent-bootstrapping","level":2,"title":"Agent Bootstrapping","text":"

            AI agents need to know the resolved context directory at session start. The ctx system bootstrap command prints the context path, file list, and operating rules in both text and JSON formats:

            ctx system bootstrap          # text output for agents\nctx system bootstrap -q       # just the context directory path\nctx system bootstrap --json   # structured output for automation\n

            The CLAUDE.md template instructs the agent to run this as its first action. Every nudge (context checkpoint, persistence reminder, etc.) also includes a Context: <dir> footer that re-anchors the agent to the correct directory throughout the session.

            This replaces the previous approach of hardcoding .context/ paths in agent instructions.

            See CLI Reference: bootstrap for full details.

            See also: CLI Reference | Context Files | Scratchpad

            ","path":["Home","Concepts","Configuration"],"tags":[]},{"location":"home/context-files/","level":1,"title":"Context Files","text":"","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#context","level":2,"title":".context/","text":"

            Each context file in .context/ serves a specific purpose.

            Files are designed to be human-readable, AI-parseable, and token-efficient.

            ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#file-overview","level":2,"title":"File Overview","text":"

            The core context files live directly under .context/. They are the substrate ctx reads in priority order when assembling the agent context packet:

            File Purpose Priority CONSTITUTION.md Hard rules that must NEVER be violated 1 (highest) TASKS.md Current and planned work 2 CONVENTIONS.md Project patterns and standards 3 ARCHITECTURE.md System overview and components 4 DECISIONS.md Architectural decisions with rationale 5 LEARNINGS.md Lessons learned, gotchas, tips 6 GLOSSARY.md Domain terms and abbreviations 7 AGENT_PLAYBOOK.md Instructions for AI tools 8 (lowest)

            Two subdirectories under .context/ are implementation details that are user-editable but not part of the priority read order:

            • .context/templates/: format templates for ctx add decision and ctx add learning. See templates below.
            • .context/steering/: behavioral rules with YAML frontmatter that get synced into each AI tool's native config. See steering below, and the full Steering files page for the design and workflow.
            ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#outside-context","level":3,"title":"Outside .context/","text":"

            Two other moving parts are often confused with context files but are not under .context/:

            • Skills live in .claude/skills/ (project-local) or are provided by the installed ctx plugin. A typical project doesn't see the plugin's skills at all; they ride with the plugin and are owned by its update cycle. See ctx skill and Skills reference.
            • Hooks: Claude Code PreToolUse/PostToolUse/UserPromptSubmit entries configured in .claude/settings.json or shipped by a plugin. The ctx plugin registers its own hooks automatically; a typical project does not author hooks by hand, and any local edits to plugin-owned hook files will be overridden on the next plugin update. If you need to customize behavior, edit your own project settings, not the plugin's files. See Hook sequence diagrams.
            ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#read-order-rationale","level":2,"title":"Read Order Rationale","text":"

            The priority order follows a logical progression for AI tools:

            1. CONSTITUTION.md: Inviolable rules first. The AI tool must know what it cannot do before attempting anything.
            2. TASKS.md: Current work items. What the AI tool should focus on.
            3. CONVENTIONS.md: How to write code. Patterns and standards to follow when implementing tasks.
            4. ARCHITECTURE.md: System structure. Understanding of components and boundaries before making changes.
            5. DECISIONS.md: Historical context. Why things are the way they are, to avoid re-debating settled decisions.
            6. LEARNINGS.md: Gotchas and tips. Lessons from past work that inform the current implementation.
            7. GLOSSARY.md: Reference material. Domain terms and abbreviations for lookup as needed.
            8. AGENT_PLAYBOOK.md: Meta instructions last. How to use this context system itself. Loaded last because the agent should understand the content (rules, tasks, patterns) before the operating manual.
            ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#constitutionmd","level":2,"title":"CONSTITUTION.md","text":"

            Purpose: Define hard invariants: Rules that must NEVER be violated, regardless of the task.

            AI tools read this first and should refuse tasks that violate these rules.

            ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#structure","level":3,"title":"Structure","text":"
            # Constitution\n\nThese rules are INVIOLABLE. If a task requires violating these, the task \nis wrong.\n\n## Security Invariants\n\n* [ ] Never commit secrets, tokens, API keys, or credentials\n* [ ] Never store customer/user data in context files\n* [ ] Never disable security linters without documented exception\n\n## Quality Invariants\n\n* [ ] All code must pass tests before commit\n* [ ] No `any` types in TypeScript without documented reason\n* [ ] No TODO comments in main branch (*move to `TASKS.md`*)\n\n## Process Invariants\n\n* [ ] All architectural changes require a decision record\n* [ ] Breaking changes require version bump\n* [ ] Generated files are never committed\n
            ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#guidelines","level":3,"title":"Guidelines","text":"
            • Keep rules minimal and absolute
            • Each rule should be enforceable (can verify compliance)
            • Use checkbox format for clarity
            • Never compromise on these rules
            ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#tasksmd","level":2,"title":"TASKS.md","text":"

            Purpose: Track current work, planned work, and blockers.

            ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#structure_1","level":3,"title":"Structure","text":"

            Tasks are organized by Phase: logical groupings that preserve order and enable replay.

            Tasks stay in their Phase permanently; status is tracked via checkboxes and inline tags.

            # Tasks\n\n## Phase 1: Initial Setup\n\n* [x] Set up project structure\n* [x] Configure linting and formatting\n* [ ] Add CI/CD pipeline `#in-progress`\n\n## Phase 2: Core Features\n\n* [ ] Implement user authentication `#priority:high`\n* [ ] Add API rate limiting `#priority:medium`\n  * Blocked by: Need to finalize auth first\n\n## Backlog\n\n* [ ] Performance optimization `#priority:low`\n* [ ] Add metrics dashboard `#priority:deferred`\n

            Key principles:

            • Tasks never move between sections: mark as [x] or [-] in place
            • Use #in-progress inline tag to indicate current work
            • Phase headers provide structure and replay order
            • Backlog section for unscheduled work
            ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#tags","level":3,"title":"Tags","text":"

            Use inline backtick-wrapped tags for metadata:

            Tag Values Purpose #priority high, medium, low Task urgency #area core, cli, docs, tests Codebase area #estimate 1h, 4h, 1d Time estimate (optional) #in-progress (none) Currently being worked on

            Lifecycle tags (for session correlation):

            Tag Format When to add #added YYYY-MM-DD-HHMMSS Auto-added by ctx add task #started YYYY-MM-DD-HHMMSS When beginning work on the task

            These timestamps help correlate tasks with session files and track which session started vs completed work.

            ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#status-markers","level":3,"title":"Status Markers","text":"Marker Meaning [ ] Pending [x] Completed [-] Skipped (include reason)","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#guidelines_1","level":3,"title":"Guidelines","text":"
            • Never delete tasks; mark as [x] completed or [-] skipped
            • Never move tasks between sections; use inline tags for status
            • Use ctx task archive periodically to move completed tasks to archive
            • Mark current work with #in-progress inline tag
            ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#decisionsmd","level":2,"title":"DECISIONS.md","text":"

            Purpose: Record architectural decisions with rationale so they don't get re-debated.

            ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#structure_2","level":3,"title":"Structure","text":"
            # Decisions\n\n## [YYYY-MM-DD] Decision Title\n\n**Status**: Accepted | Superseded | Deprecated\n\n**Context**: What situation prompted this decision?\n\n**Decision**: What was decided?\n\n**Rationale**: Why was this the right choice?\n\n**Consequence**: What are the implications?\n\n**Alternatives Considered**:\n* Alternative A: Why rejected\n* Alternative B: Why rejected\n
            ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#example","level":3,"title":"Example","text":"
            ## [2025-01-15] Use TypeScript Strict Mode\n\n**Status**: Accepted\n\n**Context**: Starting a new project, need to choose the type-checking level.\n\n**Decision**: Enable TypeScript strict mode with all strict flags.\n\n**Rationale**: Catches more bugs at compile time. Team has experience\nwith strict mode. Upfront cost pays off in reduced runtime errors.\n\n**Consequence**: More verbose type annotations required. Some\nthird-party libraries need type assertions.\n\n**Alternatives Considered**:\n- Basic TypeScript: Rejected because it misses null checks\n- JavaScript with JSDoc: Rejected because tooling support is weaker\n
            ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#status-values","level":3,"title":"Status Values","text":"Status Meaning Accepted Current, active decision Superseded Replaced by newer decision (link to it) Deprecated No longer relevant","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#learningsmd","level":2,"title":"LEARNINGS.md","text":"

            Purpose: Capture lessons learned, gotchas, and tips that shouldn't be forgotten.

            ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#structure_3","level":3,"title":"Structure","text":"
            # Learnings\n\n## Category Name\n\n### Learning Title\n\n**Discovered**: YYYY-MM-DD\n\n**Context**: When/how was this learned?\n\n**Lesson**: What's the takeaway?\n\n**Application**: How should this inform future work?\n
            ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#example_1","level":3,"title":"Example","text":"
            ## Testing\n\n### Vitest Mocks Must Be Hoisted\n\n**Discovered**: 2025-01-15\n\n**Context**: Tests were failing intermittently when mocking fs module.\n\n**Lesson**: Vitest requires `vi.mock()` calls to be hoisted to the\ntop of the file. Dynamic mocks need `vi.doMock()` instead.\n\n**Application**: Always use `vi.mock()` at file top. Use `vi.doMock()`\nonly when mock needs runtime values.\n
            ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#categories","level":3,"title":"Categories","text":"

            Organize learnings by topic:

            • Testing
            • Build & Deploy
            • Performance
            • Security
            • Third-Party Libraries
            • Git and Workflow
            ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#conventionsmd","level":2,"title":"CONVENTIONS.md","text":"

            Purpose: Document project patterns, naming conventions, and standards.

            ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#structure_4","level":3,"title":"Structure","text":"
            # Conventions\n\n## Naming\n\n* **Files**: kebab-case for all source files\n* **Components**: PascalCase for React components\n* **Functions**: camelCase, verb-first (getUser, parseConfig)\n* **Constants**: SCREAMING_SNAKE_CASE\n\n## Patterns\n\n### Pattern Name\n\n**When to use**: Situation description\n\n**Implementation**:\n// in triple backticks\n// Example code\n\n**Why**: Rationale for this pattern\n
            ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#guidelines_2","level":3,"title":"Guidelines","text":"
            • Include concrete examples
            • Explain the \"why\" not just the \"what\"
            • Keep patterns minimal: Only document what's non-obvious
            ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#architecturemd","level":2,"title":"ARCHITECTURE.md","text":"

            Purpose: Provide system overview and component relationships.

            ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#structure_5","level":3,"title":"Structure","text":"
            # Architecture\n\n## Overview\n\nBrief description of what the system does and how it's organized.\n\n## Components\n\n### Component Name\n\n**Responsibility**: What this component does\n\n**Dependencies**: What it depends on\n\n**Dependents**: What depends on it\n\n**Key Files**:\n* path/to/file.ts: Description\n\n## Data Flow\n\nDescription or diagram of how data moves through the system.\n\n## Boundaries\n\nWhat's in scope vs out of scope for this codebase.\n
            ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#guidelines_3","level":3,"title":"Guidelines","text":"
            • Keep diagrams simple (Mermaid works well)
            • Focus on boundaries and interfaces
            • Update when major structural changes occur
            ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#glossarymd","level":2,"title":"GLOSSARY.md","text":"

            Purpose: Define domain terms, abbreviations, and project vocabulary.

            ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#structure_6","level":3,"title":"Structure","text":"
            # Glossary\n\n## Domain Terms\n\n### Term Name\n\n**Definition**: What it means in this project's context\n\n**Not to be confused with**: Similar terms that mean different things\n\n**Example**: How it's used\n\n## Abbreviations\n\n| Abbrev | Expansion                     | Context                |\n|--------|-------------------------------|------------------------|\n| ADR    | Architectural Decision Record | Decision documentation |\n| SUT    | System Under Test             | Testing                |\n
            ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#guidelines_4","level":3,"title":"Guidelines","text":"
            • Define project-specific meanings
            • Clarify potentially ambiguous terms
            • Include abbreviations used in code or docs
            ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#agent_playbookmd","level":2,"title":"AGENT_PLAYBOOK.md","text":"

            Purpose: Explicit instructions for how AI tools should read, apply, and update context.

            ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#key-sections","level":3,"title":"Key Sections","text":"

            Read Order: Priority order for loading context files

            When to Update: Events that trigger context updates

            How to Avoid Hallucinating Memory: Critical rules:

            1. Never assume: If not in files, you don't know it
            2. Never invent history: Don't claim \"we discussed\" without evidence
            3. Verify before referencing: Search files before citing
            4. When uncertain, say so
            5. Trust files over intuition

            Context Update Commands: Format for automated updates via ctx watch:

            <context-update type=\"task\">Implement rate limiting</context-update>\n<context-update type=\"complete\">user auth</context-update>\n<context-update type=\"learning\"\n  context=\"Debugging hooks\"\n  lesson=\"Hooks receive JSON via stdin\"\n  application=\"Parse JSON stdin with the host language\"\n>Hook Input Format</context-update>\n<context-update type=\"decision\"\n  context=\"Need a caching layer\"\n  rationale=\"Redis is fast and team has experience\"\n  consequence=\"Must provision Redis infrastructure\"\n>Use Redis for caching</context-update>\n

            See Integrations for full documentation.

            ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#templates","level":2,"title":"templates/","text":"

            Location: .context/templates/. Status: implementation detail, user-editable.

            Purpose: Format templates for ctx add decision and ctx add learning. These control the structure of new entries appended to DECISIONS.md and LEARNINGS.md.

            ctx init deploys two starter templates:

            • decision.md: sections Context, Rationale, Consequence
            • learning.md: sections Context, Lesson, Application
            ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#customizing","level":3,"title":"Customizing","text":"

            Edit the templates directly. Changes take effect immediately on the next ctx add command. For example, to add a \"References\" section to all new decisions, edit .context/templates/decision.md.

            Templates are committed to git, so customizations are shared with the team.

            ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#steering","level":2,"title":"steering/","text":"

            Location: .context/steering/. Status: implementation detail, user-editable.

            Purpose: Behavioral rules with YAML frontmatter that tell an AI assistant how to behave when a specific kind of prompt arrives. Unlike the core context files (which describe what the project is), steering files describe what to do and ride alongside the prompt through the AI tool's native rule pipeline (Claude Code, Cursor, Kiro, Cline). ctx matches steering files to prompts and syncs them out to each tool's config.

            ctx init scaffolds four foundation files:

            • product.md: who this project serves and why
            • tech.md: the technology stack and its constraints
            • structure.md: how the code is organized
            • workflow.md: how work moves through the system

            Each file carries YAML frontmatter describing when it applies (always, matching prompts, or manually referenced) and what tool scope it covers. The foundation files use inclusion: always by default so every session picks them up.

            ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#customizing_1","level":3,"title":"Customizing","text":"

            Edit the files directly. Add your own steering files with ctx steering add, preview the match set with ctx steering preview, and run ctx steering sync to push them into each AI tool's config after changes. Steering files are committed to git, so they're shared with the team.

            For the design rationale, the full inclusion/priority model, and the end-to-end sync workflow, see the dedicated Steering files page.

            ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#parsing-rules","level":2,"title":"Parsing Rules","text":"

            All context files follow these conventions:

            1. Headers define structure: # for title, ## for sections, ### for items
            2. Bold keys for fields: **Key**: followed by value
            3. Code blocks are literal: Never parse code block content as structure
            4. Lists are ordered: Items appear in priority/chronological order
            5. Tags are inline: Backtick-wrapped tags like #priority:high
            ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#further-reading","level":2,"title":"Further Reading","text":"
            • Refactoring with Intent: how persistent context prevents drift during refactoring sessions
            ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/context-files/#token-efficiency","level":2,"title":"Token Efficiency","text":"

            Keep context files concise:

            • Use abbreviations in tags, not prose;
            • Omit obvious words (\"The,\" \"This\");
            • Prefer bullet points over paragraphs;
            • Keep examples minimal but illustrative;
            • Archive old completed items periodically.

            Next Up: Prompting Guide →: effective prompts for AI sessions with ctx

            ","path":["Home","Concepts","Context Files"],"tags":[]},{"location":"home/contributing/","level":1,"title":"Contributing","text":"","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#development-setup","level":2,"title":"Development Setup","text":"","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#prerequisites","level":3,"title":"Prerequisites","text":"
            • Go (version defined in go.mod)
            • Claude Code
            • Git
            • GNU Make
            • Zensical
            ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#1-fork-or-clone-the-repository","level":3,"title":"1. Fork (or Clone) the Repository","text":"
            # Fork on GitHub, then:\ngit clone https://github.com/<you>/ctx.git\ncd ctx\n\n# Or, if you have push access:\ngit clone https://github.com/ActiveMemory/ctx.git\ncd ctx\n
            ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#2-build-and-install-the-binary","level":3,"title":"2. Build and Install the Binary","text":"
            make build\nsudo make install\n

            This compiles the ctx binary and places it in /usr/local/bin/.

            ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#3-install-the-plugin-from-your-local-clone","level":3,"title":"3. Install the Plugin from Your Local Clone","text":"

            The repository ships a Claude Code plugin under internal/assets/claude/. Point Claude Code at your local copy so that skills and hooks reflect your working tree: no reinstall needed after edits:

            1. Launch claude;
            2. Type /plugin and press Enter;
            3. Select Marketplaces → Add Marketplace
            4. Enter the absolute path to the root of your clone, e.g. ~/WORKSPACE/ctx (this is where .claude-plugin/marketplace.json lives: it points Claude Code to the actual plugin in internal/assets/claude);
            5. Back in /plugin, select Install and choose ctx.

            Claude Code Caches Plugin Files

            Even though the marketplace points at a directory on disk, Claude Code caches skills and hooks. After editing files under internal/assets/claude/, clear the cache and restart:

            make plugin-reload   # then restart Claude Code\n

            See Skill or Hook Changes for details.

            ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#4-verify","level":3,"title":"4. Verify","text":"
            ctx --version       # binary is in PATH\nclaude /plugin list # plugin is installed\n

            You should see the ctx plugin listed, sourced from your local path.

            ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#project-layout","level":2,"title":"Project Layout","text":"
            ctx/\n├── cmd/ctx/            # CLI entry point\n├── internal/\n│   ├── assets/claude/  # ← Claude Code plugin (skills, hooks)\n│   ├── bootstrap/      # Project initialization templates\n│   ├── claude/         # Claude Code integration helpers\n│   ├── cli/            # Command implementations\n│   ├── config/         # Configuration loading\n│   ├── context/        # Core context logic\n│   ├── crypto/         # Scratchpad encryption\n│   ├── drift/          # Drift detection\n│   ├── index/          # Context file indexing\n│   ├── journal/        # Journal site generation\n│   ├── memory/         # Memory bridge (discover, mirror, import, publish)\n│   ├── notify/         # Webhook notifications\n│   ├── rc/             # .ctxrc parsing\n│   ├── journal/        # Session history, parsers, and state\n│   ├── sysinfo/        # System resource monitoring\n│   ├── task/           # Task management\n│   └── validation/     # Input validation\n├── .claude/\n│   └── skills/         # Dev-only skills (not distributed)\n├── assets/             # Static assets (banners, logos)\n├── docs/               # Documentation site source\n├── editors/            # Editor extensions (VS Code)\n├── examples/           # Example configurations\n├── hack/               # Build scripts\n├── specs/              # Feature specifications\n└── .context/           # ctx's own context (dogfooding)\n
            ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#skills-two-directories-one-rule","level":3,"title":"Skills: Two Directories, One Rule","text":"Directory What lives here Distributed to users? internal/assets/claude/skills/ The 39 ctx-* skills that ship with the plugin Yes .claude/skills/ Dev-only skills (release, QA, backup, etc.) No

            internal/assets/claude/skills/ is the single source of truth for user-facing skills. If you are adding or modifying a ctx-* skill, edit it there.

            .claude/skills/ holds skills that only make sense inside this repository (release automation, QA checks, backup scripts). These are never distributed to users.

            ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#dev-only-skills-reference","level":4,"title":"Dev-Only Skills Reference","text":"Skill When to use /_ctx-absorb Merge deltas from a parallel worktree or separate checkout /_ctx-audit Detect code-level drift after YOLO sprints or before releases /_ctx-qa Run QA checks before committing /_ctx-release Run the full release process /_ctx-release-notes Generate release notes for dist/RELEASE_NOTES.md /_ctx-alignment-audit Audit doc claims against agent instructions /_ctx-update-docs Check docs/code consistency after changes /_ctx-command-audit Audit CLI surface after renames, moves, or deletions

            Five skills previously in this list have been promoted to bundled plugin skills and are now available to all ctx users: /ctx-brainstorm, /ctx-link-check, /ctx-permission-sanitize, /ctx-skill-create, /ctx-spec.

            ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#how-to-add-things","level":2,"title":"How to Add Things","text":"","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#adding-a-new-cli-command","level":3,"title":"Adding a New CLI Command","text":"
            1. Create a package under internal/cli/<name>/ with doc.go, cmd.go, and run.go;
            2. Implement Cmd() *cobra.Command as the entry point;
            3. Add Use* and DescKey* constants in internal/config/embed/cmd/<name>.go;
            4. Add command descriptions in internal/assets/commands/commands.yaml;
            5. Add examples in internal/assets/commands/examples.yaml;
            6. Add flag descriptions in internal/assets/commands/flags.yaml;
            7. Register the command in internal/bootstrap/group.go (add import + entry in the appropriate group function);
            8. Create an output package at internal/write/<name>/ for all user-facing output (see Package Taxonomy);
            9. Create error constructors at internal/err/<name>/ for domain-specific errors;
            10. Add tests in the same package (<name>_test.go);
            11. Add a doc page at docs/cli/<name>.md and update docs/cli/index.md;
            12. Add the page to zensical.toml nav.

            Pattern to follow: internal/cli/pad/pad.go (parent with subcommands) or internal/cli/drift/ (single command).

            ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#package-taxonomy","level":3,"title":"Package Taxonomy","text":"

            ctx separates concerns into a strict package taxonomy. Knowing where things go prevents code review friction and keeps the AST lint tests happy.

            ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#output-internalwrite","level":4,"title":"Output: internal/write/","text":"

            Every CLI command's user-facing output lives in its own sub-package under internal/write/<domain>/. Output functions accept *cobra.Command and call cmd.Println(...), never fmt.Print* directly. All text strings are loaded from YAML via desc.Text(text.DescKey*), never inline.

            internal/write/add/add.go       # output for ctx add\ninternal/write/stat/stat.go     # output for ctx usage\ninternal/write/resource/        # output for ctx sysinfo\n

            Exception: write/rc/ writes to os.Stderr because rc loads before cobra is initialized.

            ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#errors-internalerr","level":4,"title":"Errors: internal/err/","text":"

            Domain-specific error constructors live under internal/err/<domain>/. Each package mirrors the write structure. Functions return error (never custom error types) and load messages from YAML via desc.Text(text.DescKey*).

            internal/err/add/add.go         # errors for ctx add\ninternal/err/config/config.go   # errors for configuration\ninternal/err/cli/cli.go         # errors for CLI argument validation\n
            ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#config-constants-internalconfig","level":4,"title":"Config Constants: internal/config/","text":"

            Pure-constant leaf packages with zero internal dependencies (stdlib only). Over 60 sub-packages, organized by domain. See internal/config/README.md for the full decision tree.

            What you're adding Where it goes File names, extensions, paths config/file/, config/dir/ Regex patterns config/regex/ CLI flag names (--flag-name) config/flag/flag.go Flag description YAML keys config/embed/flag/<cmd>.go Command Use/DescKey strings config/embed/cmd/<cmd>.go User-facing text YAML keys config/embed/text/<domain>.go Time durations, thresholds config/<domain>/","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#the-assets-pipeline","level":4,"title":"The Assets Pipeline","text":"

            User-facing text flows through a three-level chain:

            1. Go constant (config/embed/text/) defines a string key: DescKeyWriteAddedTo = \"write.added-to\"
            2. Call site resolves it: desc.Text(text.DescKeyWriteAddedTo)
            3. YAML (internal/assets/commands/text/write.yaml) holds the actual text: write.added-to: { short: \"Added to %s\" }

            The same pattern applies to command descriptions (commands.yaml), flag descriptions (flags.yaml), and examples (examples.yaml). The TestDescKeyYAMLLinkage test verifies every constant resolves to a non-empty YAML value.

            ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#adding-a-new-session-parser","level":3,"title":"Adding a New Session Parser","text":"

            The journal system uses a SessionParser interface. To add support for a new AI tool (e.g. Aider, Cursor):

            1. Create internal/journal/parser/<tool>.go;
            2. Implement parsing logic that returns []*Session;
            3. Register the parser in FindSessions() / FindSessionsForCWD();
            4. Use config.Tool* constants for the tool identifier;
            5. Add test fixtures and parser tests.

            Pattern to follow: the Claude Code JSONL parser in internal/journal/parser/.

            Multilingual Session Headers

            The Markdown parser recognizes session header prefixes configured via session_prefixes in .ctxrc (default: Session:). To support a new language, users add a prefix to their .ctxrc - no code change needed. New parser implementations can use rc.SessionPrefixes() if they also need prefix-based header detection.

            ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#adding-a-bundled-skill","level":3,"title":"Adding a Bundled Skill","text":"
            1. Create internal/assets/claude/skills/<skill-name>/SKILL.md;
            2. Follow the skill format: trigger, negative triggers, steps, quality gate;
            3. Run make plugin-reload and restart Claude Code to test;
            4. Add a Skill entry to .claude-plugin/plugin.json if user-invocable;
            5. Document in docs/reference/skills.md.

            Pattern to follow: any skill in internal/assets/claude/skills/ctx-status/.

            ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#test-expectations","level":3,"title":"Test Expectations","text":"
            • Unit tests: colocated with source (foo.go → foo_test.go);
            • Test helpers: use t.Helper() so failures point to callers;
            • HOME isolation: use t.TempDir() + t.Setenv(\"HOME\", ...) for tests that touch ~/.claude/ or ~/.ctx/;
            • rc.Reset(): call after os.Chdir in tests that change working directory (rc caches on first access);
            • No network: all tests run offline, use fixtures.

            Run make test before submitting. Target: no failures, no skips.

            ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#day-to-day-workflow","level":2,"title":"Day-to-Day Workflow","text":"","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#go-code-changes","level":3,"title":"Go Code Changes","text":"

            After modifying Go source files, rebuild and reinstall:

            make build && sudo make install\n

            The ctx binary is statically compiled. There is no hot reload. You must rebuild for Go changes to take effect.

            ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#skill-or-hook-changes","level":3,"title":"Skill or Hook Changes","text":"

            Edit files under internal/assets/claude/skills/ or internal/assets/claude/hooks/.

            Claude Code caches plugin files, so edits aren't picked up automatically.

            Clear the cache and restart:

            make plugin-reload   # nukes ~/.claude/plugins/cache/activememory-ctx/\n# then restart Claude Code\n

            The plugin will be re-installed from your local marketplace on startup. No version bump is needed during development.

            Version Bumps Are for Releases, Not Iteration

            Only bump VERSION, plugin.json, and marketplace.json when cutting a release. During development, make plugin-reload is all you need.

            ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#configuration-profiles","level":3,"title":"Configuration Profiles","text":"

            The repo ships two .ctxrc source profiles. The working copy (.ctxrc) is gitignored and swapped between them:

            File Purpose .ctxrc.base Golden baseline: all defaults, no logging .ctxrc.dev Dev profile: notify events enabled, verbose logging .ctxrc Working copy (gitignored: copied from one of the above)

            Use ctx commands to switch:

            ctx config switch dev      # switch to dev profile\nctx config switch base     # switch to base profile\nctx config status          # show which profile is active\n

            After cloning, run ctx config switch dev to get started with full logging.

            See Configuration for the full .ctxrc option reference.

            ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#backups","level":3,"title":"Backups","text":"

            ctx does not ship a backup command. File-level backup is an OS / infrastructure concern; ctx hub handles the cross-machine knowledge persistence that matters most. For everything else, see Backup Strategy: rsync, Time Machine, Borg, or whichever tool already handles the rest of your files.

            ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#running-tests","level":3,"title":"Running Tests","text":"
            make test   # fast: all tests\nmake audit  # full: fmt + vet + lint + drift + docs + test\nmake smoke  # build + run basic commands end-to-end\n
            ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#running-the-docs-site-locally","level":3,"title":"Running the Docs Site Locally","text":"
            make site-setup  # one-time: install zensical via pipx\nmake site-serve  # serve at localhost\n
            ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#submitting-changes","level":2,"title":"Submitting Changes","text":"","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#before-you-start","level":3,"title":"Before You Start","text":"
            1. Check existing issues to avoid duplicating effort;
            2. For large changes, open an issue first to discuss the approach;
            3. Read the specs in specs/ for design context.
            ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#pull-request-process","level":3,"title":"Pull Request Process","text":"

            Respect the maintainers' time and energy: Keep your pull requests isolated and strive to minimize code changes.

            If your Pull Request solves more than one distinct issue, it's better to create separate pull requests instead of sending them in one large bundle.

            1. Create a feature branch: git checkout -b feature/my-feature;
            2. Make your changes;
            3. Run make audit to catch issues early;
            4. Commit with a clear message;
            5. Push and open a pull request.

            Audit Your Code Before Submitting

            Run make audit before submitting:

            make audit covers formatting, vetting, linting, drift checks, doc consistency, and tests in one pass.

            ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#commit-messages","level":3,"title":"Commit Messages","text":"

            Following conventional commits is recommended but not required:

            Types: feat, fix, docs, test, refactor, chore

            Examples:

            • feat(cli): add ctx export command
            • fix(drift): handle missing files gracefully
            • docs: update installation instructions
            ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#code-style","level":3,"title":"Code Style","text":"
            • Follow Go conventions (gofmt, go vet);
            • Keep functions focused and small;
            • Add tests for new functionality;
            • Handle errors explicitly; use descriptive names (readErr, writeErr) not repeated err;
            • No magic strings: all repeated literals go in internal/config/;
            • Output goes through internal/write/ packages, not fmt.Print*;
            • Errors go through internal/err/ constructors, not inline fmt.Errorf;
            • See Package Taxonomy and .context/CONVENTIONS.md for the full reference.
            ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#code-of-conduct","level":2,"title":"Code of Conduct","text":"

            A clear context requires respectful collaboration.

            ctx follows the Contributor Covenant.

            ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#boring-legal-stuff","level":2,"title":"Boring Legal Stuff","text":"","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#developer-certificate-of-origin-dco","level":3,"title":"Developer Certificate of Origin (DCO)","text":"

            By contributing, you agree to the Developer Certificate of Origin.

            All commits must be signed off:

            git commit -s -m \"feat: add new feature\"\n
            ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/contributing/#license","level":3,"title":"License","text":"

            Contributions are licensed under the Apache 2.0 License.

            ","path":["Home","Community","Contributing"],"tags":[]},{"location":"home/faq/","level":1,"title":"FAQ","text":"","path":["Home","Introduction","FAQ"],"tags":[]},{"location":"home/faq/#why-markdown","level":2,"title":"Why Markdown?","text":"

            Markdown is human-readable, version-controllable, and tool-agnostic. Every AI model can parse it natively. Every developer can read it in a terminal, a browser, or a code review. There's no schema to learn, no binary format to decode, no vendor lock-in. You can inspect your context with cat, diff it with git diff, and review it in a PR.

            ","path":["Home","Introduction","FAQ"],"tags":[]},{"location":"home/faq/#does-ctx-work-offline","level":2,"title":"Does ctx Work Offline?","text":"

            Yes. ctx is completely local. It reads and writes files on disk, generates context packets from local state, and requires no network access. The only feature that touches the network is the optional webhook notifications hook, which you have to explicitly configure.

            ","path":["Home","Introduction","FAQ"],"tags":[]},{"location":"home/faq/#what-gets-committed-to-git","level":2,"title":"What Gets Committed to Git?","text":"

            The .context/ directory: yes, commit it. That's the whole point. Team members and AI agents read the same context files.

            What not to commit:

            • .ctx.key: your encryption key. Stored at ~/.ctx/.ctx.key, never in the repo. ctx init handles this automatically.
            • journal/ and logs/: generated data, potentially large. ctx init adds these to .gitignore.
            • scratchpad.enc: your choice. It's encrypted, so it's safe to commit if you want shared scratchpad state. See Scratchpad for details.
            ","path":["Home","Introduction","FAQ"],"tags":[]},{"location":"home/faq/#how-big-should-my-token-budget-be","level":2,"title":"How Big Should My Token Budget Be?","text":"

            The default is 8000 tokens, which works well for most projects. Configure it via .ctxrc or the CTX_TOKEN_BUDGET environment variable:

            # In .ctxrc\ntoken_budget = 12000\n\n# Or as an environment variable\nexport CTX_TOKEN_BUDGET=12000\n\n# Or per-invocation\nctx agent --budget 4000\n

            Higher budgets include more context but cost more tokens per request. Lower budgets force sharper prioritization: ctx drops lower-priority content first, so CONSTITUTION and TASKS always make the cut.

            See Configuration for all available settings.

            ","path":["Home","Introduction","FAQ"],"tags":[]},{"location":"home/faq/#why-not-a-database","level":2,"title":"Why Not a Database?","text":"

            Files are inspectable, diffable, and reviewable in pull requests. You can grep them, cat them, pipe them through jq or awk. They work with every version control system and every text editor.

            A database would add a dependency, require migrations, and make context opaque. The design bet is that context should be as visible and portable as the code it describes.

            ","path":["Home","Introduction","FAQ"],"tags":[]},{"location":"home/faq/#does-it-work-with-tools-other-than-claude-code","level":2,"title":"Does It Work with Tools Other than Claude Code?","text":"

            Yes. ctx agent outputs a context packet that any AI tool can consume: paste it into ChatGPT, Cursor, Copilot, Aider, or anything else that accepts text input.

            Claude Code gets first-class integration via the ctx plugin (hooks, skills, automatic context loading). VS Code Copilot Chat has a dedicated ctx extension. Other tools integrate via generated instruction files or manual pasting.

            See Integrations for tool-specific setup, including the multi-tool recipe.

            ","path":["Home","Introduction","FAQ"],"tags":[]},{"location":"home/faq/#can-i-use-ctx-on-an-existing-project","level":2,"title":"Can I Use ctx on an Existing Project?","text":"

            Yes. Run ctx init in any repo and it creates .context/ with template files. Start recording decisions, tasks, and conventions as you work. Context grows naturally; you don't need to backfill everything on day one.

            See Getting Started for the full setup flow, or Joining a ctx Project if someone else already initialized it.

            ","path":["Home","Introduction","FAQ"],"tags":[]},{"location":"home/faq/#what-happens-when-context-files-get-too-big","level":2,"title":"What Happens When Context Files Get Too Big?","text":"

            Token budgeting handles this automatically. ctx agent prioritizes content by file priority (CONSTITUTION first, GLOSSARY last) and trims lower-priority entries when the budget is tight.

            For manual maintenance, ctx compact archives completed tasks and old entries, keeping active context lean. You can also run ctx task archive to move completed tasks out of TASKS.md.

            The goal is to keep context files focused on current state. Historical entries belong in git history or the archive.

            ","path":["Home","Introduction","FAQ"],"tags":[]},{"location":"home/faq/#is-context-meant-to-be-shared","level":2,"title":"Is .context/ Meant to Be Shared?","text":"

            Yes. Commit it to your repo. Every team member and every AI agent reads the same files. That's the mechanism for shared memory: decisions made in one session are visible in the next, regardless of who (or what) starts it.

            The only per-user state is the encryption key (~/.ctx/.ctx.key) and the optional scratchpad. Everything else is team-shared by design.

            Related:

            • Getting Started - installation and first setup
            • Configuration - .ctxrc, environment variables, and defaults
            • Context Files - what each file does and how to use it
            ","path":["Home","Introduction","FAQ"],"tags":[]},{"location":"home/first-session/","level":1,"title":"Your First Session","text":"

            Here's what a complete first session looks like, from initialization to the moment your AI cites your project context back to you.

            ","path":["Home","Get Started","Your First Session"],"tags":[]},{"location":"home/first-session/#step-1-initialize-your-project","level":2,"title":"Step 1: Initialize Your Project","text":"

            Run ctx init in your project root:

            cd your-project\nctx init\n

            Sample output:

            Context initialized in .context/\n\n  ✓ CONSTITUTION.md\n  ✓ TASKS.md\n  ✓ DECISIONS.md\n  ✓ LEARNINGS.md\n  ✓ CONVENTIONS.md\n  ✓ ARCHITECTURE.md\n  ✓ GLOSSARY.md\n  ✓ AGENT_PLAYBOOK.md\n\nSetting up encryption key...\n  ✓ ~/.ctx/.ctx.key\n\nClaude Code plugin (hooks + skills):\n  Install: claude /plugin marketplace add ActiveMemory/ctx\n  Then:    claude /plugin install ctx@activememory-ctx\n\nNext steps:\n  1. Edit .context/TASKS.md to add your current tasks\n  2. Run 'ctx status' to see context summary\n  3. Run 'ctx agent' to get AI-ready context packet\n

            This created your .context/ directory with template files.

            For Claude Code, install the ctx plugin to get automatic hooks and skills.

            ","path":["Home","Get Started","Your First Session"],"tags":[]},{"location":"home/first-session/#step-2-populate-your-context","level":2,"title":"Step 2: Populate Your Context","text":"

            Add a task and a decision: These are the entries your AI will remember:

            ctx add task \"Implement user authentication\" \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n\n# Output: ✓ Added to TASKS.md\n\nctx add decision \"Use PostgreSQL for primary database\" \\\n  --context \"Need a reliable database for production\" \\\n  --rationale \"PostgreSQL offers ACID compliance and JSON support\" \\\n  --consequence \"Team needs PostgreSQL training\" \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n\n# Output: ✓ Added to DECISIONS.md\n

            These entries are what the AI will recall in future sessions. You don't need to populate everything now: Context grows naturally as you work.

            ","path":["Home","Get Started","Your First Session"],"tags":[]},{"location":"home/first-session/#step-3-check-your-context","level":2,"title":"Step 3: Check Your Context","text":"
            ctx status\n

            Sample output:

            Context Status\n====================\n\nContext Directory: .context/\nTotal Files: 8\nToken Estimate: 1,247 tokens\n\nFiles:\n  ✓ CONSTITUTION.md (loaded)\n  ✓ TASKS.md (1 items)\n  ✓ DECISIONS.md (1 items)\n  ○ LEARNINGS.md (empty)\n  ✓ CONVENTIONS.md (loaded)\n  ✓ ARCHITECTURE.md (loaded)\n  ✓ GLOSSARY.md (loaded)\n  ✓ AGENT_PLAYBOOK.md (loaded)\n\nRecent Activity:\n  - TASKS.md modified 2 minutes ago\n  - DECISIONS.md modified 1 minute ago\n

            Notice the token estimate: This is how much context your AI will load.

            The ○ next to LEARNINGS.md means it's still empty; it will fill in as you capture lessons during development.

            ","path":["Home","Get Started","Your First Session"],"tags":[]},{"location":"home/first-session/#step-4-start-an-ai-session","level":2,"title":"Step 4: Start an AI Session","text":"

            With Claude Code (and the ctx plugin), start every session with:

            /ctx-remember\n

            This loads your context and presents a structured readback so you can confirm the agent knows what is going on. Context also loads automatically via hooks, but the explicit ceremony gives you a readback to verify.

            Steering Files Fire Automatically

            If you edited the four foundation files scaffolded by ctx init (.context/steering/product.md, tech.md, structure.md, workflow.md), their inclusion: always rules are prepended to every tool call via the plugin's PreToolUse hook, with no /ctx-remember needed, no MCP call. Edit a file, save, and the next tool call in Claude Code picks it up. See Steering files for details on the inclusion modes.

            Using VS Code?

            With VS Code Copilot Chat (and the ctx extension), type @ctx /agent in chat to load your context packet, or @ctx /status to check your project context. Run ctx setup copilot --write once to generate .github/copilot-instructions.md for automatic context loading.

            If you are not using Claude Code, generate a context packet for your AI tool:

            ctx agent --budget 8000\n

            Sample output:

            # Context Packet\nGenerated: 2026-02-14T15:30:45Z | Budget: 8000 tokens | Used: ~2450\n\n## Read These Files (in order)\n1. .context/CONSTITUTION.md\n2. .context/TASKS.md\n3. .context/CONVENTIONS.md\n...\n\n## Current Tasks\n- [ ] Implement user authentication\n- [ ] Add rate limiting to API endpoints\n\n## Key Conventions\n- Use gofmt for formatting\n- Path construction uses filepath.Join\n\n## Recent Decisions\n## [2026-02-14-120000] Use PostgreSQL for the primary database\n\n**Context**: Evaluated PostgreSQL, MySQL, and SQLite...\n**Rationale**: PostgreSQL offers better JSON support...\n\n## Key Learnings\n## [2026-02-14-100000] Connection pool sizing matters\n\n**Context**: Hit connection limits under load...\n**Lesson**: Default pool size of 10 is too low for concurrent requests...\n\n## Also Noted\n- Use JWT for session management\n- Always validate input at API boundary\n

            Paste this output into your AI tool's system prompt or conversation start.

            ","path":["Home","Get Started","Your First Session"],"tags":[]},{"location":"home/first-session/#step-5-verify-it-works","level":2,"title":"Step 5: Verify It Works","text":"

            Ask your AI: \"What are our current tasks?\"

            A working setup produces a response like:

            Based on the project context, you have one active task:\n\n- **Implement user authentication** (pending)\n\nThere's also a recent architectural decision to **use PostgreSQL for\nthe primary database**, chosen for its ACID compliance and JSON support.\n\nWant me to start on the authentication task?\n

            That's the success moment:

            The AI is citing your exact context entries from Step 2, not hallucinating or asking you to re-explain.

            ","path":["Home","Get Started","Your First Session"],"tags":[]},{"location":"home/first-session/#what-gets-created","level":2,"title":"What Gets Created","text":"
            .context/\n├── CONSTITUTION.md     # Hard rules: NEVER violate these\n├── TASKS.md            # Current and planned work\n├── CONVENTIONS.md      # Project patterns and standards\n├── ARCHITECTURE.md     # System overview\n├── DECISIONS.md        # Architectural decisions with rationale\n├── LEARNINGS.md        # Lessons learned, gotchas, tips\n├── GLOSSARY.md         # Domain terms and abbreviations\n└── AGENT_PLAYBOOK.md   # How AI tools should use this\n

            Claude Code integration (hooks + skills) is provided by the ctx plugin: See Integrations/Claude Code.

            VS Code Copilot Chat integration is provided by the ctx extension: See Integrations/VS Code.

            See Context Files for detailed documentation of each file.

            ","path":["Home","Get Started","Your First Session"],"tags":[]},{"location":"home/first-session/#what-to-gitignore","level":2,"title":"What to .gitignore","text":"

            Rule of Thumb

            • If it's knowledge (decisions, tasks, learnings, conventions), commit it.
            • If it's generated output, raw session data, or a secret, .gitignore it.

            Commit your .context/ knowledge files: that's the whole point.

            You should .gitignore the generated and sensitive paths:

            # Journal data (large, potentially sensitive)\n.context/journal/\n.context/journal-site/\n.context/journal-obsidian/\n\n# Hook logs (machine-specific)\n.context/logs/\n\n# Legacy encryption key path (copy to ~/.ctx/.ctx.key if needed)\n.context/.ctx.key\n\n# Claude Code local settings (machine-specific)\n.claude/settings.local.json\n

            ctx init Patches Your .gitignore for You

            ctx init automatically adds these entries to your .gitignore.

            Review the additions with cat .gitignore after init.

            See also:

            • Security Considerations
            • Scratchpad Encryption
            • Session Journal

            Next Up: Common Workflows →: day-to-day commands for tracking context, checking health, and browsing history.

            ","path":["Home","Get Started","Your First Session"],"tags":[]},{"location":"home/getting-started/","level":1,"title":"Getting Started","text":"","path":["Home","Get Started","Getting Started"],"tags":[]},{"location":"home/getting-started/#prerequisites","level":2,"title":"Prerequisites","text":"

            ctx does not require git, but using version control with your .context/ directory is strongly recommended:

            AI sessions occasionally modify or overwrite context files inadvertently. With git, the AI can check history and restore lost content: Without it, the data is gone.

            Several ctx features (journal changelog, blog generation) also use git history directly.

            ","path":["Home","Get Started","Getting Started"],"tags":[]},{"location":"home/getting-started/#installation","level":2,"title":"Installation","text":"

            Every setup starts with the ctx binary: the CLI tool itself.

            If you use Claude Code, you also install the ctx plugin, which adds hooks (context autoloading, persistence nudges) and 25+ /ctx-* skills. For other AI tools, ctx integrates via generated instruction files or manual context pasting: see Integrations for tool-specific setup.

            Pick one of the options below to install the binary. Claude Code users should also follow the plugin steps included in each option.

            ","path":["Home","Get Started","Getting Started"],"tags":[]},{"location":"home/getting-started/#option-1-build-from-source-recommended","level":3,"title":"Option 1: Build from Source (Recommended)","text":"

            Requires Go (version defined in go.mod) and Claude Code.

            git clone https://github.com/ActiveMemory/ctx.git\ncd ctx\nmake build\nsudo make install\n

            Install the Claude Code plugin from your local clone:

            1. Launch claude;
            2. Type /plugin and press Enter;
            3. Select Marketplaces → Add Marketplace
            4. Enter the path to the root of your clone, e.g. ~/WORKSPACE/ctx (this is where .claude-plugin/marketplace.json lives: It points Claude Code to the actual plugin in internal/assets/claude)
            5. Back in /plugin, select Install and choose ctx

            This points Claude Code at the plugin source on disk. Changes you make to hooks or skills take effect immediately: No reinstall is needed.

            Local Installs Need Manual Enablement

            Unlike marketplace installs, local plugin installs are not auto-enabled globally. The plugin will only work in projects that explicitly enable it. Run ctx init in each project (it auto-enables the plugin), or add the entry to ~/.claude/settings.json manually:

            { \"enabledPlugins\": { \"ctx@activememory-ctx\": true } }\n

            Verify:

            ctx --version       # binary is in PATH\nclaude /plugin list # plugin is installed\n

            Use the Source, Luke

            Building from source gives you the latest features and bug fixes.

            Since ctx is predominantly a developer tool, this is the recommended approach:

            You get the freshest code, can inspect what you are installing, and the plugin stays in sync with the binary.

            ","path":["Home","Get Started","Getting Started"],"tags":[]},{"location":"home/getting-started/#option-2-binary-download-marketplace","level":3,"title":"Option 2: Binary Download + Marketplace","text":"

            Pre-built binaries are available from the releases page.

            Linux (x86_64)Linux (ARM64)macOS (Apple Silicon)macOS (Intel)Windows
            curl -LO https://github.com/ActiveMemory/ctx/releases/download/v0.8.1/ctx-0.8.1-linux-amd64\nchmod +x ctx-0.8.1-linux-amd64\nsudo mv ctx-0.8.1-linux-amd64 /usr/local/bin/ctx\n
            curl -LO https://github.com/ActiveMemory/ctx/releases/download/v0.8.1/ctx-0.8.1-linux-arm64\nchmod +x ctx-0.8.1-linux-arm64\nsudo mv ctx-0.8.1-linux-arm64 /usr/local/bin/ctx\n
            curl -LO https://github.com/ActiveMemory/ctx/releases/download/v0.8.1/ctx-0.8.1-darwin-arm64\nchmod +x ctx-0.8.1-darwin-arm64\nsudo mv ctx-0.8.1-darwin-arm64 /usr/local/bin/ctx\n
            curl -LO https://github.com/ActiveMemory/ctx/releases/download/v0.8.1/ctx-0.8.1-darwin-amd64\nchmod +x ctx-0.8.1-darwin-amd64\nsudo mv ctx-0.8.1-darwin-amd64 /usr/local/bin/ctx\n

            Download ctx-0.8.1-windows-amd64.exe from the releases page and add it to your PATH.

            Claude Code users: install the plugin from the marketplace:

            1. Launch claude;
            2. Type /plugin and press Enter;
            3. Select Marketplaces → Add Marketplace;
            4. Enter ActiveMemory/ctx;
            5. Back in /plugin, select Install and choose ctx.

            Other tool users: see Integrations for tool-specific setup (Cursor, Copilot, Aider, Windsurf, etc.).

            Verify the Plugin Is Enabled

            After installing, confirm the plugin is enabled globally. Check ~/.claude/settings.json for an enabledPlugins entry. If missing, run ctx init in your project (it auto-enables the plugin), or add it manually:

            { \"enabledPlugins\": { \"ctx@activememory-ctx\": true } }\n

            Verify:

            ctx --version       # binary is in PATH\nclaude /plugin list # plugin is installed (Claude Code only)\n
            ","path":["Home","Get Started","Getting Started"],"tags":[]},{"location":"home/getting-started/#verifying-checksums","level":4,"title":"Verifying Checksums","text":"

            Each binary has a corresponding .sha256 checksum file. To verify your download:

            # Download the checksum file\ncurl -LO https://github.com/ActiveMemory/ctx/releases/download/v0.8.1/ctx-0.8.1-linux-amd64.sha256\n\n# Verify the binary\nsha256sum -c ctx-0.8.1-linux-amd64.sha256\n

            On macOS, use shasum -a 256 -c instead of sha256sum -c.

            Plugin Details

            After installation (either option) you get:

            • Context autoloading: ctx agent runs on every tool use (with cooldown)
            • Persistence nudges: reminders to capture learnings and decisions
            • Post-commit hooks: nudge context capture after git commit
            • Context size monitoring: alerts as sessions grow large
            • Project skills: /ctx-status, /ctx-task-add, /ctx-history, and more

            See Integrations for the full hook and skill reference.

            ","path":["Home","Get Started","Getting Started"],"tags":[]},{"location":"home/getting-started/#quick-start","level":2,"title":"Quick Start","text":"","path":["Home","Get Started","Getting Started"],"tags":[]},{"location":"home/getting-started/#1-initialize-context","level":3,"title":"1. Initialize Context","text":"
            cd your-project\nctx init\n

            This creates a .context/ directory with template files and an encryption key at ~/.ctx/ for the encrypted scratchpad. For Claude Code, install the ctx plugin for automatic hooks and skills.

            ctx init also scaffolds four foundation steering files in .context/steering/; these are behavioral-rule templates that tell your AI how to act on your project:

            File What it captures product.md Product context, goals, and target users tech.md Technology stack, constraints, key dependencies structure.md Project structure and directory conventions workflow.md Development workflow and process rules

            Each file starts with a self-documenting HTML comment explaining the three inclusion modes (always / auto / manual), priority, and tool scoping. The defaults are set to inclusion: always and priority: 10, so they fire on every AI tool call until you edit them.

            You should open each of these files and replace the placeholder content with your project's actual rules. Running ctx init again won't clobber your edits; existing files are left alone. To opt out entirely, use ctx init --no-steering-init.

            See Writing Steering Files for the full walkthrough, or ctx steering for the command reference.

            ","path":["Home","Get Started","Getting Started"],"tags":[]},{"location":"home/getting-started/#2-check-status","level":3,"title":"2. Check Status","text":"
            ctx status\n

            Shows context summary: files present, token estimate, and recent activity.

            ","path":["Home","Get Started","Getting Started"],"tags":[]},{"location":"home/getting-started/#3-start-using-with-ai","level":3,"title":"3. Start Using with AI","text":"

            With Claude Code (and the ctx plugin installed), context loads automatically via hooks.

            With VS Code Copilot Chat, install the ctx extension and use @ctx /status, @ctx /agent, and other slash commands directly in chat. Run ctx setup copilot --write to generate .github/copilot-instructions.md for automatic context loading.

            For other tools, paste the output of:

            ctx agent --budget 8000\n
            ","path":["Home","Get Started","Getting Started"],"tags":[]},{"location":"home/getting-started/#3b-set-up-for-your-ai-tool","level":3,"title":"3B. Set Up for Your AI Tool","text":"

            If you use an MCP-compatible tool, generate the integration config with ctx setup:

            KiroCursorCline
            ctx setup kiro --write\n# Creates .kiro/settings/mcp.json and syncs steering files\n
            ctx setup cursor --write\n# Creates .cursor/mcp.json and syncs steering files\n
            ctx setup cline --write\n# Creates .vscode/mcp.json and syncs steering files\n

            This registers the ctx MCP server and syncs any steering files into the tool's native format. Re-run after adding or changing steering files.

            ","path":["Home","Get Started","Getting Started"],"tags":[]},{"location":"home/getting-started/#4-verify-it-works","level":3,"title":"4. Verify It Works","text":"

            Ask your AI: \"Do you remember?\"

            It should cite specific context: current tasks, recent decisions, or previous session topics.

            ","path":["Home","Get Started","Getting Started"],"tags":[]},{"location":"home/getting-started/#5-set-up-companion-tools-highly-recommended","level":3,"title":"5. Set Up Companion Tools (Highly Recommended)","text":"

            ctx works on its own, but two companion MCP servers unlock significantly better agent behavior. The investment is small and the benefits compound over sessions:

            • Gemini Search: grounded web search with citations. Skills like /ctx-code-review and /ctx-explain use it for up-to-date documentation lookups instead of relying on training data.
            • GitNexus: code knowledge graph with symbol resolution, blast radius analysis, and domain clustering. Skills like /ctx-refactor and /ctx-code-review use it for impact analysis and dependency awareness.

            # Index your project for GitNexus (run once, then after major changes)\nnpx gitnexus analyze\n

            Both are optional MCP servers: if they are not connected, skills degrade gracefully to built-in capabilities. See Companion Tools for setup details and verification.

            Next Up:

            • Your First Session →: a step-by-step walkthrough from ctx init to verified recall
            • Common Workflows →: day-to-day commands for tracking context, checking health, and browsing history
            ","path":["Home","Get Started","Getting Started"],"tags":[]},{"location":"home/hub/","level":1,"title":"Hub","text":"","path":["Home","Concepts","Hub"],"tags":[]},{"location":"home/hub/#sharing-is-caring","level":2,"title":"Sharing Is Caring","text":"

            ctx projects are normally independent: each project has its own .context/ directory, its own decisions, its own learnings, its own journal. That's the right default, since most work is project-local, and mixing context across projects tends to dilute more than it helps.

            But sometimes a decision or a learning should cross project boundaries. A convention you codified in one project deserves to be visible in another. A gotcha you discovered debugging service A is the same gotcha waiting for you in service B. The ctx Hub is the feature that makes those specific entries travel, without replicating everything else.

            ","path":["Home","Concepts","Hub"],"tags":[]},{"location":"home/hub/#what-the-hub-actually-is","level":2,"title":"What the Hub Actually Is","text":"

            In one paragraph: the ctx Hub is a fan-out channel for four specific kinds of structured entries: decision, learning, convention, and task. You publish an entry with ctx add --share in one project, and it appears in .context/hub/ for every other project subscribed to that type. When you run ctx agent --include-hub, those shared entries become part of your next agent context packet.

            That is the entire feature. The Hub does not:

            • Share your session journal (.context/journal/). That stays local to each project.
            • Share your scratchpad (.context/pad). Encrypted notes never leave the machine that created them.
            • Share your TASKS.md, DECISIONS.md, LEARNINGS.md, or CONVENTIONS.md wholesale. Only entries you explicitly --share cross the boundary.
            • Provide user identity or attribution. The Hub identifies projects, not people.

            If you want \"my agent in project B sees everything my agent did in project A,\" that's not the Hub. Local session density stays local.

            ","path":["Home","Concepts","Hub"],"tags":[]},{"location":"home/hub/#who-its-for","level":2,"title":"Who It's For","text":"

            Two shapes, same mechanics, different trust models.

            ","path":["Home","Concepts","Hub"],"tags":[]},{"location":"home/hub/#personal-cross-project-brain","level":3,"title":"Personal Cross-Project Brain","text":"

            One developer, many projects. You want a learning from project A to show up when you open project B a week later. You want a convention you codified in your dotfiles project to be visible everywhere else on your workstation. Run a Hub on localhost, register each project, done.

            ","path":["Home","Concepts","Hub"],"tags":[]},{"location":"home/hub/#small-trusted-team","level":3,"title":"Small Trusted Team","text":"

            A few teammates on a LAN or a hub.ctx-like self-hosted server. You want team conventions to propagate without a wiki. You want lessons from one on-call engineer's 3 AM incident to reach everyone else's agent on the next session. Same mechanics as the personal case, plus TLS in front and a short security runbook.

            The Hub is not a multi-tenant public service. It assumes everyone holding a client token is friendly. Don't stand up hub.example.com for untrusted participants.

            ","path":["Home","Concepts","Hub"],"tags":[]},{"location":"home/hub/#going-further","level":2,"title":"Going Further","text":"
            • First-time setup: Hub: Getting Started, a five-minute walkthrough on localhost.
            • Mental model and user stories: Hub Overview, what flows, what doesn't, and when not to use it.
            • Team / LAN deployment: Multi-machine setup.
            • Redundancy: HA cluster.
            • Operating a Hub: Hub Operations and Hub Failure Modes.
            • Security posture: Hub Security Model.
            • Command reference: ctx serve, ctx connect, ctx hub.
            ","path":["Home","Concepts","Hub"],"tags":[]},{"location":"home/is-ctx-right/","level":1,"title":"Is It Right for Me?","text":"","path":["Home","Introduction","Is It Right for Me?"],"tags":[]},{"location":"home/is-ctx-right/#good-fit","level":2,"title":"Good Fit","text":"

            ctx shines when context matters more than code.

            If any of these sound like your project, it's worth trying:

            • Multi-session AI work: You use AI across many sessions on the same codebase, and re-explaining is slowing you down.
            • Architectural decisions that matter: Your project has non-obvious choices (database, auth strategy, API design) that the AI keeps second-guessing.
            • \"Why\" matters as much as \"what\": you need the AI to understand rationale, not just current code
            • Team handoffs: Multiple people (or multiple AI tools) work on the same project and need shared context.
            • AI-assisted development across tools: You switch between Claude Code, Cursor, Copilot, or other tools and want context to follow the project, not the tool.
            • Long-lived projects: Anything you'll work on for weeks or months, where accumulated knowledge has compounding value.
            ","path":["Home","Introduction","Is It Right for Me?"],"tags":[]},{"location":"home/is-ctx-right/#may-not-be-the-right-fit","level":2,"title":"May Not Be the Right Fit","text":"

            ctx adds overhead that isn't worth it for every project. Be honest about when to skip it:

            • One-off scripts: If the project is a single file you'll finish today, there's nothing to remember.
            • RAG-only workflows: If retrieval from an external knowledge base already gives the agent everything it needs for each session, adding ctx may be unnecessary. RAG retrieves information; ctx defines the project's working memory: They are complementary.
            • No AI involvement: ctx is designed for human-AI workflows; without an AI consumer, the files are just documentation.
            • Enterprise-managed context platforms: If your organization provides centralized context services, ctx may duplicate that layer.

            For a deeper technical comparison with RAG, prompt management tools, and agent frameworks, see ctx and Similar Tools.

            ","path":["Home","Introduction","Is It Right for Me?"],"tags":[]},{"location":"home/is-ctx-right/#project-size-guide","level":2,"title":"Project Size Guide","text":"","path":["Home","Introduction","Is It Right for Me?"],"tags":[]},{"location":"home/is-ctx-right/#solo-developer-single-repo","level":3,"title":"Solo Developer, Single Repo","text":"

            This is ctx's sweet spot.

            You get the most value here: one person, one project, decisions, and learnings accumulating over time. Setup takes 5 minutes, the .context/ directory stays small, and every session gets faster.

            ","path":["Home","Introduction","Is It Right for Me?"],"tags":[]},{"location":"home/is-ctx-right/#small-team-one-or-two-repos","level":3,"title":"Small Team, One or Two Repos","text":"

            Works well.

            Context files commit to git, so the whole team shares the same decisions and conventions. Each person's AI starts with the team's decisions already loaded. Merge conflicts on .context/ files are rare and easy to resolve (they are just Markdown).

            ","path":["Home","Introduction","Is It Right for Me?"],"tags":[]},{"location":"home/is-ctx-right/#multiple-repos-or-larger-teams","level":3,"title":"Multiple Repos or Larger Teams","text":"

            ctx operates per repository.

            Each repo has its own .context/ directory with its own decisions, tasks, and learnings. This matches the way code, ownership, and history already work in git.

            There is no built-in cross-repo context layer.

            For organizations that need centralized, organization-wide knowledge, ctx complements a platform solution by providing durable, project-local working memory for AI sessions.

            ","path":["Home","Introduction","Is It Right for Me?"],"tags":[]},{"location":"home/is-ctx-right/#5-minute-trial","level":2,"title":"5-Minute Trial","text":"

            Zero commitment. Try it, and delete .context/ if it's not for you.

            Using Claude Code?

            Install the ctx plugin from the Marketplace for Claude-native hooks, skills, and automatic context loading:

            1. Type /plugin and press Enter
            2. Select Marketplaces → Add Marketplace
            3. Enter ActiveMemory/ctx
            4. Back in /plugin, select Install and choose ctx

            You'll still need the ctx binary for the CLI: See Getting Started for install options.

            # 1. Initialize\ncd your-project\nctx init\n\n# 2. Add one real decision from your project\nctx add decision \"Your actual architectural choice\" \\\n  --context \"What prompted this decision\" \\\n  --rationale \"Why you chose this approach\" \\\n  --consequence \"What changes as a result\" \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n\n# 3. Check what the AI will see\nctx status\n\n# 4. Start an AI session and ask: \"Do you remember?\"\n

            If the AI cites your decision back to you, it's working.

            Want to remove it later? One command:

            rm -rf .context/\n

            No dependencies to uninstall. No configuration to revert. Just files.

            Ready to try it out?

            • Join the Community→: Open Source is better together.
            • Getting Started →: Full installation and setup.
            • ctx and Similar Tools →: Detailed comparison with other approaches.
            ","path":["Home","Introduction","Is It Right for Me?"],"tags":[]},{"location":"home/joining-a-project/","level":1,"title":"Joining a Project","text":"

            You've joined a team or inherited a project, and there's a .context/ directory in the repo. Good news: someone already set up persistent context. This page gets you oriented fast.

            ","path":["Home","Working with AI","Joining a Project"],"tags":[]},{"location":"home/joining-a-project/#what-to-read-first","level":2,"title":"What to Read First","text":"

            The files in .context/ have a deliberate priority order. Read them top-down:

            1. CONSTITUTION.md: Hard rules. Read this before you touch anything. These are inviolable constraints the team has agreed on.
            2. TASKS.md: Current and planned work. Shows what's in progress, what's pending, and what's blocked.
            3. CONVENTIONS.md: How the team writes code. Naming patterns, file organization, preferred idioms.
            4. ARCHITECTURE.md: System overview. Components, boundaries, data flow.
            5. DECISIONS.md: Why things are the way they are. Saves you from re-proposing something the team already evaluated and rejected.
            6. LEARNINGS.md: Gotchas, tips, and hard-won lessons. The stuff that doesn't fit anywhere else but will save you hours.

            See Context Files for detailed documentation of each file's structure and purpose.

            ","path":["Home","Working with AI","Joining a Project"],"tags":[]},{"location":"home/joining-a-project/#checking-context-health","level":2,"title":"Checking Context Health","text":"

            Before you start working, check whether the context is current:

            ctx status\n

            This shows file counts, token estimates, and recent activity. If files haven't been touched in weeks, the context may be stale.

            ctx drift\n

            This compares context files against recent code changes and flags potential drift: decisions that no longer match the codebase, conventions that have shifted, or tasks that look outdated.

            If things are stale, mention it to the team. Don't silently fix it yourself on day one.

            ","path":["Home","Working with AI","Joining a Project"],"tags":[]},{"location":"home/joining-a-project/#starting-your-first-session","level":2,"title":"Starting Your First Session","text":"

            Generate a context packet to prime your AI:

            ctx agent --budget 8000\n

            This outputs a token-budgeted summary of the project context, ordered by priority. With Claude Code and the ctx plugin, context loads automatically via hooks. You can also use the /ctx-remember skill to get a structured readback of what the AI knows.

            The readback is your verification step: if the AI can cite specific tasks and decisions, the context is working.

            ","path":["Home","Working with AI","Joining a Project"],"tags":[]},{"location":"home/joining-a-project/#adding-context","level":2,"title":"Adding Context","text":"

            As you work, you'll discover things worth recording. Use the CLI:

            # Record a decision you made or learned about\nctx add decision \"Use connection pooling for DB access\" \\\n  --rationale \"Reduces connection overhead under load\" \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n\n# Capture a gotcha you hit\nctx add learning \"Redis timeout defaults to 5s\" \\\n  --context \"Hit timeouts during bulk operations\" \\\n  --application \"Set explicit timeout for batch jobs\" \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n\n# Add a convention you noticed the team follows\nctx add convention \"All API handlers return structured errors\"\n

            You can also just tell the AI: \"Record this as a learning\" or \"Add this decision to context.\" With the ctx plugin, context-update commands handle the file writes.

            See the Knowledge Capture recipe for the full workflow.

            ","path":["Home","Working with AI","Joining a Project"],"tags":[]},{"location":"home/joining-a-project/#session-etiquette","level":2,"title":"Session Etiquette","text":"

            A few norms for working in a ctx-managed project:

            • Respect existing conventions. If CONVENTIONS.md says \"use filepath.Join,\" use filepath.Join. If you disagree, propose a change, don't silently diverge.
            • Don't restructure context files without asking. The file layout and section structure are shared state. Reorganizing them affects every team member and every AI session.
            • Mark tasks done when complete. Check the box ([x]) in place. Don't move tasks between sections or delete them.
            • Add context as you go. Decisions, learnings, and conventions you discover are valuable to the next person (or the next session).
            ","path":["Home","Working with AI","Joining a Project"],"tags":[]},{"location":"home/joining-a-project/#common-pitfalls","level":2,"title":"Common Pitfalls","text":"

            Ignoring CONSTITUTION.md. The constitution exists for a reason. If a task conflicts with a constitution rule, the task is wrong. Raise it with the team instead of working around the constraint.

            Deleting tasks. Never delete a task from TASKS.md. Mark it [x] (done) or [-] (skipped with a reason). The history matters for session replay and audit.

            Bypassing hooks. If the project uses ctx hooks (pre-commit nudges, context autoloading), don't disable them. They exist to keep context fresh. If a hook is noisy or broken, fix it or file a task.

            Over-contributing on day one. Read first, then contribute. Adding a dozen learnings before you understand the project's norms creates noise, not signal.

            Related:

            • Getting Started: installation and setup from scratch
            • Context Files: detailed file reference
            • Knowledge Capture: recording decisions, learnings, and conventions
            • Session Lifecycle: how a typical AI session flows with ctx
            ","path":["Home","Working with AI","Joining a Project"],"tags":[]},{"location":"home/keeping-ai-honest/","level":1,"title":"Keeping AI Honest","text":"","path":["Home","Working with AI","Keeping AI Honest"],"tags":[]},{"location":"home/keeping-ai-honest/#the-problem","level":2,"title":"The Problem","text":"

            AI agents confabulate. They invent history that never happened, claim familiarity with decisions that were never made, and sometimes declare a task complete when it is not. This is not malice - it is the default behavior of a system optimizing for plausible-sounding responses.

            When your AI says \"we decided to use Redis for caching last week,\" can you verify that? When it says \"the auth module is complete,\" can you confirm it? Without grounded, persistent context, the answer is no. You are trusting vibes.

            ctx replaces vibes with verifiable artifacts.

            ","path":["Home","Working with AI","Keeping AI Honest"],"tags":[]},{"location":"home/keeping-ai-honest/#grounded-memory","level":2,"title":"Grounded Memory","text":"

            Every entry in ctx context files has a timestamp and structured fields. When the AI cites a decision, you can check it.

            ## [2026-01-28-143022] Use Event Sourcing for Audit Trail\n\n**Status**: Accepted\n\n**Context**: Compliance requires full mutation history.\n\n**Decision**: Event sourcing for the audit subsystem only.\n\n**Rationale**: Append-only log meets compliance requirements\nwithout imposing event sourcing on the entire domain model.\n

            The timestamp 2026-01-28-143022 is not decoration. It is a verifiable anchor. If the AI references this decision, you can open DECISIONS.md, find the entry, and confirm it says what the AI claims. If the entry does not exist, the AI is hallucinating - and you know immediately.

            This is grounded memory: claims that trace back to artifacts you control and can audit.

            ","path":["Home","Working with AI","Keeping AI Honest"],"tags":[]},{"location":"home/keeping-ai-honest/#constitutionmd-hard-guardrails","level":2,"title":"CONSTITUTION.md: Hard Guardrails","text":"

            CONSTITUTION.md defines rules the AI must treat as inviolable. These are not suggestions or best practices - they are constraints that override task requirements.

            # Constitution\n\nThese rules are INVIOLABLE. If a task requires violating these,\nthe task is wrong.\n\n* [ ] Never commit secrets, tokens, API keys, or credentials\n* [ ] All public API changes require a decision record\n* [ ] Never delete context files without explicit user approval\n

            The AI reads these at session start, before anything else. A well-integrated agent will refuse a task that conflicts with a constitutional rule, citing the specific rule it would violate.

            ","path":["Home","Working with AI","Keeping AI Honest"],"tags":[]},{"location":"home/keeping-ai-honest/#the-agent-playbooks-anti-hallucination-rules","level":2,"title":"The Agent Playbook's Anti-Hallucination Rules","text":"

            The AGENT_PLAYBOOK.md file includes a section called \"How to Avoid Hallucinating Memory\" with five explicit rules:

            1. Never assume. If it is not in the context files, you do not know it.
            2. Never invent history. Do not claim \"we discussed\" something without a file reference.
            3. Verify before referencing. Search files before citing them.
            4. When uncertain, say so. \"I don't see a decision on this\" is always better than a fabricated one.
            5. Trust files over intuition. If the files say PostgreSQL but your training data suggests MySQL, the files win.

            These rules create a behavioral contract. The AI is not left to guess how confident it should be - it has explicit instructions to ground every claim in the context directory.

            ","path":["Home","Working with AI","Keeping AI Honest"],"tags":[]},{"location":"home/keeping-ai-honest/#drift-detection","level":2,"title":"Drift Detection","text":"

            Context files can go stale. You rename a package, delete a module, or finish a sprint, and suddenly ARCHITECTURE.md references paths that no longer exist. Stale context is almost as dangerous as no context: the AI treats outdated information as current truth.

            ctx drift detects this divergence:

            ctx drift\n

            It scans context files for references to files, paths, and symbols that no longer exist in the codebase. Stale references get flagged so you can update or remove them before they mislead the next session.

            Regular drift checks - weekly, or after major refactors - keep your context files honest the same way tests keep your code honest.

            ","path":["Home","Working with AI","Keeping AI Honest"],"tags":[]},{"location":"home/keeping-ai-honest/#the-verification-loop","level":2,"title":"The Verification Loop","text":"

            The /ctx-commit skill includes a built-in verification step: before staging, it maps claims to evidence and runs self-audit questions to surface gaps. This catches inconsistencies at the point where they matter most: right before code is committed.

            This closes the loop. You write context. The AI reads context. The verification step confirms that context still matches reality. When it does not, you fix it - and the next session starts from truth, not from drift.

            ","path":["Home","Working with AI","Keeping AI Honest"],"tags":[]},{"location":"home/keeping-ai-honest/#trust-through-structure","level":2,"title":"Trust through Structure","text":"

            The common thread across all of these mechanisms is structure over prose. Timestamps make claims verifiable. Constitutional rules make boundaries explicit. Drift detection makes staleness visible. The playbook makes behavioral expectations concrete.

            You do not need to trust the AI. You need to trust the system -- and verify when it matters.

            ","path":["Home","Working with AI","Keeping AI Honest"],"tags":[]},{"location":"home/keeping-ai-honest/#further-reading","level":2,"title":"Further Reading","text":"
            • Detecting and Fixing Drift: the full workflow for keeping context files accurate
            • Invariants: the properties that must hold for any valid ctx implementation
            • Agent Security: threat model and mitigations for AI agents operating with persistent context
            ","path":["Home","Working with AI","Keeping AI Honest"],"tags":[]},{"location":"home/prompting-guide/","level":1,"title":"Prompting Guide","text":"

            New to ctx?

            This guide references context files like TASKS.md, DECISIONS.md, and LEARNINGS.md:

            These are plain Markdown files that ctx maintains in your project's .context/ directory.

            If terms like \"context packet\" or \"session ceremony\" are unfamiliar,

            • start with the ctx Manifesto for the why,
            • About for the big picture,
            • then Getting Started to set up your first project.
            ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#literature-matters","level":2,"title":"Literature Matters","text":"

            This guide is about crafting effective prompts for working with AI assistants in ctx-enabled projects, but the guidelines given here apply to other AI systems, too.

            The right prompt triggers the right behavior.

            This guide documents prompts that reliably produce good results.

            ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#tldr","level":2,"title":"TL;DR","text":"Goal Prompt Load context \"Do you remember?\" Resume work \"What's the current state?\" What's next /ctx-next Debug \"Why doesn't X work?\" Validate \"Is this consistent with our decisions?\" Impact analysis \"What would break if we...\" Reflect /ctx-reflect Wrap up /ctx-wrap-up Persist \"Add this as a learning\" Explore \"How does X work in this codebase?\" Sanity check \"Is this the right approach?\" Completeness \"What am I missing?\" One more thing \"What's the single smartest addition?\" Set tone \"Push back if my assumptions are wrong.\" Constrain scope \"Only change files in X. Nothing else.\" Course correct \"Stop. That's not what I meant.\" Check health \"Run ctx drift\" Commit /ctx-commit","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#session-start","level":2,"title":"Session Start","text":"","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#do-you-remember","level":3,"title":"\"do you remember?\"","text":"

            Triggers the AI to silently read TASKS.md, DECISIONS.md, LEARNINGS.md, and check recent history via ctx journal before responding with a structured readback:

            1. Last session: most recent session topic and date
            2. Active work: pending or in-progress tasks
            3. Recent context: 1-2 recent decisions or learnings
            4. Next step: offer to continue or ask what to focus on

            Use this at the start of every important session.

            Do you remember what we were working on?\n

            This question implies prior context exists. The AI checks files rather than admitting ignorance. The expected response cites specific context (session names, task counts, decisions), not vague summaries.

            If the AI instead narrates its discovery process (\"Let me check if there are files...\"), it has not loaded CLAUDE.md or AGENT_PLAYBOOK.md properly.

            For a detailed case study on making agents actually follow this protocol (including the failure modes, the timing problem, and the hook design that solved it) see The Dog Ate My Homework.

            ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#whats-the-current-state","level":3,"title":"\"What's the Current State?\"","text":"

            Prompts reading of TASKS.md, recent sessions, and status overview.

            Use this when resuming work after a break.

            Variants:

            • \"Where did we leave off?\"
            • \"What's in progress?\"
            • \"Show me the open tasks.\"
            ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#during-work","level":2,"title":"During Work","text":"","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#why-doesnt-x-work","level":3,"title":"\"Why Doesn't X Work?\"","text":"

            This triggers root cause analysis rather than surface-level fixes.

            Use this when something fails unexpectedly.

            Framing as \"why\" encourages investigation before action. The AI will trace through code, check configurations, and identify the actual cause.

            Real Example

            \"Why can't I run /ctx-reflect?\" led to discovering missing permissions in settings.local.json bootstrapping.

            This was a fix that benefited all users of ctx.

            ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#is-this-consistent-with-our-decisions","level":3,"title":"\"Is This Consistent with Our Decisions?\"","text":"

            This prompts checking DECISIONS.md before implementing.

            Use this before making architectural choices.

            Variants:

            • \"Check if we've decided on this before\"
            • \"Does this align with our conventions?\"
            ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#what-would-break-if-we","level":3,"title":"\"What Would Break If We...\"","text":"

            This triggers defensive thinking and impact analysis.

            Use this before making significant changes.

            What would break if we change the Settings struct?\n
            ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#before-you-start-read-x","level":3,"title":"\"Before You Start, Read X\"","text":"

            This ensures specific context is loaded before work begins.

            Use this when you know the relevant context exists in a specific file.

            Before you start, check ctx journal source for the auth discussion session\n
            ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#scope-control","level":3,"title":"Scope Control","text":"

            Constrain the AI to prevent sprawl. These are some of the most useful prompts in day-to-day work.

            Only change files in internal/cli/add/. Nothing else.\n
            No new files. Modify the existing implementation.\n
            Keep the public API unchanged. Internal refactor only.\n

            Use these when the AI tends to \"helpfully\" modify adjacent code, add documentation you didn't ask for, or create new abstractions.

            ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#course-correction","level":3,"title":"Course Correction","text":"

            Steer the AI when it goes off-track: Don't wait for it to finish a wrong approach.

            Stop! That's not what I meant. Let me clarify.\n
            Let's step back. Explain what you're about to do before changing anything.\n
            Undo that last change and try a different approach.\n

            These work because they interrupt momentum.

            Without explicit course correction, the AI tends to commit harder to a wrong path rather than reconsidering.

            ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#failure-modes","level":3,"title":"Failure Modes","text":"

            When the AI misbehaves, match the symptom to the recovery prompt:

            Symptom Recovery prompt Hand-waves (\"should work now\") \"Show evidence: file/line refs, command output, or test name.\" Creates unnecessary files \"No new files. Modify the existing implementation.\" Expands scope unprompted \"Stop after the smallest working change. Ask before expanding scope.\" Narrates instead of acting \"Skip the explanation. Make the change and show the diff.\" Repeats a failed approach \"That didn't work last time. Try a different approach.\" Claims completion without proof \"Run the test. Show me the output.\"

            These are recovery handles, not rules to paste into CLAUDE.md.

            Use them in the moment when you see the behavior.

            ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#reflection-and-persistence","level":2,"title":"Reflection and Persistence","text":"","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#what-did-we-learn","level":3,"title":"\"What Did We Learn?\"","text":"

            This prompts reflection on the session and often triggers adding learnings to LEARNINGS.md.

            Use this after completing a task or debugging session.

            This is an explicit reflection prompt. The AI will summarize insights and often offer to persist them.

            ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#add-this-as-a-learningdecision","level":3,"title":"\"Add This as a Learning/decision\"","text":"

            This is an explicit persistence request.

            Use this when you have discovered something worth remembering.

            Add this as a learning: \"JSON marshal escapes angle brackets by default\"\n\n# or simply.\nAdd this as a learning.\n# and let the AI autonomously infer and summarize.\n
            ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#save-context-before-we-end","level":3,"title":"\"Save Context Before We End\"","text":"

            This triggers context persistence before the session closes.

            Use it at the end of the session or before switching topics.

            Variants:

            • \"Let's persist what we did\"
            • \"Update the context files\"
            • /ctx-wrap-up: the recommended end-of-session ceremony (see Session Ceremonies)
            • /ctx-reflect: mid-session reflection checkpoint
            ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#exploration-and-research","level":2,"title":"Exploration and Research","text":"","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#explore-the-codebase-for-x","level":3,"title":"\"Explore the Codebase for X\"","text":"

            This triggers thorough codebase search rather than guessing.

            Use this when you need to understand how something works.

            This works because \"Explore\" signals that investigation is needed, not immediate action.

            ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#how-does-x-work-in-this-codebase","level":3,"title":"\"How Does X Work in This Codebase?\"","text":"

            This prompts reading actual code rather than explaining general concepts.

            Use this to understand the existing implementation.

            How does session saving work in this codebase?\n
            ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#find-all-places-where-x","level":3,"title":"\"Find All Places Where X\"","text":"

            This triggers a comprehensive search across the codebase.

            Use this before refactoring or understanding the impact.

            ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#meta-and-process","level":2,"title":"Meta and Process","text":"","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#what-should-we-document-from-this","level":3,"title":"\"What Should We Document from This?\"","text":"

            This prompts identifying learnings, decisions, and conventions worth persisting.

            Use this after complex discussions or implementations.

            ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#is-this-the-right-approach","level":3,"title":"\"Is This the Right Approach?\"","text":"

            This invites the AI to challenge the current direction.

            Use this when you want a sanity check.

            This works because it allows AI to disagree.

            AIs often default to agreeing; this prompt signals you want an honest assessment.

            Stronger variant: \"Push back if my assumptions are wrong.\" This sets the tone for the entire session: The AI will flag questionable choices proactively instead of waiting to be asked.

            ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#what-am-i-missing","level":3,"title":"\"What Am I Missing?\"","text":"

            This prompts thinking about edge cases, overlooked requirements, or unconsidered approaches.

            Use this before finalizing a design or implementation.

            Forward-looking variant: \"What's the single smartest addition you could make to this at this point?\" Use this after you think you're done: It surfaces improvements you wouldn't have thought to ask for. The constraint to one thing prevents feature sprawl.

            ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#cli-commands-as-prompts","level":2,"title":"CLI Commands as Prompts","text":"

            Asking the AI to run ctx commands is itself a prompt. These load context or trigger specific behaviors:

            Command What it does \"Run ctx status\" Shows context summary, file presence, staleness \"Run ctx agent\" Loads token-budgeted context packet \"Run ctx drift\" Detects dead paths, stale files, missing context","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#ctx-skills","level":3,"title":"ctx Skills","text":"

            The SKILL.md Standard

            Skills are formalized prompts stored as SKILL.md files.

            The /slash-command syntax below is Claude Code specific.

            Other agents can use the same skill files, but invocation may differ.

            Use ctx skills by name:

            Skill When to use /ctx-status Quick context summary /ctx-agent Load full context packet /ctx-remember Recall project context and structured readback /ctx-wrap-up End-of-session context persistence /ctx-history Browse session history for past discussions /ctx-reflect Structured reflection checkpoint /ctx-next Suggest what to work on next /ctx-commit Commit with context persistence /ctx-drift Detect and fix context drift /ctx-implement Execute a plan step-by-step with verification /ctx-loop Generate autonomous loop script /ctx-pad Manage encrypted scratchpad /ctx-archive Archive completed tasks /check-links Audit docs for dead links

            Ceremony vs. Workflow Skills

            Most skills work conversationally: \"what should we work on?\" triggers /ctx-next, \"save that as a learning\" triggers /ctx-learning-add. Natural language is the recommended approach.

            Two skills are the exception: /ctx-remember and /ctx-wrap-up are ceremony skills for session boundaries. Invoke them as explicit slash commands; conversational triggers risk partial execution. See Session Ceremonies.

            Skills combine a prompt, tool permissions, and domain knowledge into a single invocation.

            Skills beyond Claude Code

            The /slash-command syntax above is Claude Code native, but the underlying SKILL.md files are a standard markdown format that any agent can consume. If you use a different coding agent, consult its documentation for how to load skill files as prompt templates.

            See Integrations for setup details.

            ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#anti-patterns","level":2,"title":"Anti-Patterns","text":"

            Based on our ctx development experience (i.e., \"sipping our own champagne\") so far, here are some prompts that tend to produce poor results:

            Prompt Problem Better Alternative \"Fix this\" Too vague, may patch symptoms \"Why is this failing?\" \"Make it work\" Encourages quick hacks \"What's the right way to solve this?\" \"Just do it\" Skips planning \"Plan this, then implement\" \"You should remember\" Confrontational \"Do you remember?\" \"Obviously...\" Discourages questions State the requirement directly \"Idiomatic X\" Triggers language priors \"Follow project conventions\" \"Implement everything\" No phasing, sprawl risk Break into tasks, implement one at a time \"You should know this\" Assumes context is loaded \"Before you start, read X\"","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#reliability-checklist","level":2,"title":"Reliability Checklist","text":"

            Before sending a non-trivial prompt, check these four elements. This is the guide's DNA in one screenful.

            1. Goal in one sentence: What does \"done\" look like?
            2. Files to read: What existing code or context should the AI review before acting?
            3. Verification command: How will you prove it worked? (test name, CLI command, expected output)
            4. Scope boundary: What should the AI not touch?

            A prompt that covers all four is almost always good enough.

            A prompt missing #3 is how you get \"should work now\" without evidence.

            ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#safety-invariants","level":2,"title":"Safety Invariants","text":"

            These Are Invariants: Not Suggestions

            A prompting guide earns its trust by being honest about risk.

            The four rules below don't change with model versions, agent frameworks, or project size.

            Build them into your workflow once and stop thinking about them.

            Tool-using agents can read files, run commands, and modify your codebase. That power makes them useful. It also creates a trust boundary you should be aware of.

            These invariants apply regardless of which agent or model you use.

            ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#treat-the-repository-text-as-untrusted-input","level":3,"title":"Treat the Repository Text as \"Untrusted Input\"","text":"

            Issue descriptions, PR comments, commit messages, documentation, and even code comments can contain text that looks like instructions. An agent that reads a GitHub issue and then runs a command found inside it is executing untrusted input.

            The rule: Before running any command the agent found in repo text (issues, docs, comments), restate the command explicitly and confirm it does what you expect. Don't let the agent copy-paste from untrusted sources into a shell.

            ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#ask-before-destructive-operations","level":3,"title":"Ask Before Destructive Operations","text":"

            git push --force, rm -rf, DROP TABLE, docker system prune: these are irreversible or hard to reverse. A good agent should pause before running them, but don't rely on that.

            The rule: For any operation that deletes data, overwrites history, or affects shared infrastructure, require explicit confirmation. If the agent runs something destructive without asking, that's a course-correction moment: \"Stop. Never run destructive commands without asking first.\"

            ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#scope-the-blast-radius","level":3,"title":"Scope the Blast Radius","text":"

            An agent told to \"fix the tests\" might modify test fixtures, change assertions, or delete tests that inconveniently fail. An agent told to \"deploy\" might push to production. Broad mandates create broad risk.

            The rule: Constrain scope before starting work. The Reliability Checklist's scope boundary (#4) is your primary safety lever. When in doubt, err on the side of a tighter boundary.

            ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#secrets-never-belong-in-context","level":3,"title":"Secrets Never Belong in Context","text":"

            LEARNINGS.md, DECISIONS.md, and session transcripts are plain-text files that may be committed to version control.

            Don't persist API keys, passwords, tokens, or credentials in context files.

            The rule: If the agent encounters a secret during work, it should use it transiently (environment variable, an alias to the secret instead of the actual secret, etc.) and never write it to a context file.

            Any Secret Seen IS Exposed

            If you see a secret in a context file, remove it immediately and rotate the credential.

            ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#explore-plan-implement","level":2,"title":"Explore → Plan → Implement","text":"

            For non-trivial work, name the phase you want:

            Explore src/auth and summarize the current flow.\nThen propose a plan. After I approve, implement with tests.\n

            This prevents the AI from jumping straight to code.

            The three phases map to different modes of thinking:

            • Explore: read, search, understand: no changes
            • Plan: propose approach, trade-offs, scope: no changes
            • Implement: write code, run tests, verify: changes

            Small fixes skip straight to implement. Complex or uncertain work benefits from all three.

            ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#prompts-by-task-type","level":2,"title":"Prompts by Task Type","text":"

            Different tasks need different prompt structures. The pattern: symptom + location + verification.

            ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#bugfix","level":3,"title":"Bugfix","text":"
            Users report search returns empty results for queries with hyphens.\nReproduce in src/search/. Write a failing test for \"foo-bar\",\nfix the root cause, run: go test ./internal/search/...\n
            ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#refactor","level":3,"title":"Refactor","text":"
            Inspect src/auth/ and list duplication hotspots.\nPropose a refactor plan scoped to one module.\nAfter approval, remove duplication without changing behavior.\nAdd a test if coverage is missing. Run: make audit\n
            ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#research","level":3,"title":"Research","text":"
            Explore the request flow around src/api/.\nSummarize likely bottlenecks with evidence.\nPropose 2-3 hypotheses. Do not implement yet.\n
            ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#docs","level":3,"title":"Docs","text":"
            Update docs/cli-reference.md to reflect the new --format flag.\nConfirm the flag exists in the code and the example works.\n

            Notice each prompt includes what to verify and how. Without that, you get a \"should work now\" instead of evidence.

            ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#writing-tasks-as-prompts","level":2,"title":"Writing Tasks as Prompts","text":"

            Tasks in TASKS.md are indirect prompts to the AI. How you write them shapes how the AI approaches the work.

            ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#state-the-motivation-not-just-the-goal","level":3,"title":"State the Motivation, Not Just the Goal","text":"

            Tell the AI why you are building something, not just what.

            Bad: \"Build a calendar view.\"

            Good: \"Build a calendar view. The motivation is that all notes and tasks we build later should be viewable here.\"

            The second version lets the AI anticipate downstream requirements:

            It will design the calendar's data model to be compatible with future features: Without you having to spell out every integration point. Motivation turns a one-off task into a directional task.

            ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#state-the-deliverable-not-just-steps","level":3,"title":"State the Deliverable, Not Just Steps","text":"

            Bad task (implementation-focused):

            - [ ] T1.1.0: Parser system\n  - [ ] Define data structures\n  - [ ] Implement line parser\n  - [ ] Implement session grouper\n

            The AI may complete all subtasks but miss the actual goal. What does \"Parser system\" deliver to the user?

            Good task (deliverable-focused):

            - [ ] T1.1.0: Parser CLI command\n  **Deliverable**: `ctx journal source` command that shows parsed sessions\n  - [ ] Define data structures\n  - [ ] Implement line parser\n  - [ ] Implement session grouper\n

            Now the AI knows the subtasks serve a specific user-facing deliverable.

            ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#use-acceptance-criteria","level":3,"title":"Use Acceptance Criteria","text":"

            For complex tasks, add explicit \"done when\" criteria:

            - [ ] T2.0: Authentication system\n  **Done when**:\n  - [ ] User can register with email\n  - [ ] User can log in and get a token\n  - [ ] Protected routes reject unauthenticated requests\n

            This prevents premature \"task complete\" when only the implementation details are done, but the feature doesn't actually work.

            ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#subtasks-parent-task","level":3,"title":"Subtasks ≠ Parent Task","text":"

            Completing all subtasks does not mean the parent task is complete.

            The parent task describes what the user gets.

            Subtasks describe how to build it.

            Always re-read the parent task description before marking it complete. Verify the stated deliverable exists and works.

            ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#why-do-these-approaches-work","level":2,"title":"Why Do These Approaches Work?","text":"

            The patterns in this guide aren't invented here: They are practitioner translations of well-established, peer-reviewed research, most of which predate the current AI (hype) wave.

            The underlying ideas come from decades of work in machine learning, cognitive science, and numerical optimization. For a concrete case study showing how these principles play out when an agent decides whether to follow instructions (attention competition, optimization toward least-resistance paths, and observable compliance as a design goal) see The Dog Ate My Homework.

            Phased work (\"Explore → Plan → Implement\") applies chain-of-thought reasoning: Decomposing a problem into sequential steps before acting. Forcing intermediate reasoning steps measurably improves output quality in language models, just as it does in human problem-solving. Wei et al., Chain-of-Thought Prompting Elicits Reasoning in Large Language Models (2022).

            Root-cause prompts (\"Why doesn't X work?\") use step-back abstraction: Retreating to a higher-level question before diving into specifics. This mirrors how experienced engineers debug: they ask \"what should happen?\" before asking \"what went wrong?\" Zheng et al., Take a Step Back: Evoking Reasoning via Abstraction in Large Language Models (2023).

            Exploring alternatives (\"Propose 2-3 approaches\") leverages self-consistency: Generating multiple independent reasoning paths and selecting the most coherent result. The idea traces back to ensemble methods in ML: A committee of diverse solutions outperforms any single one. Wang et al., Self-Consistency Improves Chain of Thought Reasoning in Language Models (2022).

            Impact analysis (\"What would break if we...\") is a form of tree-structured exploration: Branching into multiple consequence paths before committing. This is the same principle behind game-tree search (minimax, MCTS) that has powered decision-making systems since the 1950s. Yao et al., Tree of Thoughts: Deliberate Problem Solving with Large Language Models (2023).

            Motivation prompting (\"Build X because Y\") works through goal conditioning: Providing the objective function alongside the task. In optimization terms, you are giving the gradient direction, not just the loss. The model can make locally coherent decisions that serve the global objective because it knows what \"better\" means.

            Scope constraints (\"Only change files in X\") apply constrained optimization: Bounding the search space to prevent divergence. This is the same principle behind regularization in ML: Without boundaries, powerful optimizers find solutions that technically satisfy the objective but are practically useless.

            CLI commands as prompts (\"Run ctx status\") interleave reasoning with acting: The model thinks, acts on external tools, observes results, then thinks again. Grounding reasoning in real tool output reduces hallucination because the model can't ignore evidence it just retrieved. Yao et al., ReAct: Synergizing Reasoning and Acting in Language Models (2022).

            Task decomposition (\"Prompts by Task Type\") applies least-to-most prompting: Breaking a complex problem into subproblems and solving them sequentially, each building on the last. This is the research version of \"plan, then implement one slice.\" Zhou et al., Least-to-Most Prompting Enables Complex Reasoning in Large Language Models (2022).

            Explicit planning (\"Explore → Plan → Implement\") is directly supported by plan-and-solve prompting, which addresses missing-step failures in zero-shot reasoning by extracting a plan before executing. The phased structure prevents the model from jumping to code before understanding the problem. Wang et al., Plan-and-Solve Prompting: Improving Zero-Shot Chain-of-Thought Reasoning by Large Language Models (2023).

            Session reflection (\"What did we learn?\", /ctx-reflect) is a form of verbal reinforcement learning: Improving future performance by persisting linguistic feedback as memory rather than updating weights. This is exactly what LEARNINGS.md and DECISIONS.md provide: a durable feedback signal across sessions. Shinn et al., Reflexion: Language Agents with Verbal Reinforcement Learning (2023).

            These aren't prompting \"hacks\" that you will find in the \"1000 AI Prompts for the Curious\" listicles: They are applications of foundational principles:

            • Decomposition,
            • Abstraction,
            • Ensemble Reasoning,
            • Search,
            • and Constrained Optimization.

            They work because language models are, at their core, optimization systems navigating probabilistic landscapes.

            ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#further-reading","level":2,"title":"Further Reading","text":"
            • The Attention Budget: Why your AI forgets what you just told it, and how token budgets shape context strategy
            • The Dog Ate My Homework: A case study in making agents follow instructions: attention timing, delegation decay, and observable compliance as a design goal
            ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/prompting-guide/#contributing","level":2,"title":"Contributing","text":"

            Found a prompt that works well? Open an issue or PR with:

            1. The prompt text;
            2. What behavior it triggers;
            3. When to use it;
            4. Why it works (optional but helpful).

            Dive Deeper:

            • Recipes: targeted how-to guides for specific tasks
            • CLI Reference: all commands and flags
            • Integrations: setup for Claude Code, Cursor, Aider
            ","path":["Home","Working with AI","Prompting Guide"],"tags":[]},{"location":"home/repeated-mistakes/","level":1,"title":"My AI Keeps Making the Same Mistakes","text":"","path":["Home","Working with AI","My AI Keeps Making the Same Mistakes"],"tags":[]},{"location":"home/repeated-mistakes/#the-problem","level":2,"title":"The Problem","text":"

            You found a bug last Tuesday. You debugged it, understood the root cause, and moved on. Today, a new session hits the exact same bug. The AI rediscovers it from scratch, burning twenty minutes on something you already solved.

            Worse: you spent an hour last week evaluating two database migration strategies, picked one, documented why in a comment somewhere, and now the AI is cheerfully suggesting the approach you rejected. Again.

            This is not a model problem. It is a memory problem. Without persistent context, every session starts with amnesia.

            ","path":["Home","Working with AI","My AI Keeps Making the Same Mistakes"],"tags":[]},{"location":"home/repeated-mistakes/#how-ctx-stops-the-loop","level":2,"title":"How ctx Stops the Loop","text":"

            ctx gives your AI three files that directly prevent repeated mistakes, each targeting a different failure mode.

            ","path":["Home","Working with AI","My AI Keeps Making the Same Mistakes"],"tags":[]},{"location":"home/repeated-mistakes/#decisionsmd-stop-relitigating-settled-choices","level":3,"title":"DECISIONS.md: Stop Relitigating Settled Choices","text":"

            When you make an architectural decision, record it with rationale and rejected alternatives. The AI reads this at session start and treats it as settled.

            ## [2026-02-12] Use JWT for Authentication\n\n**Status**: Accepted\n\n**Context**: Need stateless auth for the API layer.\n\n**Decision**: JWT with short-lived access tokens and refresh rotation.\n\n**Rationale**: Stateless, scales horizontally, team has prior experience.\n\n**Alternatives Considered**:\n- Session-based auth: Rejected. Requires sticky sessions or shared store.\n- API keys only: Rejected. No user identity, no expiry rotation.\n

            Next session, when the AI considers auth, it reads this entry and builds on the decision instead of re-debating it. If someone asks \"why not sessions?\", the rationale is already there.

            ","path":["Home","Working with AI","My AI Keeps Making the Same Mistakes"],"tags":[]},{"location":"home/repeated-mistakes/#learningsmd-capture-gotchas-once","level":3,"title":"LEARNINGS.md: Capture Gotchas Once","text":"

            Learnings are the bugs, quirks, and non-obvious behaviors that cost you time the first time around. Write them down so they cost you zero time the second time.

            ## Build\n\n### CGO Required for SQLite on Alpine\n\n**Discovered**: 2026-01-20\n\n**Context**: Docker build failed silently with \"no such table\" at runtime.\n\n**Lesson**: The go-sqlite3 driver requires CGO_ENABLED=1 and gcc\ninstalled in the build stage. Alpine needs apk add build-base.\n\n**Application**: Always use the golang:alpine image with build-base\nfor SQLite builds. Never set CGO_ENABLED=0.\n

            Without this entry, the next session that touches the Dockerfile will hit the same wall. With it, the AI knows before it starts.

            ","path":["Home","Working with AI","My AI Keeps Making the Same Mistakes"],"tags":[]},{"location":"home/repeated-mistakes/#constitutionmd-draw-hard-lines","level":3,"title":"CONSTITUTION.md: Draw Hard Lines","text":"

            Some mistakes are not about forgetting - they are about boundaries the AI should never cross. CONSTITUTION.md sets inviolable rules.

            * [ ] Never commit secrets, tokens, API keys, or credentials\n* [ ] Never disable security linters without a documented exception\n* [ ] All database migrations must be reversible\n

            The AI reads these as absolute constraints. It does not weigh them against convenience. It refuses tasks that would violate them.

            ","path":["Home","Working with AI","My AI Keeps Making the Same Mistakes"],"tags":[]},{"location":"home/repeated-mistakes/#the-accumulation-effect","level":2,"title":"The Accumulation Effect","text":"

            Each of these files grows over time. Session one captures two decisions. Session five adds a tricky learning about timezone handling. Session twelve records a convention about error message formatting.

            By session twenty, your AI has a knowledge base that no single person carries in their head. New team members - human or AI - inherit it instantly.

            The key insight: you are not just coding. You are building a knowledge layer that makes every future session faster.

            ctx files version with your code in git. They survive branch switches, team changes, and model upgrades. The context outlives any single session.

            ","path":["Home","Working with AI","My AI Keeps Making the Same Mistakes"],"tags":[]},{"location":"home/repeated-mistakes/#getting-started","level":2,"title":"Getting Started","text":"

            Capture your first decision or learning right now:

            ctx add decision \"Use PostgreSQL\" \\\n  --context \"Need a relational database for the project\" \\\n  --rationale \"Team expertise, JSONB support, mature ecosystem\" \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n\nctx add learning \"Vitest mock hoisting\" \\\n  --context \"Tests failing intermittently\" \\\n  --lesson \"vi.mock() must be at file top level\" \\\n  --application \"Use vi.doMock() for dynamic mocks\" \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n
            ","path":["Home","Working with AI","My AI Keeps Making the Same Mistakes"],"tags":[]},{"location":"home/repeated-mistakes/#further-reading","level":2,"title":"Further Reading","text":"
            • Knowledge Capture: the full workflow for persisting decisions, learnings, and conventions
            • Context Files Reference: structure and format for every file in .context/
            • About ctx: the bigger picture - why persistent context changes how you work with AI
            ","path":["Home","Working with AI","My AI Keeps Making the Same Mistakes"],"tags":[]},{"location":"home/steering/","level":1,"title":"Steering Files","text":"","path":["Home","Customization","Steering Files"],"tags":[]},{"location":"home/steering/#steering-files","level":2,"title":"Steering Files","text":"

            ctx projects talk to AI assistants through several layers (context files, decisions, conventions, the agent context packet) but none of those can tell the assistant how to behave when a specific kind of prompt arrives. That's what steering files are for.

            A steering file is a small markdown document with YAML frontmatter that says: \"when the user asks about X, prepend these rules to the prompt.\" ctx manages those files in .context/steering/, decides which ones match each prompt, and syncs them out to each AI tool's native config (Claude Code, Cursor, Kiro, Cline) so the rules actually land in the prompt pipeline.

            ","path":["Home","Customization","Steering Files"],"tags":[]},{"location":"home/steering/#not-the-same-as-decisions-or-conventions","level":2,"title":"Not the Same as Decisions or Conventions","text":"

            The three look similar on disk but serve different purposes:

            Kind Purpose Decisions (DECISIONS.md) What was chosen and why Conventions (CONVENTIONS.md) How the codebase is written Steering (.context/steering/*.md) How the AI should behave on matching prompts

            If you find yourself writing \"the AI should always do X when asked about Y,\" that belongs in steering, not decisions.

            ","path":["Home","Customization","Steering Files"],"tags":[]},{"location":"home/steering/#your-first-steering-files","level":2,"title":"Your First Steering Files","text":"

            ctx init scaffolds four foundation steering files in .context/steering/ so you start with something to edit rather than an empty directory:

            File What to fill in product.md What the project is, who it's for, what's out of scope tech.md Languages, frameworks, runtime, hard constraints structure.md Directory layout, where new files go, naming rules workflow.md Branch strategy, commit conventions, pre-commit checks

            Each file starts with an inline HTML comment explaining the three inclusion modes, priority semantics, and tool scoping. The comment is invisible in rendered markdown but visible when you open the file to edit it; it's self-documenting scaffolding, not permanent guidance. Delete the comment once you've customized the file.

            Default settings for foundation files:

            • inclusion: always: fires on every AI tool call
            • priority: 10: injected near the top of the prompt
            • tools: []: applies to every configured AI tool

            You should open each of these files and replace the placeholder content with your project's actual rules. Re-running ctx init is safe: existing files are left alone, so your edits survive. Use ctx init --no-steering-init to opt out of the scaffold entirely.

            ","path":["Home","Customization","Steering Files"],"tags":[]},{"location":"home/steering/#inclusion-modes","level":2,"title":"Inclusion Modes","text":"

            Each steering file declares an inclusion mode in its frontmatter:

            Mode When the file is included always Every prompt, unconditionally auto When the prompt keywords match the file's description manual Only when the user explicitly names the file

            Which mode to pick depends on the AI tool you use, because the two tool families consume steering very differently.

            Claude Code and Codex: prefer inclusion: always for rules that must fire reliably. These tools have two delivery channels:

            1. The plugin's PreToolUse hook runs ctx agent with an empty prompt, so only always files match and get injected automatically on every tool call.
            2. The ctx_steering_get MCP tool, registered automatically when the ctx plugin is installed. Claude can call this tool mid-task to fetch auto or manual files matching a specific prompt. Verify with claude mcp list; look for ctx: ✓ Connected.

            Use always for invariants and anything that must fire every session. Use auto for situational rules where \"Claude fetches this when the prompt is relevant\" is the right behavior; those still land, just on Claude's judgment. Use manual for reference libraries you'll name explicitly.

            Cursor, Cline, Kiro: auto is the natural default. These tools read .cursor/rules/, .clinerules/, or .kiro/steering/ natively and resolve the description match on their own, so auto files fire when the prompt matches. manual files load on explicit invocation. always still works but consumes context budget on every turn.

            Mixed setups: if a rule must fire on Claude Code, pick always, even if it's overkill for your Cursor setup. The context budget cost is small; the alternative (silently not firing) is worse.

            ","path":["Home","Customization","Steering Files"],"tags":[]},{"location":"home/steering/#two-families-of-ai-tools-two-delivery-paths","level":2,"title":"Two Families of AI Tools, Two Delivery Paths","text":"

            Not every AI tool consumes steering the same way. ctx handles two tool families differently, and it's worth knowing which family your editor is in before you wonder why a rule isn't firing.

            Native-rules tools (Cursor, Cline, Kiro) have a built-in rules primitive. They read a specific directory (.cursor/rules/, .clinerules/, .kiro/steering/) and apply the rules they find there. ctx handles these via ctx steering sync, which exports your files into the tool-native format. Run sync whenever you edit a steering file.

            Hook + MCP tools (Claude Code, Codex) have no native rules primitive, so ctx steering sync is a no-op for them. Instead, ctx delivers steering through two non-sync channels:

            1. Automatic injection via a PreToolUse hook. The ctx setup claude-code plugin wires a hook that runs ctx agent --budget 8000 before each tool call. ctx agent loads your steering files, filters them by the active prompt, and includes matching bodies in the context packet it prints. Claude Code feeds that output back into its context. Every tool call, automatically.
            2. On-demand via the ctx_steering_get MCP tool. The ctx MCP server exposes a tool Claude can call mid-task to fetch matching steering files for a specific prompt. Claude decides when to call it; it's not automatic.

            Both channels activate when you run ctx setup claude-code --write. After that, steering just works for Claude Code.

            Practical takeaway:

            • Using Cursor/Cline/Kiro only? Run ctx steering sync after edits.
            • Using Claude Code or Codex only? Never run sync; the hook+MCP pipeline handles it.
            • Using both? Run sync for the native-rules tools; the hook+MCP pipeline covers Claude Code automatically.
            ","path":["Home","Customization","Steering Files"],"tags":[]},{"location":"home/steering/#two-shapes-of-automation-rules-and-scripts","level":2,"title":"Two Shapes of Automation: Rules and Scripts","text":"

            Steering is one of two hook-like layers ctx provides for customizing AI behavior. They're complementary:

            • Steering: persistent rules that get prepended to prompts. Declarative, text-only, scored by match.
            • Triggers: executable shell scripts that fire at lifecycle events. Imperative, runs arbitrary code, gated by exit codes.

            Pick steering when you want \"always remind the AI of X.\" Pick triggers when you want \"do Y when event Z happens.\" They can coexist; many projects use both.

            ","path":["Home","Customization","Steering Files"],"tags":[]},{"location":"home/steering/#where-to-go-next","level":2,"title":"Where to Go Next","text":"
            • Writing Steering Files: a six-step walkthrough: scaffold, write the rule, preview matches, list, get-rules-in-front-of-the-AI (two paths depending on tool family), verify.
            • ctx steering reference: full command, flag, and frontmatter reference; includes the per-tool delivery-mechanism table and a dedicated section on how Claude Code and Codex consume steering.
            • ctx setup: configure which AI tools receive steering. For Cursor/Cline/Kiro this is about sync targets; for Claude Code/Codex it installs the plugin that wires the PreToolUse hook and MCP server.
            • Lifecycle Triggers: the imperative companion to steering files.
            ","path":["Home","Customization","Steering Files"],"tags":[]},{"location":"home/triggers/","level":1,"title":"Lifecycle Triggers","text":"","path":["Home","Customization","Lifecycle Triggers"],"tags":[]},{"location":"home/triggers/#lifecycle-triggers","level":2,"title":"Lifecycle Triggers","text":"

            Some things can't be expressed as a rule you want the AI to follow. Sometimes you want something to happen: block a dangerous tool call, inject today's standup notes into the next session, log every file save to a journal. That's what triggers are for.

            A trigger is an executable shell script that ctx runs at a specific lifecycle event: the start of a session, before a tool call, when a file is saved, and so on. Triggers read a JSON payload from stdin, do whatever they need, and write a JSON response on stdout. They can allow, block, or inject context into the pipeline depending on the event type.

            ","path":["Home","Customization","Lifecycle Triggers"],"tags":[]},{"location":"home/triggers/#trigger-types","level":2,"title":"Trigger Types","text":"Type Fires when Use case session-start A new AI session begins Inject rotating context, standup notes session-end An AI session ends Persist summaries, send notifications pre-tool-use Before a tool call executes Block, gate, or audit post-tool-use After a tool call completes Log, react, post-process file-save A file is saved Lint on save, update indices context-add A new entry is added to .context/ Cross-link, notify, enrich","path":["Home","Customization","Lifecycle Triggers"],"tags":[]},{"location":"home/triggers/#triggers-are-arbitrary-code-treat-them-like-pre-commit-hooks","level":2,"title":"Triggers Are Arbitrary Code: Treat Them like Pre-Commit Hooks","text":"

            Only Enable Scripts You've Read and Understand

            A trigger is a shell script with the executable bit set. It runs with the same privileges as your AI tool and receives JSON input on stdin. A malicious or buggy trigger can block tool calls, corrupt context files, or exfiltrate data.

            ctx trigger add intentionally creates new scripts disabled (no executable bit). You must ctx trigger enable <name> after reviewing the contents. That's not a suggestion; it's the security model.

            ","path":["Home","Customization","Lifecycle Triggers"],"tags":[]},{"location":"home/triggers/#three-hook-like-layers-in-ctx","level":2,"title":"Three Hook-like Layers in ctx","text":"

            Triggers are one of three distinct hook-like concepts in ctx. The names are similar but the owners and use cases are not:

            Layer Owned by Where they live When to use ctx trigger You .context/hooks/<type>/*.sh Project-specific automation, any AI tool ctx system hooks ctx itself built-in, wired into tool configs Built-in nudges (you don't author these) Claude Code hooks Claude Code .claude/settings.local.json Claude-Code-only tool-specific integration

            This page is about the first category. The other two run automatically and are invisible to you.

            ","path":["Home","Customization","Lifecycle Triggers"],"tags":[]},{"location":"home/triggers/#triggers-vs-steering-same-problem-different-shape","level":2,"title":"Triggers vs Steering: Same Problem, Different Shape","text":"

            Triggers are the imperative counterpart to steering files. Steering expresses persistent rules the AI reads before each prompt; triggers express side effects that run on lifecycle events. They're complementary, not competing:

            • Want the AI to remember something? → Steering.
            • Want a script to run when something happens? → Trigger.

            Most projects use both.

            ","path":["Home","Customization","Lifecycle Triggers"],"tags":[]},{"location":"home/triggers/#where-to-go-next","level":2,"title":"Where to Go Next","text":"
            • Authoring Lifecycle Triggers: walkthrough with security guidance: scaffold, test, enable, iterate.
            • ctx trigger reference: command reference, trigger type table, input/output contract.
            • Steering files: the declarative counterpart to triggers.
            ","path":["Home","Customization","Lifecycle Triggers"],"tags":[]},{"location":"operations/","level":1,"title":"Operations","text":"

            Guides for installing, upgrading, integrating, and running ctx. Split into three groups by audience.

            ","path":["Operations"],"tags":[]},{"location":"operations/#day-to-day","level":2,"title":"Day-to-Day","text":"

            Everyday operation guides for anyone running ctx in a project or adopting it in a team.

            ","path":["Operations"],"tags":[]},{"location":"operations/#integration","level":3,"title":"Integration","text":"

            Adopt ctx in an existing project: initialize context files, migrate from other tools, and onboard team members.

            ","path":["Operations"],"tags":[]},{"location":"operations/#upgrade","level":3,"title":"Upgrade","text":"

            Upgrade between versions with step-by-step migration notes and breaking-change guidance.

            ","path":["Operations"],"tags":[]},{"location":"operations/#ai-tools","level":3,"title":"AI Tools","text":"

            Configure ctx with Claude Code, Cursor, Aider, Copilot, Windsurf, and other AI coding tools.

            ","path":["Operations"],"tags":[]},{"location":"operations/#autonomous-loops","level":3,"title":"Autonomous Loops","text":"

            Run an unattended AI agent that works through tasks overnight, with ctx providing persistent memory between iterations.

            ","path":["Operations"],"tags":[]},{"location":"operations/#hub","level":2,"title":"Hub","text":"

            Operator guides for running a ctx Hub, the gRPC server that fans out structured entries across projects. If you're a client connecting to a Hub someone else runs, see ctx connect and the Hub recipes instead.

            ","path":["Operations"],"tags":[]},{"location":"operations/#hub-operations","level":3,"title":"Hub Operations","text":"

            Data directory layout, daemon management, systemd unit, backup and restore, log rotation, monitoring, and upgrades.

            ","path":["Operations"],"tags":[]},{"location":"operations/#hub-failure-modes","level":3,"title":"Hub Failure Modes","text":"

            What can go wrong in network, storage, cluster, auth, and clock layers, and what you should do about each one. Includes the short-list table on-call engineers will want bookmarked.

            ","path":["Operations"],"tags":[]},{"location":"operations/#maintainers","level":2,"title":"Maintainers","text":"

            Runbooks for people shipping ctx itself.

            ","path":["Operations"],"tags":[]},{"location":"operations/#cutting-a-release","level":3,"title":"Cutting a Release","text":"

            Step-by-step runbook for maintainers: bump version, generate release notes, run the release script, and verify the result.

            ","path":["Operations"],"tags":[]},{"location":"operations/#runbooks","level":2,"title":"Runbooks","text":"

            Step-by-step procedures you run with your agent. Each runbook includes a prompt to paste into a Claude Code session and guidance on triaging the results.

            Runbook Purpose When to run Release checklist Full pre-release sequence Before every release Plugin release Plugin-specific release steps Plugin changes ship Breaking migration Guide users across breaking changes Releases with renames Hub deployment Set up a ctx Hub end-to-end First-time hub setup New contributor Onboarding: clone to first session New contributors Codebase audit AST audits, magic strings, dead code, doc alignment Before release, quarterly Docs semantic audit Narrative gaps, weak pages, structural problems Before release, after adding pages Sanitize permissions Clean .claude/settings.local.json of over-broad grants After heavy permission granting Architecture exploration Systematic architecture docs across repos New codebase onboarding, reviews

            Recommended cadence:

            • Before every release: release checklist (which includes codebase audit + docs semantic audit)
            • Monthly: sanitize permissions
            • Quarterly: full sweep of all audit runbooks
            ","path":["Operations"],"tags":[]},{"location":"operations/autonomous-loop/","level":1,"title":"Autonomous Loops","text":"","path":["Operations","Day-to-Day","Autonomous Loops"],"tags":[]},{"location":"operations/autonomous-loop/#autonomous-ai-development","level":2,"title":"Autonomous AI Development","text":"

            Iterate until done.

            An autonomous loop is an iterative AI development workflow where an agent works on tasks until completion, without constant human intervention.

            ctx provides the memory that makes this possible:

            • ctx provides the memory: persistent context that survives across iterations
            • The loop provides the automation: continuous execution until done

            Together, they enable fully autonomous AI development where the agent remembers everything across iterations.

            Origin

            This pattern is inspired by Geoffrey Huntley's Ralph Wiggum technique.

            We use generic terminology here so the concepts remain clear regardless of trends.

            ","path":["Operations","Day-to-Day","Autonomous Loops"],"tags":[]},{"location":"operations/autonomous-loop/#how-it-works","level":2,"title":"How It Works","text":"
            graph TD\n    A[Start Loop] --> B[Load .context/loop.md]\n    B --> C[AI reads .context/]\n    C --> D[AI picks task from TASKS.md]\n    D --> E[AI completes task]\n    E --> F[AI updates context files]\n    F --> G[AI commits changes]\n    G --> H{Check signals}\n    H -->|SYSTEM_CONVERGED| I[Done - all tasks complete]\n    H -->|SYSTEM_BLOCKED| J[Done - needs human input]\n    H -->|Continue| B
            1. Loop reads .context/loop.md and invokes AI
            2. AI loads context from .context/
            3. AI picks one task and completes it
            4. AI updates context files (mark task done, add learnings)
            5. AI commits changes
            6. Loop checks for completion signals
            7. Repeat until converged or blocked
            ","path":["Operations","Day-to-Day","Autonomous Loops"],"tags":[]},{"location":"operations/autonomous-loop/#quick-start-shell-while-loop-recommended","level":2,"title":"Quick Start: Shell While Loop (Recommended)","text":"

            The best way to run an autonomous loop is a plain shell script that invokes your AI tool in a fresh process on each iteration. This is \"pure Ralph\":

            The only state that carries between iterations is what lives in .context/ and the git history. No context window bleed, no accumulated tokens, no hidden state.

            Create a loop.sh:

            #!/bin/bash\n# loop.sh: an autonomous iteration loop\n\nPROMPT_FILE=\"${1:-.context/loop.md}\"\nMAX_ITERATIONS=\"${2:-10}\"\nOUTPUT_FILE=\"/tmp/loop_output.txt\"\n\nfor i in $(seq 1 $MAX_ITERATIONS); do\n  echo \"=== Iteration $i ===\"\n\n  # Invoke AI with prompt\n  cat \"$PROMPT_FILE\" | claude --print > \"$OUTPUT_FILE\" 2>&1\n\n  # Display output\n  cat \"$OUTPUT_FILE\"\n\n  # Check for completion signals\n  if grep -q \"SYSTEM_CONVERGED\" \"$OUTPUT_FILE\"; then\n    echo \"Loop complete: All tasks done\"\n    break\n  fi\n\n  if grep -q \"SYSTEM_BLOCKED\" \"$OUTPUT_FILE\"; then\n    echo \"Loop blocked: Needs human input\"\n    break\n  fi\n\n  sleep 2\ndone\n

            Make it executable and run:

            chmod +x loop.sh\n./loop.sh\n

            You can also generate this script with ctx loop (see CLI Reference).

            ","path":["Operations","Day-to-Day","Autonomous Loops"],"tags":[]},{"location":"operations/autonomous-loop/#why-do-we-use-a-shell-loop","level":3,"title":"Why Do We Use a Shell Loop?","text":"

            Each iteration starts a fresh AI process with zero context window history. The agent knows only what it reads from .context/ files: exactly the information you chose to persist.

            This is the core loop principle: memory is explicit, not accidental.

            ","path":["Operations","Day-to-Day","Autonomous Loops"],"tags":[]},{"location":"operations/autonomous-loop/#alternative-claude-codes-built-in-loop","level":2,"title":"Alternative: Claude Code's Built-in Loop","text":"

            Claude Code has built-in loop support:

            # Start autonomous loop\n/loop\n\n# Cancel running loop\n/cancel-loop\n

            This is convenient for quick iterations, but be aware of important caveats:

            This Loop Is Not Pure

            Claude Code's /loop runs all iterations within the same session. This means:

            • State leaks between iterations: The context window accumulates output from every previous iteration. The agent \"remembers\" things it saw earlier (even if they were never persisted to .context/).
            • Token budget degrades: Each iteration adds to the context window, leaving less room for actual work in later iterations.
            • Not ergonomic for long runs: Users report that the built-in loop is less predictable for 10+ iteration runs compared to a shell loop.

            For short explorations (2-5 iterations) or interactive use, /loop works fine. For overnight unattended runs or anything where iteration independence matters, use the shell while loop instead.

            ","path":["Operations","Day-to-Day","Autonomous Loops"],"tags":[]},{"location":"operations/autonomous-loop/#the-contextloopmd-file","level":2,"title":"The .context/loop.md File","text":"

            The prompt file instructs the AI on how to work autonomously. Here's a template:

            # Autonomous Development Prompt\n\nYou are working on this project autonomously. Follow these steps:\n\n## 1. Load Context\n\nRead these files in order:\n\n1. `.context/CONSTITUTION.md`: NEVER violate these rules\n2. `.context/TASKS.md`: Find work to do\n3. `.context/CONVENTIONS.md`: Follow these patterns\n4. `.context/DECISIONS.md`: Understand past choices\n\n## 2. Pick One Task\n\nFrom `.context/TASKS.md`, select ONE task that is:\n\n- Not blocked\n- Highest priority available\n- Within your capabilities\n\n## 3. Complete the Task\n\n- Write code following conventions\n- Run tests if applicable\n- Keep changes focused and minimal\n\n## 4. Update Context\n\nAfter completing work:\n\n- Mark task complete in `TASKS.md`\n- Add any learnings to `LEARNINGS.md`\n- Add any decisions to `DECISIONS.md`\n\n## 5. Commit Changes\n\nCreate a focused commit with clear message.\n\n## 6. Signal Status\n\nEnd your response with exactly ONE of:\n\n- `SYSTEM_CONVERGED`: All tasks in TASKS.md are complete\n- `SYSTEM_BLOCKED`: Cannot proceed, need human input (explain why)\n- (no signal): More work remains, continue to next iteration\n\n## Rules\n\n- ONE task per iteration\n- NEVER skip tests\n- NEVER violate CONSTITUTION.md\n- Commit after each task\n
            ","path":["Operations","Day-to-Day","Autonomous Loops"],"tags":[]},{"location":"operations/autonomous-loop/#completion-signals","level":2,"title":"Completion Signals","text":"

            The loop watches for these signals in AI output:

            Signal Meaning When to Use SYSTEM_CONVERGED All tasks complete No pending tasks in TASKS.md SYSTEM_BLOCKED Cannot proceed Needs clarification, access, or decision BOOTSTRAP_COMPLETE Initial setup done Project scaffolding finished","path":["Operations","Day-to-Day","Autonomous Loops"],"tags":[]},{"location":"operations/autonomous-loop/#example-usage","level":3,"title":"Example Usage","text":"

            converged state

            I've completed all tasks in TASKS.md:\n- [x] Set up project structure\n- [x] Implement core API\n- [x] Add authentication\n- [x] Write tests\n\nNo pending tasks remain.\n\nSYSTEM_CONVERGED\n

            blocked state

            I cannot proceed with the \"Deploy to production\" task because:\n- Missing AWS credentials\n- Need confirmation on region selection\n\nPlease provide credentials and confirm deployment region.\n\nSYSTEM_BLOCKED\n
            ","path":["Operations","Day-to-Day","Autonomous Loops"],"tags":[]},{"location":"operations/autonomous-loop/#why-ctx-and-loops-work-well-together","level":2,"title":"Why ctx and Loops Work Well Together","text":"Without ctx With ctx Each iteration starts fresh Each iteration has full history Decisions get re-made Decisions persist in DECISIONS.md Learnings are lost Learnings accumulate in LEARNINGS.md Tasks can be forgotten Tasks tracked in TASKS.md","path":["Operations","Day-to-Day","Autonomous Loops"],"tags":[]},{"location":"operations/autonomous-loop/#automatic-context-updates","level":3,"title":"Automatic Context Updates","text":"

            During the loop, the AI should update context files:

            Mark task complete:

            ctx task complete \"implement user auth\"\n

            Or emit an update command (parsed by ctx watch):

            <context-update type=\"complete\">user auth</context-update>\n

            Add learning:

            ctx add learning \"Rate limiting requires Redis connection\" \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n

            Or via update command:

            <context-update type=\"learning\"\n  context=\"Implementing rate limiter\"\n  lesson=\"Rate limiting requires Redis connection\"\n  application=\"Ensure Redis is provisioned before enabling rate limits\"\n>Rate Limiting Redis Dependency</context-update>\n

            Record decision:

            ctx add decision \"Use JWT tokens for API authentication\" \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n

            ","path":["Operations","Day-to-Day","Autonomous Loops"],"tags":[]},{"location":"operations/autonomous-loop/#advanced-watch-mode","level":2,"title":"Advanced: Watch Mode","text":"

            Run ctx watch alongside the loop to automatically process context updates:

            # Terminal 1: Run the loop\n./loop.sh 2>&1 | tee /tmp/loop.log\n\n# Terminal 2: Watch for context updates\nctx watch --log /tmp/loop.log\n

            The watch command processes context updates from the loop output in real time.

            ","path":["Operations","Day-to-Day","Autonomous Loops"],"tags":[]},{"location":"operations/autonomous-loop/#project-setup","level":2,"title":"Project Setup","text":"

            Initialize a project for autonomous loop operation:

            ctx init\n

            The loop prompt template is deployed to .context/loop.md during initialization. It instructs the agent to:

            • Work autonomously without asking clarifying questions;
            • Follow one-task-per-iteration discipline;
            • Use SYSTEM_CONVERGED / SYSTEM_BLOCKED signals;
            ","path":["Operations","Day-to-Day","Autonomous Loops"],"tags":[]},{"location":"operations/autonomous-loop/#example-project-structure","level":2,"title":"Example Project Structure","text":"
            my-project/\n├── .context/\n│   ├── CONSTITUTION.md\n│   ├── TASKS.md          # Work items for the loop\n│   ├── DECISIONS.md\n│   ├── LEARNINGS.md\n│   ├── CONVENTIONS.md\n│   └── sessions/         # Loop iteration history\n├── loop.sh               # Loop script (if not using Claude Code)\n└── src/                  # Your code\n
            ","path":["Operations","Day-to-Day","Autonomous Loops"],"tags":[]},{"location":"operations/autonomous-loop/#sample-tasksmd-for-autonomous-loops","level":3,"title":"Sample TASKS.md for Autonomous Loops","text":"
            # Tasks\n\n## Phase 1: Setup\n\n- [x] Initialize project structure\n- [x] Set up testing framework\n\n## Phase 2: Core Features\n\n- [ ] Implement user registration `#priority:high`\n- [ ] Add email verification `#priority:high`\n- [ ] Create password reset flow `#priority:medium`\n\n## Phase 3: Polish\n\n- [ ] Add rate limiting `#priority:medium`\n- [ ] Improve error messages `#priority:low`\n

            The loop will work through these systematically, marking each complete.

            ","path":["Operations","Day-to-Day","Autonomous Loops"],"tags":[]},{"location":"operations/autonomous-loop/#troubleshooting","level":2,"title":"Troubleshooting","text":"","path":["Operations","Day-to-Day","Autonomous Loops"],"tags":[]},{"location":"operations/autonomous-loop/#loop-runs-forever","level":3,"title":"Loop Runs Forever","text":"

            Cause: AI not emitting completion signals

            Fix: Ensure .context/loop.md explicitly instructs signaling:

            End EVERY response with one of:\n- SYSTEM_CONVERGED (if all tasks done)\n- SYSTEM_BLOCKED (if stuck)\n

            ","path":["Operations","Day-to-Day","Autonomous Loops"],"tags":[]},{"location":"operations/autonomous-loop/#context-not-persisting","level":3,"title":"Context Not Persisting","text":"

            Cause: AI not updating context files

            Fix: Add explicit instructions to .context/loop.md:

            After completing a task, you MUST:\n1. Run: ctx task complete \"<task>\"\n2. Add learnings: ctx add learning \"...\" --session-id abc12345 --branch main --commit 68fbc00a\n

            ","path":["Operations","Day-to-Day","Autonomous Loops"],"tags":[]},{"location":"operations/autonomous-loop/#tasks-getting-repeated","level":3,"title":"Tasks Getting Repeated","text":"

            Cause: Task not marked complete before next iteration

            Fix: Ensure commit happens after context update:

            Order of operations:\n1. Complete coding work\n2. Update context files (*`ctx task complete`, `ctx add`*)\n3. Commit **ALL** changes including `.context/`\n4. Then signal status\n
            ","path":["Operations","Day-to-Day","Autonomous Loops"],"tags":[]},{"location":"operations/autonomous-loop/#ai-violating-constitution","level":3,"title":"AI Violating Constitution","text":"

            Cause: Constitution not read first

            Fix: Make constitution check explicit in .context/loop.md:

            BEFORE any work:\n1. Read .context/CONSTITUTION.md\n2. If task would violate ANY rule, emit SYSTEM_BLOCKED\n3. Explain which rule prevents the work\n
            ","path":["Operations","Day-to-Day","Autonomous Loops"],"tags":[]},{"location":"operations/autonomous-loop/#further-reading","level":2,"title":"Further Reading","text":"
            • Building ctx Using ctx: The dogfooding story: how autonomous loops built the tool that powers them
            ","path":["Operations","Day-to-Day","Autonomous Loops"],"tags":[]},{"location":"operations/autonomous-loop/#resources","level":2,"title":"Resources","text":"
            • Geoffrey Huntley's Ralph Wiggum Technique: The original inspiration
            • Context CLI: Command reference
            • Integrations: Tool-specific setup
            ","path":["Operations","Day-to-Day","Autonomous Loops"],"tags":[]},{"location":"operations/hub-failure-modes/","level":1,"title":"Hub Failure Modes","text":"","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub-failure-modes/#ctx-hub-failure-modes","level":1,"title":"ctx Hub: Failure Modes","text":"

            What can go wrong, what the system does about it, and what you should do. Complementary to ctx Hub Operations.

            Design Posture

            The hub is best-effort knowledge sharing, not a durable ledger. Local .context/ files are the source of truth for each project; the hub is a fan-out channel. This framing informs every failure-mode decision below.

            ","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub-failure-modes/#network","level":2,"title":"Network","text":"","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub-failure-modes/#client-loses-connection-mid-stream","level":3,"title":"Client Loses Connection Mid-Stream","text":"

            What happens: ctx connection listen detects the EOF, waits with exponential backoff, and reconnects. On reconnect it passes its last-seen sequence; the hub replays everything newer.

            What you should do: nothing. If reconnects are looping, check firewall state on the hub and ctx hub status output.

            ","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub-failure-modes/#partition-majority-side-reachable","level":3,"title":"Partition: Majority Side Reachable","text":"

            What happens: clients routed to the majority side continue to publish and listen. The minority nodes step down to followers that cannot accept writes (Raft quorum lost).

            What you should do: let it heal. When the partition closes, followers catch up via sequence-based sync automatically.

            ","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub-failure-modes/#partition-split-brain-no-quorum","level":3,"title":"Partition: Split Brain (No Quorum)","text":"

            What happens: no node holds a majority, so no leader is elected. All nodes become read-only. ctx connection publish and ctx add --share fail with a \"no leader\" error; local writes still succeed.

            What you should do: fix the network. If the partition is permanent (e.g., a data center is gone), bootstrap a new cluster from the survivors with ctx hub peer remove for the dead nodes.

            ","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub-failure-modes/#hub-unreachable-during-ctx-add-share","level":3,"title":"Hub Unreachable during ctx add --share","text":"

            What happens: the local write succeeds; the share step prints a warning and exits non-zero on the share leg only. --share is best-effort; it never blocks local context updates.

            What you should do: run ctx connection publish later to backfill, or rely on another --share for the same entry ID. The hub deduplicates by entry ID.

            ","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub-failure-modes/#storage","level":2,"title":"Storage","text":"","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub-failure-modes/#disk-full-on-the-leader","level":3,"title":"Disk Full on the Leader","text":"

            What happens: entries.jsonl append fails. The hub rejects writes with an error and stays up for read traffic. Clients retry; followers keep their in-sync status using whatever the leader already wrote.

            What you should do: free disk or grow the volume, then nothing else; the hub resumes accepting writes on the next append attempt.

            ","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub-failure-modes/#corrupt-entriesjsonl","level":3,"title":"Corrupt entries.jsonl","text":"

            What happens: if the last line is a partial JSON write from a crash, the hub truncates it on startup and logs a warning. If any earlier line is malformed, the hub refuses to start.

            What you should do: inspect with jq -c . <data-dir>/entries.jsonl > /dev/null to find the bad line. Move the bad region to a .quarantine file, then start. Nothing is ever silently dropped.

            ","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub-failure-modes/#metajson-entriesjsonl-sequence-mismatch","level":3,"title":"meta.json / entries.jsonl Sequence Mismatch","text":"

            What happens: the hub refuses to start. This usually means someone copied one file without the other.

            What you should do: restore both files from the same backup, or accept the higher sequence by regenerating meta.json from entries.jsonl (manual for now; file a bug).

            ","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub-failure-modes/#cluster","level":2,"title":"Cluster","text":"","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub-failure-modes/#leader-crash-clean-shutdown","level":3,"title":"Leader Crash, Clean Shutdown","text":"

            What happens: ctx hub stop triggers stepdown first, so a new leader is elected before the old one exits. In-flight writes drain. Clients reconnect to the new leader transparently.

            ","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub-failure-modes/#leader-crash-hard-fail-kill-9-power-loss","level":3,"title":"Leader Crash, Hard Fail (Kill -9, Power Loss)","text":"

            What happens: Raft detects the missing heartbeat and elects a new leader within a few seconds. Writes the old leader accepted but had not yet replicated can be lost. See the Raft-lite warning in the cluster recipe.

            What you should do: if you need stronger durability, run ctx connection listen on a dedicated \"collector\" project that persists entries locally as a write-ahead backup.

            ","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub-failure-modes/#split-brain-after-rejoin","level":3,"title":"Split-Brain After Rejoin","text":"

            What happens: Raft reconciles: the minority side's uncommitted writes are discarded, and the majority's log is authoritative.

            What you should do: nothing automatic. If you know the minority had important writes, grep for them in <data-dir>/entries.jsonl.rejected (written by the reconciliation pass) and replay them with ctx connection publish.

            ","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub-failure-modes/#auth-and-tokens","level":2,"title":"Auth and Tokens","text":"","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub-failure-modes/#lost-admin-token","level":3,"title":"Lost Admin Token","text":"

            What happens: you cannot register new projects.

            What you should do: retrieve it from <data-dir>/admin.token. If that file is also gone, stop the hub and regenerate. Note that all existing client tokens keep working; only new registrations need the admin token.

            ","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub-failure-modes/#compromised-admin-token","level":3,"title":"Compromised Admin Token","text":"

            What happens: anyone with the token can register new projects and publish. They cannot read existing entries without a client token for a project that subscribes.

            What you should do: rotate the admin token (regenerate <data-dir>/admin.token and restart), revoke suspicious client registrations via clients.json, and audit entries.jsonl for unexpected origins.

            ","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub-failure-modes/#compromised-client-token","level":3,"title":"Compromised Client Token","text":"

            What happens: the attacker can publish as that project and read anything that project is subscribed to. Because Origin is self-asserted on publish, the attacker can also publish entries tagged with any other project's name, so attribution in entries.jsonl cannot be trusted after a token compromise.

            What you should do: remove the client's entry from clients.json, restart the hub, and re-register the legitimate project with a fresh token. Audit entries.jsonl for entries published after the compromise timestamp and quarantine any that look suspicious; remember that Origin on those entries proves nothing.

            ","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub-failure-modes/#compromised-hub-host","level":3,"title":"Compromised Hub Host","text":"

            What happens: <data-dir>/clients.json stores client tokens verbatim (not hashed). Anyone with read access to that file has every client token in hand and can impersonate any registered project until each one is rotated.

            What you should do: treat it as a total hub compromise. Stop the hub, wipe <data-dir> (keep a forensic copy first), regenerate the admin token, and have every client re-register. See Security model for the mitigations that reduce the blast radius while the hashing follow-up is pending.

            ","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub-failure-modes/#clock-skew","level":2,"title":"Clock Skew","text":"

            Hub entries carry a timestamp assigned by the publishing client. The hub does not rewrite timestamps. Clients with significant clock skew will publish entries that look out of order in the shared feed.

            What you should do: run NTP on all client machines. If you see entries dated in the future or far past, the publisher's clock is the culprit.

            ","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub-failure-modes/#the-short-list","level":2,"title":"The Short List","text":"Symptom First thing to check Client can't reach hub Firewall, then ctx hub status \"No leader\" errors Cluster quorum; run ctx hub status on each peer Hub won't start after crash Last line of entries.jsonl Entries missing after restore Check clients.json sequence vs local .sync-state.json Duplicate entries in shared feed Client replayed after restore, safe (dedup by ID) Followers lagging Disk or network on the follower, not the leader","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub-failure-modes/#see-also","level":2,"title":"See Also","text":"
            • ctx Hub Operations
            • ctx Hub security model
            • HA cluster recipe
            ","path":["Operations","Hub","Hub Failure Modes"],"tags":[]},{"location":"operations/hub/","level":1,"title":"Hub Operations","text":"","path":["Operations","Hub","Hub Operations"],"tags":[]},{"location":"operations/hub/#ctx-hub-operations","level":1,"title":"ctx Hub: Operations","text":"

            Running the ctx Hub in production. This page is for operators: people running a hub for themselves or a team, not people writing to a hub someone else is running.

            If you have not read it yet, start with the ctx Hub overview. It explains what the hub is, the two user stories it supports (personal cross-project brain vs small trusted team), and what it does not do. A client-side tour is in Getting Started.

            Operator Cheat Sheet

            • The hub fans out four entry types only: decision, learning, convention, task. Journals, scratchpad, and other local state are out of scope.
            • Identity is per-project, not per-user. Attribution is limited to Origin, which is self-asserted by the publishing client.
            • The data model is an append-only JSONL log plus two small JSON sidecar files. Nothing is rewritten in place.
            ","path":["Operations","Hub","Hub Operations"],"tags":[]},{"location":"operations/hub/#data-directory-layout","level":2,"title":"Data Directory Layout","text":"

            The hub stores everything under a single data directory (default ~/.ctx/hub-data/, override with --data-dir).

            <data-dir>/\n  admin.token        # Initial admin token (chmod 600)\n  clients.json       # Registered client tokens and project names\n  meta.json          # Sequence counter, version, cluster metadata\n  entries.jsonl      # Append-only log (single source of truth)\n  hub.pid            # Daemon PID file (daemon mode only)\n  raft/              # Raft state (cluster mode only)\n    log.db\n    stable.db\n    snapshots/\n

            Invariants:

            • entries.jsonl is append-only. Every line is a valid JSON object. Corrupt lines are fatal at startup: fix or truncate before restart.
            • meta.json is authoritative for the next sequence number. On restart, the hub reads the last valid line of entries.jsonl and refuses to start if the sequences disagree.
            • clients.json holds registered client tokens (stored verbatim for now; see the security model); losing it invalidates all client registrations.
            ","path":["Operations","Hub","Hub Operations"],"tags":[]},{"location":"operations/hub/#starting-and-stopping","level":2,"title":"Starting and Stopping","text":"ForegroundDaemon
            ctx hub start                    # Ctrl-C to stop\nctx hub start --port 8080        # Custom port\nctx hub start --data-dir /srv/ctx-hub\n
            ctx hub start --daemon           # Fork to background\nctx hub stop                      # Graceful shutdown\n

            ctx hub stop sends SIGTERM to the PID in hub.pid, waits for in-flight RPCs to drain, then exits. If the daemon is wedged, remove hub.pid and send SIGKILL manually. entries.jsonl is crash-safe, so you will not lose accepted writes.

            ","path":["Operations","Hub","Hub Operations"],"tags":[]},{"location":"operations/hub/#systemd-unit","level":2,"title":"Systemd Unit","text":"

            For production single-node deployments, run the hub as a systemd service instead of --daemon:

            # /etc/systemd/system/ctx-hub.service\n[Unit]\nDescription=ctx Hub\nAfter=network-online.target\nWants=network-online.target\n\n[Service]\nType=simple\nUser=ctx\nGroup=ctx\nExecStart=/usr/local/bin/ctx hub start --port 9900 \\\n    --data-dir /var/lib/ctx-hub\nRestart=on-failure\nRestartSec=5\nNoNewPrivileges=true\nProtectSystem=strict\nProtectHome=true\nReadWritePaths=/var/lib/ctx-hub\nPrivateTmp=true\n\n[Install]\nWantedBy=multi-user.target\n
            sudo systemctl enable --now ctx-hub\nsudo journalctl -u ctx-hub -f\n
            ","path":["Operations","Hub","Hub Operations"],"tags":[]},{"location":"operations/hub/#backup-and-restore","level":2,"title":"Backup and Restore","text":"

            Because entries.jsonl is append-only, backups are trivial:

            # Hot backup, safe while the hub is running.\ncp <data-dir>/entries.jsonl backups/entries-$(date +%F).jsonl\ncp <data-dir>/meta.json      backups/meta-$(date +%F).json\ncp <data-dir>/clients.json   backups/clients-$(date +%F).json\n

            For a consistent snapshot across all three files, stop the hub, copy, then start again, or use a filesystem-level snapshot (LVM, ZFS, Btrfs).

            Restore:

            ctx hub stop                           # Stop the hub\ncp backups/entries-2026-04-10.jsonl <data-dir>/entries.jsonl\ncp backups/meta-2026-04-10.json      <data-dir>/meta.json\ncp backups/clients-2026-04-10.json   <data-dir>/clients.json\nctx hub start --daemon\n

            Clients that pushed sequences above the restored watermark will re-publish on the next listen reconnect, because the hub now reports a lower sequence than what clients have on disk. This is safe; the store deduplicates by entry ID.

            ","path":["Operations","Hub","Hub Operations"],"tags":[]},{"location":"operations/hub/#log-rotation","level":2,"title":"Log Rotation","text":"

            entries.jsonl grows unbounded. For long-lived hubs, rotate it offline:

            ctx hub stop\nmv <data-dir>/entries.jsonl <data-dir>/entries-$(date +%F).jsonl.old\n# Replay the last N days into a fresh entries.jsonl if you want a\n# trimmed active log, or leave the old file in place as history.\nctx hub start --daemon\n

            Do not truncate entries.jsonl while the hub is running. The hub holds an open file handle; an in-place truncation confuses the sequence counter and loses writes.

            ","path":["Operations","Hub","Hub Operations"],"tags":[]},{"location":"operations/hub/#monitoring","level":2,"title":"Monitoring","text":"

            Liveness probe:

            ctx hub status --exit-code\n

            Exit code 0 means the node is healthy (leader or in-sync follower); non-zero means degraded. Wire this into your monitoring of choice.

            For cluster deployments, watch for:

            • Role flaps: the leader changing more than once per hour suggests network instability or disk contention.
            • Replication lag: ctx hub status shows per-peer sequence offsets. Sustained lag > 100 sequences on a follower is worth investigating.
            • entries.jsonl growth rate: sudden spikes often indicate a misbehaving ctx connection listen reconnect loop.
            ","path":["Operations","Hub","Hub Operations"],"tags":[]},{"location":"operations/hub/#upgrading","level":2,"title":"Upgrading","text":"

            The JSONL format is versioned in meta.json. ctx refuses to start against a newer store version than it understands; older store versions are upgraded in place at first start after an upgrade.

            Always back up <data-dir>/ before upgrading.

            ","path":["Operations","Hub","Hub Operations"],"tags":[]},{"location":"operations/hub/#see-also","level":2,"title":"See Also","text":"
            • ctx Hub failure modes
            • ctx Hub security model
            • ctx serve reference
            • ctx hub reference
            ","path":["Operations","Hub","Hub Operations"],"tags":[]},{"location":"operations/integrations/","level":1,"title":"AI Tools","text":"","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#ai-tools","level":2,"title":"AI Tools","text":"

            Context works with any AI tool that can read files. This guide covers setup for popular AI coding assistants.

            ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#claude-code-full-integration","level":2,"title":"Claude Code (Full Integration)","text":"

            Claude Code has the deepest integration via the ctx plugin.

            ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#setup","level":3,"title":"Setup","text":"

            First, install ctx and initialize your project:

            ctx init\n

            Then, install the ctx plugin in Claude Code:

            # From the ctx repository\nclaude /plugin install ./internal/assets/claude\n\n# Or from the marketplace\nclaude /plugin marketplace add ActiveMemory/ctx\nclaude /plugin install ctx@activememory-ctx\n

            Ensure the Plugin Is Enabled

            Installing a plugin registers it, but local installs may not auto-enable it globally. Verify ~/.claude/settings.json contains:

            { \"enabledPlugins\": { \"ctx@activememory-ctx\": true } }\n

            Without this, the plugin's hooks and skills won't appear in other projects. Running ctx init auto-enables the plugin; use --no-plugin-enable to skip this step.

            This gives you:

            Component Purpose .context/ All context files CLAUDE.md Bootstrap instructions Plugin hooks Lifecycle automation Plugin skills Agent Skills","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#how-it-works","level":3,"title":"How It Works","text":"
            graph TD\n    A[Session Start] --> B[Claude reads CLAUDE.md]\n    B --> C[PreToolUse hook runs]\n    C --> D[ctx agent loads context]\n    D --> E[Work happens]\n    E --> F[Session End]
            1. Session start: Claude reads CLAUDE.md, which tells it to check .context/
            2. First tool use: PreToolUse hook runs ctx agent and emits the context packet (subsequent invocations within the cooldown window are silent)
            3. Next session: Claude reads context files and continues with context
            ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#plugin-hooks","level":3,"title":"Plugin Hooks","text":"

            The ctx plugin provides lifecycle hooks implemented as Go subcommands (ctx system *):

            Hook Event Purpose ctx system context-load-gate PreToolUse (.*) Auto-inject context on first tool use ctx system block-non-path-ctx PreToolUse (Bash) Block ./ctx or go run: force $PATH install ctx system qa-reminder PreToolUse (Bash) Remind agent to lint/test before committing ctx system specs-nudge PreToolUse (EnterPlanMode) Nudge agent to use project specs when planning ctx system check-context-size UserPromptSubmit Nudge context assessment as sessions grow ctx system check-ceremonies UserPromptSubmit Nudge /ctx-remember and /ctx-wrap-up adoption ctx system check-persistence UserPromptSubmit Remind to persist learnings/decisions ctx system check-journal UserPromptSubmit Remind to export/enrich journal entries ctx system check-reminders UserPromptSubmit Relay pending reminders at session start ctx system check-version UserPromptSubmit Warn when binary/plugin versions diverge ctx system check-resources UserPromptSubmit Warn when memory/swap/disk/load hit DANGER level ctx system check-knowledge UserPromptSubmit Nudge when knowledge files grow large ctx system check-map-staleness UserPromptSubmit Nudge when ARCHITECTURE.md is stale ctx system heartbeat UserPromptSubmit Session-alive signal with prompt count metadata ctx system post-commit PostToolUse (Bash) Nudge context capture and QA after git commits

            A catch-all PreToolUse hook also runs ctx agent on every tool use (with cooldown) to autoload context.

            ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#hook-configuration","level":3,"title":"Hook Configuration","text":"

            The plugin's hooks.json wires everything automatically: no manual configuration in settings.local.json needed:

            {\n  \"hooks\": {\n    \"PreToolUse\": [\n      {\n        \"matcher\": \".*\",\n        \"hooks\": [\n          { \"type\": \"command\", \"command\": \"ctx system context-load-gate\" }\n        ]\n      },\n      {\n        \"matcher\": \"Bash\",\n        \"hooks\": [\n          { \"type\": \"command\", \"command\": \"ctx system block-non-path-ctx\" }\n        ]\n      },\n      {\n        \"matcher\": \"Bash\",\n        \"hooks\": [\n          { \"type\": \"command\", \"command\": \"ctx system qa-reminder\" }\n        ]\n      },\n      {\n        \"matcher\": \"EnterPlanMode\",\n        \"hooks\": [\n          { \"type\": \"command\", \"command\": \"ctx system specs-nudge\" }\n        ]\n      },\n      {\n        \"matcher\": \".*\",\n        \"hooks\": [\n          { \"type\": \"command\", \"command\": \"ctx agent --budget 4000 2>/dev/null || true\" }\n        ]\n      }\n    ],\n    \"PostToolUse\": [\n      {\n        \"matcher\": \"Bash\",\n        \"hooks\": [\n          { \"type\": \"command\", \"command\": \"ctx system post-commit\" }\n        ]\n      }\n    ],\n    \"UserPromptSubmit\": [\n      {\n        \"hooks\": [\n          { \"type\": \"command\", \"command\": \"ctx system check-context-size\" },\n          { \"type\": \"command\", \"command\": \"ctx system check-ceremonies\" },\n          { \"type\": \"command\", \"command\": \"ctx system check-persistence\" },\n          { \"type\": \"command\", \"command\": \"ctx system check-journal\" },\n          { \"type\": \"command\", \"command\": \"ctx system check-reminders\" },\n          { \"type\": \"command\", \"command\": \"ctx system check-version\" },\n          { \"type\": \"command\", \"command\": \"ctx system check-resources\" },\n          { \"type\": \"command\", \"command\": \"ctx system check-knowledge\" },\n          { \"type\": \"command\", \"command\": \"ctx system check-map-staleness\" },\n          { \"type\": \"command\", \"command\": \"ctx system heartbeat\" }\n        
]\n      }\n    ]\n  }\n}\n
            ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#customizing-token-budget-and-cooldown","level":3,"title":"Customizing Token Budget and Cooldown","text":"

            Edit the PreToolUse command to change the token budget or cooldown:

            \"command\": \"ctx agent --budget 8000 --session $PPID >/dev/null || true\"\n\"command\": \"ctx agent --budget 4000 --cooldown 5m --session $PPID >/dev/null || true\"\n

            The --session $PPID flag isolates the cooldown per session: $PPID resolves to the Claude Code process PID, so concurrent sessions don't interfere. The default cooldown is 10 minutes; use --cooldown 0 to disable it.

            ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#verifying-setup","level":3,"title":"Verifying Setup","text":"
            1. Start a new Claude Code session;
            2. Ask: \"Do you remember?\"
            3. Claude should cite specific context:
              • Current tasks from .context/TASKS.md;
              • Recent decisions or learnings;
              • Recent session history from ctx journal.
            ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#local-plugin-development","level":3,"title":"Local Plugin Development","text":"

            When developing ctx locally (adding skills, hooks, or changing plugin behavior), Claude Code caches the plugin by version. You must bump the version in both files and update the marketplace for changes to take effect:

            1. Bump version in both:
            2. internal/assets/claude/.claude-plugin/plugin.json (plugin manifest), .claude-plugin/marketplace.json (marketplace listing);

            3. Update the marketplace in Claude Code:

            4. Open the Plugins UI (/plugins or Esc menu),
            5. Go to Marketplaces tab,
            6. Select the activememory-ctx Marketplace,
            7. Choose Update marketplace;

            8. Start a new Claude Code session: skill changes aren't reflected in existing sessions.

            Both Version Files Must Match

            If you only bump plugin.json but not marketplace.json (or vice versa), Claude Code may not detect the update. Always bump both together.

            ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#troubleshooting","level":3,"title":"Troubleshooting","text":"Issue Solution Context not loading Check ctx is in PATH: which ctx Hook errors Verify plugin is installed: claude /plugin list New skill not visible Bump version in both plugin.json files, update marketplace","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#manual-context-load","level":3,"title":"Manual Context Load","text":"

            If hooks aren't working, manually load context:

            # Get context packet\nctx agent --budget 4000\n\n# Or paste into conversation\ncat .context/TASKS.md\n
            ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#agent-skills","level":3,"title":"Agent Skills","text":"

            The ctx plugin ships Agent Skills following the agentskills.io specification.

            These are invoked in Claude Code with /skill-name.

            ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#session-lifecycle-skills","level":4,"title":"Session Lifecycle Skills","text":"Skill Description /ctx-remember Recall project context at session start (ceremony) /ctx-wrap-up End-of-session context persistence (ceremony) /ctx-status Show context summary (tasks, decisions, learnings) /ctx-agent Get AI-optimized context packet /ctx-next Suggest 1-3 concrete next actions from context /ctx-commit Commit with integrated context capture /ctx-reflect Review session and suggest what to persist /ctx-remind Manage session-scoped reminders /ctx-pause Pause context hooks for this session /ctx-resume Resume context hooks after a pause","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#context-persistence-skills","level":4,"title":"Context Persistence Skills","text":"Skill Description /ctx-task-add Add a task to TASKS.md /ctx-learning-add Add a learning to LEARNINGS.md /ctx-decision-add Add a decision with context/rationale/consequence /ctx-convention-add Add a coding convention to CONVENTIONS.md /ctx-archive Archive completed tasks","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#scratchpad-skills","level":4,"title":"Scratchpad Skills","text":"Skill Description /ctx-pad Manage encrypted scratchpad entries","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#session-history-skills","level":4,"title":"Session History Skills","text":"Skill Description /ctx-history Browse AI session history /ctx-journal-enrich Enrich a journal entry with frontmatter/tags /ctx-journal-enrich-all Full journal pipeline: export if needed, then batch-enrich","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#blogging-skills","level":4,"title":"Blogging Skills","text":"

            Blogging Is a Better Way of Creating Release Notes

            The blogging workflow can also double as generating release notes:

            AI reads your git commit history and creates a \"narrative\", which is essentially what a release note is for.

            Skill Description /ctx-blog Generate blog post from recent activity /ctx-blog-changelog Generate blog post from commit range with theme","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#auditing-health-skills","level":4,"title":"Auditing & Health Skills","text":"Skill Description /ctx-doctor Troubleshoot ctx behavior with structural health checks /ctx-drift Detect and fix context drift (structural + semantic) /ctx-consolidate Merge redundant learnings or decisions into denser entries /ctx-alignment-audit Audit doc claims against playbook instructions /ctx-prompt-audit Analyze session logs for vague prompts /check-links Audit docs for dead internal and external links","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#planning-execution-skills","level":4,"title":"Planning & Execution Skills","text":"Skill Description /ctx-loop Generate a Ralph Loop iteration script /ctx-implement Execute a plan step-by-step with checks /ctx-plan-import Import Claude Code plan files into project specs /ctx-worktree Manage git worktrees for parallel agents /ctx-architecture Build and maintain architecture maps","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#usage-examples","level":4,"title":"Usage Examples","text":"
            /ctx-status\n/ctx-learning-add \"Token refresh requires explicit cache invalidation\"\n/ctx-journal-enrich twinkly-stirring-kettle\n

            Skills support partial matching where applicable (e.g., session slugs).

            ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#cursor-ide","level":2,"title":"Cursor IDE","text":"

            Cursor can use context files through its system prompt or by reading files directly.

            ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#setup_1","level":3,"title":"Setup","text":"
            # Generate Cursor configuration\nctx setup cursor\n\n# Initialize context\nctx init --minimal\n
            ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#configuration","level":3,"title":"Configuration","text":"

            Add to Cursor settings (.cursor/settings.json):

            // split to multiple lines for readability\n{\n  \"ai.systemPrompt\": \"Read .context/TASKS.md and \n  .context/CONVENTIONS.md before responding. \n  Follow rules in .context/CONSTITUTION.md.\"\n}\n
            ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#usage","level":3,"title":"Usage","text":"
            1. Open your project in Cursor
            2. Context files are available in the file tree
            3. Reference them in prompts: \"Check .context/DECISIONS.md for our approach to...\"
            ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#manual-context-injection","level":3,"title":"Manual Context Injection","text":"

            For more control, paste context directly:

            # Get AI-ready packet\nctx agent --budget 4000 | pbcopy  # macOS\nctx agent --budget 4000 | xclip  # Linux\n

            Paste into Cursor's chat.

            ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#aider","level":2,"title":"Aider","text":"

            Aider works well with context files through its --read flag.

            ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#setup_2","level":3,"title":"Setup","text":"
            # Generate Aider configuration\nctx setup aider\n\n# Initialize context\nctx init\n
            ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#configuration_1","level":3,"title":"Configuration","text":"

            Create .aider.conf.yml:

            read:\n  - .context/CONSTITUTION.md\n  - .context/TASKS.md\n  - .context/CONVENTIONS.md\n  - .context/DECISIONS.md\n
            ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#usage_1","level":3,"title":"Usage","text":"
            # Start Aider (reads context files automatically)\naider\n\n# Or specify files explicitly\naider --read .context/TASKS.md --read .context/CONVENTIONS.md\n
            ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#with-watch-mode","level":3,"title":"With Watch Mode","text":"

            Run ctx watch alongside Aider to capture context updates:

            # Terminal 1: Run Aider\naider 2>&1 | tee /tmp/aider.log\n\n# Terminal 2: Watch for context updates\nctx watch --log /tmp/aider.log\n
            ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#github-copilot","level":2,"title":"GitHub Copilot","text":"

            GitHub Copilot integrates with ctx at three levels: an automated instructions file, a VS Code Chat extension, and manual patterns.

            ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#setup_3","level":3,"title":"Setup","text":"
            # Initialize context\nctx init\n\n# Generate .github/copilot-instructions.md\nctx setup copilot --write\n

            The --write flag creates .github/copilot-instructions.md, which Copilot reads automatically at the start of every session. This file contains your project's constitution rules, current tasks, conventions, and architecture: giving Copilot persistent context without manual copy-paste.

            Re-run ctx setup copilot --write after updating your .context/ files to regenerate the instructions.

            ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#vs-code-chat-extension-ctx","level":3,"title":"VS Code Chat Extension (@ctx)","text":"

            The ctx VS Code extension adds a @ctx chat participant to GitHub Copilot Chat, giving you direct access to all context commands from within the editor.

            ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#installation","level":4,"title":"Installation","text":"
            1. Build from source (requires Node.js 18+):
            cd editors/vscode\nnpm install\nnpm run build\nnpx @vscode/vsce package\n
            2. Install the .vsix file:
            code --install-extension ctx-context-0.8.1.vsix\n
            3. Reload VS Code. Type @ctx in Copilot Chat to verify.
            ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#slash-commands","level":4,"title":"Slash Commands","text":"Command Description @ctx /init Initialize .context/ directory with template files @ctx /status Show context summary with token estimate @ctx /agent Print AI-ready context packet @ctx /drift Detect stale or invalid context @ctx /journal Browse and search AI session history @ctx /hook Generate AI tool integration configs @ctx /add Add a task, decision, or learning @ctx /load Output assembled context Markdown @ctx /compact Archive completed tasks and clean up @ctx /sync Reconcile context with codebase","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#usage-examples_1","level":4,"title":"Usage Examples","text":"
            @ctx /init\n@ctx /status\n@ctx /add task Implement user authentication\n@ctx /drift\n@ctx /hook copilot\n@ctx /journal\n

            Typing @ctx without a command shows help with all available commands. The extension also supports natural language: asking @ctx about \"status\" or \"drift\" routes to the correct command automatically.

            ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#configuration_2","level":4,"title":"Configuration","text":"Setting Default Description ctx.executablePath ctx Path to the ctx binary. Set this if ctx is not in your PATH.","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#follow-up-suggestions","level":4,"title":"Follow-Up Suggestions","text":"

            After each command, the extension suggests relevant next steps. For example, after /init it suggests /status and /hook; after /drift it suggests /sync.

            ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#session-persistence","level":3,"title":"Session Persistence","text":"

            ctx init creates a .context/sessions/ directory for storing session data from non-Claude tools. The Markdown session parser scans this directory during ctx journal, enabling session history for Copilot and other tools.

            ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#manual-patterns","level":3,"title":"Manual Patterns","text":"

            These patterns work without the extension, using Copilot's built-in file awareness:

            Pattern 1: Keep context files open

            Open .context/CONVENTIONS.md in a split pane. Copilot will reference it.

            Pattern 2: Reference in comments

            // See .context/CONVENTIONS.md for naming patterns\n// Following decision in .context/DECISIONS.md: Use PostgreSQL\n\nfunction getUserById(id: string) {\n  // Copilot now has context\n}\n

            Pattern 3: Paste context into Copilot Chat

            ctx agent --budget 2000\n

            Paste output into Copilot Chat for context-aware responses.

            ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#windsurf-ide","level":2,"title":"Windsurf IDE","text":"

            Windsurf supports custom instructions and file-based context.

            ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#setup_4","level":3,"title":"Setup","text":"
            # Generate Windsurf configuration\nctx setup windsurf\n\n# Initialize context\nctx init\n
            ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#configuration_3","level":3,"title":"Configuration","text":"

            Add to Windsurf settings:

            // Split to multiple lines for readability\n{\n  \"ai.customInstructions\": \"Always read .context/CONSTITUTION.md first. \n  Check .context/TASKS.md for current work. \n  Follow patterns in .context/CONVENTIONS.md.\"\n}\n
            ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#usage_2","level":3,"title":"Usage","text":"

            Context files appear in the file tree. Reference them when chatting:

            • \"What's in our task list?\" → AI reads .context/TASKS.md
            • \"What convention do we use for naming?\" → AI reads .context/CONVENTIONS.md
            ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#generic-integration","level":2,"title":"Generic Integration","text":"

            For any AI tool that can read files, use these patterns:

            ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#manual-context-loading","level":3,"title":"Manual Context Loading","text":"
            # Get full context\nctx load\n\n# Get AI-optimized packet\nctx agent --budget 8000\n\n# Get specific file\ncat .context/TASKS.md\n
            ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#system-prompt-template","level":3,"title":"System Prompt Template","text":"
            You are working on a project with persistent context in .context/\n\nBefore responding:\n1. Read .context/CONSTITUTION.md - NEVER violate these rules\n2. Check .context/TASKS.md for current work\n3. Follow .context/CONVENTIONS.md patterns\n4. Reference .context/DECISIONS.md for architectural choices\n\nWhen you learn something new, note it for .context/LEARNINGS.md\nWhen you make a decision, document it for .context/DECISIONS.md\n
            ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#automated-updates","level":3,"title":"Automated Updates","text":"

            If your AI tool outputs to a log, use ctx watch:

            # Watch log file for context-update commands\nyour-ai-tool 2>&1 | tee /tmp/ai.log &\nctx watch --log /tmp/ai.log\n

            The AI can emit updates like:

            <context-update type=\"complete\">implement caching</context-update>\n<context-update type=\"learning\"\n  context=\"Implementing caching layer\"\n  lesson=\"Important thing learned today\"\n  application=\"Apply this insight going forward\"\n>Caching Insight</context-update>\n
            ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#context-update-commands","level":2,"title":"Context Update Commands","text":"

            The ctx watch command parses update commands from AI output. Use this format:

            <context-update type=\"TYPE\" [attributes]>Content</context-update>\n
            ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#supported-types","level":3,"title":"Supported Types","text":"Type Target File Required Attributes task TASKS.md None decision DECISIONS.md context, rationale, consequence learning LEARNINGS.md context, lesson, application convention CONVENTIONS.md None complete TASKS.md None","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#simple-format-tasks-conventions-complete","level":3,"title":"Simple Format (Tasks, Conventions, Complete)","text":"
            <context-update type=\"task\">Implement rate limiting</context-update>\n<context-update type=\"convention\">Use kebab-case for files</context-update>\n<context-update type=\"complete\">rate limiting</context-update>\n
            ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#structured-format-learnings-decisions","level":3,"title":"Structured Format (Learnings, Decisions)","text":"

            Learnings and decisions support structured attributes for better documentation:

            Learning with full structure:

            <context-update type=\"learning\"\n  context=\"Debugging Claude Code hooks\"\n  lesson=\"Hooks receive JSON via stdin, not environment variables\"\n  application=\"Parse JSON stdin with the host language (Go, Python, etc.): no jq needed\"\n>Hook Input Format</context-update>\n

            Decision with full structure:

            <context-update type=\"decision\"\n  context=\"Need a caching layer for API responses\"\n  rationale=\"Redis is fast, well-supported, and team has experience\"\n  consequence=\"Must provision Redis infrastructure; team training on Redis patterns\"\n>Use Redis for caching</context-update>\n

            Learnings require: context, lesson, application attributes. Decisions require: context, rationale, consequence attributes. Updates missing required attributes are rejected with an error.

            ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/integrations/#further-reading","level":2,"title":"Further Reading","text":"
            • Skills That Fight the Platform: Common pitfalls in skill design that work against the host tool
            • The Anatomy of a Skill That Works: What makes a skill reliable: the E/A/R framework and quality gates
            ","path":["Operations","Day-to-Day","AI Tools"],"tags":[]},{"location":"operations/migration/","level":1,"title":"Integration","text":"","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/migration/#adopting-ctx-in-existing-projects","level":2,"title":"Adopting ctx in Existing Projects","text":"

            Claude Code User?

            You probably want the plugin instead of this page.

            Install ctx from the marketplace (/plugin → search \"ctx\" → Install) and you're done: hooks, skills, and updates are handled for you.

            See Getting Started for the full walkthrough.

            This guide covers adopting ctx in existing projects regardless of which tools your team uses.

            ","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/migration/#quick-paths","level":2,"title":"Quick Paths","text":"You have... Command What happens Nothing (greenfield) ctx init Creates .context/, CLAUDE.md, permissions Existing CLAUDE.md ctx init --merge Backs up your file, inserts ctx block after the H1 Existing CLAUDE.md + ctx markers ctx init --force Replaces the ctx block, leaves your content intact .cursorrules / .aider.conf.yml ctx init ctx ignores those files: they coexist cleanly Team repo, first adopter ctx init --merge && git add .context/ CLAUDE.md Initialize and commit for the team","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/migration/#existing-claudemd","level":2,"title":"Existing CLAUDE.md","text":"

            This is the most common scenario:

            You have a CLAUDE.md with project-specific instructions and don't want to lose them.

            You Own CLAUDE.md

            After initialization, CLAUDE.md is yours: edit it freely.

            Add project instructions, remove sections you don't need, reorganize as you see fit.

            The only part ctx manages is the block between the <!-- ctx:context --> and <!-- ctx:end --> markers; everything outside those markers is yours to change at any time.

            If you remove the markers, nothing breaks: ctx simply treats the file as having no ctx content and will offer to merge again on the next ctx init.

            ","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/migration/#what-ctx-init-does","level":3,"title":"What ctx init Does","text":"

            When ctx init detects an existing CLAUDE.md, it checks for ctx markers (<!-- ctx:context --> ... <!-- ctx:end -->):

            State Default behavior With --merge With --force No CLAUDE.md Creates from template Creates from template Creates from template Exists, no ctx markers Prompts to merge Auto-merges (no prompt) Auto-merges (no prompt) Exists, has ctx markers Skips (already set up) Skips Replaces the ctx block only","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/migration/#the-merge-flag","level":3,"title":"The --merge Flag","text":"

            --merge auto-merges without prompting. The merge process:

            1. Backs up your existing CLAUDE.md to CLAUDE.md.<timestamp>.bak;
            2. Finds the H1 heading (e.g., # My Project) in your file;
            3. Inserts the ctx block immediately after it;
            4. Preserves everything else untouched.

            Your content before and after the ctx block remains exactly as it was.

            ","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/migration/#before-after-example","level":3,"title":"Before / After Example","text":"

            Before: your existing CLAUDE.md:

            # My Project\n\n## Build Commands\n\n- `npm run build`: production build\n- `npm test`: run tests\n\n## Code Style\n\n- Use TypeScript strict mode\n- Prefer named exports\n

            After ctx init --merge:

            # My Project\n\n<!-- ctx:context -->\n<!-- DO NOT REMOVE: This marker indicates ctx-managed content -->\n\n## IMPORTANT: You Have Persistent Memory\n\nThis project uses Context (`ctx`) for context persistence across sessions.\n...\n\n<!-- ctx:end -->\n\n## Build Commands\n\n- `npm run build`: production build\n- `npm test`: run tests\n\n## Code Style\n\n- Use TypeScript strict mode\n- Prefer named exports\n

            Your build commands and code style sections are untouched. The ctx block sits between markers and can be updated independently.

            ","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/migration/#the-force-flag","level":3,"title":"The --force Flag","text":"

            If your CLAUDE.md already has ctx markers (from a previous ctx init), the default behavior is to skip it. Use --force to replace the ctx block with the latest template; this is useful after upgrading ctx:

            ctx init --force\n

            This only replaces content between <!-- ctx:context --> and <!-- ctx:end -->. Your own content outside the markers is preserved. A timestamped backup is created before any changes.

            ","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/migration/#undoing-a-merge","level":3,"title":"Undoing a Merge","text":"

            Every merge creates a backup:

            $ ls CLAUDE.md*.bak\nCLAUDE.md.1738000000.bak\n

            To restore:

            cp CLAUDE.md.1738000000.bak CLAUDE.md\n

            Or if you are using git, simply:

            git checkout CLAUDE.md\n
            ","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/migration/#existing-cursorrules-aider-copilot","level":2,"title":"Existing .cursorrules / Aider / Copilot","text":"

            ctx doesn't touch tool-specific config files. It creates its own files (.context/, CLAUDE.md) and coexists with whatever you already have.

            ","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/migration/#what-does-ctx-create","level":3,"title":"What Does ctx Create?","text":"ctx creates ctx does NOT touch .context/ directory .cursorrules CLAUDE.md (or merges into) .aider.conf.yml .claude/settings.local.json (seeded by ctx init; the plugin manages hooks and skills) .github/copilot-instructions.md .windsurfrules Any other tool-specific config

            Claude Code hooks and skills are provided by the ctx plugin, installed from the Claude Code marketplace (/plugin → search \"ctx\" → Install).

            ","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/migration/#running-ctx-alongside-other-tools","level":3,"title":"Running ctx Alongside Other Tools","text":"

            The .context/ directory is the source of truth. Tool-specific configs point to it:

            • Cursor: Reference .context/ files in your system prompt (see Cursor setup)
            • Aider: Add .context/ files to the read: list in .aider.conf.yml (see Aider setup)
            • Copilot: Keep .context/ files open or reference them in comments (see Copilot setup)

            You can generate a tool-specific configuration with:

            ctx setup cursor    # Generate Cursor config snippet\nctx setup aider     # Generate .aider.conf.yml\nctx setup copilot   # Generate Copilot tips\nctx setup windsurf  # Generate Windsurf config\n
            ","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/migration/#migrating-content-into-context","level":3,"title":"Migrating Content into .context/","text":"

            If you have project knowledge scattered across .cursorrules or custom prompt files, consider migrating it:

            1. Rules / invariants → .context/CONSTITUTION.md
            2. Code patterns → .context/CONVENTIONS.md
            3. Architecture notes → .context/ARCHITECTURE.md
            4. Known issues / tips → .context/LEARNINGS.md

            You don't need to delete the originals: ctx and tool-specific files can coexist. But centralizing in .context/ means every tool gets the same context.

            ","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/migration/#team-adoption","level":2,"title":"Team Adoption","text":"","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/migration/#context-is-designed-to-be-committed","level":3,"title":".context/ Is Designed to Be Committed","text":"

            The context files (tasks, decisions, learnings, conventions, architecture) are meant to live in version control. However, some subdirectories are personal or sensitive and should not be committed.

            ctx init automatically adds these .gitignore entries:

            # Journals contain full session transcripts: personal, potentially large\n.context/journal/\n.context/journal-site/\n.context/journal-obsidian/\n\n# Legacy encryption key path (copy to ~/.ctx/.ctx.key if needed)\n.context/.ctx.key\n\n# Runtime state and logs (ephemeral, machine-specific):\n.context/state/\n.context/logs/\n\n# Claude Code local settings (machine-specific)\n.claude/settings.local.json\n

            With those in place, committing is straightforward:

            # One person initializes\nctx init --merge\n\n# Commit context files (journals and keys are already gitignored)\ngit add .context/ CLAUDE.md\ngit commit -m \"Add ctx context management\"\ngit push\n

            Teammates pull and immediately have context. No per-developer setup needed.

            ","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/migration/#what-about-claude","level":3,"title":"What about .claude/?","text":"

            The .claude/ directory contains permissions that ctx init seeds. Hooks and skills are provided by the ctx plugin (not per-project files).

            File Commit? Why .claude/settings.local.json No Machine-specific, accumulates session permissions .claude/settings.golden.json Yes Curated permission snapshot (via ctx permission snapshot)","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/migration/#merge-conflicts-in-context-files","level":3,"title":"Merge Conflicts in Context Files","text":"

            Context files are plain Markdown. Resolve conflicts the same way you would for any other documentation file:

            # After a conflicting pull\ngit diff .context/TASKS.md    # See both sides\n# Edit to keep both sets of tasks, then:\ngit add .context/TASKS.md\ngit commit\n

            Common conflict scenarios:

            • TASKS.md: Two people added tasks: Keep both.
            • DECISIONS.md: Same decision recorded differently: Unify the entry.
            • LEARNINGS.md: Parallel discoveries: Keep both, remove duplicates.
            ","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/migration/#gradual-adoption","level":3,"title":"Gradual Adoption","text":"

            You don't need the whole team to switch at once:

            1. One person runs ctx init --merge and commits;
            2. CLAUDE.md instructions work immediately for Claude Code users;
            3. Other tool users can adopt at their own pace using ctx setup <tool>;
            4. Context files benefit everyone who reads them, even without tool integration.
            ","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/migration/#verifying-it-worked","level":2,"title":"Verifying It Worked","text":"","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/migration/#check-status","level":3,"title":"Check Status","text":"
            ctx status\n

            You should see your context files listed with token counts and no warnings.

            ","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/migration/#test-memory","level":3,"title":"Test Memory","text":"

            Start a new AI session and ask: \"Do you remember?\"

            The AI should cite specific context:

            • Current tasks from .context/TASKS.md;
            • Recent decisions or learnings;
            • Session history (if you've had prior sessions);

            If it responds with generic \"I don't have memory\", check that ctx is in your PATH (which ctx) and that hooks are configured (see Troubleshooting).

            ","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/migration/#verify-the-merge","level":3,"title":"Verify the Merge","text":"

            If you used --merge, check that your original content is intact:

            # Your original content should still be there\ncat CLAUDE.md\n\n# The ctx block should be between markers\ngrep -c \"ctx:context\" CLAUDE.md  # Should print 1\ngrep -c \"ctx:end\" CLAUDE.md      # Should print 1\n
            ","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/migration/#further-reading","level":2,"title":"Further Reading","text":"
            • Getting Started: Full setup walkthrough
            • Context Files: What each .context/ file does
            • Integrations: Per-tool setup (Claude Code, Cursor, Aider, Copilot)
            • CLI Reference: All ctx commands and flags
            ","path":["Operations","Day-to-Day","Integration"],"tags":[]},{"location":"operations/release/","level":1,"title":"Cutting a Release","text":"

            Full Release Checklist

            This page covers the mechanics of cutting a release (bump, tag, push). For the complete pre-release ceremony (audits, tests, verification, and post-release steps), see the Release Checklist runbook.

            ","path":["Operations","Maintainers","Cutting a Release"],"tags":[]},{"location":"operations/release/#prerequisites","level":2,"title":"Prerequisites","text":"

            Before you can cut a release you need:

            • Push access to origin (GitHub)
            • GPG signing configured (make gpg-test)
            • Go installed (version in go.mod)
            • Zensical installed (make site-setup)
            • A clean working tree (git status shows nothing to commit)
            ","path":["Operations","Maintainers","Cutting a Release"],"tags":[]},{"location":"operations/release/#step-by-step","level":2,"title":"Step-by-Step","text":"","path":["Operations","Maintainers","Cutting a Release"],"tags":[]},{"location":"operations/release/#1-update-the-version-file","level":3,"title":"1. Update the VERSION File","text":"
            echo \"0.9.0\" > VERSION\ngit add VERSION\ngit commit -m \"chore: bump version to 0.9.0\"\n

            The VERSION file uses bare semver (0.9.0), no v prefix. The release script adds the v prefix for git tags.

            ","path":["Operations","Maintainers","Cutting a Release"],"tags":[]},{"location":"operations/release/#2-generate-release-notes","level":3,"title":"2. Generate Release Notes","text":"

            In Claude Code:

            /_ctx-release-notes\n

            This analyzes commits since the last tag and writes dist/RELEASE_NOTES.md. The release script refuses to proceed without this file.

            ","path":["Operations","Maintainers","Cutting a Release"],"tags":[]},{"location":"operations/release/#3-verify-docs-and-commit-any-remaining-changes","level":3,"title":"3. Verify Docs and Commit Any Remaining Changes","text":"
            /ctx-link-check    # audit docs for dead links\nmake audit          # full check: fmt, vet, lint, style, test\ngit status          # must be clean\n
            ","path":["Operations","Maintainers","Cutting a Release"],"tags":[]},{"location":"operations/release/#4-run-the-release","level":3,"title":"4. Run the Release","text":"
            make release\n

            Or, if you are in a Claude Code session:

            /_ctx-release\n

            The release script does everything in order:

            Step What happens 1 Reads VERSION, verifies release notes exist 2 Verifies working tree is clean 3 Updates version in 4 config files (plugin.json, marketplace.json, VS Code package.json + lock) 4 Updates download URLs in 3 doc files (index.md, getting-started.md, integrations.md) 5 Adds new row to versions.md 6 Rebuilds the documentation site (make site) 7 Commits all version and docs updates 8 Runs make test and make smoke 9 Builds binaries for all 6 platforms via hack/build-all.sh 10 Creates a signed git tag (v0.9.0) 11 Pushes the tag to origin 12 Updates and pushes the latest tag","path":["Operations","Maintainers","Cutting a Release"],"tags":[]},{"location":"operations/release/#5-github-ci-takes-over","level":3,"title":"5. GitHub CI Takes Over","text":"

            Pushing a v* tag triggers .github/workflows/release.yml:

            1. Checks out the tagged commit
            2. Runs the full test suite
            3. Builds binaries for all platforms
            4. Creates a GitHub Release with auto-generated notes
            5. Uploads binaries and SHA256 checksums
            ","path":["Operations","Maintainers","Cutting a Release"],"tags":[]},{"location":"operations/release/#6-verify","level":3,"title":"6. Verify","text":"
            • GitHub Releases shows the new version
            • All 6 binaries are attached (linux/darwin x amd64/arm64, windows x amd64)
            • SHA256 files are attached
            • Release notes look correct
            ","path":["Operations","Maintainers","Cutting a Release"],"tags":[]},{"location":"operations/release/#what-gets-updated-automatically","level":2,"title":"What Gets Updated Automatically","text":"

            The release script updates 8 files so you do not have to:

            File What changes internal/assets/claude/.claude-plugin/plugin.json Plugin version .claude-plugin/marketplace.json Marketplace version (2 fields) editors/vscode/package.json VS Code extension version editors/vscode/package-lock.json VS Code lock version (2 fields) docs/index.md Download URLs docs/home/getting-started.md Download URLs docs/operations/integrations.md VSIX filename version docs/reference/versions.md New version row + latest pointer

            The Go binary version is injected at build time via -ldflags from the VERSION file. No source file needs editing.

            ","path":["Operations","Maintainers","Cutting a Release"],"tags":[]},{"location":"operations/release/#build-targets-reference","level":2,"title":"Build Targets Reference","text":"Target What it does make release Full release (script + tag + push) make build Build binary for current platform make build-all Build all 6 platform binaries make test Unit tests make smoke Integration smoke tests make audit Full check (fmt + vet + lint + drift + docs + test) make site Rebuild documentation site","path":["Operations","Maintainers","Cutting a Release"],"tags":[]},{"location":"operations/release/#troubleshooting","level":2,"title":"Troubleshooting","text":"","path":["Operations","Maintainers","Cutting a Release"],"tags":[]},{"location":"operations/release/#release-notes-not-found","level":3,"title":"\"Release Notes Not Found\"","text":"
            ERROR: dist/RELEASE_NOTES.md not found.\n

            Run /_ctx-release-notes in Claude Code first, or write dist/RELEASE_NOTES.md manually.

            ","path":["Operations","Maintainers","Cutting a Release"],"tags":[]},{"location":"operations/release/#working-tree-is-not-clean","level":3,"title":"\"Working Tree Is Not Clean\"","text":"
            ERROR: Working tree is not clean.\n

            Commit or stash all changes before running make release.

            ","path":["Operations","Maintainers","Cutting a Release"],"tags":[]},{"location":"operations/release/#tag-already-exists","level":3,"title":"\"Tag Already Exists\"","text":"
            ERROR: Tag v0.9.0 already exists.\n

            You cannot release the same version twice. Either bump VERSION to a new version, or delete the old tag if the release was incomplete:

            git tag -d v0.9.0\ngit push origin :refs/tags/v0.9.0\n
            ","path":["Operations","Maintainers","Cutting a Release"],"tags":[]},{"location":"operations/release/#ci-build-fails-after-tag-push","level":3,"title":"CI Build Fails After Tag Push","text":"

            The tag is already published. Fix the issue, bump to a patch version (e.g. 0.9.1), and release again. Do not force-push tags that others may have already fetched.

            ","path":["Operations","Maintainers","Cutting a Release"],"tags":[]},{"location":"operations/upgrading/","level":1,"title":"Upgrade","text":"","path":["Operations","Day-to-Day","Upgrade"],"tags":[]},{"location":"operations/upgrading/#upgrade","level":2,"title":"Upgrade","text":"

            New versions of ctx may ship updated permissions, CLAUDE.md directives, or plugin hooks and skills.

            Claude Code User?

            The marketplace can update skills, hooks, and prompts independently: /plugin → select ctx → Update now (or enable auto-update).

            The ctx binary is separate: rebuild from source or download a new release when one is available, then run ctx init --force --merge. Knowledge files are preserved automatically.

            ","path":["Operations","Day-to-Day","Upgrade"],"tags":[]},{"location":"operations/upgrading/#tldr","level":2,"title":"TL;DR","text":"
            # Plugin users (Claude Code)\n# /plugin → select ctx → Update now\n# Then update the binary and reinitialize:\nctx init --force --merge\n\n# From-source / manual users\n# install new ctx binary, then:\nctx init --force --merge\n# /plugin → select ctx → Update now   (if using Claude Code)\n
            ","path":["Operations","Day-to-Day","Upgrade"],"tags":[]},{"location":"operations/upgrading/#what-changes-between-versions","level":2,"title":"What Changes between Versions","text":"

            ctx init generates two categories of files:

            Category Examples Changes between versions? Infrastructure .claude/settings.local.json (permissions), ctx-managed sections in CLAUDE.md, ctx plugin (hooks + skills) Yes Knowledge .context/TASKS.md, DECISIONS.md, LEARNINGS.md, CONVENTIONS.md, ARCHITECTURE.md, GLOSSARY.md, CONSTITUTION.md, AGENT_PLAYBOOK.md No: this is your data

            Infrastructure is regenerated by ctx init and plugin updates. Knowledge files are yours and should never be overwritten.

            ","path":["Operations","Day-to-Day","Upgrade"],"tags":[]},{"location":"operations/upgrading/#upgrade-steps","level":2,"title":"Upgrade Steps","text":"","path":["Operations","Day-to-Day","Upgrade"],"tags":[]},{"location":"operations/upgrading/#1-install-the-new-version","level":3,"title":"1. Install the New Version","text":"

            Build from source or download the binary:

            cd /path/to/ctx-source\ngit pull\nmake build\nsudo make install\nctx --version   # verify\n
            ","path":["Operations","Day-to-Day","Upgrade"],"tags":[]},{"location":"operations/upgrading/#2-reinitialize","level":3,"title":"2. Reinitialize","text":"
            ctx init --force --merge\n
            • --force regenerates infrastructure files (permissions, ctx-managed sections in CLAUDE.md).
            • --merge preserves your content outside ctx markers.

            Knowledge files (.context/TASKS.md, DECISIONS.md, etc.) are preserved automatically: ctx init only overwrites infrastructure, never your data.

            Encryption key: The encryption key lives at ~/.ctx/.ctx.key (outside the project). Reinit does not affect it. If you have a legacy key at .context/.ctx.key or ~/.local/ctx/keys/, copy it manually (see Syncing Scratchpad Notes).

            ","path":["Operations","Day-to-Day","Upgrade"],"tags":[]},{"location":"operations/upgrading/#3-update-the-ctx-plugin","level":3,"title":"3. Update the ctx Plugin","text":"

            If you use Claude Code, update the plugin to get new hooks and skills:

            1. Open /plugin in Claude Code.
            2. Select ctx.
            3. Click Update now.

            Or enable auto-update so the plugin stays current without manual steps.

            ","path":["Operations","Day-to-Day","Upgrade"],"tags":[]},{"location":"operations/upgrading/#4-review-custom-settings","level":3,"title":"4. Review Custom Settings","text":"

            If you added custom permissions to .claude/settings.local.json beyond what ctx init provides, diff and merge:

            diff .claude.bak/settings.local.json .claude/settings.local.json\n

            Manually add back any custom entries that the new init dropped.

            ","path":["Operations","Day-to-Day","Upgrade"],"tags":[]},{"location":"operations/upgrading/#5-verify","level":3,"title":"5. Verify","text":"
            ctx status          # context files intact\nctx drift           # no broken references\n
            ","path":["Operations","Day-to-Day","Upgrade"],"tags":[]},{"location":"operations/upgrading/#6-clean-up","level":3,"title":"6. Clean Up","text":"

            If you made manual backups, remove them once satisfied:

            rm -rf .context.bak .claude.bak CLAUDE.md.bak\n
            ","path":["Operations","Day-to-Day","Upgrade"],"tags":[]},{"location":"operations/upgrading/#what-if-i-skip-the-upgrade","level":2,"title":"What If I Skip the Upgrade?","text":"

            The old binary still works with your existing .context/ files. But you may miss:

            • New plugin hooks that enforce better practices or catch mistakes;
            • Updated skill prompts that produce better results;
            • New .gitignore entries for directories added in newer versions;
            • Bug fixes in the CLI itself.

            The plugin and the binary can be updated independently. You can update the plugin (for new hooks/skills) even if you stay on an older binary, and vice versa.

            Context files are plain Markdown: They never break between versions.

            The surrounding infrastructure is what evolves.

            ","path":["Operations","Day-to-Day","Upgrade"],"tags":[]},{"location":"operations/runbooks/architecture-exploration/","level":1,"title":"Architecture Exploration","text":"","path":["Operations","Runbooks","Architecture Exploration"],"tags":[]},{"location":"operations/runbooks/architecture-exploration/#architecture-exploration","level":1,"title":"Architecture Exploration","text":"

            Systematically build architecture documentation across one or more repositories using ctx skills. Each invocation does one unit of work; a simple loop drives the agent through all phases.

            When to use: When onboarding to a new codebase, performing architecture reviews, or building up .context/ documentation across a workspace of repos.

            Prerequisites: ctx installed, repos cloned under a shared workspace directory (e.g., ~/WORKSPACE/).

            Companion skills:

            • /ctx-architecture: structural baseline and principal analysis
            • /ctx-architecture-enrich: code intelligence enrichment via GitNexus
            • /ctx-architecture-failure-analysis: adversarial failure analysis
            ","path":["Operations","Runbooks","Architecture Exploration"],"tags":[]},{"location":"operations/runbooks/architecture-exploration/#overview","level":2,"title":"Overview","text":"

            The agent progresses through phases per repo, depth-first:

            Phase Skill What it does bootstrap ctx init + /ctx-architecture Initialize context and build structural baseline principal /ctx-architecture principal Deep analysis: vision, bottlenecks, alternatives enriched /ctx-architecture-enrich Quantify with code intelligence (blast radius, flows) frontier-N /ctx-architecture (re-run) Explore unexplored areas found in convergence report lens-* /ctx-architecture with lens Focused exploration through conceptual lenses

            Exploration stops when convergence >= 0.85, frontier runs plateau, or all lenses are exhausted.

            ","path":["Operations","Runbooks","Architecture Exploration"],"tags":[]},{"location":"operations/runbooks/architecture-exploration/#setup","level":2,"title":"Setup","text":"

            Create a tracking directory in your workspace root:

            cd ~/WORKSPACE\nmkdir -p .arch-explorer\n

            Create .arch-explorer/manifest.json listing your repos:

            {\n  \"repos\": [\"ctx\", \"portal\", \"infra\"],\n  \"current_repo_index\": 0,\n  \"progress\": {}\n}\n

            Create .arch-explorer/run-log.md (empty, the agent appends to it).

            ","path":["Operations","Runbooks","Architecture Exploration"],"tags":[]},{"location":"operations/runbooks/architecture-exploration/#prompt","level":2,"title":"Prompt","text":"

            Save this as .arch-explorer/PROMPT.md and invoke with your agent. The prompt is self-contained: the agent reads the manifest, picks the next unit of work, executes it, updates tracking, and stops.

            You are an autonomous architecture exploration agent. Your job is to\nsystematically build and evolve architecture documentation across all\nrepositories in this workspace using ctx skills.\n\n## Execution Protocol\n\n### Step 1: Read State\n\nRead `.arch-explorer/manifest.json`. This tells you:\n- Which repos exist and their order\n- What has been done per repo (`progress` object)\n- Which repo to work on next (`current_repo_index`)\n\n### Step 2: Pick the Next Unit of Work\n\n**Strategy: depth-first, sequential.**\n\nFind the current repo (by `current_repo_index`). Determine its next\nphase from the progression below. If all phases are exhausted for this\nrepo (convergence score >= 0.85 or 3+ frontier runs with no new\nfindings), advance `current_repo_index` and pick the next repo.\n\n### Phase Progression (per repo)\n\nEach repo progresses through these phases in order:\n\n| Phase | Skill | Prerequisite |\n|-------|-------|-------------|\n| `bootstrap` | `ctx init` + `/ctx-architecture` | None |\n| `principal` | `/ctx-architecture principal` | bootstrap done |\n| `enriched` | `/ctx-architecture-enrich` | principal done, GitNexus indexed |\n| `frontier-N` | `/ctx-architecture` (re-run) | enriched done |\n\n**`bootstrap` is a single composite unit:** `ctx init` followed by\nstructural analysis. This is the ONLY phase that combines two actions.\nNo other phase may chain actions.\n\n**Frontier runs** are numbered: `frontier-1`, `frontier-2`, etc.\nEach frontier run reads CONVERGENCE-REPORT.md and picks unexplored\nareas. 
The skill handles this automatically.\n\nAfter the third frontier run OR when convergence >= 0.85, apply\n**conceptual lenses** (one per run):\n\n| Lens | Focus Areas |\n|------|-------------|\n| `security` | Auth flows, input validation, secrets, attack surfaces, trust boundaries |\n| `performance` | Hot paths, caching, concurrency, resource lifecycle, allocation patterns |\n| `stability` | Error handling, retries, graceful degradation, circuit breakers, timeouts |\n| `observability` | Logging, metrics, tracing, alerting, debugging affordances |\n| `data-integrity` | Storage, serialization, migrations, consistency, backup, recovery |\n\nFor lens runs, prepend the lens context as an explicit instruction to\nthe skill invocation:\n\n> \"Focus exploration on security: auth flows, input validation, secrets,\n> attack surfaces, trust boundaries.\"\n\nDo NOT wait for the skill to ask what to explore. Provide the lens\nfocus as input upfront.\n\n### Step 3: Do the Work\n\n1. `cd` into the sub-repo directory (`~/WORKSPACE/<repo-name>`, NOT\n   `~/WORKSPACE` itself).\n2. Verify `CTX_DIR` already points at THIS sub-repo's `.context/`:\n\n    ```bash\n    test \"$CTX_DIR\" = \"$PWD/.context\" || {\n      echo \"STOP: CTX_DIR=$CTX_DIR but this sub-repo needs $PWD/.context.\"\n      echo \"Re-launch the agent with CTX_DIR set to the sub-repo:\"\n      echo \"  cd $PWD && CTX_DIR=\\\"\\$PWD/.context\\\" claude --print 'Follow .arch-explorer/PROMPT.md' --allowedTools '*'\"\n      exit 1\n    }\n    ```\n\n    If it fails, STOP. The agent cannot change `CTX_DIR` for itself:\n    child shells and skill invocations inherit the parent Claude\n    process environment, which only the caller can control. Do not\n    proceed, do not run `ctx` commands, do not skip the check.\n3. If phase is `bootstrap`:\n    - Run `ctx init`, confirm `.context/` exists.\n    - Then run `/ctx-architecture` (structural baseline).\n4. 
If phase is `principal` or `frontier-*`:\n    - Run `/ctx-architecture` (add `principal` argument for principal phase).\n    - The skill will read existing artifacts and build on them.\n5. If phase is `enriched`:\n    - Verify GitNexus is connected: call `mcp__gitnexus__list_repos`.\n    - Success = non-empty list returned with no error.\n    - If GitNexus unavailable, log as `enriched-skipped` and advance\n      to `frontier-1`.\n    - Run `/ctx-architecture-enrich`.\n6. If phase is a lens run (`lens-security`, etc.):\n    - Run `/ctx-architecture` with lens focus prepended as instruction\n      (see lens table above for exact wording).\n\n### Step 4: Extract Results\n\nAfter the skill completes, gather:\n\n- **Convergence score**: from `map-tracking.json`, computed as:\n  average of all module `confidence` values (0.0-1.0). If\n  `map-tracking.json` is missing or has no confidence values,\n  record `null` and log a warning.\n- **Frontier count**: from CONVERGENCE-REPORT.md, count the number\n  of listed unexplored areas. If CONVERGENCE-REPORT.md is missing,\n  record `frontier_count: null` and log a warning. 
Treat missing\n  as \"exploration should continue\" (do not stall).\n- **Key findings**: 2-3 bullet points of what was discovered or\n  changed in this run (new modules mapped, danger zones found, etc.)\n- **New artifacts**: list any new files created in `.context/`\n\n### Step 5: Update Tracking\n\nUpdate `.arch-explorer/manifest.json`:\n\n```json\n{\n  \"progress\": {\n    \"ctx\": {\n      \"phases_completed\": [\"bootstrap\", \"principal\"],\n      \"current_phase\": \"enriched\",\n      \"lenses_explored\": [],\n      \"last_run\": \"2026-04-07T14:00:00Z\",\n      \"convergence_score\": 0.72,\n      \"frontier_count\": 3,\n      \"total_runs\": 2,\n      \"findings_summary\": \"14 modules mapped, 3 danger zones, 2 extension points\"\n    }\n  }\n}\n```\n\nAppend to `.arch-explorer/run-log.md`:\n\n```markdown\n## 2026-04-07T14:00:00Z / ctx / principal\n\n**Phase:** principal\n**Convergence:** 0.45 -> 0.72\n**Frontiers remaining:** 3\n**Key findings:**\n- Identified CLI dispatch as primary bottleneck (fan-out to 12 subsystems)\n- Security: context files readable by any process (no access control)\n- Strategic recommendation: extract context engine into library package\n\n**Artifacts updated:** ARCHITECTURE-PRINCIPAL.md, DANGER-ZONES.md, map-tracking.json\n```\n\n### Step 6: Report and Stop\n\nPrint this exact format as the FINAL output of the invocation:\n\n```\n[arch-explorer] DONE\n  repo: ctx\n  phase: principal\n  convergence: 0.72\n  frontiers: 3\n  runs_on_repo: 3\n  next: ctx / enriched\n```\n\nThe `[arch-explorer] DONE` line is the terminal marker. After printing\nit, produce no further output. Execution is complete.\n\n## Rules\n\n1. **One unit per invocation.** The only composite unit is `bootstrap`\n   (init + structural). All other phases are exactly one skill run.\n2. **Additive only.** Never delete or overwrite existing artifacts.\n   The skills already handle incremental updates.\n3. **No duplicated work.** Read manifest before acting. 
If a phase is\n   already recorded as completed, skip it.\n4. **Log everything.** Every run gets a run-log entry, even failures\n   and skips.\n5. **Fail gracefully.** If a skill fails (missing GitNexus, broken repo,\n   etc.), log the failure with reason and advance to the next phase or\n   repo. Don't retry in the same invocation.\n6. **Respect ctx conventions.** Each repo gets its own `.context/`\n   directory. Never write architecture artifacts outside `.context/`.\n\n## Stopping Logic\n\nA repo is considered \"explored\" when ANY of these is true:\n- Convergence score >= 0.85 (from map-tracking.json)\n- 3+ frontier runs produced no new findings (frontier_count unchanged\n  across consecutive runs)\n- All 5 lenses have been applied\n- Convergence score is `null` after 3 attempts (artifacts aren't being\n  generated properly; log warning and move on)\n\nWhen a repo is explored, advance `current_repo_index` in the manifest.\n\n## When All Repos Are Done\n\nWhen every repo has reached its stopping condition, print:\n\n```\n[arch-explorer] ALL DONE\n  - ctx: 0.92 convergence, 8 runs, 5 lenses\n  - portal: 0.87 convergence, 6 runs, 3 lenses\n  ...\n```\n
            ","path":["Operations","Runbooks","Architecture Exploration"],"tags":[]},{"location":"operations/runbooks/architecture-exploration/#invocation","level":2,"title":"Invocation","text":"

            The caller MUST set CTX_DIR to the sub-repo the agent will work on. The agent verifies this at Step 3.2 and stops if it does not match. The wrapper reads the manifest to pick the current sub-repo, then launches claude with CTX_DIR pinned to that sub-repo's .context/.

            Single run (safest for quota):

            cd ~/WORKSPACE\nREPO=$(jq -r '.repos[.current_repo_index]' .arch-explorer/manifest.json)\nCTX_DIR=\"$PWD/$REPO/.context\" \\\n  claude --print \"Follow .arch-explorer/PROMPT.md\" --allowedTools '*'\n

            Batch of N runs:

            cd ~/WORKSPACE\nfor i in $(seq 1 5); do\n  REPO=$(jq -r '.repos[.current_repo_index]' .arch-explorer/manifest.json)\n  CTX_DIR=\"$PWD/$REPO/.context\" \\\n    claude --print \"Follow .arch-explorer/PROMPT.md\" --allowedTools '*'\n  echo \"--- Run $i complete (repo: $REPO) ---\"\ndone\n

            Resume after interruption:

            Just run the wrapper again. The manifest tracks state; the agent picks up where it left off. CTX_DIR is recomputed from the manifest on each invocation, so the right sub-repo is always bound.

            ","path":["Operations","Runbooks","Architecture Exploration"],"tags":[]},{"location":"operations/runbooks/architecture-exploration/#tips","level":2,"title":"Tips","text":"
            • Start small: list 1-2 repos in the manifest first. Add more once you're confident in the output quality.
            • GitNexus is optional: the enrichment phase is skipped gracefully if GitNexus isn't connected. You still get structural and principal analysis.
            • Review between batches: check the run-log and generated artifacts between batch runs. The agent is additive-only, but early course correction saves wasted runs.
            • Lens runs are the payoff: the first three phases build the map; lens runs find the interesting things (security gaps, performance cliffs, stability risks).
            ","path":["Operations","Runbooks","Architecture Exploration"],"tags":[]},{"location":"operations/runbooks/architecture-exploration/#history","level":2,"title":"History","text":"
            • 2026-04-07: Original prompt created as hack/agents/architecture-explorer.md.
            • 2026-04-16: Moved to docs as a runbook for discoverability.
            • 2026-04-20: Added CTX_DIR verification at Step 3.2 and per-invocation CTX_DIR binding in the wrapper, so the agent writes artifacts to the sub-repo's .context/ instead of the inherited workspace one.
            ","path":["Operations","Runbooks","Architecture Exploration"],"tags":[]},{"location":"operations/runbooks/backup-strategy/","level":1,"title":"Backup Strategy","text":"

            ctx backup was removed. File-level backup is not ctx's responsibility; your OS or a dedicated backup tool handles it better and without locking you into a specific mount strategy.

            This runbook explains what to back up, how ctx hub reduces the surface, and what options exist for the rest.

            ","path":["Backup Strategy"],"tags":[]},{"location":"operations/runbooks/backup-strategy/#what-to-back-up","level":2,"title":"What To Back Up","text":"

            Per project:

            • .context/: all context files, journal, state, scratchpad.
            • .claude/: Claude Code settings, hooks, skills specific to the project. Skip this entry when it lives in git; the repo is the backup.

            Per user:

            • ~/.ctx/: global config, the encryption key (~/.ctx/.ctx.key), hub data directory (if running a local hub).
            ","path":["Backup Strategy"],"tags":[]},{"location":"operations/runbooks/backup-strategy/#how-hub-reduces-backup-needs","level":2,"title":"How Hub Reduces Backup Needs","text":"

            ctx hub replicates the knowledge surface across machines:

            • DECISIONS.md
            • LEARNINGS.md
            • CONVENTIONS.md
            • CONSTITUTION.md
            • ARCHITECTURE.md
            • Task items promoted to hub

            If you run ctx hub (as a server or by subscribing to someone else's), the data that matters most survives losing any single machine.

            ","path":["Backup Strategy"],"tags":[]},{"location":"operations/runbooks/backup-strategy/#what-hub-does-not-replicate","level":2,"title":"What Hub Does Not Replicate","text":"

            Hub is not a file-level backup. The following still live only on the machine that produced them:

            • Journal entries (.context/journal/*.md)
            • Runtime state (.context/state/*)
            • Session event log (.context/events.jsonl)
            • Scratchpad (.context/.pad)
            • Encrypted notify/webhook config (.context/.notify.enc)
            • The encryption key itself (~/.ctx/.ctx.key)

            If you need those to survive a disk failure, use a file-level backup.

            ","path":["Backup Strategy"],"tags":[]},{"location":"operations/runbooks/backup-strategy/#example-strategies","level":2,"title":"Example Strategies","text":"","path":["Backup Strategy"],"tags":[]},{"location":"operations/runbooks/backup-strategy/#1-cron-rsync-to-nas-or-external-drive","level":3,"title":"1. cron + rsync to NAS or External Drive","text":"
            # Daily at 03:00, mirror ~/WORKSPACE and ~/.ctx to NAS\n0 3 * * * rsync -a --delete \\\n    --exclude='node_modules' \\\n    --exclude='dist' \\\n    --exclude='.context/state' \\\n    ~/WORKSPACE/ /mnt/nas/backup/workspace/\n0 3 * * * rsync -a --delete ~/.ctx/ /mnt/nas/backup/ctx-global/\n

            Adjust excludes for the trash you don't want to back up. The .context/state/ dir is ephemeral per-session; skip it.

            ","path":["Backup Strategy"],"tags":[]},{"location":"operations/runbooks/backup-strategy/#2-cron-cp-to-a-cloud-synced-directory","level":3,"title":"2. cron + cp to a Cloud-Synced Directory","text":"

            iCloud Drive, Dropbox, or any directory watched by a sync client:

            0 3 * * * cp -a ~/WORKSPACE/some-project/.context \\\n    ~/CloudDrive/ctx-backups/some-project/$(date +\\%Y-\\%m-\\%d)\n

            Daily snapshots, cloud provider handles the replication.

            ","path":["Backup Strategy"],"tags":[]},{"location":"operations/runbooks/backup-strategy/#3-time-machine-macos","level":3,"title":"3. Time Machine (macOS)","text":"

            If you already run Time Machine, ensure ~/WORKSPACE and ~/.ctx are not in its exclusion list. Time Machine handles versioning; you get point-in-time recovery for free.

            ","path":["Backup Strategy"],"tags":[]},{"location":"operations/runbooks/backup-strategy/#4-borg-or-restic-for-versioned-backups","level":3,"title":"4. Borg or restic for Versioned Backups","text":"

            For deduplicated, versioned, encrypted backups:

            # Borg init (once)\nborg init --encryption=repokey /mnt/nas/borg-ctx\n\n# Daily backup\nborg create /mnt/nas/borg-ctx::'ctx-{now}' \\\n    ~/WORKSPACE ~/.ctx \\\n    --exclude '*/node_modules' \\\n    --exclude '*/.context/state'\n

            Use restic if you prefer S3-compatible targets.

            ","path":["Backup Strategy"],"tags":[]},{"location":"operations/runbooks/backup-strategy/#when-you-still-need-file-level-backup-even-with-hub","level":2,"title":"When You Still Need File-Level Backup Even With Hub","text":"
            • Journal: session histories are local-only until exported.
            • Scratchpad: private notes, encrypted locally.
            • Encryption key: losing ~/.ctx/.ctx.key means losing access to every encrypted file in every project.
            • Non-hub projects: projects that never called ctx hub register have zero cross-machine persistence.

            For these, pick one strategy above and forget about it.

            ","path":["Backup Strategy"],"tags":[]},{"location":"operations/runbooks/backup-strategy/#why-ctx-no-longer-ships-a-backup-command","level":2,"title":"Why ctx No Longer Ships a Backup Command","text":"

            Backup is inherently environment-specific: SMB, NFS, S3, rsync, Time Machine, Borg, restic. Every user has a different story. The previous ctx backup picked SMB via GVFS, which was Linux-only and narrow. Chasing mount strategies would never generalize.

            Hub is the right answer for the data ctx owns (knowledge). For everything else, your OS or a dedicated backup tool is the right layer.

            ","path":["Backup Strategy"],"tags":[]},{"location":"operations/runbooks/breaking-migration/","level":1,"title":"Breaking Migration","text":"","path":["Operations","Runbooks","Breaking Migration"],"tags":[]},{"location":"operations/runbooks/breaking-migration/#breaking-migration-guide","level":1,"title":"Breaking Migration Guide","text":"

            Template for upgrading across breaking CLI renames or behavior changes. Use this as a starting point when writing migration notes for a specific release, or hand it to your agent as context for generating release-specific guidance.

            When to use: When a release includes breaking changes (command renames, removed flags, changed defaults) that require user action.

            Companion: Upgrade guide covers the general upgrade flow. This runbook covers the breaking-change specifics.

            ","path":["Operations","Runbooks","Breaking Migration"],"tags":[]},{"location":"operations/runbooks/breaking-migration/#step-1-identify-what-changed","level":2,"title":"Step 1: Identify What Changed","text":"

            Ask your agent to diff the CLI surface between the old and new version:

            Compare the CLI command surface between the previous release tag\nand HEAD. For each change, categorize as: renamed, removed,\nnew, or changed-behavior. Include old and new command signatures.\n

            Or use the /_ctx-command-audit skill after the rename.

            ","path":["Operations","Runbooks","Breaking Migration"],"tags":[]},{"location":"operations/runbooks/breaking-migration/#step-2-regenerate-infrastructure","level":2,"title":"Step 2: Regenerate Infrastructure","text":"
            # Install the new binary\nmake build && sudo make install\n\n# Regenerate CLAUDE.md and permissions\nctx init --force --merge\n

            --merge preserves your knowledge files (TASKS.md, DECISIONS.md, etc.) while regenerating infrastructure (permissions, CLAUDE.md managed sections).

            ","path":["Operations","Runbooks","Breaking Migration"],"tags":[]},{"location":"operations/runbooks/breaking-migration/#step-3-update-the-plugin","level":2,"title":"Step 3: Update the Plugin","text":"
            /plugin -> select ctx -> Update now\n

            Or, if using a local clone:

            make plugin-reload\n# restart Claude Code\n
            ","path":["Operations","Runbooks","Breaking Migration"],"tags":[]},{"location":"operations/runbooks/breaking-migration/#step-4-update-personal-scripts","level":2,"title":"Step 4: Update Personal Scripts","text":"

            Search your scripts and aliases for old command names:

            # Example: find references to old command names\ngrep -r \"ctx old-command\" ~/scripts/ ~/.zshrc ~/.bashrc\n

            Replace with the new names per the changelog.

            ","path":["Operations","Runbooks","Breaking Migration"],"tags":[]},{"location":"operations/runbooks/breaking-migration/#step-5-update-hook-configs","level":2,"title":"Step 5: Update Hook Configs","text":"

            If you have custom hooks in .claude/settings.local.json that reference ctx commands, update them:

            jq '.hooks' .claude/settings.local.json | grep \"ctx \"\n
            ","path":["Operations","Runbooks","Breaking Migration"],"tags":[]},{"location":"operations/runbooks/breaking-migration/#step-6-verify","level":2,"title":"Step 6: Verify","text":"
            ctx status          # context files intact\nctx drift           # no broken references\nmake test           # if you're a contributor\n
            ","path":["Operations","Runbooks","Breaking Migration"],"tags":[]},{"location":"operations/runbooks/breaking-migration/#writing-release-specific-migration-notes","level":2,"title":"Writing Release-Specific Migration Notes","text":"

            When preparing a release with breaking changes, create a section in the release notes using this template:

            ## Breaking Changes\n\n### `old-command` renamed to `new-command`\n\n**What changed**: `ctx old-command` is now `ctx new-command`.\nThe old name is removed (no deprecation alias).\n\n**Action required**:\n1. Run `ctx init --force --merge` to update CLAUDE.md\n2. Update any scripts referencing `ctx old-command`\n3. Update hook configs if applicable\n\n**Why**: [brief rationale for the rename]\n

            Repeat for each breaking change. Users should be able to follow the notes mechanically without needing to understand the codebase.

            ","path":["Operations","Runbooks","Breaking Migration"],"tags":[]},{"location":"operations/runbooks/codebase-audit/","level":1,"title":"Codebase Audit","text":"","path":["Operations","Runbooks","Codebase Audit"],"tags":[]},{"location":"operations/runbooks/codebase-audit/#codebase-audit","level":1,"title":"Codebase Audit","text":"

            A structured audit of the codebase: dead code, magic strings, documentation drift, security surface, and roadmap opportunities.

            When to run: Before a release, after a long YOLO sprint, quarterly, or when planning the next phase of work.

            Time: ~15-30 minutes with a team of agents.

            ","path":["Operations","Runbooks","Codebase Audit"],"tags":[]},{"location":"operations/runbooks/codebase-audit/#how-to-use-this-runbook","level":2,"title":"How to Use This Runbook","text":"

            Start a Claude Code session with a clean git state (git stash or commit first). Paste or adapt the prompt below. The agent does the analysis; you triage the findings.

            ","path":["Operations","Runbooks","Codebase Audit"],"tags":[]},{"location":"operations/runbooks/codebase-audit/#prompt","level":2,"title":"Prompt","text":"
            I want you to create an agent team to audit this codebase. Save each report as\na separate markdown file under `./ideas/` (or another directory if you prefer).\n\nUse read-only agents (subagent_type: Explore) for all analyses. No code changes.\n\nFor each report, use this structure:\n- Executive Summary (2-3 sentences + severity table)\n- Findings (grouped, with file:line references)\n- Ranked Recommendations (high/medium/low priority)\n- Methodology (what was examined, how)\n\nKeep reports actionable: every finding should suggest a concrete fix or next step.\n\n## Analyses to Run\n\n### 1. Extractable Patterns (session mining)\nSearch session JSONL files, journal entries, and task archives for repetitive\nmulti-step workflows. Count frequency of bash command sequences, slash command\nusage, and recurring user prompts. Identify patterns that could become skills\nor scripts. Cross-reference with existing skills to find coverage gaps.\nOutput: ranked list of automation opportunities with frequency data.\n\n### 2. Documentation Drift (godoc + inline)\nCompare every doc.go against its package's actual exports and behavior. Check\ninline godoc comments on exported functions against their implementations.\nScan for stale TODO/FIXME/HACK comments. Check package-level comments match\npackage names. Output: drift items ranked by severity with exact file:line refs.\n\n### 3. Maintainability\nLook for: functions >80 lines that have logical split points; switch blocks\nwith >5 cases that could be table-driven or extracted; inline comments that\nsay \"step 1\", \"step 2\" or similar (sign the block wants to be a function);\nfiles with >400 lines; packages with flat structure that could benefit from\nsub-packages; functions that seem misplaced in their file. Do NOT flag\nthings that are fine as-is just because they could theoretically be different.\nOutput: concrete refactoring suggestions, not style nitpicks.\n\n### 4. 
Security Review\nThis is a CLI app: focus on CLI-relevant attack surface, not web OWASP:\nfile path traversal (does user input flow into file paths unsanitized?),\ncommand injection (does user input flow into exec calls?), symlink following\n(does the tool follow symlinks when writing to .context/?), permission\nhandling (are file permissions set correctly?), sensitive data in outputs\n(do any commands leak secrets or session content?). Output: findings with\nseverity ratings and exploit scenarios.\n\n### 5. Blog Theme Discovery\nRead existing blog posts for style and narrative voice. Analyze git log,\nrecent session discussions, and DECISIONS.md for story arcs worth writing\nabout. Suggest 3-5 blog post themes with: title, angle, target audience,\nkey commits/sessions to reference, and a 2-sentence pitch. Prioritize\nthemes that build a coherent narrative across posts.\n\n### 6. Roadmap & Value Opportunities\nBased on current features, recent momentum, and gaps found in other analyses:\nwhat are the highest-value improvements? Consider: user-facing features,\ndeveloper experience, integration opportunities, and low-hanging fruit.\nOutput: prioritized list with effort/impact estimates (not time estimates).\n\n### 7. User-Facing Documentation\nEvaluate README, help text, and any user docs. Suggest improvements\nstructured as use-case pages: the problem, how ctx solves it, typical\nworkflow, gotchas. Identify gaps where a user would get stuck without\nreading source code. Output: list of documentation gaps and suggested\npage outlines.\n\n### 8. Agent Team Strategies\nBased on the codebase structure, suggest 2-3 agent team configurations for\nupcoming work sessions. For each: team composition (roles, agent types),\ntask distribution strategy, coordination approach, and which types of work\nit suits. Ground suggestions in actual project patterns, not generic advice.\n
            ","path":["Operations","Runbooks","Codebase Audit"],"tags":[]},{"location":"operations/runbooks/codebase-audit/#tips","level":2,"title":"Tips","text":"
            • Clean state matters: the prompt says \"no code changes\" but accidents happen. Start from a clean git state so you can git checkout . if needed.

            • Adjust scope: drop analyses you don't need. Analyses 1-4 are the most actionable. Analyses 5-8 are planning/creative and can be skipped if you just want a technical audit.

            • Reports feed TASKS.md: after the audit, read each report and create tasks in the appropriate Phase section. The reports are input, not output.

            • ideas/ is gitignored: reports saved there won't be committed. Move specific findings to TASKS.md, DECISIONS.md, or LEARNINGS.md to persist them.

            ","path":["Operations","Runbooks","Codebase Audit"],"tags":[]},{"location":"operations/runbooks/codebase-audit/#history","level":2,"title":"History","text":"
            • 2026-02-08: Original prompt created after a codebase audit sprint.
            • 2026-02-17: Improved with read-only agents, report structure template, CLI-scoped security review, and maintainability thresholds.
            • 2026-04-16: Moved from hack/runbooks/ to docs/operations/runbooks/.
            ","path":["Operations","Runbooks","Codebase Audit"],"tags":[]},{"location":"operations/runbooks/docs-semantic-audit/","level":1,"title":"Docs Semantic Audit","text":"","path":["Operations","Runbooks","Docs Semantic Audit"],"tags":[]},{"location":"operations/runbooks/docs-semantic-audit/#documentation-semantic-audit","level":1,"title":"Documentation Semantic Audit","text":"

            Find structural problems that linters and link checkers cannot: weak pages that should be merged, heavy pages that should be split, missing cross-links, and narrative arcs that don't land.

            When to run: Before a release, after adding several new pages, when the site feels sprawling, or when you suspect narrative gaps.

            Time: ~20-40 minutes with an agent session.

            ","path":["Operations","Runbooks","Docs Semantic Audit"],"tags":[]},{"location":"operations/runbooks/docs-semantic-audit/#why-this-is-a-runbook","level":2,"title":"Why This Is a Runbook","text":"

            These judgments are inherently subjective and context-dependent. A page is \"weak\" relative to its neighbors; a narrative arc only matters if the docs intend to tell a story. Deterministic tools (broken-link checkers, word counters) can't do this. An LLM reading the full doc set can.

            ","path":["Operations","Runbooks","Docs Semantic Audit"],"tags":[]},{"location":"operations/runbooks/docs-semantic-audit/#prompt","level":2,"title":"Prompt","text":"

            Paste or adapt the following into a Claude Code session. The agent needs read access to docs/ and the site nav structure.

            Read every file under docs/ (including docs/blog/ and docs/recipes/).\nFor each file, note: title, word count, outbound links, inbound links\n(how many other pages link to it), and a one-line summary of its purpose.\n\nThen produce a report with these sections:\n\n## 1. Weak Dangling Pages\n\nPages that are thin, isolated, or redundant. Signs:\n- Under ~300 words with no unique content (just restates what another page says)\n- Zero or one inbound links (orphaned in the nav)\n- Content that would be stronger merged into an adjacent page\n- \"Try it in 5 minutes\" sections that assume installation already happened\n- Pages whose title doesn't work as a nav entry (too long, too vague)\n\nFor each: identify the page, explain why it's weak, and recommend\nmerge target or deletion.\n\n## 2. Overly Heavy Pages\n\nPages doing too much. Signs:\n- Over ~1500 words with multiple distinct topics\n- More than 4 H2 sections that could stand alone\n- Reader has to scroll past irrelevant content to find what they need\n- Mixed audience (beginner setup + advanced config on same page)\n\nFor each: identify the page, list the distinct topics, and suggest\nsplit points.\n\n## 3. Missing Cross-Links\n\nPlaces where a reader would naturally want to jump to related content\nbut no link exists. Look for:\n- Concepts mentioned but not linked (e.g., \"scratchpad\" without linking\n  to the scratchpad page)\n- Blog posts that describe features without linking to the reference docs\n- Recipes that reference workflows without linking to the relevant\n  getting-started section\n- Pages that end without a \"Next Up\" or \"See Also\" pointer\n\nFor each: source page, anchor text, suggested link target.\n\n## 4. Narrative Gaps\n\nThe docs should tell a coherent story: problem -> install -> first session\n-> daily workflow -> advanced patterns -> contributing. 
Look for:\n- Gaps in the progression (e.g., no bridge from \"first session\" to\n  \"daily habits\")\n- Blog posts that introduce concepts the reference docs don't cover\n- Recipes that assume knowledge no other page teaches\n- Features documented in CLI reference but missing from workflows/recipes\n\nFor each: describe the gap and suggest what page or section would fill it.\n\n## 5. Blog Cross-Linking Opportunities\n\nBlog posts are often written in isolation. Look for:\n- Posts that cover the same theme but don't reference each other\n- Posts that describe the evolution of a feature (natural \"part 1 / part 2\")\n- Posts that would benefit from a \"Related posts\" footer\n- Thematic clusters that could be linked from a recipe or reference page\n\nFor each: list the posts, the shared theme, and the suggested links.\n\n## Output Format\n\nFor every finding, include:\n- File path (docs/whatever.md)\n- Severity: high (actively confusing), medium (missed opportunity),\n  low (nice to have)\n- Concrete recommendation (merge into X, split at H2 Y, add link to Z)\n\nEnd with a prioritized action list: what to fix first.\n
            ","path":["Operations","Runbooks","Docs Semantic Audit"],"tags":[]},{"location":"operations/runbooks/docs-semantic-audit/#after-the-audit","level":2,"title":"After the Audit","text":"
            1. Triage findings: not everything needs fixing. Focus on high severity.
            2. Merge weak pages first: fewer pages is almost always better.
            3. Add cross-links: cheapest improvement, highest reader impact.
            4. File split decisions in DECISIONS.md: page splits are architectural.
            5. Regenerate the site and spot-check nav after structural changes.
            ","path":["Operations","Runbooks","Docs Semantic Audit"],"tags":[]},{"location":"operations/runbooks/docs-semantic-audit/#history","level":2,"title":"History","text":"
            • 2026-02-17: Created after merging docs/re-explaining.md into docs/about.md, which surfaced the pattern of weak standalone pages that dilute rather than add.
            • 2026-04-16: Moved from hack/runbooks/ to docs/operations/runbooks/.
            ","path":["Operations","Runbooks","Docs Semantic Audit"],"tags":[]},{"location":"operations/runbooks/hub-deployment/","level":1,"title":"Hub Deployment","text":"","path":["Operations","Runbooks","Hub Deployment"],"tags":[]},{"location":"operations/runbooks/hub-deployment/#hub-deployment","level":1,"title":"Hub Deployment","text":"

            Linear runbook for setting up a ctx Hub for yourself or a team. Consolidates pieces currently scattered across hub recipes and operations docs.

            When to use: First-time hub setup, or when onboarding a new team onto an existing hub.

            Prerequisites: ctx binary installed, network connectivity between hub and clients.

            Companion docs:

            • Hub overview: what the hub is and is not
            • Hub operations: data directory, systemd, backup, monitoring
            • Hub failure modes: what can go wrong
            ","path":["Operations","Runbooks","Hub Deployment"],"tags":[]},{"location":"operations/runbooks/hub-deployment/#step-1-start-the-hub","level":2,"title":"Step 1: Start the Hub","text":"Quick Start (foreground)Production (systemd)
            ctx hub start\n

            See Hub Operations: Systemd Unit for the full unit file.

            sudo systemctl enable --now ctx-hub\n

            The hub creates admin.token on first start. Save this token; it is the only way to register clients.

            ","path":["Operations","Runbooks","Hub Deployment"],"tags":[]},{"location":"operations/runbooks/hub-deployment/#step-2-generate-the-admin-token","level":2,"title":"Step 2: Generate the Admin Token","text":"

            On first start, the hub writes admin.token to the data directory (default ~/.ctx/hub-data/):

            cat ~/.ctx/hub-data/admin.token\n

            This token has full admin privileges. Keep it secret.

            ","path":["Operations","Runbooks","Hub Deployment"],"tags":[]},{"location":"operations/runbooks/hub-deployment/#step-3-register-clients","level":2,"title":"Step 3: Register Clients","text":"

            For each client (person or machine) that will connect:

            # On the hub machine\nctx hub register --name \"volkan-laptop\" --admin-token <admin-token>\n

            This returns a client token. Distribute it securely to the client.

            ","path":["Operations","Runbooks","Hub Deployment"],"tags":[]},{"location":"operations/runbooks/hub-deployment/#step-4-connect-clients","level":2,"title":"Step 4: Connect Clients","text":"

            On each client machine:

            ctx connect <hub-address> --token <client-token>\n

            Verify the connection:

            ctx connection status\n
            ","path":["Operations","Runbooks","Hub Deployment"],"tags":[]},{"location":"operations/runbooks/hub-deployment/#step-5-verify-sync","level":2,"title":"Step 5: Verify Sync","text":"

            Push a test entry from one client and verify it arrives:

            # Client A\nctx add learning \"Hub sync test\" --context \"Verifying hub setup\"\n\n# Client B (after a moment)\nctx status   # should show the new learning\n
            ","path":["Operations","Runbooks","Hub Deployment"],"tags":[]},{"location":"operations/runbooks/hub-deployment/#step-6-configure-backup","level":2,"title":"Step 6: Configure Backup","text":"

            Set up regular backups of the hub data directory. See Hub Operations: Backup and Restore.

            Minimum:

            # Add to cron\n0 */6 * * * cp ~/.ctx/hub-data/entries.jsonl ~/backups/entries-$(date +\\%F).jsonl\n
            ","path":["Operations","Runbooks","Hub Deployment"],"tags":[]},{"location":"operations/runbooks/hub-deployment/#step-7-configure-tls-when-available","level":2,"title":"Step 7: Configure TLS (When Available)","text":"

            Coming Soon

            TLS support is planned (H-01/H-02). Until then, run the hub on a trusted network or behind a reverse proxy with TLS termination.

            ","path":["Operations","Runbooks","Hub Deployment"],"tags":[]},{"location":"operations/runbooks/hub-deployment/#team-onboarding-checklist","level":2,"title":"Team Onboarding Checklist","text":"

            When adding a new team member to an existing hub:

            • Generate a client token (ctx hub register --name \"<name>\")
            • Share the token and hub address securely
            • Have them run ctx connect <hub-address> --token <token>
            • Verify with ctx connection status
            • Point them to the Hub Getting Started recipe
            ","path":["Operations","Runbooks","Hub Deployment"],"tags":[]},{"location":"operations/runbooks/hub-deployment/#troubleshooting","level":2,"title":"Troubleshooting","text":"","path":["Operations","Runbooks","Hub Deployment"],"tags":[]},{"location":"operations/runbooks/hub-deployment/#connection-refused","level":3,"title":"\"Connection Refused\"","text":"

            The hub isn't running or the port is wrong. Check:

            ctx hub status          # on the hub machine\nss -tlnp | grep 9900   # default port\n
            ","path":["Operations","Runbooks","Hub Deployment"],"tags":[]},{"location":"operations/runbooks/hub-deployment/#authentication-failed","level":3,"title":"\"Authentication Failed\"","text":"

            The client token is wrong or was never registered. Re-register:

            ctx hub register --name \"<name>\" --admin-token <admin-token>\n
            ","path":["Operations","Runbooks","Hub Deployment"],"tags":[]},{"location":"operations/runbooks/hub-deployment/#entries-not-syncing","level":3,"title":"Entries Not Syncing","text":"

            Check that the client is connected:

            ctx connection status\n

            If connected but not syncing, check the hub logs for sequence mismatch errors. See Hub Failure Modes for details.

            ","path":["Operations","Runbooks","Hub Deployment"],"tags":[]},{"location":"operations/runbooks/new-contributor/","level":1,"title":"New Contributor","text":"","path":["Operations","Runbooks","New Contributor"],"tags":[]},{"location":"operations/runbooks/new-contributor/#new-contributor-onboarding","level":1,"title":"New Contributor Onboarding","text":"

            Step-by-step onboarding sequence for new contributors. Consolidates setup instructions currently scattered across the README, contributing guide, and setup docs.

            When to use: First-time contributor setup, or when verifying your development environment after a major upgrade.

            ","path":["Operations","Runbooks","New Contributor"],"tags":[]},{"location":"operations/runbooks/new-contributor/#step-1-clone-the-repository","level":2,"title":"Step 1: Clone the Repository","text":"
            git clone https://github.com/ActiveMemory/ctx.git\ncd ctx\n

            Or fork first on GitHub, then clone your fork.

            ","path":["Operations","Runbooks","New Contributor"],"tags":[]},{"location":"operations/runbooks/new-contributor/#step-2-initialize-context","level":2,"title":"Step 2: Initialize Context","text":"
            ctx init\n

            This creates the .context/ directory with knowledge files and the .claude/ directory with agent configuration. If ctx is not yet installed, proceed to Step 3 first, then come back.

            ","path":["Operations","Runbooks","New Contributor"],"tags":[]},{"location":"operations/runbooks/new-contributor/#step-3-build-and-install","level":2,"title":"Step 3: Build and Install","text":"
            make build\nsudo make install\n

            Verify:

            ctx --version\n
            ","path":["Operations","Runbooks","New Contributor"],"tags":[]},{"location":"operations/runbooks/new-contributor/#step-4-install-the-plugin-claude-code-users","level":2,"title":"Step 4: Install the Plugin (Claude Code Users)","text":"

            If you use Claude Code, install the plugin from your local clone so skills and hooks reflect your working tree:

            1. Launch claude
            2. Type /plugin and press Enter
            3. Select Marketplaces -> Add Marketplace
            4. Enter the absolute path to your clone (e.g., ~/WORKSPACE/ctx)
            5. Back in /plugin, select Install and choose ctx

            Verify:

            claude /plugin list   # should show ctx\n

            See Contributing: Install the Plugin for details on cache clearing.

            ","path":["Operations","Runbooks","New Contributor"],"tags":[]},{"location":"operations/runbooks/new-contributor/#step-5-switch-to-dev-profile","level":2,"title":"Step 5: Switch to Dev Profile","text":"
            ctx config switch dev\n

            This enables verbose logging and notify events (useful during development).

            ","path":["Operations","Runbooks","New Contributor"],"tags":[]},{"location":"operations/runbooks/new-contributor/#step-6-verify-hooks","level":2,"title":"Step 6: Verify Hooks","text":"

            Start a Claude Code session and check that hooks fire:

            claude\n

            You should see ctx session hooks (ceremonies reminder, context loading) on session start. If not, check that the plugin is installed correctly (Step 4).

            ","path":["Operations","Runbooks","New Contributor"],"tags":[]},{"location":"operations/runbooks/new-contributor/#step-7-run-your-first-session","level":2,"title":"Step 7: Run Your First Session","text":"

            In Claude Code:

            /ctx-status\n

            This should show context file health, active tasks, and recent decisions. If it works, your setup is complete.

            ","path":["Operations","Runbooks","New Contributor"],"tags":[]},{"location":"operations/runbooks/new-contributor/#step-8-verify-context-persistence","level":2,"title":"Step 8: Verify Context Persistence","text":"

            End the session and start a new one:

            /ctx-remember\n

            The agent should recall what happened in the previous session. This confirms that context persistence is working end-to-end.

            ","path":["Operations","Runbooks","New Contributor"],"tags":[]},{"location":"operations/runbooks/new-contributor/#step-9-run-tests","level":2,"title":"Step 9: Run Tests","text":"
            make test     # unit tests\nmake audit    # full check: fmt + vet + lint + drift + docs + test\n

            All tests should pass with a clean clone.

            ","path":["Operations","Runbooks","New Contributor"],"tags":[]},{"location":"operations/runbooks/new-contributor/#quick-reference","level":2,"title":"Quick Reference","text":"Task Command Build make build Install sudo make install Test make test Full audit make audit Rebuild docs site make site Serve docs locally make site-serve Clear plugin cache make plugin-reload Switch config profile ctx config switch dev","path":["Operations","Runbooks","New Contributor"],"tags":[]},{"location":"operations/runbooks/new-contributor/#next-steps","level":2,"title":"Next Steps","text":"
            • Read the contributing guide for project layout, code style, and PR process
            • Check TASKS.md for open work items
            • Ask /ctx-next for suggested work
            ","path":["Operations","Runbooks","New Contributor"],"tags":[]},{"location":"operations/runbooks/plugin-release/","level":1,"title":"Plugin Release","text":"","path":["Operations","Runbooks","Plugin Release"],"tags":[]},{"location":"operations/runbooks/plugin-release/#plugin-release","level":1,"title":"Plugin Release","text":"

            Plugin-specific release procedure. The general release checklist covers the full ctx release; this runbook covers the plugin-specific steps that are not part of that flow.

            When to use: When releasing plugin changes (new skills, hook updates, permission changes) independently of a ctx binary release, or as a sub-procedure within the full release.

            ","path":["Operations","Runbooks","Plugin Release"],"tags":[]},{"location":"operations/runbooks/plugin-release/#what-ships-in-the-plugin","level":2,"title":"What Ships in the Plugin","text":"

            The plugin lives at internal/assets/claude/ and includes:

            Component Path What it does Skills internal/assets/claude/skills/ User-facing /ctx-* slash commands Hooks internal/assets/claude/hooks/ Pre/post tool-use hooks Plugin manifest internal/assets/claude/.claude-plugin/plugin.json Declares skills, hooks, version Marketplace .claude-plugin/marketplace.json Points Claude Code to the plugin","path":["Operations","Runbooks","Plugin Release"],"tags":[]},{"location":"operations/runbooks/plugin-release/#step-1-update-hooksjson-if-hooks-changed","level":2,"title":"Step 1: Update hooks.json (If Hooks Changed)","text":"

            If you added, removed, or modified hooks:

            # Verify hook definitions match implementations\nmake audit\n

            Check that plugin.json lists all hooks correctly. Missing hooks silently fail to fire.

            ","path":["Operations","Runbooks","Plugin Release"],"tags":[]},{"location":"operations/runbooks/plugin-release/#step-2-bump-version","level":2,"title":"Step 2: Bump Version","text":"

            Update the version in three places:

            • internal/assets/claude/.claude-plugin/plugin.json
            • .claude-plugin/marketplace.json (two fields)
            • editors/vscode/package.json + package-lock.json (if VS Code extension is affected)

            The Release Script Does This

            If you're running make release, the script bumps these automatically from VERSION. Only bump manually if you're releasing the plugin independently.

            ","path":["Operations","Runbooks","Plugin Release"],"tags":[]},{"location":"operations/runbooks/plugin-release/#step-3-test-against-a-fresh-install","level":2,"title":"Step 3: Test Against a Fresh Install","text":"
            # Clear cached plugin\nmake plugin-reload\n\n# Restart Claude Code, then:\nclaude /plugin list    # verify version\n

            Test the critical paths:

            • /ctx-status works
            • Session hooks fire (ceremonies, context loading)
            • At least one user-facing skill works end-to-end
            • Pre-tool-use hooks block when they should
            ","path":["Operations","Runbooks","Plugin Release"],"tags":[]},{"location":"operations/runbooks/plugin-release/#step-4-test-against-a-clean-project","level":2,"title":"Step 4: Test Against a Clean Project","text":"

            Create a temporary project to verify the plugin works outside the ctx repo:

            mkdir /tmp/test-ctx-plugin && cd /tmp/test-ctx-plugin\ngit init\nctx init\nclaude   # start a session, verify hooks fire\n
            ","path":["Operations","Runbooks","Plugin Release"],"tags":[]},{"location":"operations/runbooks/plugin-release/#step-5-verify-skill-count","level":2,"title":"Step 5: Verify Skill Count","text":"

            The plugin manifest declares all user-invocable skills. Verify the count matches:

            # Count skills in plugin.json\njq '.skills | length' internal/assets/claude/.claude-plugin/plugin.json\n\n# Count skill directories\nls -d internal/assets/claude/skills/ctx-*/ | wc -l\n

            These numbers should be close but may not match exactly (some skills are not user-invocable and won't appear in both counts).

            ","path":["Operations","Runbooks","Plugin Release"],"tags":[]},{"location":"operations/runbooks/plugin-release/#step-6-commit-and-tag","level":2,"title":"Step 6: Commit and Tag","text":"

            If releasing independently of a binary release:

            git add internal/assets/claude/ .claude-plugin/\ngit commit -m \"chore: release plugin v0.X.Y\"\ngit tag plugin-v0.X.Y\ngit push origin main --tags\n

            If part of a full release, the release checklist handles this.

            ","path":["Operations","Runbooks","Plugin Release"],"tags":[]},{"location":"operations/runbooks/plugin-release/#troubleshooting","level":2,"title":"Troubleshooting","text":"","path":["Operations","Runbooks","Plugin Release"],"tags":[]},{"location":"operations/runbooks/plugin-release/#skills-dont-appear-after-update","level":3,"title":"Skills Don't Appear After Update","text":"

            Claude Code caches plugin files aggressively:

            make plugin-reload    # clears cache\n# restart Claude Code\n
            ","path":["Operations","Runbooks","Plugin Release"],"tags":[]},{"location":"operations/runbooks/plugin-release/#hooks-dont-fire","level":3,"title":"Hooks Don't Fire","text":"

            Check that the hook is registered in plugin.json and that the command it calls exists:

            jq '.hooks' internal/assets/claude/.claude-plugin/plugin.json\n
            ","path":["Operations","Runbooks","Plugin Release"],"tags":[]},{"location":"operations/runbooks/plugin-release/#version-mismatch","level":3,"title":"Version Mismatch","text":"

            If claude /plugin list shows an old version after updating:

            make plugin-reload\n# restart Claude Code\nclaude /plugin list   # should show new version\n
            ","path":["Operations","Runbooks","Plugin Release"],"tags":[]},{"location":"operations/runbooks/release-checklist/","level":1,"title":"Release Checklist","text":"","path":["Operations","Runbooks","Release Checklist"],"tags":[]},{"location":"operations/runbooks/release-checklist/#release-checklist","level":1,"title":"Release Checklist","text":"

            The canonical pre-release sequence. This runbook ties together the audits, tests, and release steps that are otherwise scattered across docs and the operator's head.

            When to run: Before every release. No exceptions.

            Companion: The /_ctx-release skill automates the tag-and-push portion; this checklist covers everything before and after that automation.

            ","path":["Operations","Runbooks","Release Checklist"],"tags":[]},{"location":"operations/runbooks/release-checklist/#pre-release","level":2,"title":"Pre-Release","text":"","path":["Operations","Runbooks","Release Checklist"],"tags":[]},{"location":"operations/runbooks/release-checklist/#1-run-the-codebase-audit","level":3,"title":"1. Run the Codebase Audit","text":"

            Use the codebase audit runbook prompt with your agent. Focus on analyses 1-4 (extractable patterns, documentation drift, maintainability, security). Triage findings into TASKS.md; anything blocking ships before the release.

            ","path":["Operations","Runbooks","Release Checklist"],"tags":[]},{"location":"operations/runbooks/release-checklist/#2-run-the-docs-semantic-audit","level":3,"title":"2. Run the Docs Semantic Audit","text":"

            Use the docs semantic audit runbook prompt. Fix high-severity findings (weak pages, broken narrative arcs). Medium-severity items can be deferred.

            ","path":["Operations","Runbooks","Release Checklist"],"tags":[]},{"location":"operations/runbooks/release-checklist/#3-sanitize-permissions","level":3,"title":"3. Sanitize Permissions","text":"

            Follow the sanitize permissions runbook. Clean up .claude/settings.local.json before it gets committed as part of the release.

            ","path":["Operations","Runbooks","Release Checklist"],"tags":[]},{"location":"operations/runbooks/release-checklist/#4-run-the-full-test-suite","level":3,"title":"4. Run the Full Test Suite","text":"
            make audit    # fmt + vet + lint + drift + docs + test\nmake smoke    # integration smoke tests\n

            All tests must pass. No exceptions.

            ","path":["Operations","Runbooks","Release Checklist"],"tags":[]},{"location":"operations/runbooks/release-checklist/#5-check-context-health","level":3,"title":"5. Check Context Health","text":"
            ctx drift          # broken references, stale patterns\nctx status         # context file health\n/ctx-link-check    # dead links in docs\n

            Fix anything flagged.

            ","path":["Operations","Runbooks","Release Checklist"],"tags":[]},{"location":"operations/runbooks/release-checklist/#6-review-tasksmd","level":3,"title":"6. Review TASKS.md","text":"

            Scan for incomplete tasks tagged as release-blocking. Either finish them or explicitly defer with a reason in the task note.

            ","path":["Operations","Runbooks","Release Checklist"],"tags":[]},{"location":"operations/runbooks/release-checklist/#release","level":2,"title":"Release","text":"","path":["Operations","Runbooks","Release Checklist"],"tags":[]},{"location":"operations/runbooks/release-checklist/#7-bump-version","level":3,"title":"7. Bump Version","text":"
            echo \"0.X.0\" > VERSION\ngit add VERSION\ngit commit -m \"chore: bump version to 0.X.0\"\n
            ","path":["Operations","Runbooks","Release Checklist"],"tags":[]},{"location":"operations/runbooks/release-checklist/#8-generate-release-notes","level":3,"title":"8. Generate Release Notes","text":"

            In Claude Code:

            /_ctx-release-notes\n

            Review dist/RELEASE_NOTES.md. Ensure it captures all user-visible changes.

            ","path":["Operations","Runbooks","Release Checklist"],"tags":[]},{"location":"operations/runbooks/release-checklist/#9-cut-the-release","level":3,"title":"9. Cut the Release","text":"
            make release\n

            Or in Claude Code: /_ctx-release. See Cutting a Release for the full step-by-step.

            ","path":["Operations","Runbooks","Release Checklist"],"tags":[]},{"location":"operations/runbooks/release-checklist/#post-release","level":2,"title":"Post-Release","text":"","path":["Operations","Runbooks","Release Checklist"],"tags":[]},{"location":"operations/runbooks/release-checklist/#10-verify-the-github-release","level":3,"title":"10. Verify the GitHub Release","text":"
            • GitHub Releases shows the new version
            • All 6 binaries are attached
            • SHA256 checksums are attached
            • Release notes render correctly
            ","path":["Operations","Runbooks","Release Checklist"],"tags":[]},{"location":"operations/runbooks/release-checklist/#11-update-the-plugin-marketplace","level":3,"title":"11. Update the Plugin Marketplace","text":"

            If the plugin version changed, verify the marketplace entry:

            claude /plugin list   # shows updated version\n
            ","path":["Operations","Runbooks","Release Checklist"],"tags":[]},{"location":"operations/runbooks/release-checklist/#12-announce","level":3,"title":"12. Announce","text":"

            Post in the project's communication channels. Reference the release notes.

            ","path":["Operations","Runbooks","Release Checklist"],"tags":[]},{"location":"operations/runbooks/release-checklist/#13-clean-up","level":3,"title":"13. Clean Up","text":"
            rm dist/RELEASE_NOTES.md   # consumed by the release script\ngit stash pop              # if you stashed earlier\n
            ","path":["Operations","Runbooks","Release Checklist"],"tags":[]},{"location":"operations/runbooks/sanitize-permissions/","level":1,"title":"Sanitize Permissions","text":"","path":["Operations","Runbooks","Sanitize Permissions"],"tags":[]},{"location":"operations/runbooks/sanitize-permissions/#sanitize-permissions","level":1,"title":"Sanitize Permissions","text":"

            Manual procedure for cleaning up .claude/settings.local.json. The agent may analyze and recommend, but you make every edit.

            ","path":["Operations","Runbooks","Sanitize Permissions"],"tags":[]},{"location":"operations/runbooks/sanitize-permissions/#why-manual-not-automated","level":2,"title":"Why Manual, Not Automated","text":"

            settings.local.json controls what the agent can do without asking. An agent that can edit its own permission file is a self-escalation vector, especially if the skill is auto-accepted. Keep this manual.

            When to run: After busy sessions where you clicked \"Allow\" many times, weekly hygiene (pair with ctx drift), or before committing .claude/settings.local.json.

            ","path":["Operations","Runbooks","Sanitize Permissions"],"tags":[]},{"location":"operations/runbooks/sanitize-permissions/#step-1-snapshot","level":2,"title":"Step 1: Snapshot","text":"
            cp .claude/settings.local.json /tmp/settings-backup-$(date +%Y%m%d).json\n
            ","path":["Operations","Runbooks","Sanitize Permissions"],"tags":[]},{"location":"operations/runbooks/sanitize-permissions/#step-2-extract-the-allow-list","level":2,"title":"Step 2: Extract the Allow List","text":"
            jq '.permissions.allow[]' .claude/settings.local.json | sort\n

            Eyeball it. You're looking for four categories:

            ","path":["Operations","Runbooks","Sanitize Permissions"],"tags":[]},{"location":"operations/runbooks/sanitize-permissions/#step-3-identify-problems","level":2,"title":"Step 3: Identify Problems","text":"","path":["Operations","Runbooks","Sanitize Permissions"],"tags":[]},{"location":"operations/runbooks/sanitize-permissions/#a-garbage-nonsense","level":3,"title":"A. Garbage / Nonsense","text":"

            Entries that are clearly broken or meaningless:

            Bash(done)\nBash(__NEW_LINE_aa838494a90279c4__ echo \"\")\n

            Action: Delete.

            ","path":["Operations","Runbooks","Sanitize Permissions"],"tags":[]},{"location":"operations/runbooks/sanitize-permissions/#b-one-off-commands-session-debris","level":3,"title":"B. One-Off Commands (Session Debris)","text":"

            Entries with hardcoded paths, literal arguments, or exact commands that were accepted during a specific debugging session:

            Bash(git -C /home/jose/WORKSPACE/ctx log --oneline --all -20)\nBash(/home/jose/WORKSPACE/ctx/ctx add decision \"Use PostgreSQL\" --context ...)\n

            Signs of a one-off:

            • Full absolute paths to specific files
            • Literal string arguments (not wildcards)
            • Very specific flag combinations
            • Commands that look like they came from a single task

            Action: Delete unless you want to promote to a wildcard pattern.

            ","path":["Operations","Runbooks","Sanitize Permissions"],"tags":[]},{"location":"operations/runbooks/sanitize-permissions/#c-subsumed-entries-redundant","level":3,"title":"C. Subsumed Entries (Redundant)","text":"

            A narrow entry that's already covered by a broader one:

            # Narrow (redundant):\nBash(ctx journal source)\nBash(git -C /home/jose/WORKSPACE/ctx log --oneline -5)\n\n# Broad (already covers the above):\nBash(ctx journal source:*)\nBash(git -C:*)\n

            To find these, look for entries where removing the specific args would match an existing wildcard entry.

            Action: Delete the narrow entry.

            ","path":["Operations","Runbooks","Sanitize Permissions"],"tags":[]},{"location":"operations/runbooks/sanitize-permissions/#d-duplicate-intent-different-spelling","level":3,"title":"D. Duplicate Intent, Different Spelling","text":"

            Same command with env vars in different order, or slight variations:

            Bash(CGO_ENABLED=0 CTX_SKIP_PATH_CHECK=1 go test:*)\nBash(CTX_SKIP_PATH_CHECK=1 CGO_ENABLED=0 go test:*)\n

            Action: Keep one, delete the other.

            ","path":["Operations","Runbooks","Sanitize Permissions"],"tags":[]},{"location":"operations/runbooks/sanitize-permissions/#step-4-check-for-security-concerns","level":2,"title":"Step 4: Check for Security Concerns","text":"

            While you're in here, also flag:

            Pattern Risk Bash(git push:*) Bypasses block-git-push.sh hook Bash(rm -rf:*) Recursive delete, no confirmation Bash(sudo:*) Privilege escalation Bash(echo:*), Bash(cat:*) Can compose into writes to sensitive files Bash(curl:*), Bash(wget:*) Arbitrary network access Any write to .claude/ paths Agent self-modification

            See the /ctx-permission-sanitize skill for the full threat matrix.

            ","path":["Operations","Runbooks","Sanitize Permissions"],"tags":[]},{"location":"operations/runbooks/sanitize-permissions/#step-5-edit","level":2,"title":"Step 5: Edit","text":"

            Edit .claude/settings.local.json directly in your editor. Remove flagged entries. Keep the JSON valid.

            # Validate JSON after editing\njq . .claude/settings.local.json > /dev/null && echo \"valid\" || echo \"BROKEN\"\n
            ","path":["Operations","Runbooks","Sanitize Permissions"],"tags":[]},{"location":"operations/runbooks/sanitize-permissions/#step-6-verify","level":2,"title":"Step 6: Verify","text":"
            # Compare before/after\ndiff /tmp/settings-backup-$(date +%Y%m%d).json .claude/settings.local.json\n
            ","path":["Operations","Runbooks","Sanitize Permissions"],"tags":[]},{"location":"operations/runbooks/sanitize-permissions/#step-7-optionally-commit","level":2,"title":"Step 7: Optionally Commit","text":"
            git add .claude/settings.local.json\ngit commit -m \"chore: sanitize agent permissions\"\n
            ","path":["Operations","Runbooks","Sanitize Permissions"],"tags":[]},{"location":"operations/runbooks/sanitize-permissions/#asking-the-agent-for-help","level":2,"title":"Asking the Agent for Help","text":"

            You can safely ask the agent to analyze the file:

            \"Look at my settings.local.json and tell me which permissions look like one-offs or are redundant.\"

            The agent can read and report. You do the edits.

            Do not add these to your allow list:

            • Skill(ctx-permission-sanitize)
            • Edit(.claude/settings.local.json)
            • Any Bash(...) pattern that writes to .claude/
            ","path":["Operations","Runbooks","Sanitize Permissions"],"tags":[]},{"location":"operations/runbooks/sanitize-permissions/#history","level":2,"title":"History","text":"
            • 2026-02-15: Created as manual-only procedure after deciding against a self-modifying skill.
            • 2026-04-16: Moved from hack/runbooks/ to docs/operations/runbooks/.
            ","path":["Operations","Runbooks","Sanitize Permissions"],"tags":[]},{"location":"recipes/","level":1,"title":"Recipes","text":"

            Workflow recipes combining ctx commands and skills to solve specific problems.

            ","path":["Recipes"],"tags":[]},{"location":"recipes/#getting-started","level":2,"title":"Getting Started","text":"","path":["Recipes"],"tags":[]},{"location":"recipes/#guide-your-agent","level":3,"title":"Guide Your Agent","text":"

            How commands, skills, and conversational patterns work together. Train your agent to be proactive through ask, guide, reinforce.

            ","path":["Recipes"],"tags":[]},{"location":"recipes/#setup-across-ai-tools","level":3,"title":"Setup across AI Tools","text":"

            Initialize ctx and configure hooks for Claude Code, Cursor, Aider, Copilot, or Windsurf. Includes shell completion, watch mode for non-native tools, and verification.

            Uses: ctx init, ctx setup, ctx agent, ctx completion, ctx watch

            ","path":["Recipes"],"tags":[]},{"location":"recipes/#multilingual-session-parsing","level":3,"title":"Multilingual Session Parsing","text":"

            Parse session journal entries written in other languages. Configure recognized session-header prefixes so the journal pipeline works for Turkish, Japanese, and any other locale.

            Uses: ctx journal source, ctx journal import, session_prefixes in .ctxrc

            ","path":["Recipes"],"tags":[]},{"location":"recipes/#keeping-context-in-a-separate-repo","level":3,"title":"Keeping Context in a Separate Repo","text":"

            Store context files outside the project tree: in a private repo, shared directory, or anywhere else. Useful for open source projects with private context or multi-repo setups.

            Uses: ctx init, CTX_DIR, .ctxrc, /ctx-status

            ","path":["Recipes"],"tags":[]},{"location":"recipes/#sessions","level":2,"title":"Sessions","text":"","path":["Recipes"],"tags":[]},{"location":"recipes/#the-complete-session","level":3,"title":"The Complete Session","text":"

            Walk through a full ctx session from start to finish:

            • Loading context,
            • Picking what to work on,
            • Committing with context,
            • Capturing, reflecting, and saving a snapshot.

            Uses: ctx status, ctx agent, /ctx-remember, /ctx-next, /ctx-commit, /ctx-reflect

            ","path":["Recipes"],"tags":[]},{"location":"recipes/#session-ceremonies","level":3,"title":"Session Ceremonies","text":"

            The two bookend rituals for every session: /ctx-remember at the start to load and confirm context, /ctx-wrap-up at the end to review the session and persist learnings, decisions, and tasks.

            Uses: /ctx-remember, /ctx-wrap-up, /ctx-commit, ctx agent, ctx add

            ","path":["Recipes"],"tags":[]},{"location":"recipes/#browsing-and-enriching-past-sessions","level":3,"title":"Browsing and Enriching Past Sessions","text":"

            Export your AI session history to a browsable journal site. Enrich entries with metadata and search across months of work.

            Uses: ctx journal source/import, ctx journal site, ctx journal obsidian, ctx serve, /ctx-history, /ctx-journal-enrich, /ctx-journal-enrich-all

            ","path":["Recipes"],"tags":[]},{"location":"recipes/#session-reminders","level":3,"title":"Session Reminders","text":"

            Leave a message for your next session. Reminders surface automatically at session start and repeat until dismissed. Date-gate reminders to surface only after a specific date.

            Uses: ctx remind, ctx remind list, ctx remind dismiss, ctx system check-reminders

            ","path":["Recipes"],"tags":[]},{"location":"recipes/#reviewing-session-changes","level":3,"title":"Reviewing Session Changes","text":"

            See what moved since your last session: context file edits, code commits, directories touched. Auto-detects session boundaries from state markers.

            Uses: ctx change, ctx agent, ctx status

            ","path":["Recipes"],"tags":[]},{"location":"recipes/#pausing-context-hooks","level":3,"title":"Pausing Context Hooks","text":"

            Silence all nudge hooks for a quick task that doesn't need ceremony overhead. Session-scoped: other sessions are unaffected. Security hooks still fire.

            Uses: ctx hook pause, ctx hook resume, /ctx-pause, /ctx-resume

            ","path":["Recipes"],"tags":[]},{"location":"recipes/#knowledge-and-tasks","level":2,"title":"Knowledge and Tasks","text":"","path":["Recipes"],"tags":[]},{"location":"recipes/#persisting-decisions-learnings-and-conventions","level":3,"title":"Persisting Decisions, Learnings, and Conventions","text":"

            Record architectural decisions with rationale, capture gotchas and lessons learned, and codify conventions so they survive across sessions and team members.

            Uses: ctx add decision, ctx add learning, ctx add convention, ctx decision reindex, ctx learning reindex, /ctx-decision-add, /ctx-learning-add, /ctx-convention-add, /ctx-reflect

            ","path":["Recipes"],"tags":[]},{"location":"recipes/#tracking-work-across-sessions","level":3,"title":"Tracking Work across Sessions","text":"

            Add, prioritize, complete, snapshot, and archive tasks. Keep TASKS.md focused as your project evolves across dozens of sessions.

            Uses: ctx add task, ctx task complete, ctx task archive, ctx task snapshot, /ctx-task-add, /ctx-archive, /ctx-next

            ","path":["Recipes"],"tags":[]},{"location":"recipes/#using-the-scratchpad","level":3,"title":"Using the Scratchpad","text":"

            Use the encrypted scratchpad for quick notes, working memory, and sensitive values during AI sessions. Natural language in, encrypted storage out.

            Uses: ctx pad, /ctx-pad, ctx pad show, ctx pad edit

            ","path":["Recipes"],"tags":[]},{"location":"recipes/#syncing-scratchpad-notes-across-machines","level":3,"title":"Syncing Scratchpad Notes across Machines","text":"

            Distribute your scratchpad encryption key, push and pull encrypted notes via git, and resolve merge conflicts when two machines edit simultaneously.

            Uses: ctx init, ctx pad, ctx pad resolve, scp

            ","path":["Recipes"],"tags":[]},{"location":"recipes/#bridging-claude-code-auto-memory","level":3,"title":"Bridging Claude Code Auto Memory","text":"

            Mirror Claude Code's auto memory (MEMORY.md) into .context/ for version control, portability, and drift detection. Import entries into structured context files with heuristic classification.

            Uses: ctx memory sync, ctx memory status, ctx memory diff, ctx memory import, ctx memory publish, ctx system check-memory-drift

            ","path":["Recipes"],"tags":[]},{"location":"recipes/#hooks-and-notifications","level":2,"title":"Hooks and Notifications","text":"","path":["Recipes"],"tags":[]},{"location":"recipes/#hook-output-patterns","level":3,"title":"Hook Output Patterns","text":"

            Choose the right output pattern for your Claude Code hooks: VERBATIM relay for user-facing reminders, hard gates for invariants, agent directives for nudges, and five more patterns across the spectrum.

            Uses: ctx plugin hooks, settings.local.json

            ","path":["Recipes"],"tags":[]},{"location":"recipes/#customizing-hook-messages","level":3,"title":"Customizing Hook Messages","text":"

            Customize what hooks say without changing what they do. Override the QA gate for Python (pytest instead of make lint), silence noisy ceremony nudges, or tailor post-commit instructions for your stack.

            Uses: ctx hook message list, ctx hook message show, ctx hook message edit, ctx hook message reset

            ","path":["Recipes"],"tags":[]},{"location":"recipes/#hook-sequence-diagrams","level":3,"title":"Hook Sequence Diagrams","text":"

            Mermaid sequence diagrams for every system hook: entry conditions, state reads, output, throttling, and exit points. Includes throttling summary table and state file reference.

            Uses: All ctx system hooks

            ","path":["Recipes"],"tags":[]},{"location":"recipes/#auditing-system-hooks","level":3,"title":"Auditing System Hooks","text":"

            The 12 system hooks that run invisibly during every session: what each one does, why it exists, and how to verify they're actually firing. Covers webhook-based audit trails, log inspection, and detecting silent hook failures.

            Uses: ctx system, ctx hook notify, .context/logs/, .ctxrc notify.events

            ","path":["Recipes"],"tags":[]},{"location":"recipes/#webhook-notifications","level":3,"title":"Webhook Notifications","text":"

            Get push notifications when loops complete, hooks fire, or agents hit milestones. Webhook URL is encrypted: never stored in plaintext. Works with IFTTT, Slack, Discord, ntfy.sh, or any HTTP endpoint.

            Uses: ctx hook notify setup, ctx hook notify test, ctx hook notify --event, .ctxrc notify.events

            ","path":["Recipes"],"tags":[]},{"location":"recipes/#configuration-profiles","level":3,"title":"Configuration Profiles","text":"

            Switch between dev and base runtime configurations without editing .ctxrc by hand. Verbose logging and webhooks for debugging, clean defaults for normal sessions.

            Uses: ctx config switch, ctx config status, /ctx-config

            ","path":["Recipes"],"tags":[]},{"location":"recipes/#maintenance","level":2,"title":"Maintenance","text":"","path":["Recipes"],"tags":[]},{"location":"recipes/#detecting-and-fixing-drift","level":3,"title":"Detecting and Fixing Drift","text":"

            Keep context files accurate by detecting structural drift (stale paths, missing files, stale file ages) and task staleness.

            Uses: ctx drift, ctx sync, ctx compact, ctx status, /ctx-drift, /ctx-status, /ctx-prompt-audit

            ","path":["Recipes"],"tags":[]},{"location":"recipes/#state-directory-maintenance","level":3,"title":"State Directory Maintenance","text":"

            Clean up session tombstones from .context/state/. Prune old per-session files, identify stale global markers, and keep the state directory lean.

            Uses: ctx prune

            ","path":["Recipes"],"tags":[]},{"location":"recipes/#troubleshooting","level":3,"title":"Troubleshooting","text":"

            Diagnose hook failures, noisy nudges, stale context, and configuration issues. Start with ctx doctor for a structural health check, then use /ctx-doctor for agent-driven analysis of event patterns.

            Uses: ctx doctor, ctx hook event, /ctx-doctor

            ","path":["Recipes"],"tags":[]},{"location":"recipes/#claude-code-permission-hygiene","level":3,"title":"Claude Code Permission Hygiene","text":"

            Keep .claude/settings.local.json clean: recommended safe defaults, what to never pre-approve, and a maintenance workflow for cleaning up session debris.

            Uses: ctx init, /ctx-drift, /ctx-permission-sanitize, ctx permission snapshot, ctx permission restore

            ","path":["Recipes"],"tags":[]},{"location":"recipes/#permission-snapshots","level":3,"title":"Permission Snapshots","text":"

            Capture a known-good permission baseline as a golden image, then restore at session start to automatically drop session-accumulated permissions.

            Uses: ctx permission snapshot, ctx permission restore, /ctx-permission-sanitize

            ","path":["Recipes"],"tags":[]},{"location":"recipes/#turning-activity-into-content","level":3,"title":"Turning Activity into Content","text":"

            Generate blog posts from project activity, write changelog posts from commit ranges, and publish a browsable journal site from your session history.

            The output is generic Markdown, but the skills are tuned for the ctx-style blog artifacts you see on this website.

            Uses: ctx journal site, ctx journal obsidian, ctx serve, ctx journal import, /ctx-blog, /ctx-blog-changelog, /ctx-journal-enrich

            ","path":["Recipes"],"tags":[]},{"location":"recipes/#importing-claude-code-plans","level":3,"title":"Importing Claude Code Plans","text":"

            Import Claude Code plan files (~/.claude/plans/*.md) into specs/ as permanent project specs. Filter by date, select interactively, and optionally create tasks referencing each imported spec.

            Uses: /ctx-plan-import, /ctx-task-add

            ","path":["Recipes"],"tags":[]},{"location":"recipes/#design-before-coding","level":3,"title":"Design Before Coding","text":"

            Front-load design with a four-skill chain: brainstorm the approach, spec the design, task the work, implement step-by-step. Each step produces an artifact that feeds the next.

            Uses: /ctx-brainstorm, /ctx-spec, /ctx-task-add, /ctx-implement, /ctx-decision-add

            ","path":["Recipes"],"tags":[]},{"location":"recipes/#scrutinizing-a-plan","level":3,"title":"Scrutinizing a Plan","text":"

            Once a plan exists, run an adversarial interview to surface what's weak, missing, or unexamined before you commit. Walks the plan depth-first: assumptions, failure modes, alternatives, sequencing, reversibility. The complement to brainstorm: brainstorm produces plans, this attacks them.

            Uses: /ctx-plan, /ctx-spec, /ctx-decision-add, /ctx-learning-add

            ","path":["Recipes"],"tags":[]},{"location":"recipes/#agents-and-automation","level":2,"title":"Agents and Automation","text":"","path":["Recipes"],"tags":[]},{"location":"recipes/#building-project-skills","level":3,"title":"Building Project Skills","text":"

            Encode repeating workflows into reusable skills the agent loads automatically. Covers the full cycle: identify a pattern, create the skill, test with realistic prompts, and iterate until it triggers correctly.

            Uses: /ctx-skill-create, ctx init

            ","path":["Recipes"],"tags":[]},{"location":"recipes/#running-an-unattended-ai-agent","level":3,"title":"Running an Unattended AI Agent","text":"

            Set up a loop where an AI agent works through tasks overnight without you at the keyboard, using ctx for persistent memory between iterations.

            This recipe shows how ctx supports long-running agent loops without losing context or intent.

            Uses: ctx init, ctx loop, ctx watch, ctx load, /ctx-loop, /ctx-implement

            ","path":["Recipes"],"tags":[]},{"location":"recipes/#when-to-use-a-team-of-agents","level":3,"title":"When to Use a Team of Agents","text":"

            Decision framework for choosing between a single agent, parallel worktrees, and a full agent team.

            This recipe covers the file overlap test, when teams make things worse, and what ctx provides at each level.

            Uses: /ctx-worktree, /ctx-next, ctx status

            ","path":["Recipes"],"tags":[]},{"location":"recipes/#parallel-agent-development-with-git-worktrees","level":3,"title":"Parallel Agent Development with Git Worktrees","text":"

            Split a large backlog across 3-4 agents using git worktrees, each on its own branch and working directory. Group tasks by file overlap, work in parallel, merge back.

            Uses: /ctx-worktree, /ctx-next, git worktree, git merge

            ","path":["Recipes"],"tags":[]},{"location":"recipes/#architecture-deep-dive","level":3,"title":"Architecture Deep Dive","text":"

            Three-pass pipeline for understanding a codebase: map what exists, enrich with code intelligence, then hunt for where it will silently fail. Produces architecture docs, quantified dependency data, and ranked failure hypotheses.

            Uses: /ctx-architecture, /ctx-architecture-enrich, /ctx-architecture-failure-analysis

            ","path":["Recipes"],"tags":[]},{"location":"recipes/#writing-steering-files","level":3,"title":"Writing Steering Files","text":"

            Tell your AI assistant how to behave with rule-based prompt injection that fires automatically when prompts match a description. Walks through scaffolding a steering file, previewing matches, and syncing to each AI tool's native format.

            Uses: ctx steering add, ctx steering preview, ctx steering list, ctx steering sync

            ","path":["Recipes"],"tags":[]},{"location":"recipes/#authoring-lifecycle-triggers","level":3,"title":"Authoring Lifecycle Triggers","text":"

            Run executable shell scripts at session-start, pre-tool-use, file-save, and other lifecycle events. Script-based automation (complementary to steering's rule-based prompts), with a security-first workflow: scaffold disabled, test with mock input, enable only after review.

            Uses: ctx trigger add, ctx trigger test, ctx trigger enable, ctx trigger disable, ctx trigger list

            ","path":["Recipes"],"tags":[]},{"location":"recipes/#hub","level":2,"title":"Hub","text":"","path":["Recipes"],"tags":[]},{"location":"recipes/#hub-overview","level":3,"title":"Hub Overview","text":"

            Mental model and three user stories for the ctx Hub. What flows, what doesn't, and when not to use it. Read this before any of the other Hub recipes.

            Uses: ctx hub, ctx connection, ctx add --share

            ","path":["Recipes"],"tags":[]},{"location":"recipes/#ctx-hub-getting-started","level":3,"title":"ctx Hub: Getting Started","text":"

            Stand up a single-node hub on localhost, register two projects, publish a decision from one, and watch it appear in the other. End-to-end in under five minutes.

            Uses: ctx hub start, ctx connection register, ctx connection subscribe, ctx connection sync, ctx connection listen, ctx add --share, ctx agent --include-hub

            ","path":["Recipes"],"tags":[]},{"location":"recipes/#personal-cross-project-brain","level":3,"title":"Personal Cross-Project Brain","text":"

            Story 1 day-to-day workflow: one developer, many projects, one hub on localhost. Records a learning in project A, watches it show up automatically in project B. Walks through a realistic day of using the hub as passive infrastructure (no manual sync, no git push, no ceremony).

            Uses: ctx add --share, ctx connection subscribe, ctx agent --include-hub

            ","path":["Recipes"],"tags":[]},{"location":"recipes/#team-knowledge-bus","level":3,"title":"Team Knowledge Bus","text":"

            Story 2 day-to-day workflow: a small trusted team sharing decisions, learnings, and conventions via a hub on an internal server. Covers the team publishing culture, what belongs on the hub vs. local, token management, and the social rules that make a shared knowledge stream stay signal-rich.

            Uses: ctx add --share, ctx connection status, ctx connection subscribe, ctx hub status

            ","path":["Recipes"],"tags":[]},{"location":"recipes/#ctx-hub-multi-machine","level":3,"title":"ctx Hub: Multi-Machine","text":"

            Run the hub on a LAN host as a daemon and connect from project directories on other workstations. Firewall guidance, TLS via a reverse proxy, and safe daemon restart semantics.

            Uses: ctx hub start --daemon, ctx hub stop, ctx connection register, ctx connection status

            ","path":["Recipes"],"tags":[]},{"location":"recipes/#ctx-hub-ha-cluster","level":3,"title":"ctx Hub: HA Cluster","text":"

            Raft-based leader election across three or more nodes for redundancy. Covers bootstrap, runtime peer management, graceful stepdown, and the Raft-lite durability caveat.

            Uses: ctx hub start --peers, ctx hub status, ctx hub peer add/remove, ctx hub stepdown

            ","path":["Recipes"],"tags":[]},{"location":"recipes/activating-context/","level":1,"title":"Activating a Context Directory","text":"","path":["Recipes","Getting Started","Activating a Context Directory"],"tags":[]},{"location":"recipes/activating-context/#the-problem","level":2,"title":"The Problem","text":"

            You ran a ctx command and got:

            Error: no context directory specified for this project\n

            This means ctx doesn't know which .context/ directory to operate on. It will not guess, and it will not walk up from your current working directory looking for one; that behavior was removed deliberately, because silent inference was the source of several bugs (stray agent-created directories, cross-project bleed-through, webhook-route misrouting, sub-agent fragmentation). Every ctx command requires you to declare the target directory explicitly.

            This page shows you the three ways to do that and when to use each.

            ","path":["Recipes","Getting Started","Activating a Context Directory"],"tags":[]},{"location":"recipes/activating-context/#tldr","level":2,"title":"TL;DR","text":"

            If the project has already been initialized and you just need to bind it for your shell:

            eval \"$(ctx activate)\"\n

            That's 95% of the time. Add it to your .zshrc / .bashrc, use direnv with a per-project .envrc, or run it once per terminal.

            ","path":["Recipes","Getting Started","Activating a Context Directory"],"tags":[]},{"location":"recipes/activating-context/#when-you-see-the-error","level":2,"title":"When You See the Error","text":"

            The exact error message depends on how many .context/ directories are visible from the current directory:

            ","path":["Recipes","Getting Started","Activating a Context Directory"],"tags":[]},{"location":"recipes/activating-context/#zero-candidates","level":3,"title":"Zero Candidates","text":"
            Error: no context directory specified for this project\n

            Either you haven't initialized this project yet (run ctx init) or you're in a directory that doesn't belong to a ctx-tracked project. If you know the project lives elsewhere, use one of the declaration methods below with its absolute path.

            ","path":["Recipes","Getting Started","Activating a Context Directory"],"tags":[]},{"location":"recipes/activating-context/#one-candidate","level":3,"title":"One Candidate","text":"
            Error: no context directory specified; a likely candidate is at\n    /Users/you/repos/myproject/.context\n

            ctx found a single .context/ on the way up from here but won't bind to it automatically. Run eval \"$(ctx activate)\" and ctx will emit the export for the candidate. Or set CTX_DIR by hand.

            ","path":["Recipes","Getting Started","Activating a Context Directory"],"tags":[]},{"location":"recipes/activating-context/#multiple-candidates","level":3,"title":"Multiple Candidates","text":"
            Error: no context directory specified; multiple candidates visible:\n  /Users/you/repos/myproject/.context\n  /Users/you/repos/myproject/packages/web/.context\n

            You're inside nested projects. Pick the one you mean:

            ctx activate /Users/you/repos/myproject/.context\n# …copy and paste the `export` line it prints, or wrap in eval:\neval \"$(ctx activate /Users/you/repos/myproject/.context)\"\n
            ","path":["Recipes","Getting Started","Activating a Context Directory"],"tags":[]},{"location":"recipes/activating-context/#three-ways-to-declare","level":2,"title":"Three Ways to Declare","text":"","path":["Recipes","Getting Started","Activating a Context Directory"],"tags":[]},{"location":"recipes/activating-context/#1-ctx-activate-recommended-for-shells","level":3,"title":"1. ctx activate (Recommended for Shells)","text":"

            ctx activate emits a shell-native export CTX_DIR=... line to stdout. Wrap it in eval and the binding takes effect for the current shell:

            # Walk up from current dir and bind the single visible candidate:\neval \"$(ctx activate)\"\n\n# Bind a specific path explicitly:\neval \"$(ctx activate /abs/path/to/.context)\"\n\n# Clear the binding:\neval \"$(ctx deactivate)\"\n

            ctx activate validates paths strictly: the target must exist, be a directory, and contain at least one canonical context file (CONSTITUTION.md or TASKS.md). It refuses to emit a binding when multiple upward candidates are visible; pick one explicitly in that case.

            Under the hood, the emitted line is just:

            export CTX_DIR='/abs/path/to/.context'\n

            So you can copy it into your .zshrc / .bashrc if you want the binding permanent for a given shell setup. Better: use direnv with a per-project .envrc.

            ","path":["Recipes","Getting Started","Activating a Context Directory"],"tags":[]},{"location":"recipes/activating-context/#2-ctx_dir-env-var","level":3,"title":"2. CTX_DIR Env Var","text":"

            If you already know the path, export it directly:

            export CTX_DIR=/abs/path/to/.context\nctx status\n

            CTX_DIR is the same variable ctx activate writes; activate is just a convenience that figures out the path for you.

            ","path":["Recipes","Getting Started","Activating a Context Directory"],"tags":[]},{"location":"recipes/activating-context/#3-inline-one-shot","level":3,"title":"3. Inline One-Shot","text":"

            For one-shot commands (CI jobs, scripts, debugging a specific project without changing your shell state), prefix the binding inline:

            CTX_DIR=/abs/path/to/.context ctx status\n

            This binds CTX_DIR for that invocation only.

            CTX_DIR must be an absolute path with .context as its basename. Relative paths and other names are rejected on first use; the basename guard catches the common footgun (export CTX_DIR=$(pwd)) before stray writes can leak to the project root.

            ","path":["Recipes","Getting Started","Activating a Context Directory"],"tags":[]},{"location":"recipes/activating-context/#for-ci-and-scripts","level":2,"title":"For CI and Scripts","text":"

            Do not rely on shell activation in automated flows. Set CTX_DIR explicitly at the top of the script:

            #!/usr/bin/env bash\nset -euo pipefail\n\nexport CTX_DIR=\"$GITHUB_WORKSPACE/.context\"\nctx status\nctx drift\n
            ","path":["Recipes","Getting Started","Activating a Context Directory"],"tags":[]},{"location":"recipes/activating-context/#for-claude-code-users","level":2,"title":"For Claude Code Users","text":"

            The ctx plugin's hooks are generated with CTX_DIR=\"$CLAUDE_PROJECT_DIR/.context\" prefixed to each command, so hook-driven ctx invocations resolve correctly without any per-session setup. You only need to activate manually when running ctx yourself in a terminal.

            ","path":["Recipes","Getting Started","Activating a Context Directory"],"tags":[]},{"location":"recipes/activating-context/#one-project-one-context","level":2,"title":"One Project, One .context/","text":"

            The context directory is not a free-floating bag of files. It is pinned to a project by contract: filepath.Dir(ContextDir()) is the project root. That parent directory is what ctx sync, ctx drift, and the memory-drift hook scan for code, secret files, and MEMORY.md respectively.

            The practical consequences:

            • Don't share one .context/ across multiple projects. It holds per-project journals, per-session state, and per-project secrets. Pointing two codebases at the same directory corrupts all three.
            • If you want to share knowledge (CONSTITUTION, CONVENTIONS, ARCHITECTURE) across projects, use ctx hub. It cherry-picks entries at the right granularity and keeps the per-project bits where they belong.
            • The CTX_DIR you activate is implicitly a project-root declaration. Setting CTX_DIR=/weird/place/.context means you're telling ctx the project root is /weird/place/. That's your call to make; ctx does not police it.
            ","path":["Recipes","Getting Started","Activating a Context Directory"],"tags":[]},{"location":"recipes/activating-context/#recommended-layout","level":3,"title":"Recommended Layout","text":"
            ~/WORKSPACE/my-to-do-list\n  ├── .git\n  ├── .context          ← owned by this project; do not share\n  ├── ideas\n  │   └── ...\n  ├── Makefile\n  ├── Makefile.ctx\n  └── specs\n      └── ...\n

            .context/ sits at the project root, next to .git. ctx activate binds to it; every ctx subsystem reads the project from its parent.

            ","path":["Recipes","Getting Started","Activating a Context Directory"],"tags":[]},{"location":"recipes/activating-context/#why-not-walk-up-automatically","level":2,"title":"Why Not Walk Up Automatically?","text":"

            Nested projects, submodules, rogue agent-created .context/ directories, and sub-agent sessions all produced silent misrouting under the old walk-up model. See the explicit-context-dir spec and the analysis doc for the full reasoning.

            The short version: ctx decided to stop guessing and require the caller to declare. Every other decision flows from there.

            ","path":["Recipes","Getting Started","Activating a Context Directory"],"tags":[]},{"location":"recipes/architecture-deep-dive/","level":1,"title":"Architecture Deep Dive","text":"","path":["Architecture Deep Dive"],"tags":[]},{"location":"recipes/architecture-deep-dive/#the-problem","level":2,"title":"The Problem","text":"

            Understanding a codebase at the surface level is easy. Understanding where it will break under real-world conditions takes three passes: mapping what exists, quantifying how it connects, and hunting for where it silently fails. Most teams stop at the first pass.

            ","path":["Architecture Deep Dive"],"tags":[]},{"location":"recipes/architecture-deep-dive/#tldr","level":2,"title":"TL;DR","text":"
            # Pass 1: Map the system\n/ctx-architecture\n\n# Pass 2: Enrich with code intelligence\n/ctx-architecture-enrich\n\n# Pass 3: Hunt for failure modes\n/ctx-architecture-failure-analysis\n

            Each pass builds on the previous one. Run them in order. The output accumulates in .context/; each pass reads the prior artifacts and extends them.

            ","path":["Architecture Deep Dive"],"tags":[]},{"location":"recipes/architecture-deep-dive/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Tool Type Purpose /ctx-architecture Skill Map modules, dependencies, data flow, patterns /ctx-architecture-enrich Skill Verify blast radius and flows with code intel /ctx-architecture-failure-analysis Skill Generate falsifiable incident hypotheses ctx drift CLI Detect stale paths and broken references ctx status CLI Quick structural overview","path":["Architecture Deep Dive"],"tags":[]},{"location":"recipes/architecture-deep-dive/#the-workflow","level":2,"title":"The Workflow","text":"","path":["Architecture Deep Dive"],"tags":[]},{"location":"recipes/architecture-deep-dive/#pass-1-map-what-exists","level":3,"title":"Pass 1: Map What Exists","text":"
            /ctx-architecture\n

            Produces:

            • ARCHITECTURE.md: succinct project map (< 4000 tokens), loaded at every session start
            • DETAILED_DESIGN*.md: deep per-module reference with exported API, data flow, danger zones, extension points
            • CHEAT-SHEETS.md: lifecycle flow diagrams
            • map-tracking.json: coverage state with confidence scores

            This pass forces deep code reading. No shortcuts, no code intelligence tools; the agent reads every module it analyzes. That forced reading is what makes the subsequent passes useful.

            When to run: First time on a codebase, or after significant structural changes (new packages, moved files, changed dependencies).

            Principal mode: Add principal to get strategic analysis (ARCHITECTURE-PRINCIPAL.md, DANGER-ZONES.md from P4):

            /ctx-architecture principal\n
            ","path":["Architecture Deep Dive"],"tags":[]},{"location":"recipes/architecture-deep-dive/#pass-2-enrich-with-code-intelligence","level":3,"title":"Pass 2: Enrich with Code Intelligence","text":"
            /ctx-architecture-enrich\n

            Takes the Pass 1 artifacts as baseline and layers on verified, graph-backed data from GitNexus:

            • Blast radius numbers for key functions
            • Execution flow traces through hot paths
            • Domain clustering validation
            • Registration site discovery

            This pass does not replace reading; it quantifies what reading found. If Pass 1 says \"module X depends on module Y,\" Pass 2 says \"module X has 47 callers in module Y, and changing function Z would affect 12 downstream consumers.\"

            When to run: After Pass 1, when you need quantified confidence for refactoring decisions or risk assessment.

            Requires: GitNexus MCP server connected.

            ","path":["Architecture Deep Dive"],"tags":[]},{"location":"recipes/architecture-deep-dive/#pass-3-hunt-for-failure-modes","level":3,"title":"Pass 3: Hunt for Failure Modes","text":"
            /ctx-architecture-failure-analysis\n

            The adversarial pass. Reads all prior artifacts, then systematically hunts for correctness bugs across 9 failure categories:

            1. Concurrency (races, deadlocks, goroutine leaks)
            2. Ordering assumptions (init, registration, shutdown)
            3. Cache staleness (TTL-less, read-your-writes, cross-process)
            4. Fan-out amplification (N+1, retry storms)
            5. Ownership and lifecycle (orphans, double-close)
            6. Error handling (silent swallowing, partial failure)
            7. Scaling cliffs (quadratic, unbounded, global locks)
            8. Idempotency failures (duplicate processing, retry mutations)
            9. State machine drift (illegal states, unvalidated transitions)

            Every finding must meet an evidence standard: code path, trigger, failure path, silence reason, and code evidence. A mandatory challenge phase attempts to disprove each finding before it is accepted. Findings carry a confidence level (High/Medium/Low) and explicit risk score.

            Produces DANGER-ZONES.md, a ranked inventory of findings split into Critical and Elevated tiers.

            When to run: Before releases, after major refactors, when investigating incident categories, or when onboarding.

            ","path":["Architecture Deep Dive"],"tags":[]},{"location":"recipes/architecture-deep-dive/#what-you-get","level":2,"title":"What You Get","text":"

            After all three passes, .context/ contains:

            File From Purpose ARCHITECTURE.md Pass 1 System map (session-start context) DETAILED_DESIGN*.md Pass 1 Module-level deep reference CHEAT-SHEETS.md Pass 1 Lifecycle flow diagrams map-tracking.json Pass 1 Coverage and confidence data CONVERGENCE-REPORT.md Pass 1 What's covered, what's not DANGER-ZONES.md Pass 3 Ranked failure hypotheses

            Pass 2 enriches Pass 1 artifacts in-place rather than creating new files.

            ","path":["Architecture Deep Dive"],"tags":[]},{"location":"recipes/architecture-deep-dive/#tips","level":2,"title":"Tips","text":"
            • Run Pass 1 with focus areas if the codebase is large. The skill asks what to go deep on, so name the modules you're about to change.
            • You don't need all three passes every time. Pass 1 is the foundation. Pass 2 and 3 are for when you need quantified confidence or adversarial rigor.
            • Re-run Pass 1 incrementally. It tracks coverage in map-tracking.json and only re-analyzes stale modules.
            • Pass 3 is most valuable before releases. The ranked DANGER-ZONES.md is a pre-release checklist.
            • The trilogy maps to a question progression: How does it work? How well does it connect? Where will it break?
            ","path":["Architecture Deep Dive"],"tags":[]},{"location":"recipes/architecture-deep-dive/#see-also","level":2,"title":"See Also","text":"

            See also: Detecting and Fixing Context Drift to keep architecture artifacts fresh between deep-dive sessions.

            See also: Detecting and Fixing Context Drift for structural checks that complement architecture analysis.

            ","path":["Architecture Deep Dive"],"tags":[]},{"location":"recipes/autonomous-loops/","level":1,"title":"Running an Unattended AI Agent","text":"","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#the-problem","level":2,"title":"The Problem","text":"

            You have a project with a clear list of tasks, and you want an AI agent to work through them autonomously: overnight, unattended, without you sitting at the keyboard.

            Each iteration needs to remember what the previous one did, mark tasks as completed, and know when to stop.

            Without persistent memory, every iteration starts fresh and the loop collapses. With ctx, each iteration can pick up where the last one left off, but only if the agent persists its context as part of the work.

            Unattended operation works because the agent treats context persistence as a first-class deliverable, not an afterthought.

            ","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#tldr","level":2,"title":"TL;DR","text":"
            ctx init                                    # 1. init context\n# Edit TASKS.md with phased work items\nctx loop --tool claude --max-iterations 10  # 2. generate loop.sh\n./loop.sh 2>&1 | tee /tmp/loop.log &        # 3. run the loop\nctx watch --log /tmp/loop.log               # 4. process context updates\n# Next morning:\nctx status && ctx load                      # 5. review the results\n

            Read on for permissions, isolation, and completion signals.

            ","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Tool Type Purpose ctx init Command Initialize project context and prompt templates ctx loop Command Generate the loop shell script ctx watch Command Monitor AI output and persist context updates ctx load Command Display assembled context (for debugging) /ctx-loop Skill Generate loop script from inside Claude Code /ctx-implement Skill Execute a plan step-by-step with verification","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#the-workflow","level":2,"title":"The Workflow","text":"","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#step-1-initialize-for-unattended-operation","level":3,"title":"Step 1: Initialize for Unattended Operation","text":"

            Start by creating a .context/ directory configured so the agent can work without human input.

            ctx init\n

            This creates .context/ with the template files (including a loop prompt at .context/loop.md), and seeds Claude Code permissions in .claude/settings.local.json. Install the ctx plugin for hooks and skills.

            ","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#step-2-populate-tasksmd-with-phased-work","level":3,"title":"Step 2: Populate TASKS.md with Phased Work","text":"

            Open .context/TASKS.md and organize your work into phases. The agent works through these systematically, top to bottom, using priority tags to break ties.

            # Tasks\n\n## Phase 1: Foundation\n\n- [ ] Set up project structure and build system `#priority:high`\n- [ ] Configure testing framework `#priority:high`\n- [ ] Create CI pipeline `#priority:medium`\n\n## Phase 2: Core Features\n\n- [ ] Implement user registration `#priority:high`\n- [ ] Add email verification `#priority:high`\n- [ ] Create password reset flow `#priority:medium`\n\n## Phase 3: Hardening\n\n- [ ] Add rate limiting to API endpoints `#priority:medium`\n- [ ] Improve error messages `#priority:low`\n- [ ] Write integration tests `#priority:medium`\n

            Phased organization matters because it gives the agent natural boundaries. Phase 1 tasks should be completable without Phase 2 code existing yet.

            ","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#step-3-configure-the-loop-prompt","level":3,"title":"Step 3: Configure the Loop Prompt","text":"

            The loop prompt at .context/loop.md instructs the agent to operate autonomously:

            1. Read .context/CONSTITUTION.md first (hard rules, never violated)
            2. Load context from .context/ files
            3. Pick one task per iteration
            4. Complete the task and update context files
            5. Commit changes (including .context/)
            6. Signal status with a completion signal

            You can customize .context/loop.md for your project. The critical parts are the one-task-per-iteration discipline, proactive context persistence, and completion signals at the end:

            ## Signal Status\n\nEnd your response with exactly ONE of:\n\n* `SYSTEM_CONVERGED`: All tasks in `TASKS.md` are complete (*this is the\n  signal the loop script detects by default*)\n* `SYSTEM_BLOCKED`: Cannot proceed, need human input (explain why)\n* (*no signal*): More work remains, continue to the next iteration\n\nNote: the loop script only checks for `SYSTEM_CONVERGED` by default.\n`SYSTEM_BLOCKED` is a convention for the human reviewing the log.\n
            ","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#step-4-configure-permissions","level":3,"title":"Step 4: Configure Permissions","text":"

            An unattended agent needs permission to use tools without prompting. By default, Claude Code asks for confirmation on file writes, bash commands, and other operations, which stops the loop and waits for a human who is not there.

            There are two approaches.

            ","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#option-a-explicit-allowlist-recommended","level":4,"title":"Option A: Explicit Allowlist (Recommended)","text":"

            Grant only the permissions the agent needs. In .claude/settings.local.json:

            {\n  \"permissions\": {\n    \"allow\": [\n      \"Bash(make:*)\",\n      \"Bash(go:*)\",\n      \"Bash(git:*)\",\n      \"Bash(ctx:*)\",\n      \"Read\",\n      \"Write\",\n      \"Edit\"\n    ]\n  }\n}\n

            Adjust the Bash patterns for your project's toolchain. The agent can run make, go, git, and ctx commands but cannot run arbitrary shell commands.

            This is recommended even in sandboxed environments because it limits blast radius.

            ","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#option-b-skip-all-permission-checks","level":4,"title":"Option B: Skip All Permission Checks","text":"

            Claude Code supports a --dangerously-skip-permissions flag that disables all permission prompts:

            claude --dangerously-skip-permissions -p \"$(cat .context/loop.md)\"\n

            This Flag Means What It Says

            With --dangerously-skip-permissions, the agent can execute any shell command, write to any file, and make network requests without confirmation.

            Only use this on a sandboxed machine: ideally a virtual machine with no access to host credentials, no SSH keys, and no access to production systems.

            If you would not give an untrusted intern sudo on this machine, do not use this flag.

            ","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#enforce-isolation-at-the-os-level","level":4,"title":"Enforce Isolation at the OS Level","text":"

            The only controls an agent cannot override are the ones enforced by the operating system, the container runtime, or the hypervisor.

            Do Not Skip This Section

            This is not optional hardening:

            An unattended agent with unrestricted OS access is an unattended shell with unrestricted OS access.

            The allowlist above is a strong first layer, but do not rely on a single runtime boundary.

            For unattended runs, enforce isolation at the infrastructure level:

            Layer What to enforce User account Run the agent as a dedicated unprivileged user with no sudo access and no membership in privileged groups (docker, wheel, adm). Filesystem Restrict the project directory via POSIX permissions or ACLs. The agent should have no access to other users' files or system directories. Container Run inside a Docker/Podman sandbox. Mount only the project directory. Drop capabilities (--cap-drop=ALL). Disable network if not needed (--network=none). Never mount the Docker socket and do not run privileged containers. Prefer rootless containers. Virtual machine Prefer a dedicated VM with no shared folders, no host passthrough, and no keys to other machines. Network If the agent does not need the internet, disable outbound access entirely. If it does, restrict to specific domains via firewall rules. Resource limits Apply CPU, memory, and disk limits (cgroups/container limits). A runaway loop should not fill disk or consume all RAM. Self-modification Make instruction files read-only. CLAUDE.md, .claude/settings.local.json, and .context/CONSTITUTION.md should not be writable by the agent user. If using project-local hooks, protect those too.

            A minimal Docker setup for overnight runs:

            docker run --rm \\\n  --network=none \\\n  --cap-drop=ALL \\\n  --memory=4g \\\n  --cpus=2 \\\n  -v /path/to/project:/workspace \\\n  -w /workspace \\\n  your-dev-image \\\n  ./loop.sh 2>&1 | tee /tmp/loop.log\n

            Defense in Depth

            Use multiple layers together: OS-level isolation (the boundary the agent cannot cross), a permission allowlist (what Claude Code will do within that boundary), and CONSTITUTION.md (a soft nudge for the common case).

            ","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#step-5-generate-the-loop-script","level":3,"title":"Step 5: Generate the Loop Script","text":"

            Use ctx loop to generate a loop.sh tailored to your AI tool:

            # Generate for Claude Code with a 10-iteration cap\nctx loop --tool claude --max-iterations 10\n\n# Generate for Aider\nctx loop --tool aider --max-iterations 10\n\n# Custom prompt file and output filename\nctx loop --tool claude --prompt my-prompt.md --output my-loop.sh\n

            The generated script reads .context/loop.md, runs the tool, checks for completion signals, and loops until done or the cap is reached.

            You can also use the /ctx-loop skill from inside Claude Code.

            A Shell Loop Is the Best Practice

            The shell loop approach spawns a fresh AI process each iteration, so the only state that carries between iterations is what lives in .context/ and git.

            Claude Code's built-in /loop runs iterations within the same session, which can allow context window state to leak between iterations. This can be convenient for short runs, but it is less reliable for unattended loops.

            See Shell Loop vs Built-in Loop for details.

            ","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#step-6-run-with-watch-mode","level":3,"title":"Step 6: Run with Watch Mode","text":"

            Open two terminals. In the first, run the loop. In the second, run ctx watch to process context updates from the AI output.

            # Terminal 1: Run the loop\n./loop.sh 2>&1 | tee /tmp/loop.log\n\n# Terminal 2: Watch for context updates\nctx watch --log /tmp/loop.log\n

            The watch command parses XML context-update commands from the AI output and applies them:

            <context-update type=\"complete\">user registration</context-update>\n<context-update type=\"learning\"\n  context=\"Setting up user registration\"\n  lesson=\"Email verification needs SMTP configured\"\n  application=\"Add SMTP setup to deployment checklist\"\n>SMTP Requirement</context-update>\n
            ","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#step-7-completion-signals-end-the-loop","level":3,"title":"Step 7: Completion Signals End the Loop","text":"

            The generated script checks for one completion signal per run. By default this is SYSTEM_CONVERGED. You can change it with the --completion flag:

            ctx loop --tool claude --completion BOOTSTRAP_COMPLETE --max-iterations 5\n

            The following signals are conventions used in .context/loop.md:

            Signal Convention How the script handles it SYSTEM_CONVERGED All tasks in TASKS.md are done Detected by default (--completion default value) SYSTEM_BLOCKED Agent cannot proceed Only detected if you set --completion to this BOOTSTRAP_COMPLETE Initial scaffolding done Only detected if you set --completion to this

            The script uses grep -q on the agent's output, so any string works as a signal. If you need to detect multiple signals in one run, edit the generated loop.sh to add additional grep checks.

            When you return in the morning, check the log and the context files:

            tail -100 /tmp/loop.log\nctx status\nctx load\n
            ","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#step-8-use-ctx-implement-for-plan-execution","level":3,"title":"Step 8: Use /ctx-implement for Plan Execution","text":"

            Within each iteration, the agent can use /ctx-implement to execute multi-step plans with verification between steps. This is useful for complex tasks that touch multiple files.

            The skill breaks a plan into atomic, verifiable steps:

            Step 1/6: Create user model .................. OK\nStep 2/6: Add database migration ............. OK\nStep 3/6: Implement registration handler ..... OK\nStep 4/6: Write unit tests ................... OK\nStep 5/6: Run test suite ..................... FAIL\n  -> Fixed: missing test dependency\n  -> Re-verify ............................... OK\nStep 6/6: Update TASKS.md .................... OK\n

            Each step is verified (build, test, syntax check) before moving to the next.

            ","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#putting-it-all-together","level":2,"title":"Putting It All Together","text":"

            A typical overnight run:

            ctx init\n# Edit TASKS.md and .context/loop.md\n\nctx loop --tool claude --max-iterations 20\n\n./loop.sh 2>&1 | tee /tmp/loop.log &\nctx watch --log /tmp/loop.log\n\n# Next morning:\nctx status\nctx load\n
            ","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#why-autonomous-loops-work-proactive-context-persistence","level":2,"title":"Why Autonomous Loops Work: Proactive Context Persistence","text":"

            The autonomous loop pattern works because the agent persists context as part of the job.

            Without proactive persistence, the loop degrades into disconnected iterations that repeat work, contradict decisions, and lose track of progress. The agent cannot rely on a human to prompt it. It must treat context updates as part of every task, not as a separate step.

            ","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#the-agent-playbook-self-check-drives-autonomy","level":3,"title":"The Agent Playbook Self-Check Drives Autonomy","text":"

            The Agent Playbook defines a self-check prompt the agent applies continuously:

            \"If this session ended right now, would the next session know what happened?\"\n

            In interactive sessions, a human might remind the agent to save context. When the agent runs unattended, nobody is there to remind it.

            The self-check becomes the trigger: after meaningful work, the agent evaluates whether the context files reflect reality and updates them immediately if they do not.

            ","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#what-the-agent-does-proactively-between-iterations","level":3,"title":"What the Agent Does Proactively Between Iterations","text":"

            At milestones within an iteration, the agent persists without waiting for instructions:

            Milestone What the agent does proactively Completes a task Marks it [x] in TASKS.md Discovers a gotcha Adds it to LEARNINGS.md Makes a design choice Records it in DECISIONS.md with rationale Identifies follow-up work Creates new tasks in TASKS.md with #added timestamp Hits an unexpected failure Documents the root cause before moving on","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#example-what-proactive-persistence-looks-like","level":3,"title":"Example: What Proactive Persistence Looks Like","text":"

            Here is what the agent does during a single iteration beyond writing code:

            Iteration 4:\n  1. Read TASKS.md -> pick \"Add email verification\" (#priority:high)\n  2. Add #started:2026-01-25-030012 to the task\n  3. Implement the feature (code, tests, docs if needed)\n  4. Tests pass -> mark task [x]\n  5. Add learning: \"SMTP config must be set before verification handler registers. Order matters in init().\"\n  6. Add decision: \"Use token-based verification links (not codes) because links work better in automated tests.\"\n  7. Create follow-up task: \"Add rate limiting to verification endpoint\" #added:...\n  8. Commit all changes including `.context/`\n  9. No signal emitted -> loop continues to iteration 5\n

            Steps 2, 4, 5, 6, and 7 are proactive context persistence:

            The agent was not asked to do any of them.

            ","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#context-persistence-at-milestones","level":3,"title":"Context Persistence at Milestones","text":"

            For long autonomous runs, the agent persists context at natural boundaries, often at phase transitions or after completing a cluster of related tasks. It updates TASKS.md, DECISIONS.md, and LEARNINGS.md as it goes.

            If the loop crashes at 4 AM, the context files tell you exactly where to resume. You can also use ctx journal source to review the session transcripts.

            ","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#the-persistence-contract","level":3,"title":"The Persistence Contract","text":"

            The autonomous loop has an implicit contract:

            1. Every iteration reads context: TASKS.md, DECISIONS.md, LEARNINGS.md
            2. Every iteration writes context: task updates, new learnings, decisions
            3. Every commit includes .context/ so the next iteration sees changes
            4. Context stays current: if the loop stopped right now, nothing important is lost

            Break any part of this contract and the loop degrades.

            ","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#tips","level":2,"title":"Tips","text":"

            Markdown Is Not Enforcement

            Your real guardrails are permissions and isolation, not Markdown. CONSTITUTION.md can nudge the agent, but it is probabilistic.

            The permission allowlist and OS isolation are deterministic:

            For unattended runs, trust the sandbox and the allowlist, not the prose.

            • Start with a small iteration cap. Use --max-iterations 5 on your first run.
            • Keep tasks atomic. Each task should be completable in a single iteration.
            • Check signal discipline. If the loop runs forever, the agent is not emitting SYSTEM_CONVERGED or SYSTEM_BLOCKED. Make the signal requirement explicit in .context/loop.md.
            • Commit after context updates. Finish code, update .context/, commit including .context/, then signal.
            • Set up webhook notifications to get notified when the loop completes, hits max iterations, or when hooks fire nudges. The generated loop script includes ctx hook notify calls automatically.
            ","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#next-up","level":2,"title":"Next Up","text":"

            When to Use a Team of Agents →: Decision framework for choosing between a single agent, parallel worktrees, and a full agent team.

            ","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/autonomous-loops/#see-also","level":2,"title":"See Also","text":"
            • Autonomous Loops: loop pattern, prompt templates, troubleshooting
            • CLI Reference: ctx loop: flags and options
            • CLI Reference: ctx watch: watch mode details
            • CLI Reference: ctx init: init flags
            • The Complete Session: interactive workflow
            • Tracking Work Across Sessions: structuring TASKS.md
            ","path":["Recipes","Agents and Automation","Running an Unattended AI Agent"],"tags":[]},{"location":"recipes/building-skills/","level":1,"title":"Building Project Skills","text":"","path":["Recipes","Agents and Automation","Building Project Skills"],"tags":[]},{"location":"recipes/building-skills/#the-problem","level":2,"title":"The Problem","text":"

            You have workflows your agent needs to repeat across sessions: a deploy checklist, a review protocol, a release process. Each time, you re-explain the steps. The agent gets it mostly right but forgets edge cases you corrected last time.

            Skills solve this by encoding domain knowledge into a reusable document the agent loads automatically when triggered. A skill is not code - it is a structured prompt that captures what took you sessions to learn.

            ","path":["Recipes","Agents and Automation","Building Project Skills"],"tags":[]},{"location":"recipes/building-skills/#tldr","level":2,"title":"TL;DR","text":"
            /ctx-skill-create\n

            The skill-creator walks you through: identify a repeating workflow, draft a skill, test with realistic prompts, iterate until it triggers correctly and produces good output.

            ","path":["Recipes","Agents and Automation","Building Project Skills"],"tags":[]},{"location":"recipes/building-skills/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Tool Type Purpose /ctx-skill-create Skill Interactive skill creation and improvement workflow ctx init Command Deploys template skills to .claude/skills/ on first setup","path":["Recipes","Agents and Automation","Building Project Skills"],"tags":[]},{"location":"recipes/building-skills/#the-workflow","level":2,"title":"The Workflow","text":"","path":["Recipes","Agents and Automation","Building Project Skills"],"tags":[]},{"location":"recipes/building-skills/#step-1-identify-a-repeating-pattern","level":3,"title":"Step 1: Identify a Repeating Pattern","text":"

            Good skill candidates:

            • Checklists you repeat: deploy steps, release prep, code review
            • Decisions the agent gets wrong: if you keep correcting the same behavior, encode the correction
            • Multi-step workflows: anything with a sequence of commands and conditional branches
            • Domain knowledge: project-specific terminology, architecture constraints, or conventions the agent cannot infer from code alone

            Not good candidates: one-off instructions, things the platform already handles (file editing, git operations), or tasks too narrow to reuse.

            ","path":["Recipes","Agents and Automation","Building Project Skills"],"tags":[]},{"location":"recipes/building-skills/#step-2-create-the-skill","level":3,"title":"Step 2: Create the Skill","text":"

            Invoke the skill-creator:

            You: \"I want a skill for our deploy process\"\n\nAgent: [Asks about the workflow: what steps, what tools,\n        what edge cases, what the output should look like]\n

            Or capture a workflow you just did:

            You: \"Turn what we just did into a skill\"\n\nAgent: [Extracts the steps from conversation history,\n        confirms understanding, drafts the skill]\n

            The skill-creator produces a SKILL.md file in .claude/skills/your-skill/.

            ","path":["Recipes","Agents and Automation","Building Project Skills"],"tags":[]},{"location":"recipes/building-skills/#step-3-test-with-realistic-prompts","level":3,"title":"Step 3: Test with Realistic Prompts","text":"

            The skill-creator proposes 2-3 test prompts - the kind of thing a real user would say. It runs each one and shows the result alongside a baseline (same prompt without the skill) so you can compare.

            Agent: \"Here are test prompts I'd try:\n        1. 'Deploy to staging'\n        2. 'Ship the hotfix'\n        3. 'Run the release checklist'\n        Want to adjust these?\"\n
            ","path":["Recipes","Agents and Automation","Building Project Skills"],"tags":[]},{"location":"recipes/building-skills/#step-4-iterate-on-the-description","level":3,"title":"Step 4: Iterate on the Description","text":"

            The description field in frontmatter determines when a skill triggers. Claude tends to undertrigger - descriptions need to be specific and slightly \"pushy\":

            # Weak - too vague, will undertrigger\ndescription: \"Use for deployments\"\n\n# Strong - covers situations and synonyms\ndescription: >-\n  Use when deploying to staging or production, running the release\n  checklist, or when the user says 'ship it', 'deploy this', or\n  'push to prod'. Also use after merging to main when a deploy\n  is expected.\n

            The skill-creator helps you tune this iteratively.

            ","path":["Recipes","Agents and Automation","Building Project Skills"],"tags":[]},{"location":"recipes/building-skills/#step-5-deploy-as-template-optional","level":3,"title":"Step 5: Deploy as Template (Optional)","text":"

            If the skill should be available to all projects (not just this one), place it in internal/assets/claude/skills/ so ctx init deploys it to new projects automatically.

            Most project-specific skills stay in .claude/skills/ and travel with the repo.

            ","path":["Recipes","Agents and Automation","Building Project Skills"],"tags":[]},{"location":"recipes/building-skills/#skill-anatomy","level":2,"title":"Skill Anatomy","text":"
            my-skill/\n  SKILL.md         # Required: frontmatter + instructions (<500 lines)\n  scripts/         # Optional: deterministic code the skill can execute\n  references/      # Optional: detail loaded on demand (not always)\n  assets/          # Optional: output templates, not loaded into context\n

            Key sections in SKILL.md:

            Section Purpose Required? Frontmatter Name, description (trigger) Yes When to Use Positive triggers Yes When NOT to Use Prevents false activations Yes Process Steps and commands Yes Examples Good/bad output pairs Recommended Quality Checklist Verify before reporting completion For complex skills","path":["Recipes","Agents and Automation","Building Project Skills"],"tags":[]},{"location":"recipes/building-skills/#tips","level":2,"title":"Tips","text":"
            • Description is everything. A great skill with a vague description never fires. Spend time on trigger coverage - synonyms, concrete situations, edge cases.
            • Stay under 500 lines. If your skill is growing past this, move detail into references/ files and point to them from SKILL.md.
            • Do not duplicate the platform. If the agent already knows how to do something (edit files, run git commands), do not restate it. Tag paragraphs as Expert/Activation/Redundant and delete Redundant ones.
            • Explain why, not just what. \"Sort by date because users want recent results first\" beats \"ALWAYS sort by date.\" The agent generalizes from reasoning better than from rigid rules.
            • Test negative triggers. Make sure the skill does not fire on unrelated prompts. A skill that activates too broadly becomes noise.
            ","path":["Recipes","Agents and Automation","Building Project Skills"],"tags":[]},{"location":"recipes/building-skills/#next-up","level":2,"title":"Next Up","text":"

            Parallel Agent Development with Git Worktrees →: Split work across multiple agents using git worktrees.

            ","path":["Recipes","Agents and Automation","Building Project Skills"],"tags":[]},{"location":"recipes/building-skills/#see-also","level":2,"title":"See Also","text":"
            • Skills Reference: full listing of all bundled and project-local skills
            • Guide Your Agent: how commands, skills, and conversational patterns work together
            • Design Before Coding: the four-skill chain for front-loading design work
            ","path":["Recipes","Agents and Automation","Building Project Skills"],"tags":[]},{"location":"recipes/claude-code-permissions/","level":1,"title":"Claude Code Permission Hygiene","text":"","path":["Recipes","Maintenance","Claude Code Permission Hygiene"],"tags":[]},{"location":"recipes/claude-code-permissions/#the-problem","level":2,"title":"The Problem","text":"

            Claude Code's .claude/settings.local.json controls what the agent can do without asking. Over time, this file accumulates one-off permissions from individual sessions: Exact commands with hardcoded paths, duplicate entries, and stale skill references.

            A noisy \"allowlist\" makes it harder to spot dangerous permissions and increases the surface area for unintended behavior.

            Since settings.local.json is .gitignored, it drifts independently of your codebase. There is no PR review, no CI check: just whatever you clicked \"Allow\" on.

            This recipe shows what a well-maintained permission file looks like and how to keep it clean.

            ","path":["Recipes","Maintenance","Claude Code Permission Hygiene"],"tags":[]},{"location":"recipes/claude-code-permissions/#tldr","level":2,"title":"TL;DR","text":"
            ctx init                            # seeds safe defaults\n/ctx-drift                          # detects missing/stale permissions\n/ctx-permission-sanitize            # audits for dangerous patterns\n

            See Recommended Defaults for the full list.

            ","path":["Recipes","Maintenance","Claude Code Permission Hygiene"],"tags":[]},{"location":"recipes/claude-code-permissions/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Command/Skill Role in this workflow ctx init Populates default ctx permissions /ctx-drift Detects missing or stale permission entries /ctx-permission-sanitize Audits for dangerous patterns (security-focused)","path":["Recipes","Maintenance","Claude Code Permission Hygiene"],"tags":[]},{"location":"recipes/claude-code-permissions/#recommended-defaults","level":2,"title":"Recommended Defaults","text":"

            After running ctx init, your settings.local.json will have the ctx defaults pre-populated. Here is an opinionated safe starting point for a Go project using ctx:

            {\n  \"permissions\": {\n    \"allow\": [\n      \"Bash(/tmp/ctx-*:*)\",\n      \"Bash(CGO_ENABLED=0 go build:*)\",\n      \"Bash(CGO_ENABLED=0 go test:*)\",\n      \"Bash(ctx:*)\",\n      \"Bash(git add:*)\",\n      \"Bash(git branch:*)\",\n      \"Bash(git check-ignore:*)\",\n      \"Bash(git checkout:*)\",\n      \"Bash(git commit:*)\",\n      \"Bash(git diff:*)\",\n      \"Bash(git log:*)\",\n      \"Bash(git remote:*)\",\n      \"Bash(git restore:*)\",\n      \"Bash(git show:*)\",\n      \"Bash(git stash:*)\",\n      \"Bash(git status:*)\",\n      \"Bash(git tag:*)\",\n      \"Bash(go build:*)\",\n      \"Bash(go fmt:*)\",\n      \"Bash(go test:*)\",\n      \"Bash(go vet:*)\",\n      \"Bash(golangci-lint run:*)\",\n      \"Bash(grep:*)\",\n      \"Bash(ls:*)\",\n      \"Bash(make:*)\",\n      \"Skill(ctx-convention-add)\",\n      \"Skill(ctx-decision-add)\",\n      \"Skill(ctx-learning-add)\",\n      \"Skill(ctx-task-add)\",\n      \"Skill(ctx-agent)\",\n      \"Skill(ctx-archive)\",\n      \"Skill(ctx-blog)\",\n      \"Skill(ctx-blog-changelog)\",\n      \"Skill(absorb)\",\n      \"Skill(ctx-commit)\",\n      \"Skill(ctx-drift)\",\n      \"Skill(ctx-implement)\",\n      \"Skill(ctx-journal-enrich)\",\n      \"Skill(ctx-journal-enrich-all)\",\n      \"Skill(ctx-loop)\",\n      \"Skill(ctx-next)\",\n      \"Skill(ctx-pad)\",\n      \"Skill(ctx-prompt-audit)\",\n      \"Skill(ctx-history)\",\n      \"Skill(ctx-reflect)\",\n      \"Skill(ctx-remember)\",\n      \"Skill(ctx-status)\",\n      \"Skill(ctx-worktree)\",\n      \"WebSearch\"\n    ],\n    \"deny\": [\n      \"Bash(sudo *)\",\n      \"Bash(git push *)\",\n      \"Bash(git push)\",\n      \"Bash(rm -rf /*)\",\n      \"Bash(rm -rf ~*)\",\n      \"Bash(curl *)\",\n      \"Bash(wget *)\",\n      \"Bash(chmod 777 *)\",\n      \"Read(**/.env)\",\n      \"Read(**/.env.*)\",\n      \"Read(**/*credentials*)\",\n      \"Read(**/*secret*)\",\n      \"Read(**/*.pem)\",\n      \"Read(**/*.key)\",\n      
\"Edit(**/.env)\",\n      \"Edit(**/.env.*)\"\n    ]\n  }\n}\n

            This Is a Starting Point, Not a Mandate

            Your project may need more or fewer entries.

            The goal is intentional permissions: Every entry should be there because you decided it belongs, not because you clicked \"Allow\" once during debugging.

            ","path":["Recipes","Maintenance","Claude Code Permission Hygiene"],"tags":[]},{"location":"recipes/claude-code-permissions/#design-principles","level":3,"title":"Design Principles","text":"

            Use wildcards for trusted binaries: If you trust the binary (your own project's CLI, make, go), a single wildcard like Bash(ctx:*) beats twenty subcommand entries. It reduces noise and means new subcommands work without re-prompting.

            Keep git commands granular: Unlike ctx or make, git has both safe commands (git log, git status) and destructive ones (git reset --hard, git clean -f). Listing safe commands individually prevents accidentally pre-approving dangerous ones.

            Pre-approve all ctx- skills: Skills shipped with ctx (Skill(ctx-*)) are safe to pre-approve. They are part of your project and you control their content. This prevents the agent from prompting on every skill invocation.

            ","path":["Recipes","Maintenance","Claude Code Permission Hygiene"],"tags":[]},{"location":"recipes/claude-code-permissions/#default-deny-rules","level":3,"title":"Default Deny Rules","text":"

            ctx init automatically populates permissions.deny with rules that block dangerous operations. Deny rules are evaluated before allow rules: A denied pattern always prompts the user, even if it also matches an allow entry.

            The defaults block:

            Pattern Why Bash(sudo *) Cannot enter password; will hang Bash(git push *) Must be explicit user action Bash(rm -rf /*) etc. Recursive delete of system/home directories Bash(curl *) / wget Arbitrary network requests Bash(chmod 777 *) World-writable permissions Read/Edit(**/.env*) Secrets and credentials Read(**/*.pem, *.key) Private keys

            Read/Edit Deny Rules

            Read() and Edit() deny rules have known upstream enforcement issues (claude-code#6631, #24846).

            They are included as defense-in-depth and intent documentation.

            Blocked by default deny rules: no action needed, ctx init handles these:

            Pattern Risk Bash(git push:*) Must be explicit user action Bash(sudo:*) Privilege escalation Bash(rm -rf:*) Recursive delete with no confirmation Bash(curl:*) / Bash(wget:*) Arbitrary network requests

            Requires manual discipline: Never add these to allow:

            Pattern Risk Bash(git reset:*) Can discard uncommitted work Bash(git clean:*) Deletes untracked files Skill(ctx-permission-sanitize) Edits this file: self-modification vector Skill(release) Runs the release pipeline: high impact","path":["Recipes","Maintenance","Claude Code Permission Hygiene"],"tags":[]},{"location":"recipes/claude-code-permissions/#hooks-regex-safety-net","level":2,"title":"Hooks: Regex Safety Net","text":"

            Deny rules handle prefix-based blocking natively. Hooks complement them by catching patterns that require regex matching: Things deny rules can't express.

            The ctx plugin ships these blocking hooks:

            Hook What it blocks ctx system block-non-path-ctx Running ctx from wrong path

            Project-local hooks (not part of the plugin) catch regex edge cases:

            Hook What it blocks block-dangerous-commands.sh Mid-command sudo/git push (after &&), copies to bin dirs, absolute-path ctx

            Pre-Approved + Hook-Blocked = Silent Block

            If you pre-approve a command that a hook blocks, the user never sees the confirmation dialog. The agent gets a block response and must handle it, which is confusing.

            It's better not to pre-approve commands that hooks are designed to intercept.

            ","path":["Recipes","Maintenance","Claude Code Permission Hygiene"],"tags":[]},{"location":"recipes/claude-code-permissions/#the-maintenance-workflow","level":2,"title":"The Maintenance Workflow","text":"","path":["Recipes","Maintenance","Claude Code Permission Hygiene"],"tags":[]},{"location":"recipes/claude-code-permissions/#after-busy-sessions","level":3,"title":"After Busy Sessions","text":"

            Permissions accumulate fastest during debugging and exploration sessions. After a session where you clicked \"Allow\" many times:

            1. Open .claude/settings.local.json in your editor;
            2. Look for entries at the bottom of the allowlist (new entries append there);
            3. Delete anything that looks session-specific:
              • Exact commands with hardcoded paths,
              • Commands with literal string arguments,
              • Entries that duplicate an existing wildcard.

            See the Sanitize Permissions runbook for a step-by-step procedure.

            ","path":["Recipes","Maintenance","Claude Code Permission Hygiene"],"tags":[]},{"location":"recipes/claude-code-permissions/#periodically","level":3,"title":"Periodically","text":"

            Run /ctx-drift to catch permission drift:

            • Missing Bash(ctx:*) wildcard;
            • Missing Skill(ctx-*) entries for installed skills;
            • Stale Skill(ctx-*) entries for removed skills;
            • Granular Bash(ctx <subcommand>:*) entries that should be consolidated.

            Run /ctx-permission-sanitize to catch security issues:

            • Hook bypass patterns
            • Destructive commands
            • Overly broad permissions
            • Injection vectors
            ","path":["Recipes","Maintenance","Claude Code Permission Hygiene"],"tags":[]},{"location":"recipes/claude-code-permissions/#when-adding-new-skills","level":3,"title":"When Adding New Skills","text":"

            If you create a custom ctx-* skill, add its Skill() entry to the allowlist manually.

            ctx init only populates the default permissions: It won't pick up custom skills.

            ","path":["Recipes","Maintenance","Claude Code Permission Hygiene"],"tags":[]},{"location":"recipes/claude-code-permissions/#golden-image-snapshots","level":3,"title":"Golden Image Snapshots","text":"

            If manual cleanup is too tedious, use a golden image to automate it:

            Snapshot a curated permission set, then restore at session start to automatically drop session-accumulated permissions. See the Permission Snapshots recipe for the full workflow.

            ","path":["Recipes","Maintenance","Claude Code Permission Hygiene"],"tags":[]},{"location":"recipes/claude-code-permissions/#adapting-for-other-languages","level":2,"title":"Adapting for Other Languages","text":"

            The recommended defaults above are Go-specific. For other stacks, swap the build/test tooling:

            Node.js / TypeScript:

            \"Bash(npm run:*)\",\n\"Bash(npm test:*)\",\n\"Bash(npx:*)\",\n\"Bash(node:*)\"\n

            Python:

            \"Bash(pytest:*)\",\n\"Bash(python:*)\",\n\"Bash(pip show:*)\",\n\"Bash(ruff:*)\"\n

            Rust:

            \"Bash(cargo build:*)\",\n\"Bash(cargo test:*)\",\n\"Bash(cargo clippy:*)\",\n\"Bash(cargo fmt:*)\"\n

            The ctx, git, and skill entries remain the same across all stacks.

            ","path":["Recipes","Maintenance","Claude Code Permission Hygiene"],"tags":[]},{"location":"recipes/claude-code-permissions/#next-up","level":2,"title":"Next Up","text":"

            Permission Snapshots →: Save and restore permission baselines for reproducible setups.

            ","path":["Recipes","Maintenance","Claude Code Permission Hygiene"],"tags":[]},{"location":"recipes/claude-code-permissions/#see-also","level":2,"title":"See Also","text":"
            • Setting Up ctx Across AI Tools: full setup recipe including settings.local.json creation
            • Context Health: keeping .context/ files accurate
            • Sanitize Permissions runbook: manual cleanup procedure
            ","path":["Recipes","Maintenance","Claude Code Permission Hygiene"],"tags":[]},{"location":"recipes/configuration-profiles/","level":1,"title":"Configuration Profiles","text":"","path":["Recipes","Maintenance","Configuration Profiles"],"tags":[]},{"location":"recipes/configuration-profiles/#configuration-profiles","level":1,"title":"Configuration Profiles","text":"

            Switch between dev and base runtime configurations without editing .ctxrc by hand. Useful when you want verbose logging and webhook notifications during development, then clean defaults for normal sessions.

            Uses: ctx config switch, ctx config status, /ctx-config

            ","path":["Recipes","Maintenance","Configuration Profiles"],"tags":[]},{"location":"recipes/configuration-profiles/#how-it-works","level":2,"title":"How It Works","text":"

            The ctx repo ships two source profiles committed to git:

            File Profile Description .ctxrc.base base All defaults, notifications off .ctxrc.dev dev Verbose logging, webhook notifications on

            The working copy (.ctxrc) is gitignored. Switching profiles copies the source file over .ctxrc, so your runtime configuration is always a clean snapshot of one of the two sources.

            ","path":["Recipes","Maintenance","Configuration Profiles"],"tags":[]},{"location":"recipes/configuration-profiles/#switching-profiles","level":2,"title":"Switching Profiles","text":"
            # Switch to dev (verbose logging, notifications)\nctx config switch dev\n\n# Switch to base (defaults)\nctx config switch base\n\n# Toggle to the opposite profile\nctx config switch\n\n# \"prod\" is an alias for \"base\"\nctx config switch prod\n

            The detection heuristic checks for an uncommented notify: line in .ctxrc: present means dev, absent means base.

            ","path":["Recipes","Maintenance","Configuration Profiles"],"tags":[]},{"location":"recipes/configuration-profiles/#checking-the-active-profile","level":2,"title":"Checking the Active Profile","text":"
            ctx config status\n

            Output examples:

            active: dev (verbose logging enabled)\nactive: base (defaults)\nactive: none (.ctxrc does not exist)\n
            ","path":["Recipes","Maintenance","Configuration Profiles"],"tags":[]},{"location":"recipes/configuration-profiles/#typical-workflow","level":2,"title":"Typical Workflow","text":"
            1. Start of a debugging session: switch to dev for verbose logging and webhook notifications so you can trace hook activity and get push alerts.
            ctx config switch dev\n
            2. Work through the issue: hooks log verbosely, webhooks fire on key events (commits, ceremony nudges, drift warnings).

            3. Done debugging: switch back to base to silence the noise.

            ctx config switch base\n
            ","path":["Recipes","Maintenance","Configuration Profiles"],"tags":[]},{"location":"recipes/configuration-profiles/#customizing-profiles","level":2,"title":"Customizing Profiles","text":"

            Edit the source files directly:

            • .ctxrc.dev: add any .ctxrc keys you want active during development (e.g., log_level: debug, notify.events, notify.webhook_url).
            • .ctxrc.base: keep this minimal. It represents your \"production\" defaults.

            After editing a source file, re-run ctx config switch <profile> to apply the changes to the working copy.

            Commit Your Profiles

            Both .ctxrc.base and .ctxrc.dev should be committed to git so team members share the same profile definitions. The working copy .ctxrc stays gitignored.

            ","path":["Recipes","Maintenance","Configuration Profiles"],"tags":[]},{"location":"recipes/configuration-profiles/#using-the-skill","level":2,"title":"Using the Skill","text":"

            In a Claude Code session, say any of:

            • \"switch to dev mode\"
            • \"switch to base\"
            • \"what profile am I on?\"
            • \"toggle verbose logging\"

            The /ctx-config skill handles the rest.

            See also: ctx config reference, Configuration

            ","path":["Recipes","Maintenance","Configuration Profiles"],"tags":[]},{"location":"recipes/context-health/","level":1,"title":"Detecting and Fixing Drift","text":"","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/context-health/#the-problem","level":2,"title":"The Problem","text":"

            ctx files drift: you rename a package, delete a module, or finish a sprint, and suddenly ARCHITECTURE.md references paths that no longer exist, TASKS.md is 80 percent completed checkboxes, and CONVENTIONS.md describes patterns you stopped using two months ago.

            Stale context is worse than no context:

            An AI tool that trusts outdated references will hallucinate confidently.

            This recipe shows how to detect drift, fix it, and keep your .context/ directory lean and accurate.

            ","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/context-health/#tldr","level":2,"title":"TL;DR","text":"
            ctx drift                      # detect problems\nctx drift --fix                # auto-fix the easy ones\nctx sync --dry-run && ctx sync # reconcile after refactors\nctx compact --archive          # archive old completed tasks\nctx fmt                        # normalize line widths\nctx status                     # verify\n

            Or just ask your agent: \"Is our context clean?\"

            ","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/context-health/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Tool Type Purpose ctx drift Command Detect stale paths, missing files, violations ctx drift --fix Command Auto-fix simple issues ctx sync Command Reconcile context with codebase structure ctx compact Command Archive completed tasks, clean up empty sections ctx fmt Command Normalize context files to 80-char line width ctx status Command Quick health overview /ctx-drift Skill Structural plus semantic drift detection /ctx-architecture Skill Refresh ARCHITECTURE.md from actual codebase /ctx-status Skill In-session context summary /ctx-prompt-audit Skill Audit prompt quality and token efficiency","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/context-health/#the-workflow","level":2,"title":"The Workflow","text":"

            The best way to maintain context health is conversational: Ask your agent, guide it, and let it detect problems, explain them, and fix them with your approval. CLI commands exist for CI pipelines, scripting, and fine-grained control.

            For day-to-day maintenance, talk to your agent.

            Your Questions Reinforce the Pattern

            Asking \"is our context clean?\" does two things:

            • It triggers a drift check right now
            • It reinforces the habit

            This is reinforcement, not enforcement.

            Do not wait for the agent to be proactive on its own:

            Guide your agent, especially in early sessions.

            Over time, you will ask less and the agent will start offering more.

            ","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/context-health/#step-1-ask-your-agent","level":3,"title":"Step 1: Ask Your Agent","text":"

            The simplest way to check context health:

            Is our context clean?\nAnything stale?\nHow healthy are our context files?\n

            Or invoke the skill directly:

            /ctx-drift\n

            The agent performs two layers of analysis:

            Layer 1, structural checks (via ctx drift): Dead paths, missing files, completed task counts, constitution violations. Fast and programmatic.

            Layer 2, semantic analysis (agent-driven): Does CONVENTIONS.md describe patterns the code no longer follows? Does DECISIONS.md contain entries whose rationale no longer applies? Are there learnings about bugs that are now fixed? This is where the agent adds value the CLI cannot: It reads both context files and source code and compares them.

            The agent reports both layers together, explains each finding in plain language, and offers to fix what it can.

            ","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/context-health/#step-2-maintenance-at-session-start","level":3,"title":"Step 2: Maintenance at Session Start","text":"

            You do not need to ask explicitly.

            Using Claude Code

            ctx ships with Claude Code hooks that remind the agent at the right time to take initiative.

            These hooks nudge the agent to check context health at the session start, to offer to persist learnings before you quit, and to flag drift when it matters. The agent stays proactive without you having to prompt it:

            Agent: Good morning. I've loaded the context files. A few things\n       before we start:\n\n       - ARCHITECTURE.md references `pkg/auth/` which is now empty\n       - DECISIONS.md hasn't been updated in 40 days\n       - There are 18 completed tasks ready for archival\n\n       Want me to run a quick maintenance pass, or should we jump\n       straight into today's work?\n

            ☝️ This is what persistent, initiative-driven sessions feel like when context is treated as a system instead of a prompt.

            If the agent does not offer this on its own, a gentle nudge is enough:

            Anything stale before we start?\nHow's the context looking?\n

            This turns maintenance from a scheduled chore into a conversation that happens when it matters.

            ","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/context-health/#step-3-real-time-detection-during-work","level":3,"title":"Step 3: Real-Time Detection during Work","text":"

            Agents can notice drift while working: When a mismatch is directly in the path of their current task. If an agent reads ARCHITECTURE.md to find where to add a handler and internal/handlers/ doesn't exist, it will notice because the stale reference blocks its work:

            Agent: ARCHITECTURE.md references `internal/handlers/` but that directory\n       doesn't exist. I'll look at the actual source tree to find where\n       handlers live now.\n

            This happens reliably when the drift intersects the task. What is less reliable is the agent generalizing from one mismatch to \"there might be more stale references; let me run drift detection.\" That leap requires the agent to know /ctx-drift exists and to decide the current task should pause for maintenance.

            If you want that behavior, reinforce it:

            Good catch. Yes, run /ctx-drift and clean up any other stale references.\n

            Over time, agents that have seen this pattern will start offering proactively. But do not expect it from a cold start.

            ","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/context-health/#step-4-archival-and-cleanup","level":3,"title":"Step 4: Archival and Cleanup","text":"

            ctx drift detects when TASKS.md has more than 10 completed items and flags it as a staleness warning. Running ctx drift --fix archives completed tasks automatically.

            You can also run /ctx-archive to compact on demand.

            ","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/context-health/#knowledge-health-flow","level":3,"title":"Knowledge Health Flow","text":"

            Over time, LEARNINGS.md and DECISIONS.md accumulate entries that overlap or partially repeat each other. The check-persistence hook detects when entry counts exceed a configurable threshold and surfaces a nudge:

            \"LEARNINGS.md has 25+ entries. Consider running /ctx-consolidate to merge overlapping items.\"

            The consolidation workflow:

            1. Review: /ctx-consolidate groups entries by keyword similarity and presents candidate merges for your approval.
            2. Merge: Approved groups are combined into single entries that preserve the key information from each original.
            3. Archive: Originals move to .context/archive/, not deleted -- the full history is preserved in git and the archive directory.
            4. Verify: Run ctx drift after consolidation to confirm no cross-references were broken by the merge.

            This replaces ad-hoc cleanup with a repeatable, nudge-driven cycle: detect accumulation, review candidates, merge with approval, archive originals.

            See also: Knowledge Capture for the recording workflow that feeds into this maintenance cycle.

            ","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/context-health/#ctx-doctor-the-superset-check","level":2,"title":"ctx doctor: The Superset Check","text":"

            ctx doctor combines drift detection with hook auditing, configuration checks, event logging status, and token size reporting in a single command. If you want one command that covers structural health, hooks, and state:

            ctx doctor          # everything in one pass\nctx doctor --json   # machine-readable for scripting\n

            Use /ctx-doctor Too

            For agent-driven diagnosis that adds semantic analysis on top of the structural checks, use /ctx-doctor.

            See the Troubleshooting recipe for the full workflow.

            ","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/context-health/#cli-reference","level":2,"title":"CLI Reference","text":"

            The conversational approach above uses CLI commands under the hood. When you need direct control, use the commands directly.

            ","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/context-health/#ctx-drift","level":3,"title":"ctx drift","text":"

            Scan context files for structural problems:

            ctx drift\n

            Sample output:

            Drift Report\n============\n\nWarnings (3):\n  ARCHITECTURE.md:14  path \"internal/api/router.go\" does not exist\n  ARCHITECTURE.md:28  path \"pkg/auth/\" directory is empty\n  CONVENTIONS.md:9    path \"internal/handlers/\" not found\n\nViolations (1):\n  TASKS.md            31 completed tasks (recommend archival)\n\nStaleness:\n  DECISIONS.md        last modified 45 days ago\n  LEARNINGS.md        last modified 32 days ago\n\nExit code: 1 (warnings found)\n
            Level Meaning Action Warning Stale path references, missing files Fix or remove Violation Constitution rule heuristic failures, heavy clutter Fix soon Staleness Files not updated recently Review content

            Exit codes: 0 equals clean, 1 equals warnings, 3 equals violations.

            For CI integration:

            ctx drift --json | jq '.warnings | length'\n
            ","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/context-health/#ctx-drift-fix","level":3,"title":"ctx drift --fix","text":"

            Auto-fix mechanical issues:

            ctx drift --fix\n

            This handles removing dead path references, updating unambiguous renames, clearing empty sections. Issues requiring judgment are flagged but left for you.

            Run ctx drift again afterward to confirm what remains.

            ","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/context-health/#ctx-sync","level":3,"title":"ctx sync","text":"

            After a refactor, reconcile context with the actual codebase structure:

            ctx sync --dry-run   # preview first\nctx sync             # apply\n

            ctx sync scans for structural changes, compares with ARCHITECTURE.md, checks for new dependencies worth documenting, and identifies context referring to code that no longer exists.

            ","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/context-health/#ctx-compact","level":3,"title":"ctx compact","text":"

            Consolidate completed tasks and clean up empty sections:

            ctx compact            # move completed tasks to Completed section,\n                       # remove empty sections\nctx compact --archive  # also archive old tasks to .context/archive/\n
            • Tasks: moves completed items (with all subtasks done) into the Completed section of TASKS.md
            • All files: removes empty sections left behind
            • With --archive: writes tasks older than 7 days to .context/archive/tasks-YYYY-MM-DD.md

            Without --archive, nothing is deleted: Tasks are reorganized in place.

            ","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/context-health/#ctx-fmt","level":3,"title":"ctx fmt","text":"

            Normalize context file line widths:

            ctx fmt              # wrap long lines to 80 chars\nctx fmt --check      # CI: exit 1 if files need formatting\n

            Long task descriptions, decision rationale, and learning entries accumulate as single-line entries. ctx fmt wraps them at word boundaries with 2-space continuation indent for list items. Headings, tables, and comments are preserved.

            Idempotent: safe to run repeatedly.

            ","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/context-health/#ctx-status","level":3,"title":"ctx status","text":"

            Quick health overview:

            ctx status --verbose\n

            Shows file counts, token estimates, modification times, and drift warnings in a single glance.

            ","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/context-health/#ctx-prompt-audit","level":3,"title":"/ctx-prompt-audit","text":"

            Checks whether your context files are readable, compact, and token-efficient for the model.

            /ctx-prompt-audit\n
            ","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/context-health/#putting-it-all-together","level":2,"title":"Putting It All Together","text":"

            Conversational approach (recommended):

            Is our context clean?  -> agent runs structural plus semantic checks\nFix what you can       -> agent auto-fixes and proposes edits\nArchive the done tasks -> agent runs ctx compact --archive\nHow's token usage?     -> agent checks ctx status\n

            CLI approach (for CI, scripts, or direct control):

            ctx drift                      # 1. Detect problems\nctx drift --fix                # 2. Auto-fix the easy ones\nctx sync --dry-run && ctx sync # 3. Reconcile after refactors\nctx compact --archive          # 4. Archive old completed tasks\nctx fmt                        # 5. Normalize line widths\nctx status                     # 6. Verify\n
            ","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/context-health/#tips","level":2,"title":"Tips","text":"

            Agents cross-reference context files with source code during normal work. When drift intersects their current task, they will notice: a renamed package, a deleted directory, a path that doesn't resolve. But they rarely generalize from one mismatch to a full audit on their own. Reinforce the pattern: when an agent mentions a stale reference, ask it to run /ctx-drift. Over time, it starts offering.

            When an agent says \"this reference looks stale,\" it is usually right.

            Semantic drift is more damaging than structural drift: ctx drift catches dead paths. But CONVENTIONS.md describing a pattern your code stopped following three weeks ago is worse. When you ask \"is our context clean?\", the agent can do both checks.

            Use ctx status as a quick check: It shows file counts, token estimates, and drift warnings in a single glance. Good for a fast \"is everything ok?\" before diving into work.

            Drift detection in CI: add ctx drift --json to your CI pipeline and fail on exit code 3 (violations). This catches constitution-level problems before they reach upstream.

            Do not over-compact: Completed tasks have historical value. The --archive flag preserves them in .context/archive/ so you can search past work without cluttering active context.

            Sync is cautious by default: Use --dry-run after large refactors, then apply.

            ","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/context-health/#next-up","level":2,"title":"Next Up","text":"

            Claude Code Permission Hygiene →: Recommended permission defaults and maintenance workflow for Claude Code.

            ","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/context-health/#see-also","level":2,"title":"See Also","text":"
            • Troubleshooting: full diagnostic workflow using ctx doctor, event logs, and /ctx-doctor
            • Tracking Work Across Sessions: task lifecycle and archival
            • Persisting Decisions, Learnings, and Conventions: keeping knowledge files current
            • The Complete Session: where maintenance fits in the daily workflow
            • CLI Reference: full flag documentation for all commands
            • Context Files: structure and purpose of each .context/ file
            ","path":["Recipes","Maintenance","Detecting and Fixing Drift"],"tags":[]},{"location":"recipes/customizing-hook-messages/","level":1,"title":"Customizing Hook Messages","text":"","path":["Recipes","Hooks and Notifications","Customizing Hook Messages"],"tags":[]},{"location":"recipes/customizing-hook-messages/#the-problem","level":2,"title":"The Problem","text":"

            ctx hooks speak ctx's language, not your project's. The QA gate says \"lint the ENTIRE project\" and \"make build,\" but your Python project uses pytest and ruff. The post-commit nudge suggests running lints, but your project uses npm test. You could remove the hook entirely, but then you lose the logic (counting, state tracking, adaptive frequency) just to change the words.

            How do you customize what hooks say without removing what they do?

            ","path":["Recipes","Hooks and Notifications","Customizing Hook Messages"],"tags":[]},{"location":"recipes/customizing-hook-messages/#tldr","level":2,"title":"TL;DR","text":"
            ctx hook message list                     # see all hooks and their messages\nctx hook message show qa-reminder gate    # view the current template\nctx hook message edit qa-reminder gate    # copy default to .context/ for editing\nctx hook message reset qa-reminder gate   # revert to embedded default\n
            ","path":["Recipes","Hooks and Notifications","Customizing Hook Messages"],"tags":[]},{"location":"recipes/customizing-hook-messages/#commands-used","level":2,"title":"Commands Used","text":"Tool Type Purpose ctx hook message list CLI command Show all hook messages with category and override status ctx hook message show CLI command Print the effective message template ctx hook message edit CLI command Copy embedded default to .context/ for editing ctx hook message reset CLI command Delete user override, revert to default","path":["Recipes","Hooks and Notifications","Customizing Hook Messages"],"tags":[]},{"location":"recipes/customizing-hook-messages/#how-it-works","level":2,"title":"How It Works","text":"

            Hook messages use a 3-tier fallback:

            1. User override: .context/hooks/messages/{hook}/{variant}.txt
            2. Embedded default: compiled into the ctx binary
            3. Hardcoded fallback: belt-and-suspenders safety net

            The hook logic (when to fire, counting, state tracking, cooldowns) is unchanged. Only the content (what text gets emitted) comes from the template. You customize what the hook says without touching how it decides to speak.

            ","path":["Recipes","Hooks and Notifications","Customizing Hook Messages"],"tags":[]},{"location":"recipes/customizing-hook-messages/#finding-the-original-templates","level":3,"title":"Finding the Original Templates","text":"

            The default templates live in the ctx source tree at:

            internal/assets/hooks/messages/{hook}/{variant}.txt\n

            You can also browse them on GitHub: internal/assets/hooks/messages/

            Or use ctx hook message show to print any template without digging through source code:

            ctx hook message show qa-reminder gate        # QA gate instructions\nctx hook message show check-persistence nudge  # persistence nudge\nctx hook message show post-commit nudge        # post-commit reminder\n

            The show output includes the template source and available variables -- everything you need to write a replacement.

            ","path":["Recipes","Hooks and Notifications","Customizing Hook Messages"],"tags":[]},{"location":"recipes/customizing-hook-messages/#template-variables","level":3,"title":"Template Variables","text":"

            Some messages use Go text/template variables for dynamic content:

            No context files updated in {{.PromptsSinceNudge}}+ prompts.\nHave you discovered learnings, made decisions,\nestablished conventions, or completed tasks\nworth persisting?\n

            The show and edit commands list available variables for each message. When writing a replacement, keep the same {{.VariableName}} placeholders to preserve dynamic content. Placeholders you omit simply drop that dynamic content, while referencing an undefined variable renders <no value>: no error, but the output may look odd.

            ","path":["Recipes","Hooks and Notifications","Customizing Hook Messages"],"tags":[]},{"location":"recipes/customizing-hook-messages/#intentional-silence","level":3,"title":"Intentional Silence","text":"

            An empty template file (0 bytes or whitespace-only) means \"don't emit a message\". The hook still runs its logic but produces no output. This lets you silence specific messages without removing the hook from hooks.json.

            ","path":["Recipes","Hooks and Notifications","Customizing Hook Messages"],"tags":[]},{"location":"recipes/customizing-hook-messages/#example-python-project-qa-gate","level":2,"title":"Example: Python Project QA Gate","text":"

            The default QA gate says \"lint the ENTIRE project\" and references make lint. For a Python project, you want pytest and ruff:

            # See the current default\nctx hook message show qa-reminder gate\n\n# Copy it to .context/ for editing\nctx hook message edit qa-reminder gate\n\n# Edit the override\n

            Replace the content in .context/hooks/messages/qa-reminder/gate.txt:

            HARD GATE! DO NOT COMMIT without completing ALL of these steps first:\n(1) Run the full test suite: pytest -x\n(2) Run the linter: ruff check .\n(3) Verify a clean working tree\nRun tests and linter BEFORE every git commit, no exceptions.\n

            The hook still fires on every Edit call. The logic is identical. Only the instructions changed.

            ","path":["Recipes","Hooks and Notifications","Customizing Hook Messages"],"tags":[]},{"location":"recipes/customizing-hook-messages/#example-silencing-ceremony-nudges","level":2,"title":"Example: Silencing Ceremony Nudges","text":"

            The ceremony check nudges you to use /ctx-remember and /ctx-wrap-up. If your team has a different workflow and finds these noisy:

            ctx hook message edit check-ceremonies both\nctx hook message edit check-ceremonies remember\nctx hook message edit check-ceremonies wrapup\n

            Then empty each file:

            echo -n \"\" > .context/hooks/messages/check-ceremonies/both.txt\necho -n \"\" > .context/hooks/messages/check-ceremonies/remember.txt\necho -n \"\" > .context/hooks/messages/check-ceremonies/wrapup.txt\n

            The hooks still track ceremony usage internally, but they no longer emit any visible output.

            ","path":["Recipes","Hooks and Notifications","Customizing Hook Messages"],"tags":[]},{"location":"recipes/customizing-hook-messages/#example-javascript-project-post-commit","level":2,"title":"Example: JavaScript Project Post-Commit","text":"

            The default post-commit nudge mentions generic \"lints and tests.\" For a JavaScript project:

            ctx hook message edit post-commit nudge\n

            Replace with:

            Commit succeeded. 1. Offer context capture to the user: Decision (design\nchoice?), Learning (gotcha?), or Neither. 2. Ask the user: \"Want me to\nrun npm test and eslint before you push?\" Do NOT push. The user pushes\nmanually.\n
            ","path":["Recipes","Hooks and Notifications","Customizing Hook Messages"],"tags":[]},{"location":"recipes/customizing-hook-messages/#the-two-categories","level":2,"title":"The Two Categories","text":"

            Not all messages are equal. The list command shows each message's category:

            ","path":["Recipes","Hooks and Notifications","Customizing Hook Messages"],"tags":[]},{"location":"recipes/customizing-hook-messages/#customizable-17-messages","level":3,"title":"Customizable (17 Messages)","text":"

            Messages that are opinions: project-specific wording that benefits from customization. These are the primary targets for override.

            Hook Variant Description check-freshness stale Technology constant freshness warning check-ceremonies both Both ceremonies missing check-ceremonies remember Start-of-session ceremony check-ceremonies wrapup End-of-session ceremony check-context-size checkpoint Context capacity warning check-context-size oversize Injection oversize nudge check-context-size window Context window usage warning (>80%) check-journal both Unimported sessions + unenriched entries check-journal unenriched Unenriched journal entries check-journal unimported Unimported sessions check-knowledge warning Knowledge file growth check-map-staleness stale Architecture map staleness check-persistence nudge Context persistence nudge post-commit nudge Post-commit context capture qa-reminder gate Pre-commit QA gate","path":["Recipes","Hooks and Notifications","Customizing Hook Messages"],"tags":[]},{"location":"recipes/customizing-hook-messages/#ctx-specific-10-messages","level":3,"title":"ctx-Specific (10 Messages)","text":"

            Messages specific to ctx's own development workflow. You can customize them, but edit will warn you first.

            Hook Variant Description block-dangerous-commands cp-to-bin Block copy to bin dirs block-dangerous-commands install-to-local-bin Block copy to ~/.local/bin block-dangerous-commands mid-git-push Block git push block-dangerous-commands mid-sudo Block sudo block-non-path-ctx absolute-path Block absolute path invocation block-non-path-ctx dot-slash Block ./ctx invocation block-non-path-ctx go-run Block go run invocation check-reminders reminders Pending reminders relay check-resources alert Resource pressure alert check-version key-rotation Key rotation nudge check-version mismatch Version mismatch","path":["Recipes","Hooks and Notifications","Customizing Hook Messages"],"tags":[]},{"location":"recipes/customizing-hook-messages/#template-variables-reference","level":2,"title":"Template Variables Reference","text":"Hook Variant Variables check-freshness stale {{.StaleFiles}} check-context-size checkpoint (none) check-context-size oversize {{.TokenCount}} check-context-size window {{.TokenCount}}, {{.Percentage}} check-ceremonies both, remember, wrapup (none) check-journal both {{.UnimportedCount}}, {{.UnenrichedCount}} check-journal unenriched {{.UnenrichedCount}} check-journal unimported {{.UnimportedCount}} check-knowledge warning {{.FileWarnings}} check-map-staleness stale {{.LastRefreshDate}}, {{.ModuleCount}} check-persistence nudge {{.PromptsSinceNudge}} check-reminders reminders {{.ReminderList}} check-resources alert {{.AlertMessages}} check-version key-rotation {{.KeyAgeDays}} check-version mismatch {{.BinaryVersion}}, {{.PluginVersion}} post-commit nudge (none) qa-reminder gate (none) block-dangerous-commands all variants (none) block-non-path-ctx all variants (none)

            Templates that reference undefined variables render <no value>: no error, graceful degradation.

            ","path":["Recipes","Hooks and Notifications","Customizing Hook Messages"],"tags":[]},{"location":"recipes/customizing-hook-messages/#tips","level":2,"title":"Tips","text":"
            • Override files are version-controlled: they live in .context/ alongside your other context files. Team members get the same customized messages.
            • Start with show: always check the current default before editing. The embedded template is the baseline your override replaces.
            • Use reset to undo: if a customization causes confusion, reset reverts to the embedded default instantly.
            • Empty file = silence: you don't need to delete the hook. An empty override file silences the message while preserving the hook's logic.
            • JSON output for scripting: ctx hook message list --json returns structured data for automation.
            ","path":["Recipes","Hooks and Notifications","Customizing Hook Messages"],"tags":[]},{"location":"recipes/customizing-hook-messages/#see-also","level":2,"title":"See Also","text":"
            • Hook Output Patterns: understanding VERBATIM relays, agent directives, and hard gates
            • Auditing System Hooks: verifying hooks are running and auditing their output
            • Configuration: project-level settings via .ctxrc
            ","path":["Recipes","Hooks and Notifications","Customizing Hook Messages"],"tags":[]},{"location":"recipes/design-before-coding/","level":1,"title":"Design Before Coding","text":"","path":["Recipes","Knowledge and Tasks","Design Before Coding"],"tags":[]},{"location":"recipes/design-before-coding/#the-problem","level":2,"title":"The Problem","text":"

            You start coding a feature. Halfway through, you realize the approach doesn't handle a key edge case. You refactor. Then you discover the CLI interface doesn't fit the existing patterns. More refactoring.

            The design work happened during implementation, mixed in with debugging and trial-and-error. The result works, but the spec was never written down, the trade-offs were never recorded, and the next session has no idea why things are shaped this way.

            How do you front-load design so the implementation is straightforward?

            ","path":["Recipes","Knowledge and Tasks","Design Before Coding"],"tags":[]},{"location":"recipes/design-before-coding/#tldr","level":2,"title":"TL;DR","text":"
            /ctx-brainstorm          # explore the design space\n/ctx-spec                # write the spec document\n/ctx-task-add            # break it into tasks\n/ctx-implement           # execute step-by-step\n

            Four skills, used in sequence. Each produces an artifact that feeds the next.

            ","path":["Recipes","Knowledge and Tasks","Design Before Coding"],"tags":[]},{"location":"recipes/design-before-coding/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Tool Type Purpose /ctx-brainstorm Skill Structured design dialogue: explore approaches /ctx-spec Skill Scaffold and fill out a spec from the template /ctx-task-add Skill Add implementation tasks to TASKS.md /ctx-implement Skill Execute a plan step-by-step with verification /ctx-decision-add Skill Record design choices made during brainstorming","path":["Recipes","Knowledge and Tasks","Design Before Coding"],"tags":[]},{"location":"recipes/design-before-coding/#the-workflow","level":2,"title":"The Workflow","text":"","path":["Recipes","Knowledge and Tasks","Design Before Coding"],"tags":[]},{"location":"recipes/design-before-coding/#step-1-brainstorm-the-design","level":3,"title":"Step 1: Brainstorm the Design","text":"

            Start with /ctx-brainstorm when you have a rough idea but haven't committed to an approach. The skill follows a gated process:

            1. Understand context: what exists today, what's the gap
            2. Clarify the idea: one question at a time, not a wall of questions
            3. Surface non-functional requirements: performance, security, compatibility
            4. Lock understanding: confirm before exploring solutions
            5. Explore 2-3 approaches: with trade-offs for each
            6. Stress-test: poke holes in the chosen approach
            7. Present the design: detailed enough to implement from
            You: \"I want to add RSS feed generation to the blog.\"\n\nAgent: \"Before we design, let me understand: is this for the journal\n  site, the docs site, or both?\"\n\nYou: \"Journal site only.\"\n\nAgent: [asks about update frequency, content scope, existing infra...]\n       [presents 3 approaches with trade-offs]\n\nYou: \"Option 2 - generate at build time.\"\n\nAgent: \"Let me stress-test that. What happens when...\"\n       [probes edge cases]\n       \"Design looks solid. Want me to record the approach as a\n       decision and write a spec?\"\n

            The brainstorm produces a validated design in conversation. It doesn't produce a file - that's what the spec is for.

            ","path":["Recipes","Knowledge and Tasks","Design Before Coding"],"tags":[]},{"location":"recipes/design-before-coding/#step-2-write-the-spec","level":3,"title":"Step 2: Write the Spec","text":"

            Once the design is clear, use /ctx-spec to produce the written artifact. The skill reads the project's spec template (specs/tpl/spec-template.md) and walks through each section:

            You: /ctx-spec (rss feed generation)\n\nAgent: \"Creating specs/rss-feed.md from template.\"\n\n       \"Problem: What user-visible problem does this solve? Why now?\"\n\nYou: \"Users can't subscribe to blog updates. They have to check\n      the site manually.\"\n\nAgent: [writes the Problem section, moves to Approach]\n\n       \"Approach: High-level, how does this work?\"\n\nYou: \"Generate feed.xml during ctx journal site. Atom format.\"\n\nAgent: [writes Approach, moves to Happy Path, then Edge Cases...]\n

            The skill spends extra time on Edge Cases and Error Handling: These are where specs earn their value. The skill pushes for at least three edge cases and won't accept \"none\" without a challenge.

            Sections that don't apply can be skipped. The result is a complete spec at specs/{feature-name}.md.

            ","path":["Recipes","Knowledge and Tasks","Design Before Coding"],"tags":[]},{"location":"recipes/design-before-coding/#step-3-break-into-tasks","level":3,"title":"Step 3: Break into Tasks","text":"

            After the spec is written, the skill offers to create tasks:

            Agent: \"Spec written to specs/rss-feed.md. Want me to break this\n  into tasks in TASKS.md?\"\n\nYou: \"Yes.\"\n\nAgent: [creates a Phase section referencing the spec, adds tasks]\n       \"Added Phase 0.8: RSS/Atom Feed Generation with 4 tasks:\n         - [ ] Implement feed.xml generation in ctx journal site\n         - [ ] Add --feed flag and .ctxrc feed config\n         - [ ] Write tests for feed generation\n         - [ ] Add recipe and update docs\"\n

            Each task is independently completable and references the spec via Spec: specs/rss-feed.md in the Phase header.

            You can also invoke /ctx-task-add directly at any point to add individual tasks.

            ","path":["Recipes","Knowledge and Tasks","Design Before Coding"],"tags":[]},{"location":"recipes/design-before-coding/#step-4-implement-step-by-step","level":3,"title":"Step 4: Implement Step-by-Step","text":"

            Pick up tasks with /ctx-implement. The skill loads the spec, breaks work into atomic steps, and checkpoints after every 3-5 steps with a build and test verification:

            You: /ctx-implement (specs/rss-feed.md)\n\nAgent: \"Loaded spec. Breaking into steps:\n         1. Add FeedEntry type to journal package\n         2. Implement feed.xml template\n         3. Wire into site generation pipeline\n         [builds and tests after step 3]\n         4. Add --feed flag to ctx journal site\n         5. Add .ctxrc feed configuration\n         [builds and tests after step 5]\n         ...\"\n

            If a build or test fails, the agent stops, diagnoses, and fixes before continuing.

            ","path":["Recipes","Knowledge and Tasks","Design Before Coding"],"tags":[]},{"location":"recipes/design-before-coding/#when-to-skip-steps","level":2,"title":"When to Skip Steps","text":"

            Not every feature needs all four steps. Use your judgment:

            Situation Start at Vague idea, multiple valid approaches Step 1: Brainstorm Clear approach, need to document it Step 2: Spec Spec already exists, need to plan work Step 3: Tasks Tasks exist, ready to code Step 4: Implement

            A brainstorm without a spec is fine for small decisions. A spec without a brainstorm is fine when the design is obvious. The full chain is for features complex enough to warrant front-loaded design.

            ","path":["Recipes","Knowledge and Tasks","Design Before Coding"],"tags":[]},{"location":"recipes/design-before-coding/#conversational-approach","level":2,"title":"Conversational Approach","text":"

            You don't need skill names. Natural language works:

            You say What happens \"Let's think through this feature\" /ctx-brainstorm \"Spec this out\" /ctx-spec \"Write a design doc for...\" /ctx-spec \"Break this into tasks\" /ctx-task-add \"Implement the spec\" /ctx-implement \"Let's design before we build\" Starts at brainstorm","path":["Recipes","Knowledge and Tasks","Design Before Coding"],"tags":[]},{"location":"recipes/design-before-coding/#tips","level":2,"title":"Tips","text":"
            • Brainstorm first when uncertain. If you can articulate the approach in two sentences, skip to spec. If you can't, brainstorm.
            • Specs prevent scope creep. The Non-Goals section is as important as the approach. Writing down what you won't do keeps implementation focused.
            • Edge cases are the point. A spec that only describes the happy path isn't a spec - it's a wish. The /ctx-spec skill pushes for at least 3 edge cases because that's where designs break.
            • Record decisions during brainstorming. When you choose between approaches, the agent offers to persist the trade-off via /ctx-decision-add. Accept - future sessions need to know why, not just what.
            • Specs are living documents. Update them when implementation reveals new constraints. A spec that diverges from reality is worse than no spec.
            • The spec template is customizable. Edit specs/tpl/spec-template.md to match your project's needs. The /ctx-spec skill reads whatever template it finds there.
            ","path":["Recipes","Knowledge and Tasks","Design Before Coding"],"tags":[]},{"location":"recipes/design-before-coding/#see-also","level":2,"title":"See Also","text":"
            • Skills Reference: /ctx-brainstorm: structured design dialogue
            • Skills Reference: /ctx-spec: spec scaffolding from template
            • Skills Reference: /ctx-implement: step-by-step execution with verification
            • Tracking Work Across Sessions: task lifecycle and archival
            • Importing Claude Code Plans: turning ephemeral plans into permanent specs
            • Persisting Decisions, Learnings, and Conventions: capturing design trade-offs
            ","path":["Recipes","Knowledge and Tasks","Design Before Coding"],"tags":[]},{"location":"recipes/external-context/","level":1,"title":"Keeping Context in a Separate Repo","text":"","path":["Recipes","Getting Started","Keeping Context in a Separate Repo"],"tags":[]},{"location":"recipes/external-context/#the-problem","level":2,"title":"The Problem","text":"

            ctx files contain project-specific decisions, learnings, conventions, and tasks. By default, they live in .context/ inside the project tree, and that works well when the context can be public.

            But sometimes you need the context outside the project:

            • Open-source projects with private context: Your architectural notes, internal task lists, and scratchpad entries shouldn't ship with the public repo.
            • Compliance or IP concerns: Context files reference sensitive design rationale that belongs in a separate access-controlled repository.
            • Personal preference: You want to keep notes separate from code.

            ctx supports this by letting you point CTX_DIR anywhere. This recipe shows how to set that up and how to tell your AI assistant where to find the context.

            One .context/ per project

            The parent of the context directory is the project root by contract. ctx sync, ctx drift, and the memory-drift hook all read the codebase at filepath.Dir(ContextDir()). Pointing two projects at the same directory corrupts their journals, state, and secrets. To share knowledge (CONSTITUTION / CONVENTIONS / ARCHITECTURE) across projects, use ctx hub, not a shared .context/.

            ","path":["Recipes","Getting Started","Keeping Context in a Separate Repo"],"tags":[]},{"location":"recipes/external-context/#tldr","level":2,"title":"TL;DR","text":"

            Create the external context directory, initialize it, and bind it:

            mkdir -p ~/repos/myproject-context && cd ~/repos/myproject-context && git init\ncd ~/repos/myproject\n\n# Bind CTX_DIR to the external location, then init creates files there.\nexport CTX_DIR=~/repos/myproject-context/.context\nctx init\n

            All ctx commands now use the external directory. If you share the setup across shells, add the export CTX_DIR=... line to your shell rc, or source a per-project .envrc with direnv.

            ","path":["Recipes","Getting Started","Keeping Context in a Separate Repo"],"tags":[]},{"location":"recipes/external-context/#what-works-what-quietly-degrades","level":2,"title":"What Works, What Quietly Degrades","text":"

            The single-source-anchor contract states that filepath.Dir(CTX_DIR) is the project root. When the context lives outside the project tree, ctx still resolves correctly for every operation that reads or writes inside .context/. But any operation that scans the codebase scans the wrong tree, and does so silently:

            Operation Behavior with external .context/ ctx status, agent, add ✅ Works. Operates on files inside CTX_DIR. Journal, scratchpad, hub ✅ Works. Same reason. ctx sync ⚠️ Scans the context repo, not the code repo. ctx drift ⚠️ Same. Reports nothing useful. Memory-drift hook (MEMORY.md) ⚠️ Looks for MEMORY.md next to the external .context/, not the code.

            Nothing errors. The code-aware operations just find an empty or unrelated tree where the project root should be.

            ","path":["Recipes","Getting Started","Keeping Context in a Separate Repo"],"tags":[]},{"location":"recipes/external-context/#workaround-symlink-the-context-into-the-code-tree","level":3,"title":"Workaround: symlink the .context/ into the code tree","text":"

            If you want both the privacy of an external git repo and working ctx sync / drift / memory-drift, symlink the external .context/ into the code repo and point CTX_DIR at the symlink:

            # External repo holds the real files\nmkdir -p ~/repos/myproject-context && cd ~/repos/myproject-context && git init\n\n# Symlink it into the code repo\nln -s ~/repos/myproject-context/.context ~/repos/myproject/.context\n\n# Bind CTX_DIR to the symlink path; ctx init will follow it\nexport CTX_DIR=~/repos/myproject/.context\nctx init\n

            Now filepath.Dir(CTX_DIR) is the code repo, so code-aware operations scan the right tree. The actual files still live in the external repo and are committed there. Add .context to the code repo's .gitignore (or .git/info/exclude) so the symlink itself isn't tracked by the code repo.

            The basename guard is permissive about symlinks: it checks the declared name, not the resolved target, so a .context symlink pointing anywhere is accepted as long as the declared basename is .context.

            ","path":["Recipes","Getting Started","Keeping Context in a Separate Repo"],"tags":[]},{"location":"recipes/external-context/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Tool Type Purpose ctx init CLI command Initialize context directory ctx activate CLI command Emit export CTX_DIR=... for the shell CTX_DIR Env variable Declare context directory per-session .ctxrc Config file Per-project configuration /ctx-status Skill Verify context is loading correctly","path":["Recipes","Getting Started","Keeping Context in a Separate Repo"],"tags":[]},{"location":"recipes/external-context/#the-workflow","level":2,"title":"The Workflow","text":"","path":["Recipes","Getting Started","Keeping Context in a Separate Repo"],"tags":[]},{"location":"recipes/external-context/#step-1-create-the-private-context-repo","level":3,"title":"Step 1: Create the Private Context Repo","text":"

            Create a separate repository for your context files. This can live anywhere: a private GitHub repo, a shared drive, a sibling directory:

            # Create the context repo\nmkdir -p ~/repos/myproject-context\ncd ~/repos/myproject-context\ngit init\n
            ","path":["Recipes","Getting Started","Keeping Context in a Separate Repo"],"tags":[]},{"location":"recipes/external-context/#step-2-initialize-ctx-pointing-at-it","level":3,"title":"Step 2: Initialize ctx Pointing at It","text":"

            From your project root, declare CTX_DIR pointing to the external location, then initialize:

            cd ~/repos/myproject\nCTX_DIR=~/repos/myproject-context/.context ctx init\n

            This creates the canonical .context/ file set inside ~/repos/myproject-context/ instead of ~/repos/myproject/.context/.

            ","path":["Recipes","Getting Started","Keeping Context in a Separate Repo"],"tags":[]},{"location":"recipes/external-context/#step-3-make-it-stick","level":3,"title":"Step 3: Make It Stick","text":"

            Declaring CTX_DIR on every command is tedious. Pick one of these methods to make the configuration permanent. The context directory itself must be declared via CTX_DIR; .ctxrc does not carry the path.

            ","path":["Recipes","Getting Started","Keeping Context in a Separate Repo"],"tags":[]},{"location":"recipes/external-context/#option-a-ctx_dir-environment-variable-recommended","level":4,"title":"Option A: CTX_DIR Environment Variable (Recommended)","text":"
            # Direct path. Works for ctx status / agent / add but degrades\n# code-aware operations. See \"What Works, What Quietly Degrades\".\nexport CTX_DIR=~/repos/myproject-context/.context\n\n# Or, with the symlink approach above, point at the symlink path\n# inside the code repo so code-aware operations stay healthy.\nexport CTX_DIR=~/repos/myproject/.context\n

            Put either form in your shell profile (~/.bashrc, ~/.zshrc) or a direnv .envrc.

            For a single session, run eval \"$(ctx activate)\" from any directory inside the project where exactly one .context/ candidate is visible (the symlink counts). activate does not accept a path argument; bind a specific path by exporting CTX_DIR directly instead.

            ","path":["Recipes","Getting Started","Keeping Context in a Separate Repo"],"tags":[]},{"location":"recipes/external-context/#option-b-ctxrc-for-other-settings","level":4,"title":"Option B: .ctxrc for Other Settings","text":"

            Put any settings (token budget, priority order, freshness files) in a .ctxrc at the project root (dirname(CTX_DIR)), which here is the parent of the external .context/:

            # ~/repos/myproject-context/.ctxrc\ntoken_budget: 16000\n

            .ctxrc is always read from the parent of CTX_DIR, so this file is picked up whenever CTX_DIR points at ~/repos/myproject-context/.context.

            ","path":["Recipes","Getting Started","Keeping Context in a Separate Repo"],"tags":[]},{"location":"recipes/external-context/#resolution","level":4,"title":"Resolution","text":"

            ctx reads the context directory from a single channel: the CTX_DIR environment variable. When CTX_DIR is unset, ctx errors with a \"no context directory specified\" hint pointing at ctx activate and this recipe. When set, the value must be an absolute path with .context as its basename; relative paths and other names are rejected on first use.

            See Activating a Context Directory for the full recipe.

            ","path":["Recipes","Getting Started","Keeping Context in a Separate Repo"],"tags":[]},{"location":"recipes/external-context/#step-4-agent-auto-discovery-via-bootstrap","level":3,"title":"Step 4: Agent Auto-Discovery via Bootstrap","text":"

            When context lives outside the project tree, your AI assistant needs to know where to find it. The ctx system bootstrap command resolves the configured context directory and communicates it to the agent automatically:

            $ ctx system bootstrap\nctx system bootstrap\n====================\n\ncontext_dir: /home/user/repos/myproject-context/.context\n\nFiles:\n  CONSTITUTION.md, TASKS.md, DECISIONS.md, ...\n

            The CLAUDE.md template generated by ctx init already instructs the agent to run ctx system bootstrap at session start. Because CTX_DIR is inherited by child processes, your agent picks up the external path automatically.

            Here is the relevant section from CLAUDE.md for reference:

            <!-- CLAUDE.md -->\n1. **Run `ctx system bootstrap`**: CRITICAL, not optional.\n   This tells you where the context directory is. If it returns any\n   error, relay the error output to the user verbatim, point them at\n   https://ctx.ist/recipes/activating-context/ for setup, and STOP.\n   Do not try to recover; the user decides.\n

            Moreover, every nudge (context checkpoint, persistence reminder, etc.) also includes a Context: /home/user/repos/myproject-context/.context footer, so the agent remains anchored to the correct directory even in long sessions.

            Export CTX_DIR in your shell profile so every hook process inherits it:

            export CTX_DIR=~/repos/myproject-context/.context\n
            ","path":["Recipes","Getting Started","Keeping Context in a Separate Repo"],"tags":[]},{"location":"recipes/external-context/#step-5-share-with-teammates","level":3,"title":"Step 5: Share with Teammates","text":"

            Teammates clone both repos and export CTX_DIR:

            # Clone the project\ngit clone git@github.com:org/myproject.git\ncd myproject\n\n# Clone the private context repo\ngit clone git@github.com:org/myproject-context.git ~/repos/myproject-context\nexport CTX_DIR=~/repos/myproject-context/.context\n

            If teammates use different paths, each developer sets their own CTX_DIR.

            For encryption key distribution across the team, see the Syncing Scratchpad Notes recipe.

            ","path":["Recipes","Getting Started","Keeping Context in a Separate Repo"],"tags":[]},{"location":"recipes/external-context/#step-6-day-to-day-sync","level":3,"title":"Step 6: Day-to-Day Sync","text":"

            The external context repo has its own git history. Treat it like any other repo: commit and push after sessions:

            cd ~/repos/myproject-context\n\n# After a session\ngit add -A\ngit commit -m \"Session: refactored auth module, added rate-limit learning\"\ngit push\n

            Your AI assistant can do this too. When ending a session:

            You: \"Save what we learned and push the context repo.\"\n\nAgent: [runs ctx add learning, then commits and pushes the context repo]\n

            You can also set up a post-session habit: project code gets committed to the project repo, context gets committed to the context repo.

            ","path":["Recipes","Getting Started","Keeping Context in a Separate Repo"],"tags":[]},{"location":"recipes/external-context/#conversational-approach","level":2,"title":"Conversational Approach","text":"

            You don't need to remember the flags; simply ask your assistant:

            ","path":["Recipes","Getting Started","Keeping Context in a Separate Repo"],"tags":[]},{"location":"recipes/external-context/#set-up-your-system-using-natural-language","level":3,"title":"Set Up Your System Using Natural Language","text":"
            You: \"Set up ctx to use ~/repos/myproject-context as the context directory.\"\n\nAgent: \"I'll set CTX_DIR to that path, run ctx init to materialize\n       it, and show you the export line to add to your shell\n       profile. Want me to seed the core context files too?\"\n
            ","path":["Recipes","Getting Started","Keeping Context in a Separate Repo"],"tags":[]},{"location":"recipes/external-context/#configure-separate-repo-for-context-folder-using-natural-language","level":3,"title":"Configure Separate Repo for .context Folder Using Natural Language","text":"
            You: \"My context is in a separate repo. Can you load it?\"\n\nAgent: [reads CTX_DIR, loads context from the external dir]\n       \"Loaded. You have 3 pending tasks, last session was about the auth\n       refactor.\"\n
            ","path":["Recipes","Getting Started","Keeping Context in a Separate Repo"],"tags":[]},{"location":"recipes/external-context/#tips","level":2,"title":"Tips","text":"
            • Start simple. If you don't need external context yet, don't set it up. The default .context/ in-tree is the easiest path. Move to an external repo when you have a concrete reason.
            • One context repo per project. Sharing a single context directory across multiple projects corrupts journals, state, and secrets. Use ctx hub for cross-project knowledge sharing.
            • Export CTX_DIR in your shell profile so hooks and tools inherit the path without per-command flags.
            • Commit both repos at session boundaries. Context without code history (or code without context history) loses half the value.
            ","path":["Recipes","Getting Started","Keeping Context in a Separate Repo"],"tags":[]},{"location":"recipes/external-context/#next-up","level":2,"title":"Next Up","text":"

            The Complete Session →: Walk through a full ctx session from start to finish.

            ","path":["Recipes","Getting Started","Keeping Context in a Separate Repo"],"tags":[]},{"location":"recipes/external-context/#see-also","level":2,"title":"See Also","text":"
            • Setting Up ctx Across AI Tools: initial setup recipe
            • Syncing Scratchpad Notes Across Machines: distribute encryption keys when context is shared
            • CLI Reference: full command list and global options
            ","path":["Recipes","Getting Started","Keeping Context in a Separate Repo"],"tags":[]},{"location":"recipes/guide-your-agent/","level":1,"title":"Guide Your Agent","text":"

            Commands vs. Skills

            Commands (ctx status, ctx add task) run in your terminal.

            Skills (/ctx-reflect, /ctx-next) run inside your AI coding assistant.

            Recipes combine both.

            Think of commands as structure and skills as behavior.

            ","path":["Recipes","Getting Started","Guide Your Agent"],"tags":[]},{"location":"recipes/guide-your-agent/#proactive-behavior","level":2,"title":"Proactive Behavior","text":"

            These recipes show explicit commands and skills, but agents trained on the ctx playbook are proactive: They offer to save learnings after debugging, record decisions after trade-offs, create follow-up tasks after completing work, and suggest what to work on next.

            Your questions train the agent. Asking \"what have we learned?\" or \"is our context clean?\" does two things:

            • It triggers the workflow right now,
            • and it reinforces the pattern.

            The more you guide, the more the agent internalizes the behavior and begins offering on its own.

            Each recipe includes a Conversational Approach section showing these natural-language patterns.

            Tip

            Don't wait passively for proactive behavior, especially in early sessions.

            Ask, guide, reinforce. Over time, you ask less and the agent offers more.

            ","path":["Recipes","Getting Started","Guide Your Agent"],"tags":[]},{"location":"recipes/guide-your-agent/#next-up","level":2,"title":"Next Up","text":"

            Setup Across AI Tools →: Initialize ctx and configure hooks for Claude Code, Cursor, Aider, Copilot, or Windsurf.

            ","path":["Recipes","Getting Started","Guide Your Agent"],"tags":[]},{"location":"recipes/guide-your-agent/#see-also","level":2,"title":"See Also","text":"
            • The Complete Session: full session lifecycle from start to finish
            • Prompting Guide: general tips for working effectively with AI coding assistants
            ","path":["Recipes","Getting Started","Guide Your Agent"],"tags":[]},{"location":"recipes/hook-output-patterns/","level":1,"title":"Hook Output Patterns","text":"","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-output-patterns/#the-problem","level":2,"title":"The Problem","text":"

            Claude Code hooks can output text, JSON, or nothing at all. But the format of that output determines who sees it and who acts on it.

            Choose the wrong pattern, and your carefully crafted warning gets silently absorbed by the agent, or your agent-directed nudge gets dumped on the user as noise.

            This recipe catalogs the known hook output patterns and explains when to use each one.

            ","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-output-patterns/#tldr","level":2,"title":"TL;DR","text":"

            Eight patterns from full control to full invisibility:

            • hard gate (exit 2),
            • VERBATIM relay (agent MUST show),
            • agent directive (context injection),
            • and silent side-effect (background work).

            Most hooks belong in the middle.

            ","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-output-patterns/#the-spectrum","level":2,"title":"The Spectrum","text":"

            These patterns form a spectrum based on who decides what the user sees:

            Pattern Who decides? Hard gate Hook decides (agent can't proceed) VERBATIM relay Hook decides (agent must show) Escalating severity Hook suggests, agent judges urgency Conditional relay Hook sets criteria, agent evaluates Suggested action Hook proposes, agent + user decide Agent directive Agent decides entirely Silent injection Nobody: invisible background context Silent side-effect Nobody: invisible background work

            The spectrum runs from full hook control (hard gate) to full invisibility (silent side effect).

            Most hooks belong somewhere in the middle.

            ","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-output-patterns/#pattern-1-hard-gate","level":2,"title":"Pattern 1: Hard Gate","text":"

            Block the tool call entirely. The agent cannot proceed: it must find another approach or tell the user.

            echo '{\"decision\": \"block\", \"reason\": \"Use ctx from PATH, not ./ctx\"}'\n

            When to use: Enforcing invariants that must never be violated: Constitution rules, security boundaries, destructive command prevention.

            Hook type: PreToolUse only (Claude Code first-class mechanism).

            Examples in ctx:

            • ctx system block-non-path-ctx: Enforces the PATH invocation rule
            • block-git-push.sh: Requires explicit user approval for pushes (project-local)
            • block-dangerous-commands.sh: Prevents sudo, copies to ~/.local/bin (project-local)

            Trade-off: The agent gets a block response with a reason. Good reasons help the agent recover (\"use X instead\"); bad reasons leave it stuck.

            ","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-output-patterns/#pattern-2-verbatim-relay","level":2,"title":"Pattern 2: VERBATIM Relay","text":"

            Force the agent to show this to the user as-is. The explicit instruction overcomes the agent's tendency to silently absorb context.

            echo \"IMPORTANT: Relay this warning to the user VERBATIM before answering their question.\"\necho \"\"\necho \"┌─ Journal Reminder ─────────────────────────────\"\necho \"│ You have 12 sessions not yet exported.\"\necho \"└────────────────────────────────────────────────\"\n

            When to use: Actionable reminders the user needs to see regardless of what they asked: Stale backups, unimported sessions, resource warnings.

            Hook type: UserPromptSubmit (runs before the agent sees the prompt).

            Examples in ctx:

            • ctx system check-journal: Unexported sessions and unenriched entries
            • ctx system check-context-size: Context capacity warning
            • ctx system check-resources: Resource pressure (memory, swap, disk, load): DANGER only
            • ctx system check-freshness: Technology constant staleness warning

            Trade-off: Noisy if overused. Every VERBATIM relay adds a preamble before the agent's actual answer. Throttle with once-per-day markers or adaptive frequency.

            Key detail: The phrase IMPORTANT: Relay this ... VERBATIM is what makes this work. Without it, agents tend to process the information internally and never surface it. The explicit instruction is the pattern: the box-drawing is just fancy formatting.

            ","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-output-patterns/#pattern-3-agent-directive","level":2,"title":"Pattern 3: Agent Directive","text":"

            Tell the agent to do something, not the user. The agent decides whether and how to involve the user.

            echo \"┌─ Persistence Checkpoint (prompt #25) ───────────\"\necho \"│ No context files updated in 15+ prompts.\"\necho \"│ Have you discovered learnings, decisions,\"\necho \"│ or completed tasks worth persisting?\"\necho \"└──────────────────────────────────────────────────\"\n

            When to use: Behavioral nudges. The hook detects a condition and asks the agent to consider an action. The user may never need to know.

            Hook type: UserPromptSubmit.

            Examples in ctx:

            • ctx system check-persistence: Nudges the agent to persist context

            Trade-off: No guarantee the agent acts. The nudge is one signal among many in the context window. Strong phrasing helps (\"Have you...?\" is better than \"Consider...\"), but ultimately the agent decides.

            ","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-output-patterns/#pattern-4-silent-context-injection","level":2,"title":"Pattern 4: Silent Context Injection","text":"

            Load context with no visible output. The agent gets enriched without either party noticing.

            ctx agent --budget 4000 >/dev/null || true\n

            When to use: Background context loading that should be invisible. The agent benefits from the information, but neither it nor the user needs to know it happened.

            Hook type: PreToolUse with .* matcher (runs on every tool call).

            Examples in ctx:

            • The ctx agent PreToolUse hook: injects project context silently

            Trade-off: Adds latency to every tool call. Keep the injected content small and fast to generate.

            ","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-output-patterns/#pattern-5-silent-side-effect","level":2,"title":"Pattern 5: Silent Side-Effect","text":"

            Do work, produce no output: Housekeeping that needs no acknowledgment.

            find \"$CTX_TMPDIR\" -type f -mtime +15 -delete\n

            When to use: Cleanup, log rotation, temp file management. Anything where the action is the point and nobody needs to know it happened.

            Hook type: Any hook where output is irrelevant.

            Examples in ctx:

            • Log rotation, marker file cleanup, state directory maintenance

            Trade-off: None, if the action is truly invisible. If it can fail in a way that matters, consider logging.

            ","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-output-patterns/#pattern-6-conditional-relay","level":3,"title":"Pattern 6: Conditional Relay","text":"

            Tell the agent to relay only if a condition holds in context.

            echo \"If the user's question involves modifying .context/ files,\"\necho \"relay this warning VERBATIM:\"\necho \"\"\necho \"┌─ Context Integrity ─────────────────────────────\"\necho \"│ CONSTITUTION.md has not been verified in 7 days.\"\necho \"└────────────────────────────────────────────────\"\necho \"\"\necho \"Otherwise, proceed normally.\"\n

            When to use: Warnings that only matter in certain contexts. Avoids noise when the user is doing unrelated work.

            Trade-off: Depends on the agent's judgment about when the condition holds. More fragile than VERBATIM relay, but less noisy.

            ","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-output-patterns/#pattern-7-suggested-action","level":3,"title":"Pattern 7: Suggested Action","text":"

            Give the agent a specific command to propose to the user.

            echo \"┌─ Stale Dependencies ──────────────────────────\"\necho \"│ go.sum is 30+ days newer than go.mod.\"\necho \"│ Suggested: run \\`go mod tidy\\`\"\necho \"│ Ask the user before proceeding.\"\necho \"└───────────────────────────────────────────────\"\n

            When to use: The hook detects a fixable condition and knows the fix. Goes beyond a nudge: Gives the agent a concrete next step. The agent still asks for permission but knows exactly what to propose.

            Trade-off: The suggestion might be wrong or outdated. The \"ask the user before proceeding\" part is critical.

            ","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-output-patterns/#pattern-8-escalating-severity","level":3,"title":"Pattern 8: Escalating Severity","text":"

            Different urgency tiers with different relay expectations.

            # INFO: agent processes silently, mentions if relevant\necho \"INFO: Last test run was 3 days ago.\"\n\n# WARN: agent should mention to user at next natural pause\necho \"WARN: 12 uncommitted changes across 3 branches.\"\n\n# CRITICAL: agent must relay immediately, before any other work\necho \"CRITICAL: Relay VERBATIM before answering. Disk usage at 95%.\"\n

            When to use: When you have multiple hooks producing output and need to avoid overwhelming the user. INFO gets absorbed, WARN gets mentioned, CRITICAL interrupts.

            Examples in ctx:

            • ctx system check-resources: Uses two tiers (WARNING/DANGER) internally but only fires the VERBATIM relay at DANGER level: WARNING is silent. See ctx system for the user-facing command that shows both tiers.

            Trade-off: Requires agent training or convention to recognize the tiers. Without a shared protocol, the prefixes are just text.

            ","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-output-patterns/#choosing-a-pattern","level":2,"title":"Choosing a Pattern","text":"
            Is the agent about to do something forbidden?\n  └─ Yes → Hard gate\n\nDoes the user need to see this regardless of what they asked?\n  └─ Yes → VERBATIM relay\n  └─ Sometimes → Conditional relay\n\nShould the agent consider an action?\n  └─ Yes, with a specific fix → Suggested action\n  └─ Yes, open-ended → Agent directive\n\nIs this background context the agent should have?\n  └─ Yes → Silent injection\n\nIs this housekeeping?\n  └─ Yes → Silent side-effect\n
            ","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-output-patterns/#design-tips","level":2,"title":"Design Tips","text":"

            Throttle aggressively: VERBATIM relays that fire every prompt will be ignored or resented. Use once-per-day markers (touch $REMINDED), adaptive frequency (every Nth prompt), or staleness checks (only fire if condition persists).

            Include actionable commands: \"You have 12 unimported sessions\" is less useful than \"You have 12 unimported sessions. Run: ctx journal import --all.\" Give the user (or agent) the exact next step.

            Use box-drawing for visual structure: The ┌─ ─┐ │ └─ ─┘ pattern makes hook output visually distinct from agent prose. It also signals \"this is machine-generated, not agent opinion.\"

            Test the silence path: Most hook runs should produce no output (the condition isn't met). Make sure the common case is fast and silent.

            ","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-output-patterns/#common-pitfalls","level":2,"title":"Common Pitfalls","text":"

            Lessons from 19 days of hook debugging in ctx. Every one of these was encountered, debugged, and fixed in production.

            ","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-output-patterns/#silent-misfire-wrong-key-name","level":3,"title":"Silent Misfire: Wrong Key Name","text":"
            { \"PreToolUseHooks\": [ ... ] }\n

            The key is PreToolUse, not PreToolUseHooks. Claude Code validates silently: A misspelled key means the hook is ignored with no error. Always test with a debug echo first to confirm the hook fires before adding real logic.

            ","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-output-patterns/#json-escaping-breaks-shell-commands","level":3,"title":"JSON Escaping Breaks Shell Commands","text":"

            Go's json.Marshal escapes >, <, and & as Unicode sequences (\\u003e) by default. This breaks shell commands in generated config:

            \"command\": \"ctx agent 2\\u003e/dev/null\"\n

            Fix: use json.Encoder with SetEscapeHTML(false) when generating hook configuration.

            ","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-output-patterns/#stdin-not-environment-variables","level":3,"title":"stdin, Not Environment Variables","text":"

            Hook input arrives as JSON via stdin, not environment variables:

            # Wrong:\nCOMMAND=\"$CLAUDE_TOOL_INPUT\"\n\n# Right:\nHOOK_INPUT=$(cat)\nCOMMAND=$(echo \"$HOOK_INPUT\" | jq -r '.tool_input.command // empty')\n
            ","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-output-patterns/#regex-overfitting","level":3,"title":"Regex Overfitting","text":"

            A regex meant to catch ctx as a binary will also match ctx as a directory component:

            # Too broad: blocks: git -C /home/jose/WORKSPACE/ctx status\n(/home/|/tmp/|/var/)[^ ]*ctx[^ ]*\n\n# Narrow to binary only:\n(/home/|/tmp/|/var/)[^ ]*/ctx( |$)\n

            Test hook regexes against paths that contain the target string as a substring, not just as the final component.

            ","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-output-patterns/#repetition-fatigue","level":3,"title":"Repetition Fatigue","text":"

            Injecting context on every tool call sounds safe. In practice, after seeing the same context injection fifteen times, the agent treats it as background noise: Conventions stated in the injected context get violated because salience has been destroyed by repetition.

            Fix: cooldowns. ctx agent --session $PPID --cooldown 10m injects at most once per ten minutes per session using a tombstone file in /tmp/. This is not an optimization; it is a correction for a design flaw. Every injection consumes attention budget: 50 tool calls at 4,000 tokens each means 200,000 tokens of repeated context, most of it wasted.

            ","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-output-patterns/#hardcoded-paths","level":3,"title":"Hardcoded Paths","text":"

            A username rename (from parallels to jose) broke every hook at once. Use $CLAUDE_PROJECT_DIR instead of absolute paths:

            \"command\": \"\\\"$CLAUDE_PROJECT_DIR\\\"/.claude/hooks/block-git-push.sh\"\n

            If the platform provides a runtime variable for paths, always use it.

            ","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-output-patterns/#next-up","level":2,"title":"Next Up","text":"

            Webhook Notifications →: Get push notifications when loops complete, hooks fire, or agents hit milestones.

            ","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-output-patterns/#see-also","level":2,"title":"See Also","text":"
            • Customizing Hook Messages: override what hooks say without changing what they do
            • Claude Code Permission Hygiene: how permissions and hooks work together
            • Defense in Depth: why hooks matter for agent security
            ","path":["Recipes","Hooks and Notifications","Hook Output Patterns"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/","level":1,"title":"Hook Sequence Diagrams","text":"","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#hook-lifecycle","level":2,"title":"Hook Lifecycle","text":"

            This page documents the ctx system hooks: the built-in ctx system * subcommands that Claude Code invokes via .claude/hooks.json at lifecycle events. These are owned by ctx itself, not authored by users.

            Not to Be Confused with ctx trigger

            ctx has three distinct hook-like layers:

            • ctx system hooks (this page): built-in, owned by ctx, wired into Claude Code via internal/assets/claude/hooks/hooks.json.
            • ctx trigger: user-authored shell scripts in .context/hooks/<type>/*.sh. See ctx trigger reference and the trigger authoring recipe.
            • Claude Code hooks configured directly in .claude/settings.local.json, tool-specific, not portable across AI tools.

            This page is only about the first category.

            Every ctx system hook is a Go binary invoked by Claude Code at one of three lifecycle events: PreToolUse (before a tool runs, can block), PostToolUse (after a tool completes), or UserPromptSubmit (on every user prompt, before any tools run). Hooks receive JSON on stdin and emit JSON or plain text on stdout.

            ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#pretooluse-hooks","level":2,"title":"PreToolUse Hooks","text":"

            These fire before a tool executes. They can block, gate, or inject context.

            ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#context-load-gate","level":3,"title":"Context-Load-Gate","text":"

            Matcher: .* (all tools)

            Injects the full context packet on first tool use of a session. One-shot per session.

            sequenceDiagram\n    participant CC as Claude Code\n    participant Hook as context-load-gate\n    participant State as .context/state/\n    participant Ctx as .context/ files\n    participant Git as git log\n\n    CC->>Hook: stdin {command, session_id}\n    Hook->>Hook: Check initialized\n    alt not initialized\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Hook: Check paused\n    alt paused\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>State: Check ctx-loaded-{session} marker\n    alt marker exists\n        Hook-->>CC: (silent exit, already fired)\n    end\n    Hook->>State: Create marker (one-shot guard)\n    Hook->>State: Prune stale session files\n    loop Each file in ReadOrder\n        alt GLOSSARY or TASK\n            Note over Hook: Skip (Task mentioned in footer only)\n        else DECISION or LEARNING\n            Hook->>Ctx: Extract index table only\n        else other files\n            Hook->>Ctx: Read full content\n        end\n        Hook->>Hook: Estimate tokens per file\n    end\n    Hook->>Git: Detect changes since last session\n    Hook->>Hook: Build injection (files + changes + token counts)\n    Hook-->>CC: JSON {additionalContext: injection}\n    Hook->>Hook: Send webhook (metadata only)\n    Hook->>State: Write oversize flag if tokens > threshold
            ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#block-non-path-ctx","level":3,"title":"Block-Non-Path-ctx","text":"

            Matcher: Bash

            Blocks ./ctx, go run ./cmd/ctx, or absolute-path ctx invocations. Constitutionally enforced.

            sequenceDiagram\n    participant CC as Claude Code\n    participant Hook as block-non-path-ctx\n    participant Tpl as Message Template\n\n    CC->>Hook: stdin {command, session_id}\n    Hook->>Hook: Extract command\n    alt command empty\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Hook: Test regex: relative-path, go-run, absolute-path\n    alt no match\n        Hook-->>CC: (silent exit)\n    end\n    alt absolute-path + test exception\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Tpl: LoadMessage(hook, variant, fallback)\n    Hook-->>CC: JSON {decision: BLOCK, reason + constitution suffix}\n    Hook->>Hook: NudgeAndRelay(message)
            ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#qa-reminder","level":3,"title":"Qa-Reminder","text":"

            Matcher: Bash

            Gate nudge before any git command. Reminds agent to lint/test.

            sequenceDiagram\n    participant CC as Claude Code\n    participant Hook as qa-reminder\n    participant Tpl as Message Template\n\n    CC->>Hook: stdin {command, session_id}\n    Hook->>Hook: Check initialized + HookPreamble\n    alt not initialized or paused\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Hook: Check command contains \"git\"\n    alt no git command\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Tpl: LoadMessage(hook, gate, fallback)\n    Hook->>Hook: AppendDir(message)\n    Hook-->>CC: JSON {additionalContext: QA gate}\n    Hook->>Hook: Relay(message)
            ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#specs-nudge","level":3,"title":"Specs-Nudge","text":"

            Matcher: EnterPlanMode

            Nudges agent to save plans/specs when new implementation detected.

            sequenceDiagram\n    participant CC as Claude Code\n    participant Hook as specs-nudge\n    participant Tpl as Message Template\n\n    CC->>Hook: stdin {command, session_id}\n    Hook->>Hook: Check initialized + HookPreamble\n    alt not initialized or paused\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Tpl: LoadMessage(hook, nudge, fallback)\n    Hook->>Hook: AppendDir(message)\n    Hook-->>CC: JSON {additionalContext: specs nudge}\n    Hook->>Hook: Relay(message)
            ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#posttooluse-hooks","level":2,"title":"PostToolUse Hooks","text":"

            These fire after a tool completes. They observe, nudge, and track state.

            ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#post-commit","level":3,"title":"Post-Commit","text":"

            Matcher: Bash

            Fires after git commit (not amend). Nudges for context capture and checks version drift.

            sequenceDiagram\n    participant CC as Claude Code\n    participant Hook as post-commit\n    participant Tpl as Message Template\n\n    CC->>Hook: stdin {command, session_id}\n    Hook->>Hook: Check initialized + HookPreamble\n    alt not initialized or paused\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Hook: Regex: command contains \"git commit\"?\n    alt not a git commit\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Hook: Regex: command contains \"--amend\"?\n    alt is amend\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Tpl: LoadMessage(hook, nudge, fallback)\n    Hook->>Hook: AppendDir(message)\n    Hook-->>CC: JSON {additionalContext: post-commit nudge}\n    Hook->>Hook: Relay(message)\n    Hook->>Hook: CheckVersionDrift()
            ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#check-task-completion","level":3,"title":"Check-Task-Completion","text":"

            Matcher: Edit, Write

            Configurable-interval nudge after edits. Per-session counter resets after firing.

            sequenceDiagram\n    participant CC as Claude Code\n    participant Hook as check-task-completion\n    participant State as .context/state/\n    participant RC as .ctxrc\n    participant Tpl as Message Template\n\n    CC->>Hook: stdin {session_id}\n    Hook->>Hook: Check initialized + HookPreamble\n    alt not initialized or paused\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>RC: Read task nudge interval\n    alt interval <= 0 (disabled)\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>State: Read per-session counter\n    Hook->>Hook: Increment counter\n    alt counter < interval\n        Hook->>State: Write counter\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>State: Reset counter to 0\n    Hook->>Tpl: LoadMessage(hook, nudge, fallback)\n    Hook-->>CC: JSON {additionalContext: task nudge}\n    Hook->>Hook: Relay(message)
            ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#userpromptsubmit-hooks","level":2,"title":"UserPromptSubmit Hooks","text":"

            These fire on every user prompt, before any tools run. They perform health checks, track state, and nudge for housekeeping.

            ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#check-context-size","level":3,"title":"Check-Context-Size","text":"

            Adaptive context window monitoring. Fires checkpoints, window warnings, and billing alerts based on prompt count and token usage.

            sequenceDiagram\n    participant CC as Claude Code\n    participant Hook as check-context-size\n    participant State as .context/state/\n    participant Session as Session JSONL\n    participant Tpl as Message Template\n\n    CC->>Hook: stdin {session_id}\n    Hook->>Hook: Check initialized\n    Hook->>Hook: Read input, resolve session ID\n    Hook->>Hook: Check paused\n    alt paused\n        Hook-->>CC: Pause acknowledgment message\n    end\n    Hook->>State: Increment session prompt counter\n    Hook->>Session: Read token info (tokens, model, window)\n\n    rect rgb(255, 240, 240)\n        Note over Hook: Billing check (independent, never suppressed)\n        alt tokens >= billing threshold (one-shot)\n            Hook->>Tpl: LoadMessage(hook, billing, vars)\n            Hook-->>CC: Billing warning nudge box\n            Hook->>Hook: NudgeAndRelay(billing message)\n        end\n    end\n\n    Hook->>State: Check wrap-up marker\n    alt wrapped up recently (< 2h)\n        Hook->>State: Write stats (event: suppressed)\n        Hook-->>CC: (silent exit)\n    end\n\n    rect rgb(240, 248, 255)\n        Note over Hook: Adaptive frequency check\n        alt count > 30 and count % 3 == 0\n            Note over Hook: High frequency trigger\n        else count > 15 and count % 5 == 0\n            Note over Hook: Medium frequency trigger\n        else\n            Hook->>State: Write stats (event: silent)\n            Hook-->>CC: (silent exit)\n        end\n    end\n\n    alt context window >= 80%\n        Hook->>Tpl: LoadMessage(hook, window, vars)\n        Hook-->>CC: Window warning nudge box\n        Hook->>Hook: NudgeAndRelay(window message)\n    else checkpoint trigger\n        Hook->>Tpl: LoadMessage(hook, checkpoint)\n        Hook-->>CC: Checkpoint nudge box\n        Hook->>Hook: NudgeAndRelay(checkpoint message)\n    end\n    Hook->>State: Write session stats
            ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#check-ceremonies","level":3,"title":"Check-Ceremonies","text":"

            Daily check for /ctx-remember and /ctx-wrap-up usage in recent journal entries.

            sequenceDiagram\n    participant CC as Claude Code\n    participant Hook as check-ceremonies\n    participant State as .context/state/\n    participant Journal as Journal files\n    participant Tpl as Message Template\n\n    CC->>Hook: stdin {session_id}\n    Hook->>Hook: Check initialized + HookPreamble\n    alt not initialized or paused\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>State: Check daily throttle marker\n    alt throttled\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Journal: Read recent files (lookback window)\n    alt no journal files\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Journal: Scan for /ctx-remember and /ctx-wrap-up\n    alt both ceremonies present\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Tpl: LoadMessage(hook, variant, fallback)\n    Note over Hook: variant: both | remember | wrapup\n    Hook-->>CC: Nudge box (missing ceremonies)\n    Hook->>Hook: NudgeAndRelay(message)\n    Hook->>State: Touch throttle marker
            ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#check-freshness","level":3,"title":"Check-Freshness","text":"

            Daily check for technology-dependent constants that may need review.

            sequenceDiagram\n    participant CC as Claude Code\n    participant Hook as check-freshness\n    participant State as .context/state/\n    participant FS as Filesystem\n    participant Tpl as Message Template\n\n    CC->>Hook: stdin {session_id}\n    Hook->>Hook: Check initialized + HookPreamble\n    alt not initialized or paused\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>State: Check daily throttle marker\n    alt throttled\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>FS: Stat tracked files (5 source files)\n    alt all files modified within 6 months\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Tpl: LoadMessage(hook, stale, {StaleFiles})\n    Hook-->>CC: Nudge box (stale file list + review URL)\n    Hook->>Hook: NudgeAndRelay(message)\n    Hook->>State: Touch throttle marker
            ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#check-journal","level":3,"title":"Check-Journal","text":"

            Daily check for unimported sessions and unenriched journal entries.

            sequenceDiagram\n    participant CC as Claude Code\n    participant Hook as check-journal\n    participant State as .context/state/\n    participant Journal as Journal dir\n    participant Claude as Claude projects dir\n    participant Tpl as Message Template\n\n    CC->>Hook: stdin {session_id}\n    Hook->>Hook: Check initialized + HookPreamble\n    alt not initialized or paused\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>State: Check daily throttle marker\n    alt throttled\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Journal: Check dir exists\n    Hook->>Claude: Check dir exists\n    alt either dir missing\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Journal: Get newest entry mtime\n    Hook->>Claude: Count .jsonl files newer than journal\n    Hook->>Journal: Count unenriched entries\n    alt unimported == 0 and unenriched == 0\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Tpl: LoadMessage(hook, variant, {counts})\n    Note over Hook: variant: both | unimported | unenriched\n    Hook-->>CC: Nudge box (counts)\n    Hook->>Hook: NudgeAndRelay(message)\n    Hook->>State: Touch throttle marker
            ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#check-knowledge","level":3,"title":"Check-Knowledge","text":"

            Daily check for knowledge file entry/line counts exceeding configured thresholds.

            sequenceDiagram\n    participant CC as Claude Code\n    participant Hook as check-knowledge\n    participant State as .context/state/\n    participant Ctx as .context/ files\n    participant RC as .ctxrc\n    participant Tpl as Message Template\n\n    CC->>Hook: stdin {session_id}\n    Hook->>Hook: Check initialized + HookPreamble\n    alt not initialized or paused\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>State: Check daily throttle marker\n    alt throttled\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>RC: Read thresholds (decisions, learnings, conventions)\n    alt all thresholds disabled (0)\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Ctx: Parse DECISIONS.md entry count\n    Hook->>Ctx: Parse LEARNINGS.md entry count\n    Hook->>Ctx: Count CONVENTIONS.md lines\n    Hook->>Hook: Compare against thresholds\n    alt all within limits\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Tpl: LoadMessage(hook, warning, {FileWarnings})\n    Hook-->>CC: Nudge box (file warnings)\n    Hook->>Hook: NudgeAndRelay(message)\n    Hook->>State: Touch throttle marker
            ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#check-map-staleness","level":3,"title":"Check-Map-Staleness","text":"

            Daily check for architecture map age and relevant code changes.

            sequenceDiagram\n    participant CC as Claude Code\n    participant Hook as check-map-staleness\n    participant State as .context/state/\n    participant Tracking as map-tracking.json\n    participant Git as git log\n    participant Tpl as Message Template\n\n    CC->>Hook: stdin {session_id}\n    Hook->>Hook: Check initialized + HookPreamble\n    alt not initialized or paused\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>State: Check daily throttle marker\n    alt throttled\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Tracking: Read map-tracking.json\n    alt missing, invalid, or opted out\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Hook: Parse LastRun date\n    alt map not stale (< N days)\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Git: Count commits touching internal/ since LastRun\n    alt no relevant commits\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Tpl: LoadMessage(hook, stale, {date, count})\n    Hook-->>CC: Nudge box (last refresh + commit count)\n    Hook->>Hook: NudgeAndRelay(message)\n    Hook->>State: Touch throttle marker
            ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#check-memory-drift","level":3,"title":"Check-Memory-Drift","text":"

            Per-session check for MEMORY.md changes since last sync.

            sequenceDiagram\n    participant CC as Claude Code\n    participant Hook as check-memory-drift\n    participant State as .context/state/\n    participant Mem as memory.Discover\n    participant Tpl as Message Template\n\n    CC->>Hook: stdin {session_id}\n    Hook->>Hook: Check initialized + HookPreamble\n    alt not initialized or paused\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>State: Check session tombstone\n    alt already nudged this session\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Mem: DiscoverMemoryPath(projectRoot)\n    alt auto memory not active\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Mem: HasDrift(contextDir, sourcePath)\n    alt no drift\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Tpl: LoadMessage(hook, nudge, fallback)\n    Hook-->>CC: Nudge box (drift reminder)\n    Hook->>Hook: NudgeAndRelay(message)\n    Hook->>State: Touch session tombstone
            ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#check-persistence","level":3,"title":"Check-Persistence","text":"

            Tracks context file modification and nudges when edits happen without persisting context. Adaptive threshold based on prompt count.

            sequenceDiagram\n    participant CC as Claude Code\n    participant Hook as check-persistence\n    participant State as .context/state/\n    participant Ctx as .context/ files\n    participant Tpl as Message Template\n\n    CC->>Hook: stdin {session_id}\n    Hook->>Hook: Check initialized + HookPreamble\n    alt not initialized or paused\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>State: Read persistence state {Count, LastNudge, LastMtime}\n    alt first prompt (no state)\n        Hook->>State: Initialize state {Count:1, LastNudge:0, LastMtime:now}\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Hook: Increment Count\n    Hook->>Ctx: Get current context mtime\n    alt context modified since LastMtime\n        Hook->>State: Reset LastNudge = Count, update LastMtime\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Hook: sinceNudge = Count - LastNudge\n    Hook->>Hook: PersistenceNudgeNeeded(Count, sinceNudge)?\n    alt threshold not reached\n        Hook->>State: Write state\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Tpl: LoadMessage(hook, nudge, vars)\n    Hook-->>CC: Nudge box (prompt count, time since last persist)\n    Hook->>Hook: NudgeAndRelay(message)\n    Hook->>State: Update LastNudge = Count, write state
            ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#check-reminders","level":3,"title":"Check-Reminders","text":"

            Per-prompt check for due reminders. No throttle.

            sequenceDiagram\n    participant CC as Claude Code\n    participant Hook as check-reminders\n    participant Store as Reminders store\n    participant Tpl as Message Template\n\n    CC->>Hook: stdin {session_id}\n    Hook->>Hook: Check initialized + HookPreamble\n    alt not initialized or paused\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Store: ReadReminders()\n    alt load error\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Hook: Filter by due date (After <= today)\n    alt no due reminders\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Tpl: LoadMessage(hook, reminders, {list})\n    Hook-->>CC: Nudge box (reminder list + dismiss hints)\n    Hook->>Hook: NudgeAndRelay(message)
            ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#check-resources","level":3,"title":"Check-Resources","text":"

            Checks system resources (memory, swap, disk, load). Fires on every prompt. No initialization required.

            sequenceDiagram\n    participant CC as Claude Code\n    participant Hook as check-resources\n    participant Sys as sysinfo\n    participant Tpl as Message Template\n\n    CC->>Hook: stdin {command, session_id}\n    Hook->>Hook: HookPreamble (parse input, check pause)\n    alt paused\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Sys: Collect snapshot (memory, swap, disk, load)\n    Hook->>Sys: Evaluate thresholds per metric\n    alt max severity < Danger\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Hook: Filter alerts to Danger level only\n    Hook->>Hook: Build alertMessages from danger alerts\n    Hook->>Tpl: LoadMessage(hook, alert, {alertMessages}, fallback)\n    Hook-->>CC: Nudge box (danger alerts)\n    Hook->>Hook: NudgeAndRelay(message)
            ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#check-version","level":3,"title":"Check-Version","text":"

            Daily binary-vs-plugin version comparison with piggybacked key rotation check.

            sequenceDiagram\n    participant CC as Claude Code\n    participant Hook as check-version\n    participant State as .context/state/\n    participant Config as Binary + Plugin version\n    participant Tpl as Message Template\n\n    CC->>Hook: stdin {session_id}\n    Hook->>Hook: Check initialized + HookPreamble\n    alt not initialized or paused\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>State: Check daily throttle marker\n    alt throttled\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Config: Read binary version\n    alt dev build\n        Hook->>State: Touch throttle\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Config: Read plugin version\n    alt plugin version not found or parse error\n        Hook->>State: Touch throttle\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Hook: Compare major.minor\n    alt versions match\n        Hook->>State: Touch throttle\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Tpl: LoadMessage(hook, mismatch, {versions})\n    Hook-->>CC: Nudge box (version mismatch)\n    Hook->>Hook: NudgeAndRelay(message)\n    Hook->>State: Touch throttle\n    Hook->>Hook: CheckKeyAge() (piggybacked)
            ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#heartbeat","level":3,"title":"Heartbeat","text":"

            Silent per-prompt pulse. Tracks prompt count, context modification, and token usage. The agent never sees this hook's output.

            sequenceDiagram\n    participant CC as Claude Code\n    participant Hook as heartbeat\n    participant State as .context/state/\n    participant Ctx as .context/ files\n    participant Notify as Webhook + EventLog\n\n    CC->>Hook: stdin {session_id}\n    Hook->>Hook: Check initialized + HookPreamble\n    alt not initialized or paused\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>State: Increment heartbeat counter\n    Hook->>Ctx: Get latest context file mtime\n    Hook->>State: Compare with last recorded mtime\n    Hook->>State: Update mtime record\n    Hook->>State: Read session token info\n    Hook->>Notify: Send heartbeat notification\n    Hook->>Notify: Append to event log\n    Hook->>State: Write heartbeat log entry\n    Note over Hook: No stdout - agent never sees this
            ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#project-local-hooks","level":2,"title":"Project-Local Hooks","text":"

            These hooks are configured in settings.local.json and are not shipped with ctx. They are specific to individual developer setups.

            ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#block-dangerous-commands","level":3,"title":"Block-Dangerous-Commands","text":"

            Lifecycle: PreToolUse. Matcher: Bash

            Blocks dangerous shell patterns (sudo, git push, cp to bin). No initialization or pause checks: always active.

            sequenceDiagram\n    participant CC as Claude Code\n    participant Hook as block-dangerous-commands\n    participant Tpl as Message Template\n\n    CC->>Hook: stdin {command, session_id}\n    Hook->>Hook: Extract command\n    alt command empty\n        Hook-->>CC: (silent exit)\n    end\n    Note over Hook: Cascade: first matching regex wins\n    Hook->>Hook: Test MidSudo regex\n    alt match\n        Hook->>Hook: variant = sudo\n    end\n    Hook->>Hook: Test MidGitPush regex (if no variant)\n    alt match\n        Hook->>Hook: variant = git-push\n    end\n    Hook->>Hook: Test CpMvToBin regex (if no variant)\n    alt match\n        Hook->>Hook: variant = cp-to-bin\n    end\n    Hook->>Hook: Test InstallToLocalBin regex (if no variant)\n    alt match\n        Hook->>Hook: variant = install-to-bin\n    end\n    alt no variant matched\n        Hook-->>CC: (silent exit)\n    end\n    Hook->>Tpl: LoadMessage(hook, variant, fallback)\n    Hook-->>CC: JSON {decision: BLOCK, reason}\n    Hook->>Hook: NudgeAndRelay(message)
            ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#throttling-summary","level":2,"title":"Throttling Summary","text":"Hook Lifecycle Throttle Type Scope context-load-gate PreToolUse One-shot marker Per session block-non-path-ctx PreToolUse None Every match qa-reminder PreToolUse None Every git command specs-nudge PreToolUse None Every prompt post-commit PostToolUse None Every git commit check-task-completion PostToolUse Configurable interval Per session check-context-size UserPromptSubmit Adaptive counter Per session check-ceremonies UserPromptSubmit Daily marker Once per day check-freshness UserPromptSubmit Daily marker Once per day check-journal UserPromptSubmit Daily marker Once per day check-knowledge UserPromptSubmit Daily marker Once per day check-map-staleness UserPromptSubmit Daily marker Once per day check-memory-drift UserPromptSubmit Session tombstone Once per session check-persistence UserPromptSubmit Adaptive counter Per session check-reminders UserPromptSubmit None Every prompt check-resources UserPromptSubmit None Every prompt check-version UserPromptSubmit Daily marker Once per day heartbeat UserPromptSubmit None Every prompt block-dangerous-commands PreToolUse * None Every match

            * Project-local hook (settings.local.json), not shipped with ctx.

            ","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hook-sequence-diagrams/#state-file-reference","level":2,"title":"State File Reference","text":"

            All state files live in .context/state/.

            File Pattern Hook Purpose ctx-loaded-{session} context-load-gate One-shot injection marker ctx-paused-{session} (all) Session pause marker ctx-wrapped-up check-context-size Suppress nudges after wrap-up (2h expiry) freshness-checked check-freshness Daily throttle ceremony-reminded check-ceremonies Daily throttle journal-reminded check-journal Daily throttle knowledge-reminded check-knowledge Daily throttle map-staleness-reminded check-map-staleness Daily throttle version-checked check-version Daily throttle memory-drift-nudged-{session} check-memory-drift Per-session tombstone ctx-context-count-{session} check-context-size Prompt counter stats-{session}.jsonl check-context-size Session stats log persist-{session} check-persistence Counter + mtime state ctx-task-count-{session} check-task-completion Prompt counter heartbeat-count-{session} heartbeat Prompt counter heartbeat-mtime-{session} heartbeat Last context mtime","path":["Hook Sequence Diagrams"],"tags":[]},{"location":"recipes/hub-cluster/","level":1,"title":"HA Cluster","text":"","path":["Recipes","Hub","HA Cluster"],"tags":[]},{"location":"recipes/hub-cluster/#ctx-hub-high-availability-cluster","level":1,"title":"ctx Hub: High-Availability Cluster","text":"

            Run multiple hub nodes with Raft-based leader election for redundancy. Any follower can take over if the leader dies.

            This recipe assumes you've read the ctx Hub overview and the Multi-machine setup. HA only makes sense in the \"small trusted team\" story; a personal cross-project brain on one workstation does not need three Raft peers.

            Raft-Lite

            ctx uses Raft only for leader election, not for data consensus. Entry replication happens via sequence-based gRPC sync on the append-only JSONL store. This is simpler than full Raft log replication and is possible because the store is append-only and clients are idempotent. The implication: a write accepted by the leader is durable on the leader immediately; followers catch up asynchronously. If the leader crashes between accepting a write and replicating it, that write can be lost. Do not use the hub as a bank ledger.

            ","path":["Recipes","Hub","HA Cluster"],"tags":[]},{"location":"recipes/hub-cluster/#topology","level":2,"title":"Topology","text":"

            A minimum HA cluster is three nodes. Two is worse than one: it doubles failure probability without providing quorum.

                     +-------------+\n         |  client(s)  |\n         +------+------+\n                |\n    +-----------+-----------+\n    |           |           |\n+---v---+   +---v---+   +---v---+\n| hub A |   | hub B |   | hub C |\n| :9900 |   | :9900 |   | :9900 |\n+-------+   +-------+   +-------+\n    ^           ^           ^\n    +-----------+-----------+\n        Raft (leader election)\n        gRPC (data sync)\n
            ","path":["Recipes","Hub","HA Cluster"],"tags":[]},{"location":"recipes/hub-cluster/#step-1-bootstrap-the-first-node","level":2,"title":"Step 1: Bootstrap the First Node","text":"
            ctx hub start --daemon \\\n  --port 9900 \\\n  --peers hub-b.lan:9900,hub-c.lan:9900\n

            The node starts a Raft election as soon as it sees its peers.

            ","path":["Recipes","Hub","HA Cluster"],"tags":[]},{"location":"recipes/hub-cluster/#step-2-start-the-other-nodes","level":2,"title":"Step 2: Start the Other Nodes","text":"

            On hub-b.lan:

            ctx hub start --daemon \\\n  --port 9900 \\\n  --peers hub-a.lan:9900,hub-c.lan:9900\n

            On hub-c.lan:

            ctx hub start --daemon \\\n  --port 9900 \\\n  --peers hub-a.lan:9900,hub-b.lan:9900\n

            After a few seconds, one node wins the election and becomes the leader. The other two are followers.

            ","path":["Recipes","Hub","HA Cluster"],"tags":[]},{"location":"recipes/hub-cluster/#step-3-verify-cluster-state","level":2,"title":"Step 3: Verify Cluster State","text":"

            From any node:

            ctx hub status\n

            Expected output:

            role:       leader\npeers:      hub-a.lan:9900 (leader)\n            hub-b.lan:9900 (follower, in-sync)\n            hub-c.lan:9900 (follower, in-sync)\nentries:    1248\nuptime:     3h42m\n
            ","path":["Recipes","Hub","HA Cluster"],"tags":[]},{"location":"recipes/hub-cluster/#step-4-register-clients-with-failover-peers","level":2,"title":"Step 4: Register Clients with Failover Peers","text":"

            When registering a client, give it the full peer list:

            ctx connection register hub-a.lan:9900 \\\n  --token ctx_adm_... \\\n  --peers hub-b.lan:9900,hub-c.lan:9900\n

            If the leader becomes unreachable, the client reconnects to the next peer. Followers redirect to the current leader, so writes always land on the right node.

            ","path":["Recipes","Hub","HA Cluster"],"tags":[]},{"location":"recipes/hub-cluster/#runtime-membership-changes","level":2,"title":"Runtime Membership Changes","text":"

            Add a new peer without downtime:

            ctx hub peer add hub-d.lan:9900\n

            Remove a decommissioned peer:

            ctx hub peer remove hub-c.lan:9900\n
            ","path":["Recipes","Hub","HA Cluster"],"tags":[]},{"location":"recipes/hub-cluster/#planned-maintenance","level":2,"title":"Planned Maintenance","text":"

            Before taking a leader offline, hand off leadership:

            ssh hub-a.lan 'ctx hub stepdown'\n

            stepdown triggers a new election among the remaining followers before the leader goes offline. In-flight clients briefly pause, then reconnect to the new leader.

            ","path":["Recipes","Hub","HA Cluster"],"tags":[]},{"location":"recipes/hub-cluster/#failure-modes-at-a-glance","level":2,"title":"Failure Modes at a Glance","text":"Event What happens Leader crashes New election; clients reconnect to new leader Follower crashes No write impact; catches up on restart Network partition (majority) Majority side keeps serving; minority read-only Network partition (split) No quorum; all nodes read-only Disk full on leader Writes rejected; read traffic continues

            For the full list, see Hub failure modes.

            ","path":["Recipes","Hub","HA Cluster"],"tags":[]},{"location":"recipes/hub-cluster/#see-also","level":2,"title":"See Also","text":"
            • Multi-machine recipe: single-node deployment
            • Hub operations: backup and maintenance
            • Hub security model: TLS, tokens
            ","path":["Recipes","Hub","HA Cluster"],"tags":[]},{"location":"recipes/hub-getting-started/","level":1,"title":"Getting Started","text":"","path":["Recipes","Hub","Getting Started"],"tags":[]},{"location":"recipes/hub-getting-started/#ctx-hub-getting-started","level":1,"title":"ctx Hub: Getting Started","text":"

            Stand up a single-node ctx Hub on localhost, register two projects, publish a decision from one, and see it appear in the other, all in under five minutes.

            Read This First

            If you haven't already, skim the ctx Hub overview. It explains the mental model, names the two user stories (personal vs small team), and (importantly) lists what the hub does not do. This recipe assumes you already know you want the feature.

            ","path":["Recipes","Hub","Getting Started"],"tags":[]},{"location":"recipes/hub-getting-started/#what-youll-get-out-of-this-recipe","level":2,"title":"What You'll Get out of This Recipe","text":"

            By the end, you will have:

            1. A local hub process running on port 9900.
            2. Two project directories both registered with the ctx Hub.
            3. A decision published from project alpha that appears automatically in project beta's .context/hub/ and in ctx agent --include-hub output.

            Concretely, the payoff this unlocks: a lesson you record in one project becomes visible to your agent the next time you open another project, without touching local files in the second project or opening another editor window.

            ","path":["Recipes","Hub","Getting Started"],"tags":[]},{"location":"recipes/hub-getting-started/#what-this-recipe-does-not-cover","level":2,"title":"What This Recipe Does Not Cover","text":"
            • Sharing .context/journal/, .context/pad, or any other local state. The hub only fans out decision, learning, convention, and task entries. Everything else stays local.
            • Multi-user attribution. The hub identifies projects, not people.
            • Running over a LAN; see Multi-machine setup.
            • Redundancy; see HA cluster.
            ","path":["Recipes","Hub","Getting Started"],"tags":[]},{"location":"recipes/hub-getting-started/#prerequisites","level":2,"title":"Prerequisites","text":"
            • ctx installed and on PATH
            • Two project directories, each already initialized with ctx init
            ","path":["Recipes","Hub","Getting Started"],"tags":[]},{"location":"recipes/hub-getting-started/#step-1-start-the-hub","level":2,"title":"Step 1: Start the Hub","text":"

            In a dedicated terminal:

            ctx hub start\n

            On first run, the hub generates an admin token and prints it to stdout. Copy it; you'll need it for each project registration:

            ctx hub listening on :9900\nadmin token: ctx_adm_7f3a1c2d...\ndata dir: ~/.ctx/hub-data/\n

            The admin token is written to ~/.ctx/hub-data/admin.token so you can recover it later. Treat it like a password.

            ","path":["Recipes","Hub","Getting Started"],"tags":[]},{"location":"recipes/hub-getting-started/#step-2-register-the-first-project","level":2,"title":"Step 2: Register the First Project","text":"
            cd ~/projects/alpha\nctx connection register localhost:9900 --token ctx_adm_7f3a1c2d...\n

            This stores an encrypted connection config in .context/.connect.enc. The admin token is exchanged for a per-project client token; the admin token itself is never persisted in the project.

            ","path":["Recipes","Hub","Getting Started"],"tags":[]},{"location":"recipes/hub-getting-started/#step-3-choose-what-to-receive","level":2,"title":"Step 3: Choose What to Receive","text":"
            ctx connection subscribe decision learning convention\n

            Only the entry types you subscribe to will be delivered by sync and listen.

            ","path":["Recipes","Hub","Getting Started"],"tags":[]},{"location":"recipes/hub-getting-started/#step-4-publish-a-decision","level":2,"title":"Step 4: Publish a Decision","text":"

            Either use ctx add --share to write locally and push to the ctx Hub:

            ctx add decision \"Use UTC timestamps everywhere\" --share \\\n  --context \"We had timezone drift between the API and journal\" \\\n  --rationale \"Single source of truth avoids conversion bugs\" \\\n  --consequence \"The UI does conversion at render time\"\n

            Or publish an existing entry directly:

            ctx connection publish decision \"Use UTC timestamps everywhere\"\n
            ","path":["Recipes","Hub","Getting Started"],"tags":[]},{"location":"recipes/hub-getting-started/#step-5-register-a-second-project-and-sync","level":2,"title":"Step 5: Register a Second Project and Sync","text":"
            cd ~/projects/beta\nctx connection register localhost:9900 --token ctx_adm_7f3a1c2d...\nctx connection subscribe decision learning convention\nctx connection sync\n

            The decision from alpha now appears in ~/projects/beta/.context/hub/decisions.md with an origin tag and timestamp.

            ","path":["Recipes","Hub","Getting Started"],"tags":[]},{"location":"recipes/hub-getting-started/#step-6-watch-entries-arrive-live","level":2,"title":"Step 6: Watch Entries Arrive Live","text":"

            Instead of re-running sync, stream new entries as they land:

            ctx connection listen\n

            Leave this running in a terminal; every --share publish from any registered project will appear in .context/hub/ immediately.

            ","path":["Recipes","Hub","Getting Started"],"tags":[]},{"location":"recipes/hub-getting-started/#step-7-feed-shared-knowledge-into-the-agent","level":2,"title":"Step 7: Feed Shared Knowledge into the Agent","text":"

            Once entries exist in .context/hub/, include them in the agent context packet:

            ctx agent --include-hub\n

            Shared entries are added as a dedicated tier in the budget-aware assembly, scored by recency and type relevance.

            ","path":["Recipes","Hub","Getting Started"],"tags":[]},{"location":"recipes/hub-getting-started/#auto-sync-on-session-start","level":2,"title":"Auto-Sync on Session Start","text":"

            After register, the check-hub-sync hook pulls new entries at the start of each session (daily throttled). Most users never need to call ctx connection sync manually.

            ","path":["Recipes","Hub","Getting Started"],"tags":[]},{"location":"recipes/hub-getting-started/#where-to-go-next","level":2,"title":"Where to Go Next","text":"
            • Multi-machine hub: run the hub on a LAN host and connect from other workstations.
            • HA cluster: Raft-based leader election for high availability.
            • Hub operations: daemon mode, backup, log rotation, JSONL store layout.
            • Hub security model: token lifecycle, encryption at rest, threat model.
            • ctx connect reference and ctx hub start reference.
            ","path":["Recipes","Hub","Getting Started"],"tags":[]},{"location":"recipes/hub-multi-machine/","level":1,"title":"Multi-Machine","text":"","path":["Recipes","Hub","Multi-Machine"],"tags":[]},{"location":"recipes/hub-multi-machine/#ctx-hub-multi-machine","level":1,"title":"ctx Hub: Multi-Machine","text":"

            Run the hub on a LAN host and connect from project directories on other workstations. This recipe is the Story 2 (\"small trusted team\") shape described in the ctx Hub overview; read that first if you haven't, especially the trust-model warnings.

            This recipe assumes you've already walked through Getting Started and understand what flows through the hub (decisions, learnings, conventions, tasks, not journals, scratchpad, or raw context files).

            ","path":["Recipes","Hub","Multi-Machine"],"tags":[]},{"location":"recipes/hub-multi-machine/#topology","level":2,"title":"Topology","text":"
            +------------------+        +------------------+\n| workstation A    |        | workstation B    |\n|  ~/projects/x    |        |  ~/projects/y    |\n|  ctx connection  |        |  ctx connection  |\n+---------+--------+        +---------+--------+\n          |                           |\n          +-----------+   +-----------+\n                      v   v\n              +-------------------+\n              | LAN host \"nexus\"  |\n              | ctx hub start     |\n              | --daemon          |\n              | :9900             |\n              +-------------------+\n
            ","path":["Recipes","Hub","Multi-Machine"],"tags":[]},{"location":"recipes/hub-multi-machine/#step-1-start-the-daemon-on-the-lan-host","level":2,"title":"Step 1: Start the Daemon on the LAN Host","text":"

            On the machine that will hold the hub (call it nexus):

            ctx hub start --daemon --port 9900\n

            The daemon writes a PID file to ~/.ctx/hub-data/hub.pid. Stop it later with:

            ctx hub stop\n
            ","path":["Recipes","Hub","Multi-Machine"],"tags":[]},{"location":"recipes/hub-multi-machine/#step-2-firewall-and-port","level":2,"title":"Step 2: Firewall and Port","text":"

            Open port 9900/tcp on nexus to the LAN only. Never expose the hub to the public internet without a reverse proxy and TLS in front of it (see Hub security model).

            Typical LAN allowlist rules:

            firewalldufwnftables
            sudo firewall-cmd --zone=internal \\\n  --add-port=9900/tcp --permanent\nsudo firewall-cmd --reload\n
            sudo ufw allow from 192.168.1.0/24 to any port 9900 proto tcp\n
            sudo nft add rule inet filter input ip saddr 192.168.1.0/24 \\\n  tcp dport 9900 accept\n
            ","path":["Recipes","Hub","Multi-Machine"],"tags":[]},{"location":"recipes/hub-multi-machine/#step-3-retrieve-the-admin-token","level":2,"title":"Step 3: Retrieve the Admin Token","text":"

            The daemon prints the admin token to stdout on first run. When running as a daemon, that output goes to the log instead:

            cat ~/.ctx/hub-data/admin.token\n

            Copy the token over a trusted channel (SSH, password manager, or an encrypted note). Do not email it or put it in chat.

            ","path":["Recipes","Hub","Multi-Machine"],"tags":[]},{"location":"recipes/hub-multi-machine/#step-4-register-projects-from-each-workstation","level":2,"title":"Step 4: Register Projects from Each Workstation","text":"

            On workstation A:

            cd ~/projects/x\nctx connection register nexus.local:9900 --token ctx_adm_...\nctx connection subscribe decision learning convention\n

            On workstation B:

            cd ~/projects/y\nctx connection register nexus.local:9900 --token ctx_adm_...\nctx connection subscribe decision learning convention\n

            Each registration exchanges the admin token for a per-project client token. Only the client token is persisted in .context/.connect.enc, encrypted with the same AES-256-GCM scheme ctx uses for notification credentials.

            ","path":["Recipes","Hub","Multi-Machine"],"tags":[]},{"location":"recipes/hub-multi-machine/#step-5-verify","level":2,"title":"Step 5: Verify","text":"

            From either workstation:

            ctx connection status\n

            You should see the ctx Hub address, role (leader for single-node), subscription filters, and the sequence number you're synced to.

            ","path":["Recipes","Hub","Multi-Machine"],"tags":[]},{"location":"recipes/hub-multi-machine/#tls-recommended","level":2,"title":"TLS (Recommended)","text":"

            For anything beyond a trusted home LAN, terminate TLS in front of the hub. The hub speaks gRPC, so the reverse proxy must speak HTTP/2:

            server {\n    listen 443 ssl http2;\n    server_name nexus.example.com;\n\n    ssl_certificate     /etc/letsencrypt/live/nexus.example.com/fullchain.pem;\n    ssl_certificate_key /etc/letsencrypt/live/nexus.example.com/privkey.pem;\n\n    location / {\n        grpc_pass grpc://127.0.0.1:9900;\n    }\n}\n

            Point ctx connection register at the public hostname and port 443.

            ","path":["Recipes","Hub","Multi-Machine"],"tags":[]},{"location":"recipes/hub-multi-machine/#handling-daemon-restarts","level":2,"title":"Handling Daemon Restarts","text":"

            The hub is append-only JSONL, so restarts are safe. Clients keep their last-seen sequence in .context/hub/.sync-state.json and pick up exactly where they left off on the next sync or listen reconnect.

            ","path":["Recipes","Hub","Multi-Machine"],"tags":[]},{"location":"recipes/hub-multi-machine/#see-also","level":2,"title":"See Also","text":"
            • HA cluster recipe: for redundancy
            • Hub operations: backup, rotation
            • Hub failure modes
            • Hub security model
            ","path":["Recipes","Hub","Multi-Machine"],"tags":[]},{"location":"recipes/hub-overview/","level":1,"title":"Overview","text":"","path":["Recipes","Hub","Overview"],"tags":[]},{"location":"recipes/hub-overview/#ctx-hub-overview","level":1,"title":"ctx Hub: Overview","text":"

            Start here before the other hub recipes. This page answers what the hub is, who it's for, why you'd run one, and, equally important, what it is not.

            ","path":["Recipes","Hub","Overview"],"tags":[]},{"location":"recipes/hub-overview/#mental-model-in-one-paragraph","level":2,"title":"Mental Model in One Paragraph","text":"

            The hub is a fan-out channel for structured knowledge entries across projects. When you publish a decision, learning, convention, or task with --share, the hub stores it in an append-only log and delivers it to every other project subscribed to that type. The next time your agent loads context in any of those projects, shared entries can be included in the context packet alongside local ones.

            That's the whole feature. It is a project-to-project knowledge bus for a small, curated set of entry types. It is not a shared memory, a shared journal, or a multi-user database.

            ","path":["Recipes","Hub","Overview"],"tags":[]},{"location":"recipes/hub-overview/#what-flows-through-the-hub","level":2,"title":"What Flows through the Hub","text":"

            Only four entry types:

            Type What it is decision Architectural decisions with rationale learning Gotchas, lessons, surprising behaviors convention Coding patterns and standards task Work items worth sharing across projects

            Each entry is an immutable record with a content blob, the publishing project's name as Origin, a timestamp, and a hub-assigned sequence number. Once published, entries are never rewritten.

            ","path":["Recipes","Hub","Overview"],"tags":[]},{"location":"recipes/hub-overview/#what-does-not-flow-through-the-hub","level":2,"title":"What Does Not Flow through the Hub","text":"

            This is the part new users get wrong most often:

            • Session journals (~/.claude/ logs, .context/journal/) stay local. The hub does not sync your AI session history.
            • Scratchpad (.context/pad) stays local. Encrypted notes never leave the machine they were written on.
            • Local context files as a whole (TASKS.md, DECISIONS.md, LEARNINGS.md, CONVENTIONS.md) are not mirrored wholesale. Only entries you explicitly --share, or publish later with ctx connection publish, cross the boundary.
            • Anything under .context/ that isn't one of the four entry types above. Configuration, state, logs, memory, journal metadata: all local.

            If you were expecting \"now my agent in project B can see everything my agent did in project A,\" that's not this feature. Local session history still lives on the local machine.

            ","path":["Recipes","Hub","Overview"],"tags":[]},{"location":"recipes/hub-overview/#two-user-stories","level":2,"title":"Two User Stories","text":"

            The hub makes sense in two different shapes. Pick the one that matches your situation; the mechanics are identical but the trust model and threat surface are very different.

            ","path":["Recipes","Hub","Overview"],"tags":[]},{"location":"recipes/hub-overview/#story-1-personal-cross-project-brain","level":3,"title":"Story 1: Personal Cross-Project Brain","text":"

            One developer, many projects, one hub, usually on localhost.

            You're working across several projects on the same machine (or a handful of machines you own). You want a lesson learned debugging project A to show up when you open project B a week later, without re-discovering it. You want a convention you codified in one project to be visible as-you-type in another.

            Concrete payoff:

            • ctx add learning --share \"...\" in project A → ctx agent --include-hub in project B shows that learning in the next context packet.
            • A decision recorded in your personal \"dotfiles\" project is instantly visible to every other project on your workstation.
            • Cross-project conventions (e.g., \"use UTC timestamps everywhere\") live in one place and propagate.

            Trust model: high, because you trust every participant since every participant is you. Run the hub on localhost or on your own LAN, use the default single-node setup, don't worry about TLS.

            Start here: Getting Started for the one-time setup, then Personal cross-project brain for the day-to-day workflow.

            ","path":["Recipes","Hub","Overview"],"tags":[]},{"location":"recipes/hub-overview/#story-2-small-trusted-team","level":3,"title":"Story 2: Small Trusted Team","text":"

            A few teammates, projects they each own, one hub on a LAN host they all trust.

            Your team has a handful of services and you want a shared \"things we've learned the hard way\" stream. Someone on the platform team records a convention about timestamp handling; everyone else's agents see it the next session. An on-call engineer records a learning from a 3 AM incident; the rest of the team inherits the lesson without needing to read the postmortem.

            Concrete payoff:

            • Team conventions propagate without needing a wiki or chat.
            • Lessons from one team member become available to everyone else's agent context packets automatically.
            • Cross-project decisions (shared libraries, deployment patterns, naming rules) live in a single log the whole team reads.

            Trust model: the hub assumes everyone holding a client token is friendly. There is no per-user attribution you can rely on, Origin is self-asserted by the publishing client, and there is no read ACL beyond the subscription filter. Treat the hub like a team wiki: useful because everyone can write to it, not because it can prove who wrote what.

            Operational shape: run the hub on a LAN host (or a three-node HA cluster for redundancy), put TLS in front of it for anything beyond a home LAN, distribute client tokens over a trusted channel.

            Start here: Multi-machine setup for the deployment, Team knowledge bus for the day-to-day team workflow, then HA cluster if you need redundancy.

            ","path":["Recipes","Hub","Overview"],"tags":[]},{"location":"recipes/hub-overview/#identity-projects-not-users","level":2,"title":"Identity: Projects, Not Users","text":"

            The hub has no concept of users. Its unit of identity is the project. ctx connection register binds a hub token to a project directory, not to a person. Two developers working on the same project share either:

            • The same .connect.enc, copied between machines over a trusted channel, or
            • Different project names (alpha@laptop-a, alpha@laptop-b), because the hub rejects duplicate registrations of the same project name.

            Either works; neither gives you per-human attribution. If you need \"who wrote this,\" the hub is the wrong tool.

            ","path":["Recipes","Hub","Overview"],"tags":[]},{"location":"recipes/hub-overview/#when-not-to-use-it","level":2,"title":"When Not to Use It","text":"
            • Solo, single-project work. Local .context/ files are enough. The hub adds operational surface for no payoff.
            • Untrusted participants. The hub assumes everyone with a client token is friendly. It is not hardened against hostile insiders or compromised tokens.
            • Compliance-sensitive environments. There is no audit trail that can prove who published what, only which project published what, and Origin is self-asserted.
            • Secrets or PII. Entry content is stored plaintext on the hub and fanned out to every subscribed client. Don't publish anything you wouldn't paste in a team chat.
            • Wholesale journal sharing. See \"what does not flow\" above. If that's what you want, this feature won't provide it. Talk to us in the issue tracker about what would.
            ","path":["Recipes","Hub","Overview"],"tags":[]},{"location":"recipes/hub-overview/#how-entries-reach-your-agent","level":2,"title":"How Entries Reach Your Agent","text":"

            Once a project is registered and subscribed, entries arrive by three mechanisms:

            1. ctx connection sync: an on-demand pull, replays everything new since the last sequence you saw.
            2. ctx connection listen: a long-lived gRPC stream that writes new entries to .context/hub/ as they arrive.
            3. check-hub-sync hook: runs at session start, daily throttled, so most users never call sync manually.

            Once entries exist in .context/hub/, ctx agent --include-hub adds a dedicated tier to the budget-aware context packet, scored by recency and type relevance. That's the end of the pipeline.

            ","path":["Recipes","Hub","Overview"],"tags":[]},{"location":"recipes/hub-overview/#where-to-go-next","level":2,"title":"Where to Go Next","text":"If you're… Read Trying it for yourself on one machine Getting Started A solo developer using the hub day-to-day Personal cross-project brain Setting up for a small team on a LAN Multi-machine setup A small team using the hub day-to-day Team knowledge bus Running redundant nodes HA cluster Operating a hub in production Operations Assessing the security posture Security model Debugging a hub in trouble Failure modes Just reading the commands ctx connect, ctx serve, ctx hub","path":["Recipes","Hub","Overview"],"tags":[]},{"location":"recipes/hub-personal/","level":1,"title":"Personal Cross-Project Brain","text":"","path":["Recipes","Hub","Personal Cross-Project Brain"],"tags":[]},{"location":"recipes/hub-personal/#personal-cross-project-brain","level":1,"title":"Personal Cross-Project Brain","text":"

            This recipe shows how one developer uses a ctx Hub across their own projects day-to-day, the \"Story 1\" shape from the Hub overview. You're not setting up infrastructure for a team; you're making a lesson you learned last Tuesday in project A automatically surface when you open project B next Thursday.

            Prerequisites: a working ctx Hub on localhost (see Getting Started for the roughly five-minute setup). This recipe assumes the hub is already running and you've registered at least two projects.

            ","path":["Recipes","Hub","Personal Cross-Project Brain"],"tags":[]},{"location":"recipes/hub-personal/#the-core-loop","level":2,"title":"The Core Loop","text":"

            Every day, the same three verbs matter:

            1. Record: notice a decision, learning, or convention and capture it with ctx add --share.
            2. Subscribe: every project you care about is subscribed to the types you want delivered (set once with ctx connection subscribe).
            3. Load: your agent picks up shared entries on next session start via the auto-sync hook, or explicitly via ctx agent --include-hub.

            That's the whole workflow. The rest of this recipe fills in the concrete moments where each verb matters.

            ","path":["Recipes","Hub","Personal Cross-Project Brain"],"tags":[]},{"location":"recipes/hub-personal/#a-realistic-day","level":2,"title":"A Realistic Day","text":"

            You have three projects on your workstation:

            • ~/projects/api, a Go service you're actively developing
            • ~/projects/cli, a companion CLI that consumes the API
            • ~/projects/dotfiles, your personal conventions and cross-project learnings

            All three are registered with a single hub running on localhost:9900 (started once at boot, or via a systemd user unit; see Hub operations). All three subscribe to decision, learning, and convention.

            ","path":["Recipes","Hub","Personal Cross-Project Brain"],"tags":[]},{"location":"recipes/hub-personal/#0900-start-work-on-api","level":3,"title":"09:00 - Start Work on api","text":"

            You cd ~/projects/api and start a Claude Code session. Behind the scenes, the plugin's PreToolUse hook calls ctx agent --budget 8000 --include-hub before the first tool call. The agent loads:

            • Local .context/ (TASKS, DECISIONS, LEARNINGS, etc.)
            • Foundation steering files (always-inclusion)
            • Everything you've shared from the other two projects

            So the \"use UTC timestamps everywhere\" decision you recorded in dotfiles last week is already in Claude's context for this session, without any manual sync.

            ","path":["Recipes","Hub","Personal Cross-Project Brain"],"tags":[]},{"location":"recipes/hub-personal/#1030-you-discover-a-gotcha","level":3,"title":"10:30 - You Discover a Gotcha","text":"

            While debugging, you find that the API's retry loop silently drops the last error when the transport times out. This is the kind of thing you'd normally add to LEARNINGS.md in api/. But it's useful across every Go service you'll ever write, not just this one. So:

            ctx add learning --share \\\n  --context \"Go http.Client retries mask the final error\" \\\n  --lesson  \"Transport timeouts don't surface as errors when the retry loop re-assigns err without wrapping. Check for context.DeadlineExceeded on the request context instead.\" \\\n  --application \"Any retry loop over http.Client.Do that uses a per-attempt timeout\"\n

            The --share flag does two things:

            1. Writes the learning to api/.context/LEARNINGS.md locally (as a normal ctx add learning would).
            2. Publishes the same entry to the ctx Hub, which stores it in the append-only JSONL and fans it out to every subscribed client.

            Within seconds, cli/.context/hub/learnings.md and dotfiles/.context/hub/learnings.md both contain a copy of this learning (the ctx connection listen daemon picks it up from the ctx Hub's Listen stream).

            ","path":["Recipes","Hub","Personal Cross-Project Brain"],"tags":[]},{"location":"recipes/hub-personal/#1200-you-switch-to-cli","level":3,"title":"12:00 - You Switch to cli","text":"

            cd ~/projects/cli, open a new session. The agent packet for cli now includes the learning you just recorded in api, because cli is subscribed to learning and the entry has already been synced into cli/.context/hub/learnings.md.

            You don't have to re-explain the retry-loop gotcha. Claude already sees it.

            ","path":["Recipes","Hub","Personal Cross-Project Brain"],"tags":[]},{"location":"recipes/hub-personal/#1400-you-codify-a-convention","level":3,"title":"14:00 - You Codify a Convention","text":"

            You've been writing error messages in api and decided you want a consistent pattern: lowercase start, no trailing period, single-sentence. This is a convention, not a decision; it applies to every Go project you touch. Record it in dotfiles (since that's your \"personal standards\" project), and share it:

            cd ~/projects/dotfiles\nctx add convention --share \\\n  \"Error messages: lowercase start, no trailing period, single sentence (follows Go's stdlib style)\"\n

            The convention lands in dotfiles/CONVENTIONS.md locally and fans out to api and cli via the hub. The next Claude Code session in either project gets the convention injected into the steering-adjacent slot of the agent packet.

            ","path":["Recipes","Hub","Personal Cross-Project Brain"],"tags":[]},{"location":"recipes/hub-personal/#1630-end-of-day","level":3,"title":"16:30 - End of Day","text":"

            You didn't run ctx connection sync once. You didn't git push anything between projects. You didn't have to remember to tell your agent about the retry-loop gotcha in the new project. The hub did all of it for you.

            ","path":["Recipes","Hub","Personal Cross-Project Brain"],"tags":[]},{"location":"recipes/hub-personal/#what-the-workflow-actually-looks-like","level":2,"title":"What the Workflow Actually Looks Like","text":"

            Stripped of prose, the day's commands were:

            # Morning: nothing. Agent loads --include-hub automatically.\n\n# Mid-morning: record a learning that should cross projects\nctx add learning --share \\\n  --context \"...\" --lesson \"...\" --application \"...\"\n\n# Afternoon: codify a convention in the \"standards\" project\nctx add convention --share \"...\"\n\n# Evening: nothing. Everything's already propagated.\n

            The hub is passive infrastructure. You never talk to it directly; you talk through it by using --share on commands you were already running.

            ","path":["Recipes","Hub","Personal Cross-Project Brain"],"tags":[]},{"location":"recipes/hub-personal/#tips-for-solo-use","level":2,"title":"Tips for Solo Use","text":"

            Pick a \"standards\" project. One of your projects should play the role of \"canonical source for rules you want everywhere.\" Your dotfiles, a personal scratch repo, or a dedicated ctx-standards project all work. Record cross-cutting conventions there and let the hub propagate them to everything else.

            Subscribe to task only if you want cross-project todos. The four subscribable types are decision, learning, convention, task. Tasks are usually project-local; subscribing makes every hub-shared task from every project show up in every other project's agent packet. That's probably not what you want. Skip task in ctx connection subscribe unless you have a specific reason.

            Run the hub as a user-level daemon so you don't have to remember to start it. On Linux with systemd:

            # ~/.config/systemd/user/ctx-hub.service\n[Unit]\nDescription=ctx Hub (personal)\n\n[Service]\nType=simple\nExecStart=/usr/local/bin/ctx hub start\nRestart=on-failure\n\n[Install]\nWantedBy=default.target\n
            systemctl --user enable --now ctx-hub.service\n

            Don't overthink subscription filters. For personal use, subscribe every project to all four types at first (or three, if you skip task). Tune later if the context packets get noisy.

            Local storage is fine; no TLS needed. The hub runs on localhost. No one else is on the network. Skip the TLS setup from the Multi-machine recipe; it's relevant when the hub is on a LAN host serving multiple workstations, not when it's a personal daemon.

            ","path":["Recipes","Hub","Personal Cross-Project Brain"],"tags":[]},{"location":"recipes/hub-personal/#what-this-recipe-is-not","level":2,"title":"What This Recipe Is Not","text":"

            Not a setup guide. For the one-time hub install and project registration, use Getting Started.

            Not a team guide. If you're sharing across humans, not just across your own projects, read Team knowledge bus instead; the trust model and operational concerns are different.

            Not production operations. For backup, log rotation, failure recovery, and HA, see Hub operations and Hub failure modes.

            ","path":["Recipes","Hub","Personal Cross-Project Brain"],"tags":[]},{"location":"recipes/hub-personal/#see-also","level":2,"title":"See Also","text":"
            • Hub overview: when to use the Hub and when not to.
            • Team knowledge bus: the multi-human companion recipe.
            • ctx connect: the client-side commands used above (subscribe, publish, sync, listen, status).
            • ctx add: the --share flag reference.
            • ctx hub: operator commands for starting, stopping, and inspecting the hub.
            ","path":["Recipes","Hub","Personal Cross-Project Brain"],"tags":[]},{"location":"recipes/hub-team/","level":1,"title":"Team Knowledge Bus","text":"","path":["Recipes","Hub","Team Knowledge Bus"],"tags":[]},{"location":"recipes/hub-team/#team-knowledge-bus","level":1,"title":"Team Knowledge Bus","text":"

            This recipe shows how a small trusted team uses a ctx Hub as a shared knowledge bus, the \"Story 2\" shape from the Hub overview. You're not building a wiki, you're not replacing your issue tracker, and you're not running a multi-tenant service. You're connecting 3-10 developers who trust each other so that lessons, decisions, and conventions flow between them without ceremony.

            Prerequisites:

            • A running ctx Hub on a LAN host or internal server everyone on the team can reach. See Multi-machine setup for the deployment guide.
            • Each team member has ctx installed and has ctx connection register-ed their working projects with the hub.
            ","path":["Recipes","Hub","Team Knowledge Bus"],"tags":[]},{"location":"recipes/hub-team/#trust-model-read-this-first","level":2,"title":"Trust Model: Read This First","text":"

            The hub assumes everyone holding a client token is friendly. There's no per-user attribution you can rely on, no read ACL beyond subscription filters, and Origin is self-asserted by the publishing client. Treat the hub like a team wiki: useful because everyone can write to it, not because it can prove who wrote what.

            If your team is:

            • ✅ 3-10 engineers, all known to each other, all trusted with production access
            • ✅ On a single internal network or behind a VPN
            • ✅ Comfortable with \"the hub assumes friendly participants\"

            …this recipe fits. If your team is:

            • ❌ Larger than ~15, with turnover
            • ❌ Includes contractors, untrusted agents, or compromised-workstation concerns
            • ❌ Needs audit trails that prove who published what
            • ❌ Requires per-team-member isolation

            …you're in \"Story 3\" territory, which the hub does not support today. Use a wiki or a dedicated knowledge platform instead.

            ","path":["Recipes","Hub","Team Knowledge Bus"],"tags":[]},{"location":"recipes/hub-team/#the-teams-three-verbs","level":2,"title":"The Team's Three Verbs","text":"

            Everyone on the team does three things, same as in the personal recipe, but with different social expectations:

            1. Record: when you learn something that would save a teammate time, capture it with ctx add --share.
            2. Subscribe: every engineer's project directories subscribe to the types the team cares about.
            3. Load: agents pick up shared entries automatically via the auto-sync hook and the --include-hub flag in the PreToolUse hook pipeline.

            The operational shape is identical to solo use. What's different is the culture around publishing: when do you --share, and what belongs on the hub vs. in your local .context/.

            ","path":["Recipes","Hub","Team Knowledge Bus"],"tags":[]},{"location":"recipes/hub-team/#what-goes-on-the-hub-team-rules-of-thumb","level":2,"title":"What Goes on the Hub (Team Rules of Thumb)","text":"

            Share it if it's true for more than one person. The central question: \"would the next teammate who hits this problem save time if they already knew this?\" If yes, --share. If no, record it locally and move on.

            Decisions:

            • ✅ Cross-service decisions (database choice, auth model, deployment pattern, monitoring stack).
            • ✅ Policy decisions that apply to all services (naming, API versioning, error-message format).
            • ❌ Internal implementation decisions inside a single service (\"chose a map over a slice here because lookups dominate\").
            • ❌ One-off tactical calls for a specific PR.

            Learnings:

            • ✅ Gotchas, surprising behavior, flaky infrastructure quirks, anything you'd tell a teammate over coffee with \"watch out for X\".
            • ✅ Lessons from incidents, right after the postmortem is the highest-value time to share.
            • ❌ Internal debugging notes that only make sense with context from your current branch.

            Conventions:

            • ✅ Repo layout, commit message format, pre-commit hooks, review expectations.
            • ✅ Language-level style decisions that apply across services.
            • ❌ Per-service idioms (\"in billing/ we prefer…\").

            Tasks: almost always project-local. Don't subscribe to task unless the team has a specific reason (e.g., a cross-cutting migration you want visible everywhere).

            ","path":["Recipes","Hub","Team Knowledge Bus"],"tags":[]},{"location":"recipes/hub-team/#a-realistic-week","level":2,"title":"A Realistic Week","text":"

            Monday, 3 AM incident, shared learning

            On-call engineer Alice gets paged: the payment service starts returning 500s after a dependency update. After an hour she finds the culprit: a breaking change in a transitive gRPC dep that only manifests under high concurrency. Postmortem on Tuesday, but right now she records the learning:

            ctx add learning --share \\\n  --context \"Payment service 3 AM incident, 2026-04-03\" \\\n  --lesson  \"grpc-go v1.62+ changes DialContext behavior under high \\\n  concurrency: connections from a single channel can deadlock if the \\\n  server emits GOAWAY mid-stream. Symptom: 500 errors cluster in \\\n  30s bursts, no error in grpc client logs.\" \\\n  --application \"Any service on grpc-go. Pin to v1.61 or patch with \\\n  keepalive: https://github.com/grpc/grpc-go/issues/...\" \n

            By Tuesday morning, every other engineer's agent context packet contains this learning. When Bob starts work on the ledger service (which also uses grpc-go), his Claude Code session already knows about the gotcha without Bob having to read the incident channel.

            Wednesday, cross-service decision

            The team agrees on a new pattern for API versioning: header-based instead of URL-based. Platform lead Carol records the decision:

            ctx add decision --share \\\n  --context \"Need consistent API versioning across all 6 services. \\\n  Current URL-based /v1/ isn't working for gradual rollouts.\" \\\n  --rationale \"Header-based versioning lets us route by header at the \\\n  edge, which makes canary rollouts trivial. URL-based versioning \\\n  forces clients to update their paths.\" \\\n  --consequence \"All new endpoints use X-API-Version header. \\\n  Existing /v1/ endpoints stay. Deprecation schedule in q3.\" \\\n  \"Use header-based API versioning for new endpoints\"\n

            Every engineer's next session knows about this decision automatically. When Dave starts adding endpoints to the inventory service on Thursday, Claude already prompts him for the header pattern instead of defaulting to /v1/.

            Friday, convention drift caught at review

            Dave notices that his PR auto-formatted some error messages to end with periods. He recalls the team convention is \"no trailing period\" but can't remember where it was documented. He runs ctx connection status, sees the hub is healthy, greps his local .context/hub/conventions.md, and finds:

            ## [2026-03-12] Error message format\nLowercase start, no trailing period, single sentence.\n

            He fixes the PR. No lookup on the wiki, no question in chat, no context-switch penalty.

            ","path":["Recipes","Hub","Team Knowledge Bus"],"tags":[]},{"location":"recipes/hub-team/#workflow-tips-for-teams","level":2,"title":"Workflow Tips for Teams","text":"

            Designate a \"champion\" for decisions. The team lead or platform engineer should be the person who explicitly --shares cross-cutting decisions. Other team members share learnings freely but should ask \"should this be a decision?\" in review before --sharing a decision. This keeps the decision stream signal-rich.

            Publish postmortem learnings immediately, not after the meeting. The postmortem itself is a document; the actionable rules that come out of it belong on the hub, and they should land within an hour of the incident. \"Share fast, edit later\" is the rule.

            Delete noisy entries, don't tolerate them. The hub is append-only, but the .context/hub/ mirror on each client is just markdown. If a shared learning turns out to be wrong or obsolete, remove it from local mirrors and stop the hub daemon to truncate entries.jsonl (see Hub operations). Noisy shared feeds lose trust fast.

            Don't subscribe every project to every type. For backend engineers, subscribing to decision + learning + convention is usually right. For platform or DevOps projects, adding task makes sense. For a prototype or experiment project, subscribing only to convention might be enough.

            Run a single hub, not one per team. If two teams need to share knowledge, they should share a hub. Splitting hubs by team creates silos, which is often exactly the thing you were trying to solve.

            ","path":["Recipes","Hub","Team Knowledge Bus"],"tags":[]},{"location":"recipes/hub-team/#operational-concerns","level":2,"title":"Operational Concerns","text":"

            The team recipe assumes someone owns the hub host. That person (or a small group) is responsible for:

            • Uptime: the hub is infrastructure; treat it like any other internal service you run. See Hub operations.
            • Backups: entries.jsonl is the source of truth. Snapshot it to the same backup tier as your other internal data.
            • Upgrades: cadence the team agrees on. Major upgrades may require everyone to re-register, so do them at natural breaks.
            • Failures: see Hub failure modes for the standard oncall playbook.

            Optional but recommended: run a 3-node Raft cluster so the hub survives individual node failures. See HA cluster. For teams under 10 people, a single-node hub with daily backups is usually fine.

            ","path":["Recipes","Hub","Team Knowledge Bus"],"tags":[]},{"location":"recipes/hub-team/#token-management","level":2,"title":"Token Management","text":"

            Every team member has a client token stored in their .context/.connect.enc. Rules of thumb:

            • One token per engineer per project. Not one token per team; not one shared token. Each engineer registers each of their working projects separately.
            • Token compromise = revoke immediately. When an engineer leaves, their tokens should be removed from clients.json on the hub. This is a manual operation today; see Hub security for the revocation steps.
            • No checked-in tokens. .context/.connect.enc is encrypted with the local machine key, but don't push it to shared repos; it's per-workstation.
            ","path":["Recipes","Hub","Team Knowledge Bus"],"tags":[]},{"location":"recipes/hub-team/#what-this-recipe-is-not","level":2,"title":"What This Recipe Is Not","text":"

            Not a wiki replacement. The hub is for structured entries, not prose. Put your architecture overviews, onboarding docs, and design discussions in a real wiki.

            Not an audit log. Origin on the hub is self-asserted. If compliance requires provenance, the hub is the wrong tool.

            Not a ticket system. Task sharing works, but mature teams already have Jira/Linear/GitHub Issues. Don't try to replace those with hub tasks; use the hub for lightweight cross-project todos that your existing tracker doesn't capture well.

            Not a production service for end users. This is internal team infrastructure. Do not expose the hub to customers, partners, or the open internet.

            ","path":["Recipes","Hub","Team Knowledge Bus"],"tags":[]},{"location":"recipes/hub-team/#see-also","level":2,"title":"See Also","text":"
            • Hub overview: when to use the hub and when not to.
            • Personal cross-project brain: the single-developer companion recipe.
            • Multi-machine setup: standing up the hub on a LAN host.
            • HA cluster: optional redundancy for larger teams.
            • Hub operations: backup, rotation, monitoring.
            • Hub security: threat model and hardening checklist.
            ","path":["Recipes","Hub","Team Knowledge Bus"],"tags":[]},{"location":"recipes/import-plans/","level":1,"title":"Importing Claude Code Plans","text":"","path":["Recipes","Knowledge and Tasks","Importing Claude Code Plans"],"tags":[]},{"location":"recipes/import-plans/#the-problem","level":2,"title":"The Problem","text":"

            Claude Code plan files (~/.claude/plans/*.md) are ephemeral: they have structured context, approach, and file lists, but they're orphaned after the session ends. The filenames are UUIDs, so you can't tell what's in them without opening each one.

            How do you turn a useful plan into a permanent project spec?

            ","path":["Recipes","Knowledge and Tasks","Importing Claude Code Plans"],"tags":[]},{"location":"recipes/import-plans/#tldr","level":2,"title":"TL;DR","text":"
            You: /ctx-plan-import\nAgent: [lists plans with dates and titles]\n       1. 2026-02-28  Add authentication middleware\n       2. 2026-02-27  Refactor database connection pool\nYou: \"import 1\"\nAgent: [copies to specs/add-authentication-middleware.md]\n

            Plans are copied (not moved) to specs/, slugified by their H1 heading.

            ","path":["Recipes","Knowledge and Tasks","Importing Claude Code Plans"],"tags":[]},{"location":"recipes/import-plans/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Tool Type Purpose /ctx-plan-import Skill List, filter, and import plan files to specs /ctx-task-add Skill Optionally add a task referencing the spec","path":["Recipes","Knowledge and Tasks","Importing Claude Code Plans"],"tags":[]},{"location":"recipes/import-plans/#the-workflow","level":2,"title":"The Workflow","text":"","path":["Recipes","Knowledge and Tasks","Importing Claude Code Plans"],"tags":[]},{"location":"recipes/import-plans/#step-1-list-available-plans","level":3,"title":"Step 1: List Available Plans","text":"

            Invoke the skill and it lists plans with modification dates and titles:

            You: /ctx-plan-import\n\nAgent: Found 3 plan files:\n         1. 2026-02-28  Add authentication middleware\n         2. 2026-02-27  Refactor database connection pool\n         3. 2026-02-25  Import plans skill\n       Which plans would you like to import?\n
            ","path":["Recipes","Knowledge and Tasks","Importing Claude Code Plans"],"tags":[]},{"location":"recipes/import-plans/#step-2-filter-optional","level":3,"title":"Step 2: Filter (Optional)","text":"

            You can narrow the list with arguments:

            Argument Effect --today Only plans modified today --since YYYY-MM-DD Only plans modified on or after the date --all Import everything without prompting (none) Interactive selection
            You: /ctx-plan-import --today\nYou: /ctx-plan-import --since 2026-02-27\nYou: /ctx-plan-import --all\n
            ","path":["Recipes","Knowledge and Tasks","Importing Claude Code Plans"],"tags":[]},{"location":"recipes/import-plans/#step-3-select-and-import","level":3,"title":"Step 3: Select and Import","text":"

            Pick one or more plans by number:

            You: \"import 1 and 3\"\n\nAgent: Imported 2 plan(s):\n         ~/.claude/plans/abc123.md -> specs/add-authentication-middleware.md\n         ~/.claude/plans/ghi789.md -> specs/import-plans-skill.md\n       Want me to add tasks referencing these specs?\n

            The agent reads the H1 heading from each plan and slugifies it for the filename. If a plan has no H1 heading, the original filename (minus extension) is used as the slug.

            ","path":["Recipes","Knowledge and Tasks","Importing Claude Code Plans"],"tags":[]},{"location":"recipes/import-plans/#step-4-add-follow-up-tasks-optional","level":3,"title":"Step 4: Add Follow-Up Tasks (Optional)","text":"

            If you say yes, the agent creates tasks in TASKS.md that reference the imported specs:

            You: \"yes, add tasks\"\n\nAgent: [runs /ctx-task-add for each spec]\n       Added:\n         - [ ] Implement authentication middleware (spec: specs/add-authentication-middleware.md)\n         - [ ] Import plans skill (spec: specs/import-plans-skill.md)\n
            ","path":["Recipes","Knowledge and Tasks","Importing Claude Code Plans"],"tags":[]},{"location":"recipes/import-plans/#conversational-approach","level":2,"title":"Conversational Approach","text":"

            You don't need to remember the exact skill name:

            You say What happens \"import my plans\" /ctx-plan-import (interactive) \"save today's plans as specs\" /ctx-plan-import --today \"import all plans from this week\" /ctx-plan-import --since ... \"turn that plan into a spec\" /ctx-plan-import (filtered)","path":["Recipes","Knowledge and Tasks","Importing Claude Code Plans"],"tags":[]},{"location":"recipes/import-plans/#tips","level":2,"title":"Tips","text":"
            • Plans are copied, not moved: The originals stay in ~/.claude/plans/. Claude Code manages that directory; ctx doesn't delete from it.
            • Conflict handling: If specs/{slug}.md already exists, the agent asks whether to overwrite or pick a different name.
            • Specs are project memory: Once imported, specs are tracked in git and available to future sessions. Reference them from TASKS.md phase headers with Spec: specs/slug.md.
            • Pair with /ctx-implement: After importing a plan as a spec, use /ctx-implement to execute it step-by-step with verification.
            ","path":["Recipes","Knowledge and Tasks","Importing Claude Code Plans"],"tags":[]},{"location":"recipes/import-plans/#see-also","level":2,"title":"See Also","text":"
            • Skills Reference: /ctx-plan-import: full skill description
            • The Complete Session: where plan import fits in the session flow
            • Tracking Work Across Sessions: managing tasks that reference imported specs
            ","path":["Recipes","Knowledge and Tasks","Importing Claude Code Plans"],"tags":[]},{"location":"recipes/knowledge-capture/","level":1,"title":"Persisting Decisions, Learnings, and Conventions","text":"","path":["Recipes","Knowledge and Tasks","Persisting Decisions, Learnings, and Conventions"],"tags":[]},{"location":"recipes/knowledge-capture/#the-problem","level":2,"title":"The Problem","text":"

            You debug a subtle issue, discover the root cause, and move on.

            Three weeks later, a different session hits the same issue. The knowledge existed briefly in one session's memory but was never written down.

            Architectural decisions suffer the same fate: you weigh trade-offs, pick an approach, and six sessions later the AI suggests the alternative you already rejected.

            How do you make sure important context survives across sessions?

            Prefer Skills to Raw Commands

            Use /ctx-decision-add and /ctx-learning-add instead of raw ctx add commands. The agent automatically picks up session ID, branch, and commit hash from its context, so no manual flags are needed.

            ","path":["Recipes","Knowledge and Tasks","Persisting Decisions, Learnings, and Conventions"],"tags":[]},{"location":"recipes/knowledge-capture/#tldr","level":2,"title":"TL;DR","text":"
            /ctx-reflect               # surface items worth persisting\n/ctx-decision-add \"Title\"  # record with context/rationale/consequence\n/ctx-learning-add \"Title\"  # record with context/lesson/application\n

            Or just tell your agent: \"What have we learned this session?\"

            ","path":["Recipes","Knowledge and Tasks","Persisting Decisions, Learnings, and Conventions"],"tags":[]},{"location":"recipes/knowledge-capture/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Tool Type Purpose ctx add decision Command Record an architectural decision ctx add learning Command Record a gotcha, tip, or lesson ctx add convention Command Record a coding pattern or standard ctx reindex Command Rebuild both quick-reference indices ctx decision reindex Command Rebuild the DECISIONS.md index ctx learning reindex Command Rebuild the LEARNINGS.md index /ctx-decision-add Skill AI-guided decision capture with validation /ctx-learning-add Skill AI-guided learning capture with validation /ctx-convention-add Skill AI-guided convention recording with placement /ctx-reflect Skill Surface items worth persisting at breakpoints","path":["Recipes","Knowledge and Tasks","Persisting Decisions, Learnings, and Conventions"],"tags":[]},{"location":"recipes/knowledge-capture/#the-workflow","level":2,"title":"The Workflow","text":"

            Decision, Learning, or Convention?

            • If you chose between alternatives, it is a decision.
            • If you discovered something surprising, it is a learning.
            • If you are codifying a repeated pattern, it is a convention.
            ","path":["Recipes","Knowledge and Tasks","Persisting Decisions, Learnings, and Conventions"],"tags":[]},{"location":"recipes/knowledge-capture/#step-1-understand-what-to-persist","level":3,"title":"Step 1: Understand What to Persist","text":"

            Three context files serve different purposes:

            Decisions (DECISIONS.md) answer \"why is it this way?\" They record trade-offs between alternatives with structured fields: context, rationale, and consequences. Decisions prevent re-debating settled questions.

            Learnings (LEARNINGS.md) answer \"what did we discover the hard way?\" They record gotchas and debugging insights specific to this project with structured fields: context, lesson, and application.

            Learnings prevent repeating past mistakes.

            Conventions (CONVENTIONS.md) answer \"how do we do things here?\" They record patterns and standards. No structured fields required: just a name, a rule, and an example. Conventions keep code consistent across sessions.

            ","path":["Recipes","Knowledge and Tasks","Persisting Decisions, Learnings, and Conventions"],"tags":[]},{"location":"recipes/knowledge-capture/#step-2-record-decisions","level":3,"title":"Step 2: Record Decisions","text":"

            Decisions have three structured fields:

            • context,
            • rationale,
            • and consequences.

            The easiest way to record one is to tell your agent:

            You: \"Save that as a decision: we picked file-based cooldown\n      tokens over env vars.\"\n\nAgent: \"Recorded in DECISIONS.md:\n  - Decision: Use file-based cooldown tokens instead of env vars\n  - Context: Hook subprocesses cannot persist env vars to parent shell\n  - Rationale: File tokens survive across processes. Simpler than\n    IPC. Cleanup is automatic via TTL.\n  - Consequence: Tombstone files accumulate in /tmp. Cannot share\n    state across machines.\"\n

            The agent was present for the trade-off discussion. It fills in the structured fields from what it observed, which is hard to reconstruct later as a terminal one-liner.

            When multiple options were considered, the agent includes rejected alternatives in the rationale automatically.

            The /ctx-decision-add skill also guides you through the fields interactively. For quick decisions, it supports a Y-statement:

            \"In the context of [situation], facing [constraint], we decided for [choice] and against [alternatives], to achieve [benefit], accepting that [trade-off].\"

            CLI Command for Scripting and Automation

            When no agent is in the loop (CI pipelines, shell scripts, hooks), use the CLI directly with structured flags:

            ctx add decision \"Use file-based cooldown tokens instead of env vars\" \\\n  --context \"Hook subprocesses cannot persist env vars to parent shell\" \\\n  --rationale \"File tokens survive across processes. Simpler than IPC. Cleanup is automatic via TTL.\" \\\n  --consequence \"Tombstone files accumulate in /tmp. Cannot share state across machines.\" \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n
            ","path":["Recipes","Knowledge and Tasks","Persisting Decisions, Learnings, and Conventions"],"tags":[]},{"location":"recipes/knowledge-capture/#step-3-record-learnings","level":3,"title":"Step 3: Record Learnings","text":"

            Learnings have three structured fields: context, lesson, and application.

            Again, your agent is the best tool for this. It was there when you discovered the gotcha and can express it more completely than a backslash-escaped terminal command:

            You: \"That subprocess thing was painful. Save it as a learning.\"\n\nAgent: \"Added to LEARNINGS.md:\n  - Learning: Claude Code hooks run in a subprocess\n  - Context: Set env var in PreToolUse hook, but it was not visible\n    in the main session\n  - Lesson: Hook scripts execute in a child process. Env changes do\n    not propagate to parent.\n  - Application: Use tombstone files for hook-to-session\n    communication. Never rely on hook env vars.\"\n

            You said one sentence. The agent produced a structured entry with all three fields filled from the session context.

            That is the point: the agent has the full conversation and can be far more expressive than what you would type at a command prompt.

            The /ctx-learning-add skill applies three quality filters:

            1. Could someone Google this in 5 minutes?
            2. Is it specific to this codebase?
            3. Did it take real effort to discover?

            All three must pass.

            Learnings capture principles and heuristics, not code snippets.

            CLI Command for Scripting and Automation

            When no agent is in the loop:

            ctx add learning \"Claude Code hooks run in a subprocess\" \\\n  --context \"Set env var in PreToolUse hook, but it was not visible in the main session\" \\\n  --lesson \"Hook scripts execute in a child process. Env changes do not propagate to parent.\" \\\n  --application \"Use tombstone files for hook-to-session communication. Never rely on hook env vars.\" \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n
            ","path":["Recipes","Knowledge and Tasks","Persisting Decisions, Learnings, and Conventions"],"tags":[]},{"location":"recipes/knowledge-capture/#step-4-record-conventions","level":3,"title":"Step 4: Record Conventions","text":"

            Conventions are simpler: just a name, a rule, and optionally a section.

            These are short enough that either approach works:

            You: \"We've been using kebab-case for every CLI flag. Codify that.\"\n\nAgent: \"Added to CONVENTIONS.md under Naming:\n  'Use kebab-case for all CLI flag names.'\"\n

            Or from the terminal:

            ctx add convention \"Use kebab-case for all CLI flag names\" --section \"Naming\"\n

            Conventions work best for rules that come up repeatedly. Codify a pattern the third time you see it, not the first.

            ","path":["Recipes","Knowledge and Tasks","Persisting Decisions, Learnings, and Conventions"],"tags":[]},{"location":"recipes/knowledge-capture/#step-5-reindex-after-manual-edits","level":3,"title":"Step 5: Reindex After Manual Edits","text":"

            DECISIONS.md and LEARNINGS.md maintain a quick-reference index at the top: a compact table of date and title for each entry. The index updates automatically via ctx add, but falls out of sync after hand edits.

            ctx reindex\n

            This single command regenerates both indices. You can also reindex individually with ctx decision reindex or ctx learning reindex.

            Run reindex after any manual edit. The index lets AI tools scan all entries without reading the full file, which matters when token budgets are tight.

            ","path":["Recipes","Knowledge and Tasks","Persisting Decisions, Learnings, and Conventions"],"tags":[]},{"location":"recipes/knowledge-capture/#step-6-use-ctx-reflect-to-surface-what-to-capture","level":3,"title":"Step 6: Use /ctx-reflect to Surface What to Capture","text":"

            Keep It Conversational

            /ctx-reflect is not the only way to trigger reflection.

            Agents trained on the ctx playbook naturally surface persist-worthy items at breakpoints, even without invoking the skill explicitly.

            A conversational prompt like \"anything worth saving?\" or \"let's wrap up\" can trigger the same review.

            The skill provides a structured checklist, but the behavior is available through natural conversation.

            At natural breakpoints (after completing a feature, fixing a bug, or before ending a session) use /ctx-reflect to identify items worth persisting.

            /ctx-reflect\n

            The skill walks through learnings, decisions, tasks, and session notes, skipping categories with nothing to report. The output includes specific commands for each suggested persist:

            This session implemented file-based cooldown for `ctx agent` and\ndiscovered that hook subprocesses cannot set env vars in the parent.\n\nI'd suggest persisting:\n- **Learning**: Hook subprocesses cannot propagate env vars\n  `ctx add learning \"...\" --context \"...\" --lesson \"...\" --application \"...\" --session-id abc12345 --branch main --commit 68fbc00a`\n- **Decision**: File-based cooldown tokens over env vars\n  `ctx add decision \"...\" --context \"...\" --rationale \"...\" --consequence \"...\" --session-id abc12345 --branch main --commit 68fbc00a`\n\nWant me to persist any of these?\n

            The skill always asks before persisting.

            ","path":["Recipes","Knowledge and Tasks","Persisting Decisions, Learnings, and Conventions"],"tags":[]},{"location":"recipes/knowledge-capture/#step-7-the-conversational-approach","level":3,"title":"Step 7: The Conversational Approach","text":"

            The commands shown in Steps 2 to 4 are precise, but you rarely need to type them yourself. When working with an agent that has loaded the ctx playbook, natural language triggers the same operations.

            You ask, the agent acts:

            You say What the agent does \"What have we learned this session?\" Reviews recent work, identifies learnings, offers to persist \"Save that as a decision\" Captures the trade-off you just discussed with structured fields \"Any gotchas worth remembering?\" Scans recent work for non-obvious lessons \"Record that convention\" Adds the pattern you just agreed on to CONVENTIONS.md \"Let's wrap up\" Runs through the reflect checklist conversationally

            Example exchange:

            You: \"What did we learn from that debugging session?\"\n\nAgent: \"We discovered three things worth persisting:\n  1. Hook subprocesses can't propagate env vars to the parent. We\n     spent 20 minutes on this before switching to tombstone files.\n  2. Tombstone files need a TTL or they accumulate in /tmp.\n  3. The cooldown logic must check file mtime, not just existence.\n  Want me to save these as learnings?\"\n\nYou: \"Yes, all of them.\"\n\nAgent: \"Done. Added all three to LEARNINGS.md with structured fields.\"\n

            The agent can also be proactive without being asked. The playbook encourages agents to persist context at natural milestones.

            You may see:

            • After fixing a tricky bug: \"I noticed this was a non-obvious issue. Want me to save it as a learning?\"
            • After choosing between approaches: \"We just made a trade-off between file-based tokens and IPC. Should I record this as a decision?\"
            • At session milestones: the agent checks what is worth persisting and offers a brief summary.

            When agents run unattended (no human in the loop), they may persist directly:

            • \"I've added the subprocess env var gotcha to LEARNINGS.md so we don't hit it again.\"
            • \"Recorded the decision to use file-based tokens over env vars in DECISIONS.md.\"
            • \"Marked the cooldown task done and added a follow-up for TTL cleanup.\"

            This is behavior by design.

            The playbook's self-check prompt, \"If this session ended right now, would the next session know what happened?\" drives agents to persist early and often rather than waiting for explicit instructions.

            ","path":["Recipes","Knowledge and Tasks","Persisting Decisions, Learnings, and Conventions"],"tags":[]},{"location":"recipes/knowledge-capture/#putting-it-all-together","level":2,"title":"Putting It All Together","text":"","path":["Recipes","Knowledge and Tasks","Persisting Decisions, Learnings, and Conventions"],"tags":[]},{"location":"recipes/knowledge-capture/#command-line-approach-scripting-and-automation","level":3,"title":"Command-Line Approach (Scripting and Automation)","text":"
            # Decision: record the trade-off\nctx add decision \"Use PostgreSQL over SQLite\" \\\n  --context \"Need concurrent multi-user access\" \\\n  --rationale \"SQLite locks on writes; Postgres handles concurrency\" \\\n  --consequence \"Requires a database server; team needs Postgres training\" \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n\n# Learning: record the gotcha\nctx add learning \"SQL migrations must be idempotent\" \\\n  --context \"Deploy failed when migration ran twice after rollback\" \\\n  --lesson \"CREATE TABLE without IF NOT EXISTS fails on retry\" \\\n  --application \"Always use IF NOT EXISTS guards in migrations\" \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n\n# Convention: record the pattern\nctx add convention \"API handlers return structured errors\" --section \"API\"\n\n# Reindex after manual edits\nctx reindex\n\n# Reflect at breakpoints (from AI assistant)\n# /ctx-reflect\n
            ","path":["Recipes","Knowledge and Tasks","Persisting Decisions, Learnings, and Conventions"],"tags":[]},{"location":"recipes/knowledge-capture/#conversational-approach-interactive-sessions","level":3,"title":"Conversational Approach (Interactive Sessions)","text":"
            You: \"We just picked Postgres over SQLite for the concurrency\n      reasons. Save that?\"\n\nAgent: \"Recorded. Added to DECISIONS.md:\n  - Decision: Use PostgreSQL over SQLite\n  - Context: Need concurrent multi-user access\n  - Rationale: SQLite locks on writes; Postgres handles concurrency\n  - Consequence: Requires a database server; team needs Postgres\n    training\"\n\nYou: \"Also that migration thing. The deploy blew up when the\n      migration ran twice.\"\n\nAgent: \"Got it. Added to LEARNINGS.md:\n  - Learning: SQL migrations must be idempotent\n  - Lesson: CREATE TABLE without IF NOT EXISTS fails on retry\n  - Application: Always use IF NOT EXISTS guards in migrations\"\n\nYou: \"Good. Anything else worth saving before we wrap up?\"\n\nAgent: \"We've been using structured error returns in every API\n  handler this session. Want me to codify that as a convention?\"\n\nYou: \"Do it.\"\n\nAgent: \"Done. Added 'API handlers return structured errors' to\n  CONVENTIONS.md under the API section.\"\n

            Both approaches produce the same structured entries in the same context files.

            • The conversational approach is the natural fit for interactive sessions;
            • the CLI commands are better suited for scripts, hooks, and automation pipelines.
            ","path":["Recipes","Knowledge and Tasks","Persisting Decisions, Learnings, and Conventions"],"tags":[]},{"location":"recipes/knowledge-capture/#tips","level":2,"title":"Tips","text":"
            • Record decisions at the moment of choice. The alternatives you considered and the reasons you rejected them fade quickly. Capture trade-offs while they are fresh.
            • Learnings should fail the Google test. If someone could find it in a 5-minute Google search, it does not belong in LEARNINGS.md.
            • Conventions earn their place through repetition. Add a convention the third time you see a pattern, not the first.
            • Use /ctx-reflect at natural breakpoints. The checklist catches items you might otherwise lose.
            • Keep the entries self-contained. Each entry should make sense on its own. A future session may load only one due to token budget constraints.
            • Reindex after every hand edit. It takes less than a second. A stale index causes AI tools to miss entries.
            • Prefer the structured fields. The verbosity forces clarity. A decision without a rationale is just a fact. A learning without an application is just a story.
            • Talk to your agent, do not type commands. In interactive sessions, the conversational approach is the recommended way to capture knowledge. Say \"save that as a learning\" or \"any decisions worth recording?\" and let the agent handle the structured fields. Reserve the CLI commands for scripting, automation, and CI/CD pipelines where there is no agent in the loop.
            • Trust the agent's proactive instincts. Agents trained on the ctx playbook will offer to persist context at milestones. A brief \"want me to save this?\" is cheaper than re-discovering the same lesson three sessions later.
            • Relax provenance per-project if --session-id, --branch, or --commit are impractical (e.g., manual notes outside an AI session). Add to .ctxrc:

              provenance_required:\n  session_id: false   # allow entries without --session-id\n  branch: true        # still require --branch\n  commit: true        # still require --commit\n

              Default is all three required. Only human configuration can relax provenance: agents cannot bypass it, and that's by design.

            ","path":["Recipes","Knowledge and Tasks","Persisting Decisions, Learnings, and Conventions"],"tags":[]},{"location":"recipes/knowledge-capture/#next-up","level":2,"title":"Next Up","text":"

            Tracking Work Across Sessions →: Add, prioritize, complete, and archive tasks across sessions.

            ","path":["Recipes","Knowledge and Tasks","Persisting Decisions, Learnings, and Conventions"],"tags":[]},{"location":"recipes/knowledge-capture/#see-also","level":2,"title":"See Also","text":"
            • Tracking Work Across Sessions: managing the tasks that decisions and learnings support
            • The Complete Session: full session lifecycle including reflection and context persistence
            • Detecting and Fixing Drift: keeping knowledge files accurate as the codebase evolves
            • CLI Reference: full documentation for ctx add, ctx decision, ctx learning
            • Context Files: format and conventions for DECISIONS.md, LEARNINGS.md, and CONVENTIONS.md
            ","path":["Recipes","Knowledge and Tasks","Persisting Decisions, Learnings, and Conventions"],"tags":[]},{"location":"recipes/memory-bridge/","level":1,"title":"Bridging Claude Code Auto Memory","text":"","path":["Recipes","Knowledge and Tasks","Bridging Claude Code Auto Memory"],"tags":[]},{"location":"recipes/memory-bridge/#the-problem","level":2,"title":"The Problem","text":"

            Claude Code maintains per-project auto memory at ~/.claude/projects/<slug>/memory/MEMORY.md. This file is:

            • Outside the repo - not version-controlled, not portable
            • Machine-specific - tied to one ~/.claude/ directory
            • Invisible to ctx - context loading and hooks don't read it

            Meanwhile, ctx maintains structured context files (DECISIONS.md, LEARNINGS.md, CONVENTIONS.md) that are git-tracked, portable, and token-budgeted - but Claude Code doesn't automatically write to them.

            The two systems hold complementary knowledge with no bridge between them.

            ","path":["Recipes","Knowledge and Tasks","Bridging Claude Code Auto Memory"],"tags":[]},{"location":"recipes/memory-bridge/#tldr","level":2,"title":"TL;DR","text":"
            ctx memory sync          # Mirror MEMORY.md into .context/memory/mirror.md\nctx memory status        # Check for drift\nctx memory diff          # See what changed since last sync\n

            The check-memory-drift hook nudges automatically when MEMORY.md changes - you don't need to remember to sync manually.

            ","path":["Recipes","Knowledge and Tasks","Bridging Claude Code Auto Memory"],"tags":[]},{"location":"recipes/memory-bridge/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Tool Type Purpose ctx memory sync CLI command Copy MEMORY.md to mirror, archive previous ctx memory status CLI command Show drift, timestamps, line counts ctx memory diff CLI command Show changes since last sync ctx memory import CLI command Classify and promote entries to .context/ files ctx memory publish CLI command Push curated .context/ content to MEMORY.md ctx memory unpublish CLI command Remove published block from MEMORY.md ctx system check-memory-drift Hook Nudge when MEMORY.md has changed (once/session)","path":["Recipes","Knowledge and Tasks","Bridging Claude Code Auto Memory"],"tags":[]},{"location":"recipes/memory-bridge/#how-it-works","level":2,"title":"How It Works","text":"","path":["Recipes","Knowledge and Tasks","Bridging Claude Code Auto Memory"],"tags":[]},{"location":"recipes/memory-bridge/#discovery","level":3,"title":"Discovery","text":"

            Claude Code encodes project paths as directory names under ~/.claude/projects/. The encoding replaces / with - and prefixes with -:

            /home/jose/WORKSPACE/ctx  →  ~/.claude/projects/-home-jose-WORKSPACE-ctx/\n

            ctx memory uses this encoding to locate MEMORY.md automatically from your project root - no configuration needed.

            ","path":["Recipes","Knowledge and Tasks","Bridging Claude Code Auto Memory"],"tags":[]},{"location":"recipes/memory-bridge/#mirroring","level":3,"title":"Mirroring","text":"

            When you run ctx memory sync:

            1. The previous mirror is archived to .context/memory/archive/mirror-<timestamp>.md
            2. MEMORY.md is copied to .context/memory/mirror.md
            3. Sync state is updated in .context/state/memory-import.json

            The mirror is git-tracked, so it travels with the project. Archives provide a fallback for projects that don't use git.

            ","path":["Recipes","Knowledge and Tasks","Bridging Claude Code Auto Memory"],"tags":[]},{"location":"recipes/memory-bridge/#drift-detection","level":3,"title":"Drift Detection","text":"

            The check-memory-drift hook compares MEMORY.md's modification time against the mirror. When drift is detected, the agent sees:

            ┌─ Memory Drift ────────────────────────────────────────────────\n│ MEMORY.md has changed since last sync.\n│ Run: ctx memory sync\n│ Context: .context\n└────────────────────────────────────────────────────────────────\n

            The nudge fires once per session to avoid noise.

            ","path":["Recipes","Knowledge and Tasks","Bridging Claude Code Auto Memory"],"tags":[]},{"location":"recipes/memory-bridge/#typical-workflow","level":2,"title":"Typical Workflow","text":"","path":["Recipes","Knowledge and Tasks","Bridging Claude Code Auto Memory"],"tags":[]},{"location":"recipes/memory-bridge/#at-session-start","level":3,"title":"At Session Start","text":"

            If the hook fires a drift nudge, sync before diving into work:

            ctx memory diff     # Review what changed\nctx memory sync     # Mirror the changes\n
            ","path":["Recipes","Knowledge and Tasks","Bridging Claude Code Auto Memory"],"tags":[]},{"location":"recipes/memory-bridge/#periodic-check","level":3,"title":"Periodic Check","text":"
            ctx memory status\n# Memory Bridge Status\n#   Source:      ~/.claude/projects/.../memory/MEMORY.md\n#   Mirror:      .context/memory/mirror.md\n#   Last sync:   2026-03-05 14:30 (2 hours ago)\n#\n#   MEMORY.md:  47 lines\n#   Mirror:     32 lines\n#   Drift:      detected (source is newer)\n#   Archives:   3 snapshots in .context/memory/archive/\n
            ","path":["Recipes","Knowledge and Tasks","Bridging Claude Code Auto Memory"],"tags":[]},{"location":"recipes/memory-bridge/#dry-run","level":3,"title":"Dry Run","text":"

            Preview what sync would do without writing:

            ctx memory sync --dry-run\n
            ","path":["Recipes","Knowledge and Tasks","Bridging Claude Code Auto Memory"],"tags":[]},{"location":"recipes/memory-bridge/#storage-layout","level":2,"title":"Storage Layout","text":"
            .context/\n├── memory/\n│   ├── mirror.md                          # Raw copy of MEMORY.md (often git-tracked)\n│   └── archive/\n│       ├── mirror-2026-03-05-143022.md    # Timestamped pre-sync snapshots\n│       └── mirror-2026-03-04-220015.md\n├── state/\n│   └── memory-import.json                 # Sync tracking state\n
            ","path":["Recipes","Knowledge and Tasks","Bridging Claude Code Auto Memory"],"tags":[]},{"location":"recipes/memory-bridge/#edge-cases","level":2,"title":"Edge Cases","text":"Scenario Behavior Auto memory not active sync exits 1 with message. status reports \"not active\". Hook skips silently. First sync (no mirror) Creates mirror without archiving. MEMORY.md is empty Syncs to empty mirror (valid). Not initialized Init guard rejects (same as all ctx commands).","path":["Recipes","Knowledge and Tasks","Bridging Claude Code Auto Memory"],"tags":[]},{"location":"recipes/memory-bridge/#importing-entries","level":2,"title":"Importing Entries","text":"

            Once you've synced, you can classify and promote entries into structured .context/ files:

            ctx memory import --dry-run    # Preview classification\nctx memory import              # Actually promote entries\n

            Each entry is classified by keyword heuristics:

            Keywords Target always use, prefer, never use, standard CONVENTIONS.md decided, chose, trade-off, approach DECISIONS.md gotcha, learned, watch out, bug, caveat LEARNINGS.md todo, need to, follow up TASKS.md Everything else Skipped

            Entries that don't match any pattern are skipped - they stay in the mirror for manual review. Deduplication (hash-based) prevents re-importing the same entry on subsequent runs.

            Review Before Importing

            Use --dry-run first. The heuristic classifier is deliberately simple - it may misclassify ambiguous entries. Review the plan, then import.

            ","path":["Recipes","Knowledge and Tasks","Bridging Claude Code Auto Memory"],"tags":[]},{"location":"recipes/memory-bridge/#full-workflow","level":3,"title":"Full Workflow","text":"
            ctx memory sync                # 1. Mirror MEMORY.md\nctx memory import --dry-run    # 2. Preview what would be imported\nctx memory import              # 3. Promote entries to .context/ files\n
            ","path":["Recipes","Knowledge and Tasks","Bridging Claude Code Auto Memory"],"tags":[]},{"location":"recipes/memory-bridge/#publishing-context-to-memorymd","level":2,"title":"Publishing Context to MEMORY.md","text":"

            Push curated .context/ content back into MEMORY.md so Claude Code sees structured project context on session start - without needing hooks.

            ctx memory publish --dry-run    # Preview what would be published\nctx memory publish              # Write to MEMORY.md\nctx memory publish --budget 40  # Tighter line budget\n

            Published content is wrapped in markers:

            <!-- ctx:published -->\n# Project Context (managed by ctx)\n\n## Pending Tasks\n- [ ] Implement feature X\n...\n<!-- ctx:end -->\n

            Rules:

            • ctx owns everything between the markers
            • Claude owns everything outside the markers
            • ctx memory import reads only outside the markers
            • ctx memory publish replaces only inside the markers

            To remove the published block entirely:

            ctx memory unpublish\n

            Publish at Wrap-Up, Not on Commit

            The best time to publish is during session wrap-up, after persisting decisions and learnings. Never auto-publish - give yourself a chance to review what's going into MEMORY.md.

            ","path":["Recipes","Knowledge and Tasks","Bridging Claude Code Auto Memory"],"tags":[]},{"location":"recipes/memory-bridge/#full-bidirectional-workflow","level":3,"title":"Full Bidirectional Workflow","text":"
            ctx memory sync                 # 1. Mirror MEMORY.md\nctx memory import --dry-run     # 2. Check what Claude wrote\nctx memory import               # 3. Promote entries to .context/\nctx memory publish --dry-run    # 4. Check what would be published\nctx memory publish              # 5. Push context to MEMORY.md\n
            ","path":["Recipes","Knowledge and Tasks","Bridging Claude Code Auto Memory"],"tags":[]},{"location":"recipes/multi-tool-setup/","level":1,"title":"Setup Across AI Tools","text":"","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multi-tool-setup/#the-problem","level":2,"title":"The Problem","text":"

            You have installed ctx and want to set it up with your AI coding assistant so that context persists across sessions. Different tools have different integration depths. For example:

            • Claude Code supports native hooks that load and save context automatically.
            • Cursor injects context via its system prompt.
            • Aider reads context files through its --read flag.

            This recipe walks through the complete setup for each tool, from initialization through verification, so you end up with a working memory layer regardless of which AI tool you use.

            ","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multi-tool-setup/#tldr","level":2,"title":"TL;DR","text":"
            cd your-project\nctx init                      # creates .context/\nsource <(ctx completion zsh)  # shell completion (or bash/fish)\n\n# ## Claude Code (automatic after plugin install) ##\nclaude /plugin marketplace add ActiveMemory/ctx\nclaude /plugin install ctx@activememory-ctx\n\n# ## Cursor / Aider / Copilot / Windsurf ##\nctx setup cursor # or: aider, copilot, windsurf\n\n# ## Companion tools (highly recommended) ##\nnpx gitnexus analyze          # code knowledge graph\n# Add Gemini Search MCP server for grounded web search\n

            Create a .ctxrc in your project root to configure token budgets, context directory, drift thresholds, and more.

            Then start your AI tool and ask: \"Do you remember?\"

            ","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multi-tool-setup/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Command/Skill Role in this workflow ctx init Create .context/ directory, templates, and permissions ctx setup Generate integration configuration for a specific AI tool ctx agent Print a token-budgeted context packet for AI consumption ctx load Output assembled context in read order (for manual pasting) ctx watch Auto-apply context updates from AI output (non-native tools) ctx completion Generate shell autocompletion for bash, zsh, or fish ctx journal import Import sessions to editable journal Markdown","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multi-tool-setup/#the-workflow","level":2,"title":"The Workflow","text":"","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multi-tool-setup/#step-1-initialize-ctx","level":3,"title":"Step 1: Initialize ctx","text":"

            Run ctx init in your project root. This creates the .context/ directory with all template files and seeds ctx permissions in settings.local.json.

            cd your-project\nctx init\n

            This produces the following structure:

            .context/\n  CONSTITUTION.md     # Hard rules the AI must never violate\n  TASKS.md            # Current and planned work\n  CONVENTIONS.md      # Code patterns and standards\n  ARCHITECTURE.md     # System overview\n  DECISIONS.md        # Architectural decisions with rationale\n  LEARNINGS.md        # Lessons learned, gotchas, tips\n  GLOSSARY.md         # Domain terms and abbreviations\n  AGENT_PLAYBOOK.md   # How AI tools should use this system\n

            Using a Different .context Directory

            The .context/ directory doesn't have to live inside your project. Point ctx to an external folder by exporting CTX_DIR (the only declaration channel).

            Useful when context must stay private while the code is public, or when you want to commit notes to a separate repo.

            Caveats (the recipe covers both with workarounds):

            • Code-aware operations degrade silently. ctx sync, ctx drift, and the memory-drift hook read the codebase from dirname(CTX_DIR). With an external .context/, that's the context repo, not your code repo. They scan the wrong tree without erroring. The recipe shows a symlink workaround that keeps both healthy.
            • One .context/ per project, always. Sharing one directory across multiple projects corrupts journals, state, and secrets. For cross-project knowledge sharing (CONSTITUTION, CONVENTIONS, ARCHITECTURE, etc.) use ctx hub, not a shared .context/.

            See External Context for the full recipe and Configuration for the resolver details.

            For Claude Code, install the ctx plugin to get hooks and skills:

            claude /plugin marketplace add ActiveMemory/ctx\nclaude /plugin install ctx@activememory-ctx\n

            If you only need the core files (useful for lightweight setups), use the --minimal flag:

            ctx init --minimal\n

            This creates only TASKS.md, DECISIONS.md, and CONSTITUTION.md.

            ","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multi-tool-setup/#step-2-generate-tool-specific-hooks","level":3,"title":"Step 2: Generate Tool-Specific Hooks","text":"

            If you are using a tool other than Claude Code (which is configured automatically by ctx init), generate its integration configuration:

            # For Cursor\nctx setup cursor\n\n# For Aider\nctx setup aider\n\n# For GitHub Copilot\nctx setup copilot\n\n# For Windsurf\nctx setup windsurf\n

            Each command prints the configuration you need. How you apply it depends on the tool.

            ","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multi-tool-setup/#claude-code","level":4,"title":"Claude Code","text":"

            No action needed. Just install ctx from the Marketplace as ActiveMemory/ctx.

            Claude Code Is a First-Class Citizen

            With the ctx plugin installed, Claude Code gets hooks and skills automatically. The PreToolUse hook runs ctx agent --budget 4000 on every tool call (with a 10-minute cooldown so it only fires once per window).

            ","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multi-tool-setup/#cursor","level":4,"title":"Cursor","text":"

            Add the system prompt snippet to .cursor/settings.json:

            {\n  \"ai.systemPrompt\": \"Read .context/TASKS.md and .context/CONVENTIONS.md before responding. Follow rules in .context/CONSTITUTION.md.\"\n}\n

            Context files appear in Cursor's file tree. You can also paste a context packet directly into chat:

            ctx agent --budget 4000 | xclip    # Linux\nctx agent --budget 4000 | pbcopy   # macOS\n
            ","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multi-tool-setup/#aider","level":4,"title":"Aider","text":"

            Create .aider.conf.yml so context files are loaded on every session:

            read:\n  - .context/CONSTITUTION.md\n  - .context/TASKS.md\n  - .context/CONVENTIONS.md\n  - .context/DECISIONS.md\n

            Then start Aider normally:

            aider\n

            Or specify files on the command line:

            aider --read .context/TASKS.md --read .context/CONVENTIONS.md\n
            ","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multi-tool-setup/#step-3-set-up-shell-completion","level":3,"title":"Step 3: Set Up Shell Completion","text":"

            Shell completion lets you tab-complete ctx subcommands and flags, which is especially useful while learning the CLI.

            # Bash (add to ~/.bashrc)\nsource <(ctx completion bash)\n\n# Zsh (add to ~/.zshrc)\nsource <(ctx completion zsh)\n\n# Fish\nctx completion fish > ~/.config/fish/completions/ctx.fish\n

            After sourcing, typing ctx a<TAB> completes to ctx agent, and ctx journal <TAB> shows list, show, and export.

            ","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multi-tool-setup/#step-4-verify-the-setup-works","level":3,"title":"Step 4: Verify the Setup Works","text":"

            Start a fresh session in your AI tool and ask:

            \"Do you remember?\"

            A correctly configured tool responds with specific context: current tasks from TASKS.md, recent decisions, and previous session topics. It should not say \"I don't have memory\" or \"Let me search for files.\"

            This question checks the passive side of memory. A properly set-up agent is also proactive: it treats context maintenance as part of its job:

            • After a debugging session, it offers to save a learning.
            • After a trade-off discussion, it asks whether to record the decision.
            • After completing a task, it suggests follow-up items.

            The \"do you remember?\" check verifies both halves: recall and responsibility.

            For example, after resolving a tricky bug, a proactive agent might say:

            That Redis timeout issue was subtle. Want me to save this as a *learning*\nso we don't hit it again?\n

            If you see behavior like this, the setup is working end to end.

            In Claude Code, you can also invoke the /ctx-status skill:

            /ctx-status\n

            This prints a summary of all context files, token counts, and recent activity, confirming that hooks are loading context.

            If context is not loading, check the basics:

            Symptom Fix ctx: command not found Ensure ctx is in your PATH: which ctx Hook errors Verify plugin is installed: claude /plugin list Context not refreshing Cooldown may be active; wait 10 minutes or set --cooldown 0","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multi-tool-setup/#step-5-enable-watch-mode-for-non-native-tools","level":3,"title":"Step 5: Enable Watch Mode for Non-Native Tools","text":"

            Tools like Aider, Copilot, and Windsurf do not support native hooks for saving context automatically. For these, run ctx watch alongside your AI tool.

            Pipe the AI tool's output through ctx watch:

            # Terminal 1: Run Aider with output logged\naider 2>&1 | tee /tmp/aider.log\n\n# Terminal 2: Watch the log for context updates\nctx watch --log /tmp/aider.log\n

            Or for any generic tool:

            your-ai-tool 2>&1 | tee /tmp/ai.log &\nctx watch --log /tmp/ai.log\n

            When the AI emits structured update commands, ctx watch parses and applies them automatically:

            <context-update type=\"learning\"\n  context=\"Debugging rate limiter\"\n  lesson=\"Redis MULTI/EXEC does not roll back on error\"\n  application=\"Wrap rate-limit checks in Lua scripts instead\"\n>Redis Transaction Behavior</context-update>\n

            To preview changes without modifying files:

            ctx watch --dry-run --log /tmp/ai.log\n
            ","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multi-tool-setup/#step-6-import-session-transcripts-optional","level":3,"title":"Step 6: Import Session Transcripts (Optional)","text":"

            If you want to browse past session transcripts, import them to the journal:

            ctx journal import --all\n

            This converts raw session data into editable Markdown files in .context/journal/. You can then enrich them with metadata using /ctx-journal-enrich-all inside your AI assistant.

            ","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multi-tool-setup/#putting-it-all-together","level":2,"title":"Putting It All Together","text":"

            Here is the condensed setup for all three tools:

            # ## Common (run once per project) ##\ncd your-project\nctx init\nsource <(ctx completion zsh)       # or bash/fish\n\n# ## Claude Code (automatic, just verify) ##\n# Start Claude Code, then ask: \"Do you remember?\"\n\n# ## Cursor ##\nctx setup cursor\n# Add the system prompt to .cursor/settings.json\n# Paste context: ctx agent --budget 4000 | pbcopy\n\n# ## Aider ##\nctx setup aider\n# Create .aider.conf.yml with read: paths\n# Run watch mode alongside: ctx watch --log /tmp/aider.log\n\n# ## Verify any Tool ##\n# Ask your AI: \"Do you remember?\"\n# Expect: specific tasks, decisions, recent context\n
            ","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multi-tool-setup/#tips","level":2,"title":"Tips","text":"
            • Start with ctx init (not --minimal) for your first project. The full template set gives the agent more to work with, and you can always delete files later.
            • For Claude Code, the token budget is configured in the plugin's hooks.json. To customize, adjust the --budget flag in the ctx agent hook command.
            • The --session $PPID flag isolates cooldowns per Claude Code process, so parallel sessions do not suppress each other.
            • Commit your .context/ directory to version control. Several ctx features (journals, changelogs, blog generation) rely on git history.
            • For Cursor and Copilot, keep CONVENTIONS.md visible. These tools treat open files as higher-priority context.
            • Run ctx drift periodically to catch stale references before they confuse the agent.
            • The agent playbook instructs the agent to persist context at natural milestones (completed tasks, decisions, gotchas). In practice, this works best when you reinforce the habit: a quick \"anything worth saving?\" after a debugging session goes a long way.
            ","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multi-tool-setup/#companion-tools-highly-recommended","level":2,"title":"Companion Tools (Highly Recommended)","text":"

            ctx skills can leverage external MCP servers for web search and code intelligence. ctx works without them, but they significantly improve agent behavior across sessions. The investment is small and the benefits compound. Skills like /ctx-code-review, /ctx-explain, and /ctx-refactor all become noticeably better with these tools connected.

            ","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multi-tool-setup/#gemini-search","level":3,"title":"Gemini Search","text":"

            Provides grounded web search with citations. Used by skills and the agent playbook as the preferred search backend (faster and more accurate than built-in web search).

            Setup: Add the Gemini Search MCP server to your Claude Code settings. See the Gemini Search MCP documentation for installation.

            Verification:

            # The agent checks this automatically during /ctx-remember\n# Manual test: ask the agent to search for something\n

            ","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multi-tool-setup/#gitnexus","level":3,"title":"GitNexus","text":"

            Provides a code knowledge graph with symbol resolution, blast radius analysis, and domain clustering. Used by skills like /ctx-refactor (impact analysis) and /ctx-code-review (dependency awareness).

            Setup: Add the GitNexus MCP server to your Claude Code settings, then index your project:

            npx gitnexus analyze\n

            Verification:

            # The agent checks this automatically during /ctx-remember\n# If the index is stale, it will suggest rehydrating\n

            ","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multi-tool-setup/#suppressing-the-check","level":3,"title":"Suppressing the Check","text":"

            If you don't use companion tools and want to skip the availability check at session start, add to .ctxrc:

            companion_check: false\n
            ","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multi-tool-setup/#future-direction","level":3,"title":"Future Direction","text":"

            The companion tool integration is evolving toward a pluggable model: bring your own search engine, bring your own code intelligence. The current integration is MCP-based and limited to Gemini Search and GitNexus. If you use a different search or code intelligence tool, skills will degrade gracefully to built-in capabilities.

            ","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multi-tool-setup/#next-up","level":2,"title":"Next Up","text":"

            Keeping Context in a Separate Repo →: Store context files outside the project tree for multi-repo or open source setups.

            ","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multi-tool-setup/#see-also","level":2,"title":"See Also","text":"
            • The Complete Session: full session lifecycle recipe
            • Multilingual Session Parsing: configure session header prefixes for other languages
            • CLI Reference: all commands and flags
            • Integrations: detailed per-tool integration docs
            ","path":["Recipes","Getting Started","Setup Across AI Tools"],"tags":[]},{"location":"recipes/multilingual-sessions/","level":1,"title":"Multilingual Session Parsing","text":"","path":["Recipes","Getting Started","Multilingual Session Parsing"],"tags":[]},{"location":"recipes/multilingual-sessions/#the-problem","level":2,"title":"The Problem","text":"

            Your team works across languages. Session files written by AI tools might use headers like # Oturum: 2026-01-15 - API Düzeltme (Turkish) or # セッション: 2026-01-15 - テスト (Japanese) instead of # Session: 2026-01-15 - Fix API.

            By default, ctx only recognizes Session: as a session header prefix. Files with other prefixes are silently skipped during journal import and journal generation: They look like regular Markdown, not sessions.

            ","path":["Recipes","Getting Started","Multilingual Session Parsing"],"tags":[]},{"location":"recipes/multilingual-sessions/#tldr","level":2,"title":"TL;DR","text":"

            Add recognized prefixes to .ctxrc:

            session_prefixes:\n  - \"Session:\"      # English (include to keep default)\n  - \"Oturum:\"       # Turkish\n  - \"セッション:\"     # Japanese\n

            Restart your session. All configured prefixes are now recognized.

            ","path":["Recipes","Getting Started","Multilingual Session Parsing"],"tags":[]},{"location":"recipes/multilingual-sessions/#how-it-works","level":2,"title":"How It Works","text":"

            The Markdown session parser detects session files by looking for an H1 header that starts with a known prefix followed by a date:

            # Session: 2026-01-15 - Fix API Rate Limiting\n# Oturum: 2026-01-15 - API Düzeltme\n# セッション: 2026-01-15 - テスト\n

            The list of recognized prefixes comes from session_prefixes in .ctxrc. When the key is absent or empty, ctx falls back to the built-in default: [\"Session:\"].

            Date-only headers (# 2026-01-15 - Morning Work) are always recognized regardless of prefix configuration.

            ","path":["Recipes","Getting Started","Multilingual Session Parsing"],"tags":[]},{"location":"recipes/multilingual-sessions/#configuration","level":2,"title":"Configuration","text":"","path":["Recipes","Getting Started","Multilingual Session Parsing"],"tags":[]},{"location":"recipes/multilingual-sessions/#adding-a-language","level":3,"title":"Adding a Language","text":"

            Add the prefix with a trailing colon to your .ctxrc:

            session_prefixes:\n  - \"Session:\"\n  - \"Sesión:\"       # Spanish\n

            Include Session: Explicitly

            When you override session_prefixes, the default is replaced, not extended. If you still want English headers recognized, include \"Session:\" in your list.

            ","path":["Recipes","Getting Started","Multilingual Session Parsing"],"tags":[]},{"location":"recipes/multilingual-sessions/#team-setup","level":3,"title":"Team Setup","text":"

            Commit .ctxrc to the repo so all team members share the same prefix list. This ensures ctx journal import and journal generation pick up sessions from all team members regardless of language.

            ","path":["Recipes","Getting Started","Multilingual Session Parsing"],"tags":[]},{"location":"recipes/multilingual-sessions/#common-prefixes","level":3,"title":"Common Prefixes","text":"Language Prefix English Session: Turkish Oturum: Spanish Sesión: French Session: German Sitzung: Japanese セッション: Korean 세션: Portuguese Sessão: Chinese 会话:","path":["Recipes","Getting Started","Multilingual Session Parsing"],"tags":[]},{"location":"recipes/multilingual-sessions/#verifying","level":3,"title":"Verifying","text":"

            After configuring, test with ctx journal source. Sessions with the new prefixes should appear in the output.

            ","path":["Recipes","Getting Started","Multilingual Session Parsing"],"tags":[]},{"location":"recipes/multilingual-sessions/#what-this-does-not-do","level":2,"title":"What This Does NOT Do","text":"
            • Change the interface language: ctx output is always English. This setting only controls which session files ctx can parse.
            • Generate headers: ctx never writes session headers. The prefix list is recognition-only (input, not output).
            • Affect JSONL sessions: Claude Code JSONL transcripts don't use header prefixes. This only applies to Markdown session files in .context/sessions/.
            ","path":["Recipes","Getting Started","Multilingual Session Parsing"],"tags":[]},{"location":"recipes/multilingual-sessions/#see-also","level":2,"title":"See Also","text":"

            See also: Setup Across AI Tools - complete multi-tool setup including Markdown session configuration.

            See also: CLI Reference - full .ctxrc field reference including session_prefixes.

            ","path":["Recipes","Getting Started","Multilingual Session Parsing"],"tags":[]},{"location":"recipes/parallel-worktrees/","level":1,"title":"Parallel Agent Development with Git Worktrees","text":"","path":["Recipes","Agents and Automation","Parallel Agent Development with Git Worktrees"],"tags":[]},{"location":"recipes/parallel-worktrees/#the-problem","level":2,"title":"The Problem","text":"

            You have a large backlog (10, 20, 30 open tasks) and many of them are independent: docs work that doesn't touch Go code, a new package that doesn't overlap with existing ones, test coverage for a stable module.

            Running one agent at a time means serial execution. You want 3-4 agents working in parallel, each on its own track, without stepping on each other's files.

            Git worktrees solve this.

            Each worktree is a separate working directory with its own branch, but they share the same .git object database. Combined with ctx's persistent context, each agent session picks up the full project state and works independently.

            ","path":["Recipes","Agents and Automation","Parallel Agent Development with Git Worktrees"],"tags":[]},{"location":"recipes/parallel-worktrees/#tldr","level":2,"title":"TL;DR","text":"
            /ctx-worktree                                   # 1. group tasks by file overlap\ngit worktree add ../myproject-docs -b work/docs # 2. create worktrees\ncd ../myproject-docs && claude                  # 3. launch agents (one per track)\n/ctx-worktree teardown docs                     # 4. merge back and clean up\n

            TASKS.md will conflict on merge: Accept all [x] completions from both sides.

            ","path":["Recipes","Agents and Automation","Parallel Agent Development with Git Worktrees"],"tags":[]},{"location":"recipes/parallel-worktrees/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Tool Type Purpose /ctx-worktree Skill Create, list, and tear down worktrees /ctx-next Skill Pick tasks from the backlog for each track git worktree Command Underlying git worktree management git merge Command Merge completed tracks back to main","path":["Recipes","Agents and Automation","Parallel Agent Development with Git Worktrees"],"tags":[]},{"location":"recipes/parallel-worktrees/#the-workflow","level":2,"title":"The Workflow","text":"","path":["Recipes","Agents and Automation","Parallel Agent Development with Git Worktrees"],"tags":[]},{"location":"recipes/parallel-worktrees/#step-1-assess-the-backlog","level":3,"title":"Step 1: Assess the Backlog","text":"

            Start in your main checkout. Ask the agent to analyze your tasks and group them by blast radius: which files and directories each task touches.

            /ctx-worktree\nLook at TASKS.md and group the pending tasks into 2-3 independent\ntracks based on which files they'd touch. Show me the grouping\nbefore creating anything.\n

            The agent reads TASKS.md, estimates file overlap, and proposes groups:

            Proposed worktree groups:\n\n  work/docs   # recipe updates, blog post (touches: docs/)\n  work/crypto # scratchpad encryption infra (touches: internal/crypto/)\n  work/tests  # journal test coverage (touches: internal/cli/journal/)\n
            ","path":["Recipes","Agents and Automation","Parallel Agent Development with Git Worktrees"],"tags":[]},{"location":"recipes/parallel-worktrees/#step-2-create-the-worktrees","level":3,"title":"Step 2: Create the Worktrees","text":"

            Once you approve the grouping, the agent creates worktrees as sibling directories:

            Create the worktrees for those three groups.\n

            Behind the scenes:

            git worktree add ../myproject-docs -b work/docs\ngit worktree add ../myproject-crypto -b work/crypto\ngit worktree add ../myproject-tests -b work/tests\n

            Each worktree is a full working copy on its own branch.

            ","path":["Recipes","Agents and Automation","Parallel Agent Development with Git Worktrees"],"tags":[]},{"location":"recipes/parallel-worktrees/#step-3-launch-agents","level":3,"title":"Step 3: Launch Agents","text":"

            Open a separate terminal (or editor window) for each worktree and start a Claude Code session:

            # Terminal 1\ncd ../myproject-docs\nclaude\n\n# Terminal 2\ncd ../myproject-crypto\nclaude\n\n# Terminal 3\ncd ../myproject-tests\nclaude\n

            Each agent sees the full project, including .context/, and can work independently.

            Do Not Initialize Context in Worktrees

            Do not run ctx init in worktrees: The .context directory is already tracked in git.

            ","path":["Recipes","Agents and Automation","Parallel Agent Development with Git Worktrees"],"tags":[]},{"location":"recipes/parallel-worktrees/#step-4-work","level":3,"title":"Step 4: Work","text":"

            Each agent works through its assigned tasks. They can read TASKS.md to know what's assigned to their track, use /ctx-next to pick the next item, and commit normally on their work/* branch.

            ","path":["Recipes","Agents and Automation","Parallel Agent Development with Git Worktrees"],"tags":[]},{"location":"recipes/parallel-worktrees/#step-5-merge-back","level":3,"title":"Step 5: Merge Back","text":"

            As each track finishes, return to the main checkout and merge:

            /ctx-worktree teardown docs\n

            The agent checks for uncommitted changes, merges work/docs into your current branch, removes the worktree, and deletes the branch.

            ","path":["Recipes","Agents and Automation","Parallel Agent Development with Git Worktrees"],"tags":[]},{"location":"recipes/parallel-worktrees/#step-6-handle-tasksmd-conflicts","level":3,"title":"Step 6: Handle TASKS.md Conflicts","text":"

            TASKS.md will almost always conflict when merging: Multiple agents will mark different tasks as [x]. This is expected and easy to resolve:

            Accept all completions from both sides. No task should go from [x] back to [ ]. The merge resolution is always additive.

            ","path":["Recipes","Agents and Automation","Parallel Agent Development with Git Worktrees"],"tags":[]},{"location":"recipes/parallel-worktrees/#step-7-cleanup","level":3,"title":"Step 7: Cleanup","text":"

            After all tracks are merged, verify everything is clean:

            /ctx-worktree list\n

            Should show only the main working tree. All work/* branches should be gone.

            ","path":["Recipes","Agents and Automation","Parallel Agent Development with Git Worktrees"],"tags":[]},{"location":"recipes/parallel-worktrees/#conversational-approach","level":2,"title":"Conversational Approach","text":"

            You don't have to use the skill directly for every step. These natural prompts work:

            • \"I have a big backlog. Can we split it across worktrees?\"
            • \"Which of these tasks can run in parallel without conflicts?\"
            • \"Merge the docs track back in.\"
            • \"Clean up all the worktrees, we're done.\"
            ","path":["Recipes","Agents and Automation","Parallel Agent Development with Git Worktrees"],"tags":[]},{"location":"recipes/parallel-worktrees/#what-works-differently-in-worktrees","level":2,"title":"What Works Differently in Worktrees","text":"

            The encryption key lives at ~/.ctx/.ctx.key (user-level, outside the project). Because all worktrees on the same machine share this path, ctx pad and ctx hook notify work in worktrees automatically - no special setup needed.

            One thing to watch:

            • Journal enrichment: ctx journal import and ctx journal enrich write files relative to the current working directory. Enrichments created in a worktree stay there and are discarded on teardown. Enrich journals on the main branch after merging: the JSONL session logs are always intact, and you don't lose any data.

            Context Files Will Merge Just Fine

            Tracked context files (TASKS.md, DECISIONS.md, LEARNINGS.md, CONVENTIONS.md) work normally; git handles them.

            ","path":["Recipes","Agents and Automation","Parallel Agent Development with Git Worktrees"],"tags":[]},{"location":"recipes/parallel-worktrees/#tips","level":2,"title":"Tips","text":"
            • 3-4 worktrees max. Beyond that, merge complexity outweighs the parallelism benefit. The skill enforces this limit.
            • Group by package or directory, not by priority. Two high-priority tasks that touch the same files must be in the same track.
            • TASKS.md will conflict on merge. This is normal. Accept all [x] completions: The resolution is always additive.
            • Don't run ctx init in worktrees. The .context/ directory is tracked in git. Running init overwrites shared context files.
            • Name worktrees by concern, not by number. work/docs and work/crypto are more useful than work/track-1 and work/track-2.
            • Commit frequently in each worktree. Smaller commits make merge conflicts easier to resolve.
            ","path":["Recipes","Agents and Automation","Parallel Agent Development with Git Worktrees"],"tags":[]},{"location":"recipes/parallel-worktrees/#next-up","level":2,"title":"Next Up","text":"

            Back to the beginning: Guide Your Agent →

            Or explore the full recipe list.

            ","path":["Recipes","Agents and Automation","Parallel Agent Development with Git Worktrees"],"tags":[]},{"location":"recipes/parallel-worktrees/#see-also","level":2,"title":"See Also","text":"
            • Running an Unattended AI Agent: for serial autonomous loops instead of parallel tracks
            • Tracking Work Across Sessions: managing the task backlog that feeds into parallelization
            • The Complete Session: the complete session workflow end-to-end, with examples
            ","path":["Recipes","Agents and Automation","Parallel Agent Development with Git Worktrees"],"tags":[]},{"location":"recipes/permission-snapshots/","level":1,"title":"Permission Snapshots","text":"","path":["Recipes","Maintenance","Permission Snapshots"],"tags":[]},{"location":"recipes/permission-snapshots/#the-problem","level":2,"title":"The Problem","text":"

            Claude Code's .claude/settings.local.json accumulates one-off permissions every time you click \"Allow\". After busy sessions the file is full of session-specific entries that expand the agent's surface area beyond intent.

            Since settings.local.json is .gitignored, there is no PR review or CI check. The file drifts independently on every machine, and there is no built-in way to reset to a known-good state.

            ","path":["Recipes","Maintenance","Permission Snapshots"],"tags":[]},{"location":"recipes/permission-snapshots/#tldr","level":2,"title":"TL;DR","text":"
            /ctx-permission-sanitize               # audit for dangerous patterns\nctx permission snapshot            # save golden image\n# ... sessions accumulate cruft ...\nctx permission restore             # reset to golden state\n
            ","path":["Recipes","Maintenance","Permission Snapshots"],"tags":[]},{"location":"recipes/permission-snapshots/#the-solution","level":2,"title":"The Solution","text":"

            Save a curated settings.local.json as a golden image, then restore from it to drop session-accumulated permissions. The golden file (.claude/settings.golden.json) is committed to version control and shared with the team.

            ","path":["Recipes","Maintenance","Permission Snapshots"],"tags":[]},{"location":"recipes/permission-snapshots/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Command/Skill Role in this workflow ctx permission snapshot Save settings.local.json as golden image ctx permission restore Reset settings.local.json from golden image /ctx-permission-sanitize Audit for dangerous patterns before snapshotting","path":["Recipes","Maintenance","Permission Snapshots"],"tags":[]},{"location":"recipes/permission-snapshots/#step-by-step","level":2,"title":"Step by Step","text":"","path":["Recipes","Maintenance","Permission Snapshots"],"tags":[]},{"location":"recipes/permission-snapshots/#1-curate-your-permissions","level":3,"title":"1. Curate Your Permissions","text":"

            Start with a clean settings.local.json. Optionally run /ctx-permission-sanitize to remove dangerous patterns first.

            Review the file manually. Every entry should be there because you decided it belongs, not because you clicked \"Allow\" once during debugging.

            See the Permission Hygiene recipe for recommended defaults.

            ","path":["Recipes","Maintenance","Permission Snapshots"],"tags":[]},{"location":"recipes/permission-snapshots/#2-take-a-snapshot","level":3,"title":"2. Take a Snapshot","text":"
            ctx permission snapshot\n# Saved golden image: .claude/settings.golden.json\n

            This creates a byte-for-byte copy. No re-encoding, no indent changes.

            ","path":["Recipes","Maintenance","Permission Snapshots"],"tags":[]},{"location":"recipes/permission-snapshots/#3-commit-the-golden-file","level":3,"title":"3. Commit the Golden File","text":"
            git add .claude/settings.golden.json\ngit commit -m \"Add permission golden image\"\n

            The golden file is not gitignored (unlike settings.local.json). This is intentional: it becomes a team-shared baseline.

            ","path":["Recipes","Maintenance","Permission Snapshots"],"tags":[]},{"location":"recipes/permission-snapshots/#4-auto-restore-at-the-session-start","level":3,"title":"4. Auto-Restore at the Session Start","text":"

            Add this instruction to your CLAUDE.md:

            ## On Session Start\n\nRun `ctx permission restore` to reset permissions to the golden image.\n

            The agent will restore the golden image at the start of every session, automatically dropping any permissions accumulated during previous sessions.

            ","path":["Recipes","Maintenance","Permission Snapshots"],"tags":[]},{"location":"recipes/permission-snapshots/#5-update-when-intentional-changes-are-made","level":3,"title":"5. Update When Intentional Changes Are Made","text":"

            When you add a new permanent permission (not a one-off debugging entry):

            # Edit settings.local.json with the new permission\n# Then update the golden image:\nctx permission snapshot\ngit add .claude/settings.golden.json\ngit commit -m \"Update permission golden image: add cargo test\"\n
            ","path":["Recipes","Maintenance","Permission Snapshots"],"tags":[]},{"location":"recipes/permission-snapshots/#conversational-approach","level":2,"title":"Conversational Approach","text":"

            You don't need to remember exact commands. These natural-language prompts work with agents trained on the ctx playbook:

            What you say What happens \"Save my current permissions as baseline\" Agent runs ctx permission snapshot \"Reset permissions to the golden image\" Agent runs ctx permission restore \"Clean up my permissions\" Agent runs /ctx-permission-sanitize then snapshot \"What permissions did I accumulate?\" Agent diffs local vs golden","path":["Recipes","Maintenance","Permission Snapshots"],"tags":[]},{"location":"recipes/permission-snapshots/#next-up","level":2,"title":"Next Up","text":"

            Turning Activity into Content →: Generate blog posts, changelogs, and journal sites from your project activity.

            ","path":["Recipes","Maintenance","Permission Snapshots"],"tags":[]},{"location":"recipes/permission-snapshots/#see-also","level":2,"title":"See Also","text":"
            • Permission Hygiene: recommended defaults and maintenance workflow
            • CLI Reference: ctx permission: full command documentation
            ","path":["Recipes","Maintenance","Permission Snapshots"],"tags":[]},{"location":"recipes/publishing/","level":1,"title":"Turning Activity into Content","text":"","path":["Recipes","Maintenance","Turning Activity into Content"],"tags":[]},{"location":"recipes/publishing/#the-problem","level":2,"title":"The Problem","text":"

            Your .context/ directory is full of decisions, learnings, and session history.

            Your git log tells the story of a project evolving.

            But none of this is visible to anyone outside your terminal.

            You want to turn this raw activity into:

            • a browsable journal site,
            • blog posts,
            • changelog posts.
            ","path":["Recipes","Maintenance","Turning Activity into Content"],"tags":[]},{"location":"recipes/publishing/#tldr","level":2,"title":"TL;DR","text":"
            ctx journal import --all             # 1. import sessions to markdown\n\n/ctx-journal-enrich-all             # 2. add metadata and tags\n\nctx journal site --serve            # 3. build and serve the journal\n\n/ctx-blog about the caching layer   # 4. draft a blog post\n/ctx-blog-changelog v0.1.0 \"v0.2\"   # 5. write a changelog post\n

            Read on for details on each stage.

            ","path":["Recipes","Maintenance","Turning Activity into Content"],"tags":[]},{"location":"recipes/publishing/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Tool Type Purpose ctx journal import Command Import session JSONL to editable markdown ctx journal site Command Generate a static site from journal entries ctx journal obsidian Command Generate an Obsidian vault from journal entries ctx serve Command Serve any zensical directory (default: journal) ctx site feed Command Generate Atom feed from finalized blog posts make journal Makefile Shortcut for import + site rebuild /ctx-journal-enrich-all Skill Full pipeline: import if needed, then batch-enrich (recommended) /ctx-journal-enrich Skill Add metadata, summaries, and tags to one entry /ctx-blog Skill Draft a blog post from recent project activity /ctx-blog-changelog Skill Write a themed post from a commit range","path":["Recipes","Maintenance","Turning Activity into Content"],"tags":[]},{"location":"recipes/publishing/#the-workflow","level":2,"title":"The Workflow","text":"","path":["Recipes","Maintenance","Turning Activity into Content"],"tags":[]},{"location":"recipes/publishing/#step-1-import-sessions-to-markdown","level":3,"title":"Step 1: Import Sessions to Markdown","text":"

            Raw session data lives as JSONL files in Claude Code's internal storage. The first step is converting these into readable, editable markdown.

            # Import all sessions from the current project\nctx journal import --all\n\n# Import from all projects (if you work across multiple repos)\nctx journal import --all --all-projects\n\n# Import a single session by ID or slug\nctx journal import abc123\nctx journal import gleaming-wobbling-sutherland\n

            Imported files land in .context/journal/ as individual Markdown files with session metadata and the full conversation transcript.

            --all is safe by default: Only new sessions are imported. Existing files are skipped. Use --regenerate to re-import existing files (YAML frontmatter is preserved). Use --regenerate --keep-frontmatter=false -y to regenerate everything including frontmatter.

            ","path":["Recipes","Maintenance","Turning Activity into Content"],"tags":[]},{"location":"recipes/publishing/#step-2-enrich-entries-with-metadata","level":3,"title":"Step 2: Enrich Entries with Metadata","text":"

            Raw entries have timestamps and conversations but lack the structured metadata that makes a journal searchable. Use /ctx-journal-enrich-all to process your entire backlog at once:

            /ctx-journal-enrich-all\n

            The skill finds all unenriched entries, filters out noise (suggestion sessions, very short sessions, multipart continuations), and processes each one by extracting titles, topics, technologies, and summaries from the conversation.

            For large backlogs (20+ entries), it can spawn subagents to process entries in parallel.

            To enrich a single entry instead:

            /ctx-journal-enrich twinkly-stirring-kettle\n/ctx-journal-enrich 2026-01-24\n

            After enrichment, an entry gains YAML frontmatter:

            ---\ntitle: \"Implement Redis caching for API endpoints\"\ndate: 2026-01-24\ntype: feature\noutcome: completed\ntopics:\n  - caching\n  - api-performance\ntechnologies:\n  - go\n  - redis\nkey_files:\n  - internal/api/middleware/cache.go\n  - internal/cache/redis.go\n---\n

            This metadata powers better navigation in the journal site:

            • titles replace slugs,
            • summaries appear in the index,
            • and search covers topics and technologies.
            ","path":["Recipes","Maintenance","Turning Activity into Content"],"tags":[]},{"location":"recipes/publishing/#step-3-generate-the-journal-site","level":3,"title":"Step 3: Generate the Journal Site","text":"

            With entries exported and enriched, generate the static site:

            # Generate site files\nctx journal site\n\n# Generate and build static HTML\nctx journal site --build\n\n# Generate and serve locally (opens at http://localhost:8000)\nctx journal site --serve\n\n# Custom output directory\nctx journal site --output ~/my-journal\n

            The site is generated in .context/journal-site/ by default. It uses zensical for static site generation (pipx install zensical).

            Or use the Makefile shortcut that combines export and rebuild:

            make journal\n

            This runs ctx journal import --all followed by ctx journal site --build, then reminds you to enrich before rebuilding. To serve the built site, use make journal-serve or ctx serve (serve-only, no regeneration).

            ","path":["Recipes","Maintenance","Turning Activity into Content"],"tags":[]},{"location":"recipes/publishing/#alternative-export-to-obsidian-vault","level":3,"title":"Alternative: Export to Obsidian Vault","text":"

            If you use Obsidian for knowledge management, generate a vault instead of (or alongside) the static site:

            ctx journal obsidian\nctx journal obsidian --output ~/vaults/ctx-journal\n

            This produces an Obsidian-ready directory with wikilinks, MOC (Map of Content) pages for topics/files/types, and a \"Related Sessions\" footer on each entry for graph connectivity. Open the output directory in Obsidian as a vault.

            The vault uses the same enriched source entries as the static site. Both outputs can coexist: The static site goes to .context/journal-site/, the vault to .context/journal-obsidian/.

            ","path":["Recipes","Maintenance","Turning Activity into Content"],"tags":[]},{"location":"recipes/publishing/#step-4-draft-blog-posts-from-activity","level":3,"title":"Step 4: Draft Blog Posts from Activity","text":"

            When your project reaches a milestone worth sharing, use /ctx-blog to draft a post from recent activity. The skill gathers context from multiple sources: git log, DECISIONS.md, LEARNINGS.md, completed tasks, and journal entries.

            /ctx-blog about the caching layer we just built\n/ctx-blog last week's refactoring work\n/ctx-blog lessons learned from the migration\n

            The skill gathers recent commits, decisions, and learnings; identifies a narrative arc; drafts an outline for approval; writes the full post; and saves it to docs/blog/YYYY-MM-DD-slug.md.

            Posts are written in first person with code snippets, commit references, and an honest discussion of what went wrong.

            The Output Is zensical-Flavored Markdown

            The blog skills produce Markdown tuned for a zensical site: topics: frontmatter (zensical's tag field), a docs/blog/ output path, and a banner image reference.

            The content is still standard Markdown and can be adapted to other static site generators, but the defaults assume a zensical project structure.

            ","path":["Recipes","Maintenance","Turning Activity into Content"],"tags":[]},{"location":"recipes/publishing/#step-5-write-changelog-posts-from-commit-ranges","level":3,"title":"Step 5: Write Changelog Posts from Commit Ranges","text":"

            For release notes or \"what changed\" posts, /ctx-blog-changelog takes a starting commit and a theme, then analyzes everything that changed:

            /ctx-blog-changelog 040ce99 \"building the journal system\"\n/ctx-blog-changelog HEAD~30 \"what's new in v0.2.0\"\n/ctx-blog-changelog v0.1.0 \"the road to v0.2.0\"\n

            The skill diffs the commit range, identifies the most-changed files, and constructs a narrative organized by theme rather than chronology, including a key commits table and before/after comparisons.

            ","path":["Recipes","Maintenance","Turning Activity into Content"],"tags":[]},{"location":"recipes/publishing/#step-6-generate-the-blog-feed","level":3,"title":"Step 6: Generate the Blog Feed","text":"

            After publishing blog posts, generate the Atom feed so readers and automation can discover new content:

            ctx site feed\n

            This scans docs/blog/ for finalized posts (reviewed_and_finalized: true), extracts title, date, author, topics, and summary, and writes a valid Atom 1.0 feed to site/feed.xml. The feed is also generated automatically as part of make site.

            The feed is available at ctx.ist/feed.xml.

            ","path":["Recipes","Maintenance","Turning Activity into Content"],"tags":[]},{"location":"recipes/publishing/#the-conversational-approach","level":2,"title":"The Conversational Approach","text":"

            You can also drive your publishing anytime with natural language:

            \"write about what we did this week\"\n\"turn today's session into a blog post\"\n\"make a changelog post covering everything since the last release\"\n\"enrich the last few journal entries\"\n

            The agent has full visibility into your .context/ state (tasks completed, decisions recorded, learnings captured), so its suggestions are grounded in what actually happened.

            ","path":["Recipes","Maintenance","Turning Activity into Content"],"tags":[]},{"location":"recipes/publishing/#putting-it-all-together","level":2,"title":"Putting It All Together","text":"

            The full pipeline from raw transcripts to published content:

            # 1. Import all sessions\nctx journal import --all\n\n# 2. In Claude Code: enrich all entries with metadata\n/ctx-journal-enrich-all\n\n# 3. Build and serve the journal site\nmake journal\nmake journal-serve\n\n# 3b. Or generate an Obsidian vault\nctx journal obsidian\n\n# 4. In Claude Code: draft a blog post\n/ctx-blog about the features we shipped this week\n\n# 5. In Claude Code: write a changelog post\n/ctx-blog-changelog v0.1.0 \"what's new in v0.2.0\"\n

            The journal pipeline is idempotent at every stage. You can rerun ctx journal import --all without losing enrichment. You can rebuild the site as many times as you want.

            ","path":["Recipes","Maintenance","Turning Activity into Content"],"tags":[]},{"location":"recipes/publishing/#tips","level":2,"title":"Tips","text":"
            • Import regularly. Run ctx journal import --all after each session to keep your journal current. Only new sessions are imported: Existing files are skipped by default.
            • Use batch enrichment. /ctx-journal-enrich-all filters noise (suggestion sessions, trivial sessions, multipart continuations) so you do not have to decide what is worth enriching.
            • Keep journal files in .gitignore. Session journals can contain sensitive data: file contents, commands, internal discussions, and error messages with stack traces. Add .context/journal/ and .context/journal-site/ to .gitignore.
            • Use /ctx-blog for narrative posts and /ctx-blog-changelog for release posts. One finds a story in recent activity, the other explains a commit range by theme.
            • Edit the drafts. These skills produce drafts, not final posts. Review the narrative, add your perspective, and remove anything that does not serve the reader.
            ","path":["Recipes","Maintenance","Turning Activity into Content"],"tags":[]},{"location":"recipes/publishing/#next-up","level":2,"title":"Next Up","text":"

            Running an Unattended AI Agent →: Set up an AI agent that works through tasks overnight without you at the keyboard.

            ","path":["Recipes","Maintenance","Turning Activity into Content"],"tags":[]},{"location":"recipes/publishing/#see-also","level":2,"title":"See Also","text":"
            • Session Journal: journal system, enrichment schema
            • CLI Reference: ctx journal: import, list, show session history
            • CLI Reference: ctx journal site: static site generation
            • CLI Reference: ctx journal obsidian: Obsidian vault export
            • CLI Reference: ctx serve: serve-only (no regeneration)
            • Browsing and Enriching Past Sessions: journal browsing workflow
            • The Complete Session: capturing context during a session
            ","path":["Recipes","Maintenance","Turning Activity into Content"],"tags":[]},{"location":"recipes/scratchpad-sync/","level":1,"title":"Syncing Scratchpad Notes Across Machines","text":"","path":["Recipes","Knowledge and Tasks","Syncing Scratchpad Notes Across Machines"],"tags":[]},{"location":"recipes/scratchpad-sync/#the-problem","level":2,"title":"The Problem","text":"

            You work from multiple machines: a desktop and a laptop, or a local machine and a remote dev server.

            The scratchpad entries are encrypted. The ciphertext (.context/scratchpad.enc) travels with git, but the encryption key lives outside the project at ~/.ctx/.ctx.key and is never committed. Without the key on each machine, you cannot read or write entries.

            How do you distribute the key and keep the scratchpad in sync?

            ","path":["Recipes","Knowledge and Tasks","Syncing Scratchpad Notes Across Machines"],"tags":[]},{"location":"recipes/scratchpad-sync/#tldr","level":2,"title":"TL;DR","text":"
            ctx init                                                  # 1. generates key\nscp ~/.ctx/.ctx.key user@machine-b:~/.ctx/.ctx.key        # 2. copy key\nchmod 600 ~/.ctx/.ctx.key                                 # 3. secure it\n# Normal git push/pull syncs the encrypted scratchpad.enc\n# On conflict: ctx pad resolve → rebuild → git add + commit\n

            Finding Your Key File

            The key is always at ~/.ctx/.ctx.key — one key per machine.

            Treat the Key like a Password

            The scratchpad key is the only thing protecting your encrypted entries.

            Store a backup in a secure enclave such as a password manager, and treat it with the same care you would give passwords, certificates, or API tokens.

            Anyone with the key can decrypt every scratchpad entry.

            ","path":["Recipes","Knowledge and Tasks","Syncing Scratchpad Notes Across Machines"],"tags":[]},{"location":"recipes/scratchpad-sync/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Tool Type Purpose ctx init CLI command Initialize context (generates the key automatically) ctx pad add CLI command Add a scratchpad entry ctx pad rm CLI command Remove entries by stable ID (supports ranges) ctx pad edit CLI command Edit a scratchpad entry ctx pad resolve CLI command Show both sides of a merge conflict ctx pad merge CLI command Merge entries from other scratchpad files ctx pad import CLI command Bulk-import lines from a file ctx pad export CLI command Export blob entries to a directory scp Shell Copy the key file between machines git push / git pull Shell Sync the encrypted file via git /ctx-pad Skill Natural language interface to pad commands","path":["Recipes","Knowledge and Tasks","Syncing Scratchpad Notes Across Machines"],"tags":[]},{"location":"recipes/scratchpad-sync/#the-workflow","level":2,"title":"The Workflow","text":"","path":["Recipes","Knowledge and Tasks","Syncing Scratchpad Notes Across Machines"],"tags":[]},{"location":"recipes/scratchpad-sync/#step-1-initialize-on-machine-a","level":3,"title":"Step 1: Initialize on Machine A","text":"

            Run ctx init on your first machine. The key is created automatically at ~/.ctx/.ctx.key:

            ctx init\n# ...\n# Created ~/.ctx/.ctx.key (0600)\n# Created .context/scratchpad.enc\n

            The key lives outside the project directory and is never committed. The .enc file is tracked in git.

            Key Folder Change (v0.7.0+)

            If you built ctx from source or upgraded past v0.6.0, the key location changed to ~/.ctx/.ctx.key. Check these legacy folders and copy your key manually:

            # Old locations (pick whichever exists)\nls ~/.local/ctx/keys/        # pre-v0.7.0 user-level\nls .context/.ctx.key         # pre-v0.6.0 project-local\n\n# Copy to the new location\nmkdir -p ~/.ctx && chmod 700 ~/.ctx\ncp <old-key-path> ~/.ctx/.ctx.key\nchmod 600 ~/.ctx/.ctx.key\n
            ","path":["Recipes","Knowledge and Tasks","Syncing Scratchpad Notes Across Machines"],"tags":[]},{"location":"recipes/scratchpad-sync/#step-2-copy-the-key-to-machine-b","level":3,"title":"Step 2: Copy the Key to Machine B","text":"

            Use any secure transfer method. The key is always at ~/.ctx/.ctx.key:

            # scp - create the target directory first\nssh user@machine-b \"mkdir -p ~/.ctx && chmod 700 ~/.ctx\"\nscp ~/.ctx/.ctx.key user@machine-b:~/.ctx/.ctx.key\n\n# Or use a password manager, USB drive, etc.\n

            Set permissions on Machine B:

            chmod 600 ~/.ctx/.ctx.key\n

            Secure the Transfer

            The key is a raw 256-bit AES key. Anyone with the key can decrypt the scratchpad. Use an encrypted channel (SSH, password manager, vault).

            Never paste it in plaintext over email or chat.

            ","path":["Recipes","Knowledge and Tasks","Syncing Scratchpad Notes Across Machines"],"tags":[]},{"location":"recipes/scratchpad-sync/#step-3-normal-pushpull-workflow","level":3,"title":"Step 3: Normal Push/Pull Workflow","text":"

            The encrypted file is committed, so standard git sync works:

            # Machine A: add entries and push\nctx pad add \"staging API key: sk-test-abc123\"\ngit add .context/scratchpad.enc\ngit commit -m \"Update scratchpad\"\ngit push\n\n# Machine B: pull and read\ngit pull\nctx pad\n#   1. staging API key: sk-test-abc123\n

            Both machines have the same key, so both can decrypt the same .enc file.

            ","path":["Recipes","Knowledge and Tasks","Syncing Scratchpad Notes Across Machines"],"tags":[]},{"location":"recipes/scratchpad-sync/#step-4-read-and-write-from-either-machine","level":3,"title":"Step 4: Read and Write from Either Machine","text":"

            Once the key is distributed, all ctx pad commands work identically on both machines. Entries added on Machine A are visible on Machine B after a git pull, and vice versa.

            ","path":["Recipes","Knowledge and Tasks","Syncing Scratchpad Notes Across Machines"],"tags":[]},{"location":"recipes/scratchpad-sync/#step-5-handle-merge-conflicts","level":3,"title":"Step 5: Handle Merge Conflicts","text":"

            If both machines add entries between syncs, pulling will create a merge conflict on .context/scratchpad.enc. Git cannot merge binary (encrypted) content automatically.

            The fastest approach is ctx pad merge: It reads both conflict sides, deduplicates, and writes the union:

            # Extract theirs to a temp file, then merge it in\ngit show :3:.context/scratchpad.enc > /tmp/theirs.enc\ngit checkout --ours .context/scratchpad.enc\nctx pad merge /tmp/theirs.enc\n\n# Done: Commit the resolved scratchpad:\ngit add .context/scratchpad.enc\ngit commit -m \"Resolve scratchpad merge conflict\"\n

            Alternatively, use ctx pad resolve to inspect both sides manually:

            ctx pad resolve\n# === Ours (this machine) ===\n#   1. staging API key: sk-test-abc123\n#   2. check DNS after deploy\n#\n# === Theirs (incoming) ===\n#   1. staging API key: sk-test-abc123\n#   2. new endpoint: api.example.com/v2\n

            Then reconstruct the merged scratchpad:

            # Start fresh with all entries from both sides\nctx pad add \"staging API key: sk-test-abc123\"\nctx pad add \"check DNS after deploy\"\nctx pad add \"new endpoint: api.example.com/v2\"\n\n# Mark the conflict resolved\ngit add .context/scratchpad.enc\ngit commit -m \"Resolve scratchpad merge conflict\"\n
            ","path":["Recipes","Knowledge and Tasks","Syncing Scratchpad Notes Across Machines"],"tags":[]},{"location":"recipes/scratchpad-sync/#merge-conflict-walkthrough","level":2,"title":"Merge Conflict Walkthrough","text":"

            Here's a full scenario showing how conflicts arise and how to resolve them:

            1. Both machines start in sync (1 entry):

            Machine A: 1. staging API key: sk-test-abc123\nMachine B: 1. staging API key: sk-test-abc123\n

            2. Both add entries independently:

            Machine A adds: \"check DNS after deploy\"\nMachine B adds: \"new endpoint: api.example.com/v2\"\n

            3. Machine A pushes first. Machine B pulls and gets a conflict:

            git pull\n# CONFLICT (content): Merge conflict in .context/scratchpad.enc\n

            4. Machine B runs ctx pad resolve:

            ctx pad resolve\n# === Ours ===\n#   1. staging API key: sk-test-abc123\n#   2. new endpoint: api.example.com/v2\n#\n# === Theirs ===\n#   1. staging API key: sk-test-abc123\n#   2. check DNS after deploy\n

            5. Rebuild with entries from both sides and commit:

            # Clear and rebuild (or use the skill to guide you)\nctx pad add \"staging API key: sk-test-abc123\"\nctx pad add \"check DNS after deploy\"\nctx pad add \"new endpoint: api.example.com/v2\"\n\ngit add .context/scratchpad.enc\ngit commit -m \"Merge scratchpad: keep entries from both machines\"\n
            ","path":["Recipes","Knowledge and Tasks","Syncing Scratchpad Notes Across Machines"],"tags":[]},{"location":"recipes/scratchpad-sync/#conversational-approach","level":3,"title":"Conversational Approach","text":"

            When working with an AI assistant, you can resolve conflicts naturally:

            You: \"I have a scratchpad merge conflict. Can you resolve it?\"\n\nAgent: \"Let me extract theirs and merge it in.\"\n       [runs git show :3:.context/scratchpad.enc > /tmp/theirs.enc]\n       [runs git checkout --ours .context/scratchpad.enc]\n       [runs ctx pad merge /tmp/theirs.enc]\n       \"Merged 1 new entry (1 duplicate skipped). Want me to\n       commit the resolution?\"\n
            ","path":["Recipes","Knowledge and Tasks","Syncing Scratchpad Notes Across Machines"],"tags":[]},{"location":"recipes/scratchpad-sync/#tips","level":2,"title":"Tips","text":"
            • Back up the key: If you lose it, you lose access to all encrypted entries. Store a copy in your password manager.
            • One key per machine: The key lives at the fixed path ~/.ctx/.ctx.key, so all projects (and worktrees) on a machine share it. ctx init generates it once if it does not already exist.
            • Keys work in worktrees: Because the key lives at ~/.ctx/.ctx.key (outside the project), git worktrees on the same machine share the key automatically. No special setup needed.
            • Plaintext fallback for non-sensitive projects: If encryption adds friction and you have nothing sensitive, set scratchpad_encrypt: false in .ctxrc. Merge conflicts become trivial text merges.
            • Never commit the key: The key is stored outside the project at ~/.ctx/.ctx.key and should never be copied into the repository.
            ","path":["Recipes","Knowledge and Tasks","Syncing Scratchpad Notes Across Machines"],"tags":[]},{"location":"recipes/scratchpad-sync/#next-up","level":2,"title":"Next Up","text":"

            Hook Output Patterns →: Choose the right output pattern for your Claude Code hooks.

            ","path":["Recipes","Knowledge and Tasks","Syncing Scratchpad Notes Across Machines"],"tags":[]},{"location":"recipes/scratchpad-sync/#see-also","level":2,"title":"See Also","text":"
            • Scratchpad: feature overview, all commands, when to use scratchpad vs context files
            • Persisting Decisions, Learnings, and Conventions: for structured knowledge that outlives the scratchpad
            ","path":["Recipes","Knowledge and Tasks","Syncing Scratchpad Notes Across Machines"],"tags":[]},{"location":"recipes/scratchpad-with-claude/","level":1,"title":"Using the Scratchpad","text":"","path":["Recipes","Knowledge and Tasks","Using the Scratchpad"],"tags":[]},{"location":"recipes/scratchpad-with-claude/#the-problem","level":2,"title":"The Problem","text":"

            During a session you accumulate quick notes, reminders, intermediate values, and sometimes sensitive tokens. They don't fit TASKS.md (not work items) or DECISIONS.md (not decisions). They don't have the structured fields that LEARNINGS.md requires.

            Without somewhere to put them, they get lost between sessions.

            How do you capture working memory that persists across sessions without polluting your structured context files?

            ","path":["Recipes","Knowledge and Tasks","Using the Scratchpad"],"tags":[]},{"location":"recipes/scratchpad-with-claude/#tldr","level":2,"title":"TL;DR","text":"
            ctx pad add \"check DNS propagation after deploy\"\nctx pad         # list entries\nctx pad show 1  # print entry (pipe-friendly)\n

            Entries are encrypted at rest and travel with git.

            Use the /ctx-pad skill to manage entries from inside your AI session.

            ","path":["Recipes","Knowledge and Tasks","Using the Scratchpad"],"tags":[]},{"location":"recipes/scratchpad-with-claude/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Tool Type Purpose ctx pad CLI command List all scratchpad entries ctx pad show N CLI command Output raw text of entry N (pipe-friendly) ctx pad add CLI command Add a new entry ctx pad edit CLI command Replace, append to, or prepend to an entry ctx pad add --file CLI command Ingest a file as a blob entry ctx pad show N --out CLI command Extract a blob entry to a file ctx pad rm CLI command Remove entries by stable ID (supports ranges) ctx pad normalize CLI command Reassign entry IDs as 1..N ctx pad mv CLI command Reorder entries ctx pad --tag CLI command Filter entries by tag ctx pad tags CLI command List all tags with counts ctx pad import CLI command Bulk-import lines from a file (or stdin) ctx pad export CLI command Export all blob entries to a directory /ctx-pad Skill Natural language interface to all pad commands","path":["Recipes","Knowledge and Tasks","Using the Scratchpad"],"tags":[]},{"location":"recipes/scratchpad-with-claude/#the-workflow","level":2,"title":"The Workflow","text":"","path":["Recipes","Knowledge and Tasks","Using the Scratchpad"],"tags":[]},{"location":"recipes/scratchpad-with-claude/#step-1-add-a-note-naturally","level":3,"title":"Step 1: Add a Note Naturally","text":"

            You don't need to remember any syntax. Just tell your assistant what to jot down:

            You: \"jot down: check DNS propagation after deploy\"\n\nAgent: \"Added to scratchpad:\n  1. check DNS propagation after deploy\"\n

            Behind the scenes, the agent runs ctx pad add \"check DNS propagation after deploy\".

            ","path":["Recipes","Knowledge and Tasks","Using the Scratchpad"],"tags":[]},{"location":"recipes/scratchpad-with-claude/#step-2-view-your-scratchpad","level":3,"title":"Step 2: View Your Scratchpad","text":"
            You: \"show my scratchpad\"\n\nAgent: \"Your scratchpad has 3 entries:\n  1. check DNS propagation after deploy\n  2. staging API endpoint: api.staging.example.com/v2\n  3. retry limit should be 5, not 3\"\n
            ","path":["Recipes","Knowledge and Tasks","Using the Scratchpad"],"tags":[]},{"location":"recipes/scratchpad-with-claude/#step-3-edit-an-entry","level":3,"title":"Step 3: Edit an Entry","text":"
            You: \"update entry 2 to include the new endpoint\"\n\nAgent: \"Updated entry 2:\n  was: staging API endpoint: api.staging.example.com/v2\n  now: staging API endpoint: api.prod.example.com/v2\"\n

            For partial edits, the agent can use --append or --prepend:

            You: \"append the port number to entry 2\"\n\nAgent: \"Updated entry 2:\n  staging API endpoint: api.prod.example.com/v2 :8443\"\n
            ","path":["Recipes","Knowledge and Tasks","Using the Scratchpad"],"tags":[]},{"location":"recipes/scratchpad-with-claude/#step-4-compose-entries","level":3,"title":"Step 4: Compose Entries","text":"

            Use ctx pad show N to pipe entry content into other operations:

            You: \"append the content of entry 3 to entry 1\"\n\nAgent: [runs ctx pad edit 1 --append \"$(ctx pad show 3)\"]\n       \"Done. Entry 1 is now:\n        check DNS propagation after deploy retry limit should be 5, not 3\"\n
            ","path":["Recipes","Knowledge and Tasks","Using the Scratchpad"],"tags":[]},{"location":"recipes/scratchpad-with-claude/#step-5-reorder-by-priority","level":3,"title":"Step 5: Reorder by Priority","text":"
            You: \"move entry 4 to the top\"\n\nAgent: [runs ctx pad mv 4 1]\n       \"Moved entry 4 to position 1. Scratchpad reordered.\"\n
            ","path":["Recipes","Knowledge and Tasks","Using the Scratchpad"],"tags":[]},{"location":"recipes/scratchpad-with-claude/#step-6-clean-up","level":3,"title":"Step 6: Clean Up","text":"
            You: \"remove entry 2, it's done\"\n\nAgent: [runs ctx pad rm 2]\n       \"Removed entry 2. 3 entries remaining.\n        (IDs are stable; remaining entries keep their IDs.)\"\n
            ","path":["Recipes","Knowledge and Tasks","Using the Scratchpad"],"tags":[]},{"location":"recipes/scratchpad-with-claude/#step-7-store-a-file-as-a-blob","level":3,"title":"Step 7: Store a File as a Blob","text":"

            The scratchpad can hold small files (up to 64 KB) as encrypted blob entries. The file is base64-encoded and stored alongside a label you provide:

            # Ingest a file: the first argument is the label\nctx pad add \"deploy config\" --file ./deploy.yaml\n\n# List shows the label with a [BLOB] marker\nctx pad\n#   1. check DNS propagation after deploy\n#   2. deploy config [BLOB]\n
            ","path":["Recipes","Knowledge and Tasks","Using the Scratchpad"],"tags":[]},{"location":"recipes/scratchpad-with-claude/#step-8-extract-a-blob","level":3,"title":"Step 8: Extract a Blob","text":"

            Use show --out to write the decoded file back to disk:

            # Write blob entry to a file\nctx pad show 2 --out ./recovered-deploy.yaml\n\n# Or print to stdout (for piping)\nctx pad show 2 | head -5\n

            Blob entries are encrypted identically to text entries: They're just base64-encoded before encryption. The --out flag decodes and writes the raw bytes.

            ","path":["Recipes","Knowledge and Tasks","Using the Scratchpad"],"tags":[]},{"location":"recipes/scratchpad-with-claude/#step-9-bulk-import-notes","level":3,"title":"Step 9: Bulk Import Notes","text":"

            When you have a file with many notes (one per line), import them in bulk instead of adding one at a time:

            # Import from a file: Each non-empty line becomes an entry\nctx pad import notes.txt\n\n# Or pipe from stdin\ngrep TODO *.go | ctx pad import -\n

            All entries are written in a single encrypt/write cycle, regardless of how many lines the file contains.

            ","path":["Recipes","Knowledge and Tasks","Using the Scratchpad"],"tags":[]},{"location":"recipes/scratchpad-with-claude/#step-10-export-blobs-to-disk","level":3,"title":"Step 10: Export Blobs to Disk","text":"

            Export all blob entries to a directory as individual files. Each blob's label becomes the filename:

            # Export to a directory (created if needed)\nctx pad export ./ideas\n\n# Preview what would be exported\nctx pad export --dry-run ./ideas\n\n# Force overwrite existing files\nctx pad export --force ./backup\n

            When a file already exists, a Unix timestamp is prepended to the filename to avoid collisions. Use --force to overwrite instead.

            ","path":["Recipes","Knowledge and Tasks","Using the Scratchpad"],"tags":[]},{"location":"recipes/scratchpad-with-claude/#step-11-tag-entries-for-organization","level":3,"title":"Step 11: Tag Entries for Organization","text":"

            Tags let you categorize entries without any structure beyond a #word token in the text. Add them when creating or editing entries:

            You: \"jot down: check DNS propagation #later\"\nYou: \"tag entry 2 as urgent\"\n\nAgent: [runs ctx pad edit 2 --tag urgent]\n       \"Updated entry 2.\"\n

            Filter your scratchpad by tag:

            You: \"show me everything tagged later\"\n\nAgent: [runs ctx pad --tag later]\n       \"  1. check DNS propagation #later\n        3. review PR feedback #later #ci\"\n

            Entry IDs are stable; they don't shift when other entries are deleted, so ctx pad rm 3 always targets the same entry regardless of deletions or active filters. Use ctx pad normalize to reassign IDs as 1..N.

            Exclude a tag with ~:

            ctx pad --tag ~later         # everything NOT tagged #later\nctx pad --tag later --tag ci # entries with BOTH tags (AND logic)\n

            See what tags you're using:

            You: \"what tags do I have?\"\n\nAgent: [runs ctx pad tags]\n       \"ci       1\n        later    2\n        urgent   1\"\n

            Tags work on blob entries too; they're extracted from the label:

            ctx pad add \"deploy config #prod\" --file ./deploy.yaml\nctx pad --tag prod\n#   1. deploy config #prod [BLOB]\n
            ","path":["Recipes","Knowledge and Tasks","Using the Scratchpad"],"tags":[]},{"location":"recipes/scratchpad-with-claude/#using-ctx-pad-in-a-session","level":2,"title":"Using /ctx-pad in a Session","text":"

            Invoke the /ctx-pad skill first, then describe what you want in natural language. Without the skill prefix, the agent may route your request to TASKS.md or another context file instead of the scratchpad.

            You: /ctx-pad jot down: check DNS after deploy\nYou: /ctx-pad show my scratchpad\nYou: /ctx-pad delete entry 3\n

            Once the skill is active, it translates intent into commands:

            You say (after /ctx-pad) What the agent does \"jot down: check DNS after deploy\" ctx pad add \"check DNS after deploy\" \"remember this: retry limit is 5\" ctx pad add \"retry limit is 5\" \"show my scratchpad\" / \"what's on my pad\" ctx pad \"show me entry 3\" ctx pad show 3 \"delete the third one\" / \"remove entry 3\" ctx pad rm 3 \"remove entries 3 through 5\" ctx pad rm 3-5 \"renumber my scratchpad\" ctx pad normalize \"change entry 2 to ...\" ctx pad edit 2 \"new text\" \"append ' +important' to entry 3\" ctx pad edit 3 --append \" +important\" \"prepend 'URGENT:' to entry 1\" ctx pad edit 1 --prepend \"URGENT: \" \"prioritize entry 4\" / \"move to the top\" ctx pad mv 4 1 \"import my notes from notes.txt\" ctx pad import notes.txt \"export all blobs to ./ideas\" ctx pad export ./ideas \"show entries tagged later\" ctx pad --tag later \"show everything except later\" ctx pad --tag ~later \"what tags do I have\" ctx pad tags \"tag entry 5 as urgent\" ctx pad edit 5 --tag urgent

            When in Doubt, Use the CLI Directly

            The ctx pad commands work the same whether you run them yourself or let the skill invoke them.

            If the agent misroutes a request, fall back to ctx pad add \"...\" in your terminal.

            ","path":["Recipes","Knowledge and Tasks","Using the Scratchpad"],"tags":[]},{"location":"recipes/scratchpad-with-claude/#when-to-use-scratchpad-vs-context-files","level":2,"title":"When to Use Scratchpad vs Context Files","text":"Situation Use Temporary reminders (\"check X after deploy\") Scratchpad Session-start reminders (\"remind me next session\") ctx remind Working values during debugging (ports, endpoints, counts) Scratchpad Sensitive tokens or API keys (short-term storage) Scratchpad Quick notes that don't fit anywhere else Scratchpad Work items with completion tracking TASKS.md Trade-offs between alternatives with rationale DECISIONS.md Reusable lessons with context/lesson/application LEARNINGS.md Codified patterns and standards CONVENTIONS.md

            Decision Guide

            • If it has structured fields (context, rationale, lesson, application), it belongs in a context file like DECISIONS.md or LEARNINGS.md.
            • If it's a work item you'll mark done, it belongs in TASKS.md.
            • If you want a message relayed VERBATIM at the next session start, it belongs in ctx remind.
            • If it's a quick note, reminder, or working value (especially if it's sensitive or ephemeral) it belongs on the scratchpad.

            Scratchpad Is Not a Junk Drawer

            The scratchpad is for working memory, not long-term storage.

            If a note is still relevant after several sessions, promote it:

            A persistent reminder becomes a task, a recurring value becomes a convention, a hard-won insight becomes a learning.

            ","path":["Recipes","Knowledge and Tasks","Using the Scratchpad"],"tags":[]},{"location":"recipes/scratchpad-with-claude/#tips","level":2,"title":"Tips","text":"
            • Entries persist across sessions: The scratchpad is committed (encrypted) to git, so entries survive session boundaries. Pick up where you left off.
            • Entries are numbered and reorderable: Use ctx pad mv to put high-priority items at the top.
            • ctx pad show N enables Unix piping: Output raw entry text with no numbering prefix. Compose with --append, --prepend, or other shell tools.
            • Never mention the key file contents to the AI: The agent knows how to use ctx pad commands but should never read or print the encryption key (~/.ctx/.ctx.key) directly.
            • Encryption is transparent: You interact with plaintext; the encryption/decryption happens automatically on every read/write.
            ","path":["Recipes","Knowledge and Tasks","Using the Scratchpad"],"tags":[]},{"location":"recipes/scratchpad-with-claude/#next-up","level":2,"title":"Next Up","text":"

            Syncing Scratchpad Notes Across Machines →: Distribute encryption keys and scratchpad data across environments.

            ","path":["Recipes","Knowledge and Tasks","Using the Scratchpad"],"tags":[]},{"location":"recipes/scratchpad-with-claude/#see-also","level":2,"title":"See Also","text":"
            • Scratchpad: feature overview, all commands, encryption details, plaintext override
            • Persisting Decisions, Learnings, and Conventions: for structured knowledge that outlives the scratchpad
            • The Complete Session: full session lifecycle showing how the scratchpad fits into the broader workflow
            ","path":["Recipes","Knowledge and Tasks","Using the Scratchpad"],"tags":[]},{"location":"recipes/scrutinizing-a-plan/","level":1,"title":"Scrutinizing a Plan","text":"

            When you have a plan and want it attacked, not validated, the /ctx-plan skill runs an adversarial interview. It surfaces what's weak, missing, or unexamined before you commit.

            ","path":["Scrutinizing a Plan"],"tags":[]},{"location":"recipes/scrutinizing-a-plan/#when-to-use-it","level":2,"title":"When to Use It","text":"
            • Before starting a multi-day implementation.
            • After writing a spec but before opening the first PR.
            • When the team aligned suspiciously fast on a complex change.
            • When you've drafted something and the obvious next step is to hand it to a senior reviewer.

            /ctx-plan is not a brainstorming tool. Use /ctx-brainstorm for that. /ctx-plan assumes the plan exists and asks: does it survive contact with reality?

            ","path":["Scrutinizing a Plan"],"tags":[]},{"location":"recipes/scrutinizing-a-plan/#stop-conditions","level":2,"title":"Stop Conditions","text":"

            The skill runs depth-first through assumptions, tradeoffs, and failure modes. Stop when one of these is true:

            • Plan converges. The bet is clear, the sequencing is justified, and you can articulate what would falsify it.
            • Plan dissolves. The interview surfaces a load-bearing assumption you can't defend; the plan needs rework, not more scrutiny.
            • You hit information you don't have. Pause, gather it, resume.

            If the agent loops on a single angle (asking variations of the same question), say so. The skill is designed to walk depth-first, not exhaust one branch.

            ","path":["Scrutinizing a Plan"],"tags":[]},{"location":"recipes/scrutinizing-a-plan/#worked-example","level":2,"title":"Worked Example","text":"
            You: /ctx-plan\n\nI want to migrate our queue from RabbitMQ to NATS. I've drafted a\ntwo-week plan: spike NATS in staging, port one consumer, port the\nrest, decommission RabbitMQ.\n\nAgent: One question at a time. What breaks if your message ordering\n       guarantees aren't equivalent across the two systems?\n\nYou: NATS gives us per-subject ordering by default; we don't actually\n     need cross-subject ordering, so I think we're fine.\n\nAgent: How would you notice if you were wrong? What's the loudest\n       symptom of a stray out-of-order message in production today?\n\nYou: ...probably a duplicate notification. Which we already see\n     occasionally. So we wouldn't notice immediately.\n\nAgent: Then your migration's correctness check needs to be stronger\n       than \"no incidents.\" Before you port consumer two, what\n       evidence would convince you that consumer one's ordering\n       behavior is correct under load?\n\nYou: Right. Add a dedicated assertion test in staging that replays\n     a known-out-of-order sequence and verifies the consumer\n     dedupes correctly. Otherwise we're just hoping.\n\nAgent: Good. Next: what would change your mind about NATS itself?\n       What rejected alternative is closest to coming back?\n

            The interview keeps going until the bet is articulated, the falsifiable conditions are written down, and the rejected alternatives have a recall trigger.

            ","path":["Scrutinizing a Plan"],"tags":[]},{"location":"recipes/scrutinizing-a-plan/#output","level":2,"title":"Output","text":"

            /ctx-plan produces a clearer plan, not a document. Persist the deltas via:

            • /ctx-spec if the conclusions belong in a feature spec.
            • /ctx-decision-add if a tradeoff resolved into an architectural decision.
            • /ctx-learning-add if you discovered a project-specific gotcha during the interview.

            The skill itself is in internal/assets/claude/skills/ctx-plan/SKILL.md; the working contract lives there; the recipe is the on-ramp.

            ","path":["Scrutinizing a Plan"],"tags":[]},{"location":"recipes/scrutinizing-a-plan/#see-also","level":2,"title":"See Also","text":"
            • Design Before Coding: the brainstorming counterpart, used before a plan exists.
            • ctx-spec: scaffolds a feature spec from the project template.
            ","path":["Scrutinizing a Plan"],"tags":[]},{"location":"recipes/session-archaeology/","level":1,"title":"Browsing and Enriching Past Sessions","text":"","path":["Recipes","Sessions","Browsing and Enriching Past Sessions"],"tags":[]},{"location":"recipes/session-archaeology/#the-problem","level":2,"title":"The Problem","text":"

            After weeks of AI-assisted development you have dozens of sessions scattered across JSONL files in ~/.claude/projects/. Finding the session where you debugged the Redis connection pool, or remembering what you decided about the caching strategy three Tuesdays ago, often means grepping raw JSON.

            There is no table of contents, no search, and no summaries.

            This recipe shows how to turn that raw session history into a browsable, searchable, and enriched journal site you can navigate in your browser.

            ","path":["Recipes","Sessions","Browsing and Enriching Past Sessions"],"tags":[]},{"location":"recipes/session-archaeology/#tldr","level":2,"title":"TL;DR","text":"

            Export and Generate

            ctx journal import --all\nctx journal site --serve\n

            Enrich

            /ctx-journal-enrich-all\n

            Rebuild

            ctx journal site --serve\n

            Read on for what each stage does and why.

            ","path":["Recipes","Sessions","Browsing and Enriching Past Sessions"],"tags":[]},{"location":"recipes/session-archaeology/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Tool Type Purpose ctx journal source Command List parsed sessions with metadata ctx journal source --show Command Inspect a specific session in detail ctx journal import Command Import sessions to editable journal Markdown ctx journal site Command Generate a static site from journal entries ctx journal obsidian Command Generate an Obsidian vault from journal entries ctx journal schema check Command Validate JSONL files and report schema drift ctx journal schema dump Command Print the embedded JSONL schema definition ctx serve Command Serve any zensical directory (default: journal) /ctx-history Skill Browse sessions inside your AI assistant /ctx-journal-enrich Skill Add frontmatter metadata to a single entry /ctx-journal-enrich-all Skill Full pipeline: import if needed, then batch-enrich","path":["Recipes","Sessions","Browsing and Enriching Past Sessions"],"tags":[]},{"location":"recipes/session-archaeology/#the-workflow","level":2,"title":"The Workflow","text":"

            The session journal follows a four-stage pipeline.

            Each stage is idempotent and safe to re-run:

            By default, each stage skips entries that have already been processed.

            import -> enrich -> rebuild\n
            Stage Tool What it does Skips if Where Import ctx journal import --all Converts session JSONL to Markdown File already exists (safe default) CLI or agent Enrich /ctx-journal-enrich-all Adds frontmatter, summaries, topic tags Frontmatter already present Agent only Rebuild ctx journal site --build Generates browsable static HTML N/A CLI only Obsidian ctx journal obsidian Generates Obsidian vault with wikilinks N/A CLI only

            Where Do You Run Each Stage?

            Import (Steps 1 to 3) works equally well from the terminal or inside your AI assistant via /ctx-history. The CLI is fine here: the agent adds no special intelligence, it just runs the same command.

            Enrich (Step 4) requires the agent: it reads conversation content and produces structured metadata.

            Rebuild and serve (Step 5) is a terminal operation that starts a long-running server.

            ","path":["Recipes","Sessions","Browsing and Enriching Past Sessions"],"tags":[]},{"location":"recipes/session-archaeology/#step-1-list-your-sessions","level":3,"title":"Step 1: List Your Sessions","text":"

            Start by seeing what sessions exist for the current project:

            ctx journal source\n

            Sample output:

            Sessions (newest first)\n=======================\n\n  Slug                           Project   Date         Duration  Turns  Tokens\n  gleaming-wobbling-sutherland   ctx       2026-02-07   1h 23m    47     82,341\n  twinkly-stirring-kettle        ctx       2026-02-06   0h 45m    22     38,102\n  bright-dancing-hopper          ctx       2026-02-05   2h 10m    63     124,500\n  quiet-flowing-dijkstra         ctx       2026-02-04   0h 18m    11     15,230\n  ...\n

            Slugs Look Cryptic?

            These auto-generated slugs (gleaming-wobbling-sutherland) are hard to recognize later.

            Use /ctx-journal-enrich to add human-readable titles, topic tags, and summaries to exported journal entries, making them easier to find.

            Filter by project or tool if you work across multiple codebases:

            ctx journal source --project ctx --limit 10\nctx journal source --tool claude-code\nctx journal source --all-projects\n
            ","path":["Recipes","Sessions","Browsing and Enriching Past Sessions"],"tags":[]},{"location":"recipes/session-archaeology/#step-2-inspect-a-specific-session","level":3,"title":"Step 2: Inspect a Specific Session","text":"

            Before exporting everything, inspect a single session to see its metadata and conversation summary:

            ctx journal source --show --latest\n

            Or look up a specific session by its slug, partial ID, or UUID:

            ctx journal source --show gleaming-wobbling-sutherland\nctx journal source --show twinkly\nctx journal source --show abc123\n

            Add --full to see the complete message content instead of the summary view:

            ctx journal source --show --latest --full\n

            This is useful for checking what happened before deciding whether to export and enrich it.

            ","path":["Recipes","Sessions","Browsing and Enriching Past Sessions"],"tags":[]},{"location":"recipes/session-archaeology/#step-3-import-sessions-to-the-journal","level":3,"title":"Step 3: Import Sessions to the Journal","text":"

            Import converts raw session data into editable Markdown files in .context/journal/:

            # Import all sessions from the current project\nctx journal import --all\n\n# Import a single session\nctx journal import gleaming-wobbling-sutherland\n\n# Include sessions from all projects\nctx journal import --all --all-projects\n

            --keep-frontmatter=false Discards Enrichments

            --keep-frontmatter=false discards enriched YAML frontmatter during regeneration.

            Back up your journal before using this flag.

            Each imported file contains session metadata (date, time, duration, model, project, git branch), a tool usage summary, and the full conversation transcript.

            Re-importing is safe. Running ctx journal import --all only imports new sessions: Existing files are never touched. Use --dry-run to preview what would be imported without writing anything.

            To re-import existing files (e.g., after a format improvement), use --regenerate: Conversation content is regenerated while preserving any YAML frontmatter you or the enrichment skill has added. You'll be prompted before any files are overwritten.

            --regenerate Replaces the Markdown Body

            --regenerate preserves YAML frontmatter but replaces the entire Markdown body with freshly generated content from the source JSONL.

            If you manually edited the conversation transcript (added notes, redacted sensitive content, restructured sections), those edits will be lost.

            BACK UP YOUR JOURNAL FIRST.

            To protect entries you've hand-edited, you can explicitly lock them:

            ctx journal lock <pattern>\n

            Locked entries are always skipped, regardless of flags.

            If you prefer to add locked: true directly in frontmatter during enrichment, run ctx journal sync to propagate the lock state to .state.json:

            ctx journal sync\n

            See ctx journal lock --help and ctx journal sync --help for details.

            ","path":["Recipes","Sessions","Browsing and Enriching Past Sessions"],"tags":[]},{"location":"recipes/session-archaeology/#step-4-enrich-with-metadata","level":3,"title":"Step 4: Enrich with Metadata","text":"

            Raw imports have timestamps and transcripts but lack the semantic metadata that makes sessions searchable: topics, technology tags, outcome status, and summaries. The /ctx-journal-enrich* skills add this structured frontmatter.

            Locked entries are skipped by enrichment skills, just as they are by import. Lock entries you want to protect before running batch enrichment.

            Batch enrichment (recommended):

            /ctx-journal-enrich-all\n

            The skill finds all unenriched entries, filters out noise (suggestion sessions, very short sessions, multipart continuations), and processes each one by extracting titles, topics, technologies, and summaries from the conversation.

            It shows you a grouped summary before applying changes so you can scan quickly rather than reviewing one by one.

            For large backlogs (20+ entries), the skill can spawn subagents to process entries in parallel.

            Single-entry enrichment:

            /ctx-journal-enrich twinkly\n/ctx-journal-enrich 2026-02-06\n

            Each enriched entry gets YAML frontmatter like this:

            ---\ntitle: \"Implement Redis caching middleware\"\ndate: 2026-02-06\ntype: feature\noutcome: completed\ntopics:\n  - caching\n  - api-performance\ntechnologies:\n  - go\n  - redis\nlibraries:\n  - go-redis/redis\nkey_files:\n  - internal/cache/redis.go\n  - internal/api/middleware/cache.go\n---\n

            The skill also generates a summary and can extract decisions, learnings, and tasks mentioned during the session.

            ","path":["Recipes","Sessions","Browsing and Enriching Past Sessions"],"tags":[]},{"location":"recipes/session-archaeology/#step-5-generate-and-serve-the-site","level":3,"title":"Step 5: Generate and Serve the Site","text":"

            With imported and enriched journal files, generate the static site:

            # Generate site structure only\nctx journal site\n\n# Generate and build static HTML\nctx journal site --build\n\n# Generate, build, and serve locally\nctx journal site --serve\n

            Then open http://localhost:8000 to browse.

            The site includes a date-sorted index, individual session pages with full conversations, search (press /), dark mode, and enriched titles in the navigation when frontmatter exists.

            You can also serve an already-generated site without regenerating using ctx serve (serve-only, no regeneration).

            The site generator requires zensical (pipx install zensical).

            ","path":["Recipes","Sessions","Browsing and Enriching Past Sessions"],"tags":[]},{"location":"recipes/session-archaeology/#where-does-agent-add-value","level":2,"title":"Where Does Agent Add Value?","text":"

            Export, list, and show are mechanical: The agent runs the same CLI commands you would, so you can stay in your terminal for those.

            The agent earns its keep in enrich. It reads conversation content, understands what happened, and produces structured metadata. That is agent work, not CLI work.

            You can also ask your agent to browse sessions conversationally instead of remembering flags:

            What did we work on last week?\nShow me the session about Redis.\nImport everything to the journal.\n

            This is convenient but not required: ctx journal source gives you the same inventory.

            Where the agent genuinely helps is chaining the pipeline:

            You:   What happened last Tuesday?\nAgent: Last Tuesday you worked on two sessions:\n       - bright-dancing-hopper (2h 10m): refactored the middleware\n         pipeline and added Redis caching\n       - quiet-flowing-dijkstra (18m): quick fix for a nil pointer\n         in the config loader\n       Want me to export and enrich them?\nYou:   Yes, do it.\nAgent: Exports both, enriches, then proposes frontmatter.\n

            The value is staying in one context while the agent runs import -> enrich without you manually switching tools.

            ","path":["Recipes","Sessions","Browsing and Enriching Past Sessions"],"tags":[]},{"location":"recipes/session-archaeology/#putting-it-all-together","level":2,"title":"Putting It All Together","text":"

            A typical pipeline from raw sessions to a browsable site:

            # Terminal: import and generate\nctx journal import --all\nctx journal site --serve\n
            # AI assistant: enrich\n/ctx-journal-enrich-all\n
            # Terminal: rebuild with enrichments\nctx journal site --serve\n

            If your project includes Makefile.ctx (deployed by ctx init), use make journal to combine import and rebuild stages. Then enrich inside Claude Code, then make journal again to pick up enrichments.

            ","path":["Recipes","Sessions","Browsing and Enriching Past Sessions"],"tags":[]},{"location":"recipes/session-archaeology/#session-retention-and-cleanup","level":2,"title":"Session Retention and Cleanup","text":"

            Claude Code does not keep JSONL transcripts forever. Understanding its cleanup behavior helps you avoid losing session history.

            ","path":["Recipes","Sessions","Browsing and Enriching Past Sessions"],"tags":[]},{"location":"recipes/session-archaeology/#default-behavior","level":3,"title":"Default Behavior","text":"

            Claude Code retains session transcripts for approximately 30 days. After that, JSONL files are automatically deleted during cleanup. Once deleted, ctx journal can no longer see those sessions - the data is gone.

            ","path":["Recipes","Sessions","Browsing and Enriching Past Sessions"],"tags":[]},{"location":"recipes/session-archaeology/#the-cleanupperioddays-setting","level":3,"title":"The cleanupPeriodDays Setting","text":"

            Claude Code exposes a cleanupPeriodDays setting in its configuration (~/.claude/settings.json) that controls retention:

            Value Behavior 30 (default) Transcripts older than 30 days are deleted 60, 90, etc. Extends the retention window 0 Disables writing new transcripts entirely - not \"keep forever\"

            Setting cleanupPeriodDays To 0

            Setting this to 0 does not mean \"never delete.\" It disables transcript creation altogether. No new JSONL files are written, which means ctx journal sees nothing new. This is rarely what you want.

            ","path":["Recipes","Sessions","Browsing and Enriching Past Sessions"],"tags":[]},{"location":"recipes/session-archaeology/#why-journal-import-matters","level":3,"title":"Why Journal Import Matters","text":"

            The journal import pipeline (Steps 1-4 above) is your archival mechanism. Imported Markdown files in .context/journal/ persist independently of Claude Code's cleanup cycle. Even after the source JSONL files are deleted, your journal entries remain.

            Recommendation: import regularly - weekly, or after any session worth revisiting. A quick ctx journal import --all takes seconds and ensures nothing falls through the 30-day window.

            ","path":["Recipes","Sessions","Browsing and Enriching Past Sessions"],"tags":[]},{"location":"recipes/session-archaeology/#quick-archival-checklist","level":3,"title":"Quick Archival Checklist","text":"
            1. Run ctx journal import --all at least weekly
            2. Enrich high-value sessions with /ctx-journal-enrich before the details fade from your own memory
            3. Lock enriched entries (ctx journal lock <pattern>) to protect them from accidental regeneration
            4. Rebuild the journal site periodically to keep it current
            ","path":["Recipes","Sessions","Browsing and Enriching Past Sessions"],"tags":[]},{"location":"recipes/session-archaeology/#tips","level":2,"title":"Tips","text":"
            • Start with /ctx-history inside your AI assistant. If you want to quickly check what happened in a recent session without leaving your editor, /ctx-history lets you browse interactively without importing.
            • Large sessions may be split automatically. Sessions with 200+ messages can be split into multiple parts (session-abc123.md, session-abc123-p2.md, session-abc123-p3.md) with navigation links between them. The site generator can handle this.
            • Suggestion sessions can be separated. Claude Code can generate short suggestion sessions for autocomplete. These may appear under a separate section in the site index, so they do not clutter your main session list.
            • Your agent is a good session browser. You do not need to remember slugs, dates, or flags. Ask \"what did we do yesterday?\" or \"find the session about Redis\" and it can map the question to recall commands.

            Journal Files Are Sensitive

            Journal files MUST be .gitignored.

            Session transcripts can contain sensitive data such as file contents, commands, error messages with stack traces, and potentially API keys.

            Add .context/journal/, .context/journal-site/, and .context/journal-obsidian/ to your .gitignore.

            ","path":["Recipes","Sessions","Browsing and Enriching Past Sessions"],"tags":[]},{"location":"recipes/session-archaeology/#next-up","level":2,"title":"Next Up","text":"

            Persisting Decisions, Learnings, and Conventions →: Record decisions, learnings, and conventions so they survive across sessions.

            ","path":["Recipes","Sessions","Browsing and Enriching Past Sessions"],"tags":[]},{"location":"recipes/session-archaeology/#see-also","level":2,"title":"See Also","text":"
            • The Complete Session: where session saving fits in the daily workflow
            • Turning Activity into Content: generating blog posts from session history
            • Session Journal: full documentation of the journal system
            • CLI Reference: ctx journal: all journal subcommands and flags
            • CLI Reference: ctx serve: serve-only (no regeneration)
            • Context Files: the .context/ directory structure
            ","path":["Recipes","Sessions","Browsing and Enriching Past Sessions"],"tags":[]},{"location":"recipes/session-ceremonies/","level":1,"title":"Session Ceremonies","text":"","path":["Recipes","Sessions","Session Ceremonies"],"tags":[]},{"location":"recipes/session-ceremonies/#the-problem","level":2,"title":"The Problem","text":"

            Sessions have two critical moments: the start and the end.

            • At the start, you need the agent to load context and confirm it knows what is going on.
            • At the end, you need to capture whatever the session produced before the conversation disappears.

            Most ctx skills work conversationally: \"jot down: check DNS after deploy\" is as good as /ctx-pad add \"check DNS after deploy\". But session boundaries are different. They are well-defined moments with specific requirements, and partial execution is costly.

            If the agent only half-loads context at the start, it works from stale assumptions. If it only half-persists at the end, learnings and decisions are lost.

            This Is One of the Few Times Being Explicit Matters

            Session ceremonies are the two bookend skills that mark these boundaries.

            They are the exception to the conversational rule:

            Invoke /ctx-remember and /ctx-wrap-up explicitly as slash commands.

            ","path":["Recipes","Sessions","Session Ceremonies"],"tags":[]},{"location":"recipes/session-ceremonies/#tldr","level":2,"title":"TL;DR","text":"

            Start: /ctx-remember: load context, get a structured readback.

            End: /ctx-wrap-up: review session, propose candidates, persist approved items.

            Use the slash commands, not conversational triggers, for completeness.

            ","path":["Recipes","Sessions","Session Ceremonies"],"tags":[]},{"location":"recipes/session-ceremonies/#explicit-invocation-matters","level":2,"title":"Explicit Invocation Matters","text":"

            Most ctx skills encourage natural language. These two are different:

            Well-defined moments: Sessions have clear boundaries. A slash command marks the boundary unambiguously.

            Ambiguity risk: \"Do you remember?\" could mean many things. /ctx-remember means exactly one thing: load context and present a structured readback.

            Completeness: Conversational triggers risk partial execution. The agent might load some files but skip the session history, or persist one learning but forget to check for uncommitted changes. The slash command runs the full ceremony.

            Muscle memory: Typing /ctx-remember at session start and /ctx-wrap-up at session end becomes a habit, like opening and closing braces.

            ","path":["Recipes","Sessions","Session Ceremonies"],"tags":[]},{"location":"recipes/session-ceremonies/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Tool Type Purpose /ctx-remember Skill Load context and present structured readback /ctx-wrap-up Skill Gather session signal, propose and persist context /ctx-commit Skill Commit with context capture (offered by wrap-up) ctx agent CLI Load token-budgeted context packet ctx journal source CLI List recent sessions ctx add CLI Persist learnings, decisions, conventions, tasks","path":["Recipes","Sessions","Session Ceremonies"],"tags":[]},{"location":"recipes/session-ceremonies/#session-start-ctx-remember","level":2,"title":"Session Start: /ctx-remember","text":"

            Invoke at the beginning of every session:

            /ctx-remember\n

            The skill silently:

            1. Loads the context packet via ctx agent --budget 4000
            2. Reads TASKS.md, DECISIONS.md, LEARNINGS.md
            3. Checks recent sessions via ctx journal source --limit 3

            Then presents a structured readback with four sections:

            • Last session: topic, date, what was accomplished
            • Active work: pending and in-progress tasks
            • Recent context: 1-2 relevant decisions or learnings
            • Next step: suggestion or question about what to focus on

            The readback should feel like recall, not a file system tour. If the agent says \"Let me check if there are files...\" instead of a confident summary, the skill is not working correctly.

            What about 'do you remember?'

            The conversational trigger still works. But /ctx-remember guarantees the full ceremony runs:

            • context packet,
            • file reads,
            • session history,
            • and all four readback sections.

            The conversational version may cut corners.

            ","path":["Recipes","Sessions","Session Ceremonies"],"tags":[]},{"location":"recipes/session-ceremonies/#session-end-ctx-wrap-up","level":2,"title":"Session End: /ctx-wrap-up","text":"

            Invoke before ending a session where meaningful work happened:

            /ctx-wrap-up\n

            The skill runs four phases:

            ","path":["Recipes","Sessions","Session Ceremonies"],"tags":[]},{"location":"recipes/session-ceremonies/#phase-1-gather-signal","level":3,"title":"Phase 1: Gather Signal","text":"

            Silently checks git diff --stat, recent commits, and scans the conversation for themes: architectural choices, gotchas, patterns established, follow-up work identified.

            ","path":["Recipes","Sessions","Session Ceremonies"],"tags":[]},{"location":"recipes/session-ceremonies/#phase-2-propose-candidates","level":3,"title":"Phase 2: Propose Candidates","text":"

            Presents a structured list grouped by type:

            ## Session Wrap-Up\n\n### Learnings (2 candidates)\n1. **PyMdownx details extension breaks pre/code rendering**\n   - Context: Journal site showed broken code blocks inside details tags\n   - Lesson: details extension wraps content in <details> HTML, which\n     interferes with <pre><code> rendering\n   - Application: Use fenced code blocks instead of indented code inside\n     admonitions when details extension is active\n\n2. **Hook subprocesses cannot propagate env vars**\n   - Context: Set env var in PreToolUse hook, invisible in main session\n   - Lesson: Hooks execute in child processes; env changes don't propagate\n   - Application: Use tombstone files for hook-to-session communication\n\n### Decisions (1 candidate)\n1. **File-based cooldown tokens over env vars**\n   - Context: Need session-scoped cooldown for ctx agent auto-loading\n   - Rationale: File tokens survive across processes, simpler than IPC\n   - Consequence: Tombstone files accumulate in /tmp; need TTL cleanup\n\nPersist all? Or select which to keep?\n

            Each candidate has complete structured fields, not just a title. Empty categories are omitted.

            ","path":["Recipes","Sessions","Session Ceremonies"],"tags":[]},{"location":"recipes/session-ceremonies/#phase-3-persist","level":3,"title":"Phase 3: Persist","text":"

            After you approve (all, some, or modified), the skill runs the appropriate ctx add commands and reports results.

            ","path":["Recipes","Sessions","Session Ceremonies"],"tags":[]},{"location":"recipes/session-ceremonies/#nudge-suppression","level":3,"title":"Nudge Suppression","text":"

            After persisting, the skill marks the session as wrapped up via ctx system mark-wrapped-up. This suppresses context checkpoint nudges for 2 hours so the wrap-up ceremony itself does not trigger noisy reminders.

            ","path":["Recipes","Sessions","Session Ceremonies"],"tags":[]},{"location":"recipes/session-ceremonies/#phase-4-commit-offer","level":3,"title":"Phase 4: Commit Offer","text":"

            If there are uncommitted changes, offers to run /ctx-commit. Does not auto-commit.

            ","path":["Recipes","Sessions","Session Ceremonies"],"tags":[]},{"location":"recipes/session-ceremonies/#when-to-skip","level":2,"title":"When to Skip","text":"

            Not every session needs ceremonies.

            Skip /ctx-remember when:

            • You are doing a quick one-off lookup (reading a file, checking a value)
            • Context was already loaded this session via /ctx-agent
            • You are continuing immediately after a previous session and context is still fresh

            Skip /ctx-wrap-up when:

            • Nothing meaningful happened (only read files, answered a question)
            • You already persisted everything manually during the session
            • The session was trivial (typo fix, quick config change)

            A good heuristic: if the session produced something a future session should know about, run /ctx-wrap-up. If not, just close.

            ","path":["Recipes","Sessions","Session Ceremonies"],"tags":[]},{"location":"recipes/session-ceremonies/#quick-reference","level":2,"title":"Quick Reference","text":"
            # Session start\n/ctx-remember\n\n# ... do work ...\n\n# Session end\n/ctx-wrap-up\n

            That is the complete ceremony. Two commands, bookending your session.

            ","path":["Recipes","Sessions","Session Ceremonies"],"tags":[]},{"location":"recipes/session-ceremonies/#relationship-to-other-skills","level":2,"title":"Relationship to Other Skills","text":"Skill When Purpose /ctx-remember Session start Load and confirm context /ctx-reflect Mid-session breakpoints Checkpoint at milestones /ctx-wrap-up Session end Full session review and persist /ctx-commit After completing work Commit with context capture

            /ctx-reflect is for mid-session checkpoints. /ctx-wrap-up is for end-of-session: it is more thorough, covers the full session arc, and includes the commit offer. If you already ran /ctx-reflect recently, /ctx-wrap-up avoids proposing the same candidates again.

            ","path":["Recipes","Sessions","Session Ceremonies"],"tags":[]},{"location":"recipes/session-ceremonies/#tips","level":2,"title":"Tips","text":"
            • Make it a habit: The value of ceremonies compounds over sessions. Each /ctx-wrap-up makes the next /ctx-remember richer.
            • Trust the candidates: The agent scans the full conversation. It often catches learnings you forgot about.
            • Edit before approving: If a proposed candidate is close but not quite right, tell the agent what to change. Do not settle for a vague learning when a precise one is possible.
            • Do not force empty ceremonies: If /ctx-wrap-up finds nothing worth persisting, that is fine. A session that only read files and answered questions does not need artificial learnings.
            ","path":["Recipes","Sessions","Session Ceremonies"],"tags":[]},{"location":"recipes/session-ceremonies/#next-up","level":2,"title":"Next Up","text":"

            Browsing and Enriching Past Sessions →: Export session history to a browsable journal and enrich entries with metadata.

            ","path":["Recipes","Sessions","Session Ceremonies"],"tags":[]},{"location":"recipes/session-ceremonies/#see-also","level":2,"title":"See Also","text":"
            • The Complete Session: the full session workflow that ceremonies bookend
            • Persisting Decisions, Learnings, and Conventions: deep dive on what gets persisted during wrap-up
            • Detecting and Fixing Drift: keeping context files accurate between ceremonies
            • Pausing Context Hooks: skip ceremonies entirely for quick tasks that don't need them
            ","path":["Recipes","Sessions","Session Ceremonies"],"tags":[]},{"location":"recipes/session-changes/","level":1,"title":"Reviewing Session Changes","text":"","path":["Reviewing Session Changes"],"tags":[]},{"location":"recipes/session-changes/#what-changed-while-you-were-away","level":2,"title":"What Changed While You Were Away?","text":"

            Between sessions, teammates commit code, context files get updated, and decisions pile up. ctx change gives you a single-command summary of everything that moved since your last session.

            ","path":["Reviewing Session Changes"],"tags":[]},{"location":"recipes/session-changes/#quick-start","level":2,"title":"Quick Start","text":"
            # Auto-detects your last session and shows what changed\nctx change\n\n# Check what changed in the last 48 hours\nctx change --since 48h\n\n# Check since a specific date\nctx change --since 2026-03-10\n
            ","path":["Reviewing Session Changes"],"tags":[]},{"location":"recipes/session-changes/#how-reference-time-works","level":2,"title":"How Reference Time Works","text":"

            ctx change needs a reference point to compare against. It tries these sources in order:

            1. --since flag: explicit duration (24h, 72h) or date (2026-03-10, RFC3339 timestamp)
            2. Session markers: ctx-loaded-* files in .context/state/; picks the second-most-recent (your previous session start)
            3. Event log: last context-load-gate event from .context/state/events.jsonl
            4. Fallback: 24 hours ago

            The marker-based detection means ctx change usually just works without any flags: it knows when you last loaded context and shows everything after that.

            ","path":["Reviewing Session Changes"],"tags":[]},{"location":"recipes/session-changes/#what-it-reports","level":2,"title":"What It Reports","text":"","path":["Reviewing Session Changes"],"tags":[]},{"location":"recipes/session-changes/#context-file-changes","level":3,"title":"Context File Changes","text":"

            Any .md file in .context/ modified after the reference time:

            ### Context File Changes\n- `TASKS.md` - modified 2026-03-11 14:30\n- `DECISIONS.md` - modified 2026-03-11 09:15\n
            ","path":["Reviewing Session Changes"],"tags":[]},{"location":"recipes/session-changes/#code-changes","level":3,"title":"Code Changes","text":"

            Git activity since the reference time:

            ### Code Changes\n- **12 commits** since reference point\n- **Latest**: Fix journal enrichment ordering\n- **Directories touched**: internal, docs, specs\n- **Authors**: jose, claude\n
            ","path":["Reviewing Session Changes"],"tags":[]},{"location":"recipes/session-changes/#integrating-into-session-start","level":2,"title":"Integrating into Session Start","text":"

            Pair ctx change with the /ctx-remember ceremony for a complete session-start picture:

            # 1. Load context (this also creates the session marker)\nctx agent --budget 4000\n\n# 2. See what changed since your last session\nctx change\n

            Or script it:

            # .context/hooks/session-start.sh\nctx agent --budget 4000\necho \"---\"\nctx change\n
            ","path":["Reviewing Session Changes"],"tags":[]},{"location":"recipes/session-changes/#team-workflows","level":2,"title":"Team Workflows","text":"

            When multiple people share a .context/ directory, ctx change shows who changed what:

            # After pulling from remote\ngit pull\nctx change --since 72h\n

            This surfaces context file changes from teammates that you might otherwise miss in the commit log.

            ","path":["Reviewing Session Changes"],"tags":[]},{"location":"recipes/session-changes/#tips","level":2,"title":"Tips","text":"
            • No changes? If nothing shows up, the reference time might be wrong. Use --since 48h to widen the window.
            • Works without git. Context file changes are detected by filesystem mtime, not git. Code changes require git.
            • Hook integration. The context-load-gate hook writes the session marker that ctx change uses for auto-detection. If you're not using the ctx plugin, markers won't exist and it falls back to the event log or 24h window.
            ","path":["Reviewing Session Changes"],"tags":[]},{"location":"recipes/session-lifecycle/","level":1,"title":"The Complete Session","text":"","path":["Recipes","Sessions","The Complete Session"],"tags":[]},{"location":"recipes/session-lifecycle/#the-problem","level":2,"title":"The Problem","text":"

            \"What does a full ctx session look like from start to finish?\"

            You have ctx installed and your .context/ directory initialized, but the individual commands and skills feel disconnected.

            How do they fit together into a coherent workflow?

            This recipe walks through a complete session, from opening your editor to persisting context before you close it, so you can see how each piece connects.

            ","path":["Recipes","Sessions","The Complete Session"],"tags":[]},{"location":"recipes/session-lifecycle/#tldr","level":2,"title":"TL;DR","text":"
            1. Load: /ctx-remember: load context, get structured readback.
            2. Orient: /ctx-status: check file health and token usage.
            3. Pick: /ctx-next: choose what to work on.
            4. Work: implement, test, iterate.
            5. Commit: /ctx-commit: commit and capture decisions/learnings.
            6. Reflect: /ctx-reflect: identify what to persist (at milestones)
            7. Wrap up: /ctx-wrap-up: end-of-session ceremony.

            Read on for the full walkthrough with examples.

            Before You Start: Activate the Project

            ctx commands (and the skills that call them) require CTX_DIR to be declared for the shell you're working in; ctx does not walk the filesystem to find .context/. Once per shell (or via your shell rc / direnv):

            eval \"$(ctx activate)\"\n

            If you skip this, every skill below will surface an error naming the fix. See Activating a Context Directory for the full recipe.

            What Is a Readback?

            A readback is a structured summary where the agent plays back what it knows:

            • last session,
            • active tasks,
            • recent decisions.

            This way, you can confirm it loaded the right context.

            The term \"readback\" comes from aviation, where pilots repeat instructions back to air traffic control to confirm they heard correctly.

            Same idea in ctx: The agent tells you what it \"thinks\" is going on, and you correct anything that's off before the work begins.

            • Last session: topic, date, what was accomplished
            • Active work: pending and in-progress tasks
            • Recent context: 1-2 decisions or learnings that matter now
            • Next step: suggestion or question about what to focus on
            ","path":["Recipes","Sessions","The Complete Session"],"tags":[]},{"location":"recipes/session-lifecycle/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Tool Type Purpose ctx status CLI command Quick health check on context files ctx agent CLI command Load token-budgeted context packet ctx journal source CLI command List previous sessions ctx journal source --show CLI command Inspect a specific session in detail /ctx-remember Skill Recall project context with structured readback /ctx-agent Skill Load full context packet inside the assistant /ctx-status Skill Show context summary with commentary /ctx-next Skill Suggest what to work on with rationale /ctx-commit Skill Commit code and prompt for context capture /ctx-reflect Skill Structured reflection checkpoint /ctx-history Skill Browse session history inside your AI assistant","path":["Recipes","Sessions","The Complete Session"],"tags":[]},{"location":"recipes/session-lifecycle/#the-workflow","level":2,"title":"The Workflow","text":"

            The session lifecycle has seven steps. You will not always use every step (for example, a quick bugfix might skip reflection, and a research session might skip committing), but the full arc looks like this:

            Load context > Orient > Pick a Task > Work > Commit > Reflect > Wrap Up

            ","path":["Recipes","Sessions","The Complete Session"],"tags":[]},{"location":"recipes/session-lifecycle/#step-1-load-context","level":3,"title":"Step 1: Load Context","text":"

            Start every session by loading what you know. The fastest way is a single prompt:

            Do you remember what we were working on?\n

            This triggers the /ctx-remember skill. Behind the scenes, the assistant runs ctx agent --budget 4000, reads the files listed in the context packet (TASKS.md, DECISIONS.md, LEARNINGS.md, CONVENTIONS.md), checks ctx journal source --limit 3 for recent sessions, and then presents a structured readback.

            The readback should feel like a recall, not a file system tour. If you see \"Let me check if there are files...\" instead of a confident summary, the context system is not loaded properly.

            As an alternative, if you want raw data instead of a readback, run ctx status in your terminal or invoke /ctx-status for a summarized health check showing file counts, token usage, and recent activity.

            ","path":["Recipes","Sessions","The Complete Session"],"tags":[]},{"location":"recipes/session-lifecycle/#step-2-orient","level":3,"title":"Step 2: Orient","text":"

            After loading context, verify you understand the current state.

            /ctx-status\n

            The status output shows which context files are populated, how many tokens they consume, and which files were recently modified. Look for:

            • Empty core files: TASKS.md or CONVENTIONS.md with no content means the context is sparse
            • High token count (over 30k): the context is bloated and might need ctx compact
            • No recent activity: files may be stale and need updating

            If the status looks healthy and the readback from Step 1 gave you enough context, skip ahead.

            If something seems off (stale tasks, missing decisions...), spend a minute reading the relevant file before proceeding.

            ","path":["Recipes","Sessions","The Complete Session"],"tags":[]},{"location":"recipes/session-lifecycle/#step-3-pick-what-to-work-on","level":3,"title":"Step 3: Pick What to Work On","text":"

            With context loaded, choose a task. You can pick one yourself, or ask the assistant to recommend:

            /ctx-next\n

            The skill reads TASKS.md, checks recent sessions to avoid re-suggesting completed work, and presents 1-3 ranked recommendations with rationale.

            It prioritizes in-progress tasks over new starts (finishing is better than starting), respects explicit priority tags, and favors momentum: continuing a thread from a recent session is cheaper than context-switching.

            If you already know what you want to work on, state it directly:

            Let's work on the session enrichment feature.\n
            ","path":["Recipes","Sessions","The Complete Session"],"tags":[]},{"location":"recipes/session-lifecycle/#step-4-do-the-work","level":3,"title":"Step 4: Do the Work","text":"

            This is the main body of the session: write code, fix bugs, refactor, research: whatever the task requires.

            During this phase, a few ctx-specific patterns help:

            Check decisions before choosing: when you face a design choice, check if a prior decision covers it.

            Is this consistent with our decisions?\n

            Constrain scope: keep the assistant focused on the task at hand.

            Only change files in internal/cli/session/. Nothing else.\n

            Use /ctx-implement for multistep plans: if the task has multiple steps, this skill executes them one at a time with build/test verification between each step.

            Context monitoring runs automatically: the check-context-size hook monitors context capacity at adaptive intervals. Early in a session it stays silent. After 16+ prompts it starts monitoring, and past 30 prompts it checks frequently. If context capacity is running high, it will suggest saving unsaved work. No manual invocation is needed.

            ","path":["Recipes","Sessions","The Complete Session"],"tags":[]},{"location":"recipes/session-lifecycle/#step-5-commit-with-context","level":3,"title":"Step 5: Commit with Context","text":"

            When the work is ready, use the context-aware commit instead of raw git commit:

            /ctx-commit\n

            The Agent May Recommend Committing

            You do not always need to invoke /ctx-commit explicitly.

            After a commit, the agent may proactively offer to capture context:

            \"We just made a trade-off there. Want me to record it as a decision?\"

            This is normal: The Agent Playbook encourages persisting at milestones, and a commit is a natural milestone.

            As an alternative, you can ask the assistant \"can we commit this?\" and it will pick up the /ctx-commit skill for you.

            The skill runs a pre-commit build check (for Go projects, go build), reviews the staged changes, drafts a commit message focused on \"why\" rather than \"what\", and then commits.

            After the commit succeeds, it prompts you:

            **Any context to capture?**\n\n- **Decision**: Did you make a design choice or trade-off?\n- **Learning**: Did you hit a gotcha or discover something?\n- **Neither**: No context to capture; we are done.\n

            If you made a decision, the skill records it with ctx add decision. If you learned something, it records it with ctx add learning including context, lesson, and application fields. This is the bridge between committing code and remembering why the code looks the way it does.

            If source code changed in areas that affect documentation, the skill also offers to check for doc drift.

            ","path":["Recipes","Sessions","The Complete Session"],"tags":[]},{"location":"recipes/session-lifecycle/#step-6-reflect","level":3,"title":"Step 6: Reflect","text":"

            At natural breakpoints (after finishing a feature, resolving a complex bug, or before switching tasks) pause to reflect:

            /ctx-reflect\n

            Agents Reflect at Milestones

            Agents often reflect without explicit invocation.

            After completing a significant piece of work, the agent may naturally surface items worth persisting:

            \"We discovered that $PPID resolves differently inside hooks. Should I save that as a learning?\"

            This is the agent following the Work-Reflect-Persist cycle from the Agent Playbook.

            You do not need to say /ctx-reflect for this to happen; the agent treats milestones as reflection triggers on its own.

            The skill works through a checklist: learnings discovered, decisions made, tasks completed or created, and whether there are items worth persisting. It then presents a summary with specific items to persist, each with the exact command to run:

            I would suggest persisting:\n\n- **Learning**: `$PPID` in PreToolUse hooks resolves to the Claude Code PID\n  `ctx add learning --context \"...\" --lesson \"...\" --application \"...\" --session-id abc12345 --branch main --commit 68fbc00a`\n- **Task**: mark \"Add cooldown to ctx agent\" as done\n- **Decision**: tombstone-based cooldown with 10m default\n  `ctx add decision \"...\" --session-id abc12345 --branch main --commit 68fbc00a`\n\nWant me to persist any of these?\n

            The skill asks before persisting anything. You choose what to keep.

            Not every commit needs reflection. A typo fix does not. But when you have been debugging for an hour and finally understand the root cause, that is worth a reflection checkpoint.

            ","path":["Recipes","Sessions","The Complete Session"],"tags":[]},{"location":"recipes/session-lifecycle/#step-7-persist-before-ending","level":3,"title":"Step 7: Persist Before Ending","text":"

            Before ending the session, run the wrap-up ceremony to capture outstanding learnings, decisions, conventions, and tasks:

            /ctx-wrap-up\n

            Ceremony Skills

            /ctx-remember at session start and /ctx-wrap-up at session end are ceremony skills: Invoke them as explicit slash commands for completeness, not conversationally.

            See Session Ceremonies for details on why these two skills are the exception to the conversational rule.

            The skill gathers signal from git diff, recent commits, and the conversation, then proposes structured candidates for your approval. After you select which to keep, it persists them via ctx add and offers /ctx-commit if uncommitted changes remain.

            Session transcripts are automatically captured by Claude Code and can be browsed later with ctx journal source and ctx journal source --show.

            ","path":["Recipes","Sessions","The Complete Session"],"tags":[]},{"location":"recipes/session-lifecycle/#conversational-session-flow","level":2,"title":"Conversational Session Flow","text":"

            The steps above describe the mechanics. In practice, a session with a ctx-aware agent feels more like a conversation than a command sequence.

            The agent knows the lifecycle and moves through it naturally, often without explicit invocation. Here is a realistic example:

            You: Do you remember what we were working on?

            Agent (loads context silently, presents structured readback):

            Last session (Feb 7, caching-layer): we implemented the in-memory cache with\nTTL expiration and wrote tests for the eviction policy. \n\nYou have 3 pending tasks; the biggest one is adding Redis as a backend.\n\nRecent decision:\nwe chose LRU over LFU because the access pattern is mostly recency-based.\n\nWant to continue with the Redis backend, or tackle something else?\n

            You: Let's work on the Redis caching backend.

            Agent (picks up the task, begins implementation):

            Got it. I see the task in TASKS.md: \"Add Redis backend for cache layer.\"\n\nI will start with the connection pool setup. Only touching\n`internal/cache/redis/` for now.\n

            (The agent works... writes code, runs tests, iterates.)

            Agent (after implementing the connection pool and choosing a retry strategy):

            The connection pool is working. I went with exponential backoff with jitter\nfor retries instead of fixed intervals; it handles thundering herd better\nunder reconnection storms. That is a design choice worth recording. \n\nWant me to save it as a decision?\n

            You: Yes, go ahead.

            Agent:

            Done. Added to DECISIONS.md: \"Exponential backoff with jitter for Redis\nreconnection, to avoid thundering herd on pool recovery.\"\n

            (More work happens... the agent implements the cache read/write paths.)

            You: What else did we learn during this session?

            Agent:

            A few things worth capturing:\n\n* **Learning**: Redis `SET` with `NX` and `PX` flags handles atomic\n  set-if-not-exists with TTL in a single call. No need for a separate\n  `EXPIRE` command. We initially did it in two calls and hit a race condition.\n* **Learning**: The `go-redis` library's `PoolSize` default is 10 per CPU.\n  For our workload, explicit sizing at 20 was more predictable.\n* **Task update**: The connection pool subtask is done; the read/write path\n  is implemented but needs integration tests.\n\nWant me to persist these?\n

            You: Yes. And let's wrap up.

            Agent (reflects and persists... all in one flow):

            Persisted:\n\n* 2 learnings added to LEARNINGS.md\n* Marked \"connection pool setup\" done in TASKS.md\n* Added \"write integration tests for Redis cache\" as a new task\n\nContext is up to date. You are good to close.\n

            Notice What Happened

            In the above workflow, the user never typed /ctx-reflect or ctx add learning.

            The agent moved through Load, Orient, Pick, Work, Commit, and Reflect driven by natural conversation.

            \"Let's wrap up\" was enough to trigger the full reflect-and-persist flow.

            The agent surfaced persist-worthy items at milestones (after a design choice, after discovering a gotcha) without waiting to be asked.

            This is the intended experience.

            The commands and skills still exist for when you want precise control, but the agent is a proactive partner in the lifecycle, not a passive executor of slash commands.

            ","path":["Recipes","Sessions","The Complete Session"],"tags":[]},{"location":"recipes/session-lifecycle/#putting-it-all-together","level":2,"title":"Putting It All Together","text":"

            Quick-reference checklist for a complete session:

            • Load: /ctx-remember: load context and confirm readback
            • Orient: /ctx-status: check file health and token usage
            • Pick: /ctx-next: choose what to work on
            • Work: implement, test, iterate (scope with \"only change X\")
            • Commit: /ctx-commit: commit and capture decisions/learnings
            • Reflect: /ctx-reflect: identify what to persist (at milestones)
            • Wrap up: /ctx-wrap-up: end-of-session ceremony

            Conversational equivalents: you can drive the same lifecycle with plain language:

            Step Slash command Natural language Load /ctx-remember \"Do you remember?\" / \"What were we working on?\" Orient /ctx-status \"How's our context looking?\" Pick /ctx-next \"What should we work on?\" / \"Let's do the caching task\" Work (none) \"Only change files in internal/cache/\" Commit /ctx-commit \"Commit this\" / \"Ship it\" Reflect /ctx-reflect \"What did we learn?\" / (agent offers at milestones) Wrap up /ctx-wrap-up (use the slash command for completeness)

            The agent understands both columns.

            In practice, most sessions use a mix:

            • Explicit Commands when you want precision;
            • Natural Language when you want flow and agentic autonomy.

            The agent will also initiate steps on its own (particularly \"Reflect\") when it recognizes a milestone.

            Short sessions (quick bugfix) might only use: Load, Work, Commit.

            Long sessions should Reflect after each major milestone and persist learnings and decisions before ending.

            ","path":["Recipes","Sessions","The Complete Session"],"tags":[]},{"location":"recipes/session-lifecycle/#tips","level":2,"title":"Tips","text":"

            Persist early if context is running low. A hook monitors context capacity and notifies you when it gets high, but do not wait for the notification. If you have been working for a while and have unpersisted learnings, persist proactively.

            Browse previous sessions by topic. If you need context from a prior session, ctx journal source --show auth will match by keyword. You do not need to remember the exact date or slug.

            Reflection is optional but valuable. You can skip /ctx-reflect for small changes, but always persist learnings and decisions before ending a session where you did meaningful work. These are what the next session loads.

            Let the hook handle context loading. The PreToolUse hook runs ctx agent automatically with a cooldown, so context loads on first tool use without you asking. The /ctx-remember prompt at session start is for your benefit (to get a readback), not because the agent needs it.

            The agent is a proactive partner, not a passive tool. A ctx-aware agent follows the Agent Playbook: it watches for milestones (completed tasks, design decisions, discovered gotchas) and offers to persist them without being asked. If you finish a tricky debugging session, it may say \"That root cause is worth saving as a learning. Want me to record it?\" before you think to ask. This is by design.

            ","path":["Recipes","Sessions","The Complete Session"],"tags":[]},{"location":"recipes/session-lifecycle/#next-up","level":2,"title":"Next Up","text":"

            Session Ceremonies →: The two bookend rituals for every session: /ctx-remember at the start, /ctx-wrap-up at the end.

            ","path":["Recipes","Sessions","The Complete Session"],"tags":[]},{"location":"recipes/session-lifecycle/#see-also","level":2,"title":"See Also","text":"
            • Session Ceremonies: why /ctx-remember and /ctx-wrap-up are explicit slash commands, not conversational
            • CLI Reference: full documentation for all ctx commands
            • Prompting Guide: effective prompts for ctx-enabled projects
            • Tracking Work Across Sessions: deep dive on task management
            • Persisting Decisions, Learnings, and Conventions: deep dive on knowledge capture
            • Detecting and Fixing Drift: keeping context files accurate
            • Pausing Context Hooks: shortcut the full lifecycle for quick tasks that don't need ceremony overhead
            ","path":["Recipes","Sessions","The Complete Session"],"tags":[]},{"location":"recipes/session-pause/","level":1,"title":"Pausing Context Hooks","text":"","path":["Recipes","Sessions","Pausing Context Hooks"],"tags":[]},{"location":"recipes/session-pause/#the-problem","level":2,"title":"The Problem","text":"

            Not every session needs the full ceremony. Quick investigations, one-off questions, small fixes unrelated to active project work: These tasks don't benefit from persistence nudges, ceremony reminders, or knowledge checks. Every hook still fires, consuming tokens and attention on work that won't produce learnings or decisions worth capturing.

            ","path":["Recipes","Sessions","Pausing Context Hooks"],"tags":[]},{"location":"recipes/session-pause/#tldr","level":2,"title":"TL;DR","text":"Command What it does ctx hook pause or /ctx-pause Silence all nudge hooks for this session ctx hook resume or /ctx-resume Restore normal hook behavior

            Pause is session-scoped: It only affects the current session. Other sessions (same project, different terminal) are unaffected.

            ","path":["Recipes","Sessions","Pausing Context Hooks"],"tags":[]},{"location":"recipes/session-pause/#what-gets-paused","level":2,"title":"What Gets Paused","text":"

            All nudge and reminder hooks go silent:

            • Context size checkpoints
            • Ceremony adoption nudges
            • Persistence reminders
            • Journal maintenance reminders
            • Knowledge growth nudges
            • Map staleness nudges
            • Version update nudges
            • Resource pressure warnings
            • QA reminders
            • Post-commit nudges
            • Specs nudges
            • Backup age warnings
            • Context load gate
            • Pending reminders relay
            ","path":["Recipes","Sessions","Pausing Context Hooks"],"tags":[]},{"location":"recipes/session-pause/#what-still-fires","level":2,"title":"What Still Fires","text":"

            Security hooks always run, even when paused:

            • block-non-path-ctx: prevents ./ctx invocations
            • block-dangerous-commands: blocks sudo, force push, etc.
            ","path":["Recipes","Sessions","Pausing Context Hooks"],"tags":[]},{"location":"recipes/session-pause/#workflow","level":2,"title":"Workflow","text":"
            # 1. Session starts: Context loads normally.\n\n# 2. You realize this is a quick task\nctx hook pause\n\n# 3. Work without interruption: hooks are silent\n\n# 4. Session evolves into real work? Resume first\nctx hook resume\n\n# 5. Now wrap up normally\n# /ctx-wrap-up\n
            ","path":["Recipes","Sessions","Pausing Context Hooks"],"tags":[]},{"location":"recipes/session-pause/#graduated-reminder","level":2,"title":"Graduated Reminder","text":"

            Paused hooks aren't completely invisible. A minimal indicator appears so you always know the state:

            Paused turns What you see 1-5 ctx:paused 6+ ctx:paused (N turns): resume with /ctx-resume

            This prevents the \"forgot I paused\" problem during long sessions.

            ","path":["Recipes","Sessions","Pausing Context Hooks"],"tags":[]},{"location":"recipes/session-pause/#tips","level":2,"title":"Tips","text":"
            • Resume before wrapping up. If your quick task turns into real work, resume hooks before running /ctx-wrap-up. The wrap-up ceremony needs active hooks to capture learnings properly.

            • Initial context load is unaffected. The ~8k token startup injection (CLAUDE.md, playbook, constitution) happens before any command runs. Pause only affects hooks that fire during the session.

            • Use for quick investigations. Debugging a stack trace? Checking a git log? Answering a colleague's question? Pause, do the work, close the session. No ceremony needed.

            • Don't use for real work. If you're implementing features, fixing bugs, or making decisions: keep hooks active. The nudges exist to prevent context loss.

            ","path":["Recipes","Sessions","Pausing Context Hooks"],"tags":[]},{"location":"recipes/session-pause/#see-also","level":2,"title":"See Also","text":"

            See also: Session Ceremonies: the bookend rituals that pause lets you skip when they aren't needed.

            See also: Customizing Hook Messages: if you want to change what hooks say rather than silencing them entirely.

            See also: The Complete Session: the full session workflow that pause shortcuts for quick tasks.

            ","path":["Recipes","Sessions","Pausing Context Hooks"],"tags":[]},{"location":"recipes/session-reminders/","level":1,"title":"Session Reminders","text":"","path":["Recipes","Sessions","Session Reminders"],"tags":[]},{"location":"recipes/session-reminders/#the-problem","level":2,"title":"The Problem","text":"

            You're deep in a session and realize: \"I need to refactor the swagger definitions next time.\" You could add a task, but this isn't a work item: it's a note to future-you. You could jot it on the scratchpad, but scratchpad entries don't announce themselves.

            How do you leave a message that your next session opens with?

            ","path":["Recipes","Sessions","Session Reminders"],"tags":[]},{"location":"recipes/session-reminders/#tldr","level":2,"title":"TL;DR","text":"
            ctx remind \"refactor the swagger definitions\"\nctx remind list\nctx remind dismiss 1       # or batch: ctx remind dismiss 1 3-5\n

            Reminders surface automatically at session start: VERBATIM, every session, until you dismiss them.

            ","path":["Recipes","Sessions","Session Reminders"],"tags":[]},{"location":"recipes/session-reminders/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Tool Type Purpose ctx remind CLI command Add a reminder (default action) ctx remind list CLI command Show all pending reminders ctx remind dismiss CLI command Remove a reminder by ID (or --all) /ctx-remind Skill Natural language interface to reminders","path":["Recipes","Sessions","Session Reminders"],"tags":[]},{"location":"recipes/session-reminders/#the-workflow","level":2,"title":"The Workflow","text":"","path":["Recipes","Sessions","Session Reminders"],"tags":[]},{"location":"recipes/session-reminders/#step-1-leave-a-reminder","level":3,"title":"Step 1: Leave a Reminder","text":"

            Tell your agent what to remember, or run it directly:

            You: \"remind me to refactor the swagger definitions\"\n\nAgent: [runs ctx remind \"refactor the swagger definitions\"]\n       \"Reminder set:\n         + [1] refactor the swagger definitions\"\n

            Or from the terminal:

            ctx remind \"refactor the swagger definitions\"\n
            ","path":["Recipes","Sessions","Session Reminders"],"tags":[]},{"location":"recipes/session-reminders/#step-2-set-a-date-gate-optional","level":3,"title":"Step 2: Set a Date Gate (Optional)","text":"

            If the reminder shouldn't fire until a specific date:

            You: \"remind me to check the deploy logs after Tuesday\"\n\nAgent: [runs ctx remind \"check the deploy logs\" --after 2026-02-25]\n       \"Reminder set:\n         + [2] check the deploy logs  (after 2026-02-25)\"\n

            The reminder stays silent until that date, then fires every session.

            The agent converts natural language dates (\"tomorrow\", \"next week\", \"after the release on Friday\") to YYYY-MM-DD. If it's ambiguous, it asks.

            ","path":["Recipes","Sessions","Session Reminders"],"tags":[]},{"location":"recipes/session-reminders/#step-3-start-a-new-session","level":3,"title":"Step 3: Start a New Session","text":"

            Next session, the reminder appears automatically before anything else:

            ┌─ Reminders ──────────────────────────────────────\n│  [1] refactor the swagger definitions\n│\n│ Dismiss: ctx remind dismiss <id>\n│ Dismiss all: ctx remind dismiss --all\n└──────────────────────────────────────────────────\n

            No action needed: The check-reminders hook fires on UserPromptSubmit and the agent relays the box verbatim.

            ","path":["Recipes","Sessions","Session Reminders"],"tags":[]},{"location":"recipes/session-reminders/#step-4-dismiss-when-done","level":3,"title":"Step 4: Dismiss When Done","text":"

            After you've acted on a reminder (or decided to skip it):

            You: \"dismiss reminder 1\"\n\nAgent: [runs ctx remind dismiss 1]\n       \"Dismissed:\n         - [1] refactor the swagger definitions\"\n\n# Batch dismiss also works:\n# \"dismiss reminders 3, 5 through 7\"\n# → ctx remind dismiss 3 5-7\n

            Or clear everything:

            ctx remind dismiss --all\n
            ","path":["Recipes","Sessions","Session Reminders"],"tags":[]},{"location":"recipes/session-reminders/#step-5-check-whats-pending","level":3,"title":"Step 5: Check What's Pending","text":"
            ctx remind list\n
              [1] refactor the swagger definitions\n  [3] review auth token expiry logic\n  [4] check deploy logs  (after 2026-02-25, not yet due)\n

            Date-gated reminders that haven't reached their date show (not yet due).

            ","path":["Recipes","Sessions","Session Reminders"],"tags":[]},{"location":"recipes/session-reminders/#using-ctx-remind-in-a-session","level":2,"title":"Using /ctx-remind in a Session","text":"

            Invoke the /ctx-remind skill, then describe what you want:

            You: /ctx-remind remind me to update the API docs\nYou: /ctx-remind what reminders do I have?\nYou: /ctx-remind dismiss reminder 3\n
            You say (after /ctx-remind) What the agent does \"remind me to update the API docs\" ctx remind \"update the API docs\" \"remind me next week to check staging\" ctx remind \"check staging\" --after 2026-03-02 \"what reminders do I have?\" ctx remind list \"dismiss reminder 3\" ctx remind dismiss 3 \"dismiss reminders 3, 5 through 7\" ctx remind dismiss 3 5-7 \"clear all reminders\" ctx remind dismiss --all","path":["Recipes","Sessions","Session Reminders"],"tags":[]},{"location":"recipes/session-reminders/#reminders-vs-scratchpad-vs-tasks","level":2,"title":"Reminders vs Scratchpad vs Tasks","text":"You want to... Use Leave a note that announces itself next session ctx remind Jot down a quick value or sensitive token ctx pad Track work with status and completion TASKS.md Record a decision or lesson for all sessions Context files

            Decision guide:

            • If it should announce itself at session start → ctx remind
            • If it's a quiet note you'll check manually → ctx pad
            • If it's a work item you'll mark done → TASKS.md

            Reminders Are Sticky Notes, Not Tasks

            A reminder has no status, no priority, no lifecycle. It's a message to \"future you\" that fires until dismissed.

            If you need tracking, use a task in TASKS.md.

            ","path":["Recipes","Sessions","Session Reminders"],"tags":[]},{"location":"recipes/session-reminders/#tips","level":2,"title":"Tips","text":"
            • Reminders fire every session: Unlike nudges (which throttle to once per day), reminders repeat until you dismiss them. This is intentional: You asked to be reminded.
            • Date gating is session-scoped, not clock-scoped: --after 2026-02-25 means \"don't show until sessions on or after Feb 25.\" It does not mean \"alarm at midnight on Feb 25.\"
            • The agent handles date parsing: Say \"next week\" or \"after Friday\": The agent converts it to YYYY-MM-DD. The CLI only accepts the explicit date format.
            • Reminders are committed to git: They travel with the repo. If you switch machines, your reminders follow.
            • IDs never reuse: After dismissing reminder 3, the next reminder gets ID 4 (or higher). No confusion from recycled numbers.
            ","path":["Recipes","Sessions","Session Reminders"],"tags":[]},{"location":"recipes/session-reminders/#next-up","level":2,"title":"Next Up","text":"

            Using the Scratchpad →: For quiet notes and sensitive values that don't need session-start announcements.

            ","path":["Recipes","Sessions","Session Reminders"],"tags":[]},{"location":"recipes/session-reminders/#see-also","level":2,"title":"See Also","text":"
            • CLI Reference: ctx remind: full command syntax and flags
            • The Complete Session: how reminders fit into the session lifecycle
            • Managing Tasks: for work items that need status tracking
            ","path":["Recipes","Sessions","Session Reminders"],"tags":[]},{"location":"recipes/state-maintenance/","level":1,"title":"State Directory Maintenance","text":"","path":["State Directory Maintenance"],"tags":[]},{"location":"recipes/state-maintenance/#the-problem","level":2,"title":"The Problem","text":"

            Every session creates tombstone files in .context/state/ - small markers that suppress repeat hook nudges (\"already checked context size\", \"already sent persistence reminder\"). Over days and weeks, these accumulate into hundreds of files from long-dead sessions.

            The files are harmless individually, but the clutter makes it harder to reason about state, and stale global tombstones can suppress nudges across sessions entirely.

            ","path":["State Directory Maintenance"],"tags":[]},{"location":"recipes/state-maintenance/#tldr","level":2,"title":"TL;DR","text":"
            ctx prune --dry-run     # preview what would be removed\nctx prune               # prune files older than 7 days\nctx prune --days 1      # more aggressive: keep only today\n
            ","path":["State Directory Maintenance"],"tags":[]},{"location":"recipes/state-maintenance/#commands-used","level":2,"title":"Commands Used","text":"Tool Type Purpose ctx prune Command Remove old per-session state files ctx status Command Quick health overview including state dir","path":["State Directory Maintenance"],"tags":[]},{"location":"recipes/state-maintenance/#understanding-state-files","level":2,"title":"Understanding State Files","text":"

            State files fall into two categories:

            Session-scoped (contain a UUID in the filename): Created per-session to suppress repeat nudges. Safe to prune once the session ends. Examples:

            context-check-11e94c1d-1639-4c04-bf77-63dcf1f50ec7\nheartbeat-11e94c1d-1639-4c04-bf77-63dcf1f50ec7\npersistence-nudge-11e94c1d-1639-4c04-bf77-63dcf1f50ec7\n

            Global (no UUID): Persist across sessions. ctx prune preserves these automatically. Some are legitimate state (events.jsonl, memory-import.json); others may be stale tombstones that need manual review.

            ","path":["State Directory Maintenance"],"tags":[]},{"location":"recipes/state-maintenance/#the-workflow","level":2,"title":"The Workflow","text":"","path":["State Directory Maintenance"],"tags":[]},{"location":"recipes/state-maintenance/#step-1-preview","level":3,"title":"Step 1: Preview","text":"

            Always dry-run first to see what would be removed:

            ctx prune --dry-run\n

            The output shows each file, its age, and a summary:

              would prune: context-check-abc123... (age: 3d)\n  would prune: heartbeat-abc123... (age: 3d)\n\nDry run - would prune 150 files (skip 70 recent, preserve 14 global)\n
            ","path":["State Directory Maintenance"],"tags":[]},{"location":"recipes/state-maintenance/#step-2-prune","level":3,"title":"Step 2: Prune","text":"

            Choose an age threshold. The default is 7 days:

            ctx prune               # older than 7 days\nctx prune --days 3      # older than 3 days\nctx prune --days 1      # older than 1 day (aggressive)\n
            ","path":["State Directory Maintenance"],"tags":[]},{"location":"recipes/state-maintenance/#step-3-review-global-files","level":3,"title":"Step 3: Review Global Files","text":"

            After pruning, check what prune preserved:

            ls .context/state/ | grep -v '[0-9a-f]\\{8\\}-[0-9a-f]\\{4\\}'\n

            Legitimate global files (keep):

            • events.jsonl - event log
            • memory-import.json - import tracking state

            Stale global tombstones (safe to delete):

            • Files like backup-reminded, ceremony-reminded, version-checked with no session UUID are one-shot markers. If they are from a previous session, they are stale and can be removed manually.
            rm .context/state/backup-reminded .context/state/ceremony-reminded\n
            ","path":["State Directory Maintenance"],"tags":[]},{"location":"recipes/state-maintenance/#step-4-verify","level":3,"title":"Step 4: Verify","text":"
            ls .context/state/ | wc -l    # should be manageable\n
            ","path":["State Directory Maintenance"],"tags":[]},{"location":"recipes/state-maintenance/#when-to-prune","level":2,"title":"When to Prune","text":"
            • Weekly: ctx prune with default 7-day threshold
            • After heavy parallel work: Multiple concurrent sessions create many tombstones. Prune with --days 1 afterward.
            • When state directory exceeds ~100 files: A sign that pruning hasn't run recently
            ","path":["State Directory Maintenance"],"tags":[]},{"location":"recipes/state-maintenance/#tips","level":2,"title":"Tips","text":"

            Pruning active sessions is safe but noisy: If you prune a file belonging to a still-running session, the corresponding hook will re-fire its nudge on the next prompt. Minor UX annoyance, not data loss.

            No context files are stored in state: The state directory contains only tombstones, counters, and diagnostic data. Nothing in .context/state/ affects your decisions, learnings, tasks, or conventions.

            Test artifacts sneak in: Files like context-check-statstest or heartbeat-unknown are artifacts from development or testing. They lack UUIDs so prune preserves them. Delete manually.

            ","path":["State Directory Maintenance"],"tags":[]},{"location":"recipes/state-maintenance/#see-also","level":2,"title":"See Also","text":"
            • Detecting and Fixing Drift: broader context maintenance including drift detection and archival
            • Troubleshooting: diagnostic workflow using ctx doctor and event logs
            • CLI Reference: system: full flag documentation for ctx prune and related commands
            ","path":["State Directory Maintenance"],"tags":[]},{"location":"recipes/steering/","level":1,"title":"Writing Steering Files","text":"","path":["Recipes","Agents and Automation","Writing Steering Files"],"tags":[]},{"location":"recipes/steering/#writing-steering-files","level":1,"title":"Writing Steering Files","text":"

            Steering files tell your AI assistant how to behave, not what was decided or how the codebase is written. This recipe walks through writing a steering file from scratch, validating which prompts will trigger it, and syncing it out to your configured AI tools.

            Before You Start

            If you're unsure whether a rule belongs in steering/, DECISIONS.md, or CONVENTIONS.md, read the \"Steering vs decisions vs conventions\" admonition on the ctx steering reference page. The short version: if the rule is \"the AI should always do X when asked about Y,\" that's steering. Otherwise it's probably a decision or convention.

            ","path":["Recipes","Agents and Automation","Writing Steering Files"],"tags":[]},{"location":"recipes/steering/#start-here-customize-the-foundation-files","level":2,"title":"Start Here: Customize the Foundation Files","text":"

            ctx init scaffolds four foundation steering files for you the first time you initialize a project:

            File Purpose .context/steering/product.md Product context, goals, target users .context/steering/tech.md Tech stack, constraints, key dependencies .context/steering/structure.md Directory layout, naming conventions .context/steering/workflow.md Branch strategy, commit rules, pre-commit

            Each file opens with an inline HTML comment that explains the three inclusion modes, what priority means, and the tools scope. The comment is invisible in rendered markdown but visible when you edit the file. Delete it once the file is yours.

            All four default to inclusion: always and priority: 10, so they fire on every AI tool call until you customize them. If you're reading this recipe and haven't touched them yet, open each one now and replace the placeholder bullet list with actual rules for your project. That's the highest-leverage five minutes you can spend in a new ctx setup.

            What to fill in, by file:

            product.md: The elevator pitch plus hard scope:

            • One-sentence product description.
            • Primary users and their top job-to-be-done.
            • Two or three \"this is explicitly out of scope\" items so the AI doesn't wander.

            tech.md: Technology and constraints:

            • Languages and versions (Go 1.22, Node 20, etc.).
            • Frameworks and key libraries.
            • Runtime and deployment target.
            • Hard constraints: \"no CGO\", \"no network at test time\", \"no external DB for unit tests\". These are the things that burn agents when they don't know them.

            structure.md: Layout and naming:

            • Top-level directories and their purpose.
            • Where new files should go (and where they should NOT).
            • Naming conventions for packages, files, types.

            workflow.md: Process rules:

            • Branch strategy (main-only, trunk-based, feature branches).
            • Commit message format, signed-off-by requirement.
            • Pre-commit and pre-push checks.
            • Review expectations.

            After editing, the next AI tool call in Claude Code will pick up the new rules automatically via the plugin's PreToolUse hook, with no sync step and no restart. Other tools (Cursor, Cline, Kiro) need ctx steering sync to export into their native format.

            Prefer a Bare .context/steering/ Directory?

            Re-run ctx init --no-steering-init and delete the scaffolded files. ctx init leaves existing files alone, so the flag is only needed if you want to opt out of the initial scaffold.

            The rest of this recipe walks through creating an additional, scenario-specific steering file beyond the four foundation defaults.

            ","path":["Recipes","Agents and Automation","Writing Steering Files"],"tags":[]},{"location":"recipes/steering/#scenario","level":2,"title":"Scenario","text":"

            You're working on a project with a strict input-validation policy: every new API handler must validate request bodies before touching the database. You want the AI to flag this concern automatically whenever it's asked to write an HTTP handler, without you having to remind it every session.

            Claude Code Users: Pick always, Not auto

            This walkthrough uses inclusion: auto because the scenario is a scoped rule that matches a specific kind of prompt. That works natively on Cursor, Cline, and Kiro (they resolve the description keyword match themselves).

            On Claude Code, auto does not fire through the plugin's PreToolUse hook. The hook passes an empty prompt to ctx agent, so only always files match. Claude can still reach an auto file by calling the ctx_steering_get MCP tool, but that requires Claude to decide to call it; there's no automatic injection.

            If Claude Code is your tool, set inclusion: always in Step 2 instead of auto. The rule will fire on every tool call regardless of topic. You may want to narrow the rule body so the extra tokens per turn aren't wasted on unrelated work.

            See the ctx steering reference \"Prefer inclusion: always for Claude Code\" section for the full trade-off.

            ","path":["Recipes","Agents and Automation","Writing Steering Files"],"tags":[]},{"location":"recipes/steering/#step-1-scaffold-the-file","level":2,"title":"Step 1: Scaffold the File","text":"
            ctx steering add api-validation\n

            That creates .context/steering/api-validation.md with default frontmatter:

            ---\nname: api-validation\ndescription:\ninclusion: manual\ntools: []\npriority: 50\n---\n

            The defaults are deliberately conservative: inclusion: manual means the file won't be applied until you opt in, which keeps the rules out of the prompt until you've reviewed them.

            ","path":["Recipes","Agents and Automation","Writing Steering Files"],"tags":[]},{"location":"recipes/steering/#step-2-fill-in-the-rule","level":2,"title":"Step 2: Fill in the Rule","text":"

            Open the file and write the rule body plus a focused description. The description is what inclusion: auto matches against later.

            ---\nname: api-validation\ndescription: HTTP handler input validation and request parsing\ninclusion: auto\ntools: []\npriority: 20\n---\n\n# API request validation\n\nEvery new HTTP handler MUST:\n\n1. Parse request bodies into typed structs, never `map[string]any`.\n2. Validate required fields before any database call.\n3. Return 400 with a machine-readable error for validation failures.\n4. Use `context.Context` from the request for all downstream calls.\n\nPrefer existing validation helpers in `internal/validate/`\nrather than inline checks.\n

            Notes on the choices:

            • inclusion: auto: this rule should fire automatically on HTTP-handler-shaped prompts, not always.
            • priority: 20: lower than the default, so this rule appears near the top of the prompt alongside other high-priority rules.
            • Description is keyword-rich (\"HTTP handler input validation and request parsing\"); the auto matcher scores prompts against these words.
            ","path":["Recipes","Agents and Automation","Writing Steering Files"],"tags":[]},{"location":"recipes/steering/#step-3-preview-which-prompts-match","level":2,"title":"Step 3: Preview Which Prompts Match","text":"

            Before committing the file, validate your description catches the prompts you care about:

            ctx steering preview \"add an endpoint for updating user email\"\n

            Expected output:

            Steering files matching prompt \"add an endpoint for updating user email\":\n  api-validation       inclusion=auto     priority=20  tools=all\n

            Good, the prompt matches. Try a negative case:

            ctx steering preview \"fix a bug in the JSON renderer\"\n

            Expected: empty match (or whatever else is currently auto). If api-validation incorrectly fires for unrelated prompts, tighten the description. If it misses prompts it should catch, add more keywords.

            ","path":["Recipes","Agents and Automation","Writing Steering Files"],"tags":[]},{"location":"recipes/steering/#step-4-list-to-confirm-metadata","level":2,"title":"Step 4: List to Confirm Metadata","text":"
            ctx steering list\n

            Should show api-validation alongside any other files, with its inclusion mode and priority. If the list is wrong, check the frontmatter for typos.

            ","path":["Recipes","Agents and Automation","Writing Steering Files"],"tags":[]},{"location":"recipes/steering/#step-5-get-the-rules-in-front-of-the-ai","level":2,"title":"Step 5: Get the Rules in Front of the AI","text":"

            Steering files are authored once in .context/steering/, but how they reach the AI depends on which tool you use. There are two delivery mechanisms:

            ","path":["Recipes","Agents and Automation","Writing Steering Files"],"tags":[]},{"location":"recipes/steering/#path-a-native-rules-tools-cursor-cline-kiro","level":3,"title":"Path A: Native-Rules Tools (Cursor, Cline, Kiro)","text":"

            These tools read a specific directory for rules. ctx steering sync exports your files into that directory with tool-specific frontmatter:

            ctx steering sync\n

            Depending on the active tool in .ctxrc or --tool:

            Tool Target Cursor .cursor/rules/ Cline .clinerules/ Kiro .kiro/steering/

            The sync is idempotent; unchanged files are skipped. Run it whenever you edit a steering file.

            ","path":["Recipes","Agents and Automation","Writing Steering Files"],"tags":[]},{"location":"recipes/steering/#path-b-claude-code-and-codex-hook-mcp","level":3,"title":"Path B: Claude Code and Codex (Hook + MCP)","text":"

            Claude Code and Codex have no native rules primitive, so ctx steering sync is a no-op for them; it deliberately skips both. Instead, steering reaches these tools through two non-sync channels:

            1. PreToolUse hook (automatic). The ctx setup claude-code plugin installs a hook that runs ctx agent --budget 8000 before each tool call. ctx agent loads your steering files, filters them against the active prompt, and includes matching bodies as Tier 6 of the context packet. The packet gets injected into Claude's context automatically.

            2. ctx_steering_get MCP tool (on-demand). Claude can call this MCP tool mid-task to fetch matching steering files for a specific prompt. Activation comes from Claude's judgment, not from a hook.

            Both channels activate when you run:

            ctx setup claude-code --write\n

            That installs the plugin, wires the hook, and registers the MCP server. After that, steering files you edit are picked up on the next tool call, with no sync step needed.

            Running ctx steering sync with Claude Code

            It won't error; it will simply report that Claude and Codex aren't sync targets and skip them. If Claude Code is your only tool, you never need to run sync. If you use both Claude Code and (say) Cursor, run sync to keep Cursor up to date; the Claude pipeline takes care of itself via the hook.

            ","path":["Recipes","Agents and Automation","Writing Steering Files"],"tags":[]},{"location":"recipes/steering/#step-6-verify-the-ai-sees-it","level":2,"title":"Step 6: Verify the AI Sees It","text":"

            Open your AI tool and ask it something the rule should fire on:

            \"Add a POST /users endpoint that accepts email and name.\"

            If the rule is working, the AI's first response should mention input validation, typed structs, and the internal/validate/ package, because that's what the steering file told it to do.

            If nothing happens, the fix depends on which path you're on:

            Path A (Cursor/Cline/Kiro):

            1. Re-run ctx steering preview with the literal prompt to confirm the match.
            2. Run ctx steering list and verify inclusion is auto, not manual.
            3. Check the tool's own config directory (e.g. .cursor/rules/); the file should be there after ctx steering sync.

            Path B (Claude Code):

            1. Re-run ctx steering preview with the literal prompt to confirm the match.
            2. Verify the plugin is installed: cat .claude/hooks.json should include ctx agent --budget 8000 under PreToolUse. If not, re-run ctx setup claude-code --write.
            3. Run ctx agent --budget 8000 manually and grep the output for your rule body. If it's there, the data is fine; if it's missing, the inclusion mode or description is at fault.
            4. As a last resort, ask Claude directly: \"Call the ctx_steering_get MCP tool with my prompt and show me the result.\" If the MCP tool returns your rule, Claude has access but isn't pulling it into the initial context packet; tighten the description keywords.
            ","path":["Recipes","Agents and Automation","Writing Steering Files"],"tags":[]},{"location":"recipes/steering/#common-mistakes","level":2,"title":"Common Mistakes","text":"

            Too-generic descriptions. description: general coding will match almost every prompt and flood the context window. Keep descriptions specific to the scenario the rule applies to.

            Overlapping rules. If two steering files match the same prompt and contradict each other, the result is confusing. Use priority to resolve, but better: merge the files or narrow the descriptions so they don't overlap.

            Putting decisions in steering. \"We decided to use PostgreSQL\" is a decision, not a rule for the AI to follow on every prompt. Record decisions with ctx add decision, not ctx steering add.

            Committing inclusion: always without thinking. Rules marked always fire on every prompt, consuming tier-6 budget permanently. Only use always for true invariants (security, safety, licensing). Everything else should be auto or manual.

            ","path":["Recipes","Agents and Automation","Writing Steering Files"],"tags":[]},{"location":"recipes/steering/#see-also","level":2,"title":"See Also","text":"
            • ctx steering reference: full command, flag, and frontmatter reference.
            • ctx setup: configure which tools the steering sync writes to.
            • Authoring triggers: if you want script-based automation, not rule-based prompt injection.
            ","path":["Recipes","Agents and Automation","Writing Steering Files"],"tags":[]},{"location":"recipes/system-hooks-audit/","level":1,"title":"Auditing System Hooks","text":"","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#the-problem","level":2,"title":"The Problem","text":"

            ctx runs 14 system hooks behind the scenes: nudging your agent to persist context, warning about resource pressure, gating commits on QA. But these hooks are invisible by design. You never see them fire. You never know if they stopped working.

            How do you verify your hooks are actually running, audit what they do, and get alerted when they go silent?

            ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#tldr","level":2,"title":"TL;DR","text":"
            ctx system check-resources # run a hook manually\nls -la .context/logs/      # check hook execution logs\nctx hook notify setup      # get notified when hooks fire\n

            Or ask your agent: \"Are our hooks running?\"

            ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Tool Type Purpose ctx system <hook> CLI command Run a system hook manually ctx sysinfo CLI command Show system resource status ctx usage CLI command Stream or dump per-session token stats ctx hook notify setup CLI command Configure webhook for audit trail ctx hook notify test CLI command Verify webhook delivery .ctxrc notify.events Configuration Subscribe to relay for full hook audit .context/logs/ Log files Local hook execution ledger","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#what-are-system-hooks","level":2,"title":"What Are System Hooks?","text":"

            System hooks are plumbing commands that ctx registers with your AI tool (Claude Code, Cursor, etc.) via the plugin's hooks.json. They fire automatically at specific events during your AI session:

            Event When Hooks UserPromptSubmit Before the agent sees your prompt 10 check hooks + heartbeat PreToolUse Before the agent uses a tool block-non-path-ctx, qa-reminder PostToolUse After a tool call succeeds post-commit

            You never run these manually. Your AI tool runs them for you — that's the point.

            ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#the-complete-hook-catalog","level":2,"title":"The Complete Hook Catalog","text":"","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#prompt-time-checks-userpromptsubmit","level":3,"title":"Prompt-Time Checks (UserPromptSubmit)","text":"

            These fire before every prompt, but most are throttled to avoid noise.

            ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#check-context-size-context-capacity-warning","level":4,"title":"check-context-size: Context Capacity Warning","text":"

            What: Adaptive prompt counter. Silent for the first 15 prompts, then nudges with increasing frequency (every 5th, then every 3rd).

            Why: Long sessions lose coherence. The nudge reminds both you and the agent to persist context before the window fills up.

            Output: VERBATIM relay box with prompt count.

            ┌─ Context Checkpoint (prompt #20) ────────────────\n│ This session is getting deep. Consider wrapping up\n│ soon. If there are unsaved learnings, decisions, or\n│ conventions, now is a good time to persist them.\n│ ⏱ Context window: ~45k tokens (~22% of 200k)\n└──────────────────────────────────────────────────\n

            Usage: Every prompt records token usage to .context/state/stats-{session}.jsonl. Monitor live with ctx usage --follow or query with ctx usage --json. Usage is recorded even during wrap-up suppression (event: suppressed).

            Billing guard: When billing_token_warn is set in .ctxrc, a one-shot warning fires if session tokens exceed the threshold. This warning is independent of all other triggers — it fires even during wrap-up suppression.

            ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#check-persistence-context-staleness-nudge","level":4,"title":"check-persistence: Context Staleness Nudge","text":"

            What: Tracks when .context/*.md files were last modified. If too many prompts pass without a write, nudges the agent to persist.

            Why: Sessions produce insights that evaporate if not recorded. This catches the \"we talked about it but never wrote it down\" failure mode.

            Output: VERBATIM relay after 20+ prompts without a context file change.

            ┌─ Persistence Checkpoint (prompt #20) ───────────\n│ No context files updated in 20+ prompts.\n│ Have you discovered learnings, made decisions,\n│ established conventions, or completed tasks\n│ worth persisting?\n│\n│ Run /ctx-wrap-up to capture session context.\n└──────────────────────────────────────────────────\n
            ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#check-ceremonies-session-ritual-adoption","level":4,"title":"check-ceremonies: Session Ritual Adoption","text":"

            What: Scans your last 3 journal entries for /ctx-remember and /ctx-wrap-up usage. Nudges once per day if missing.

            Why: Session ceremonies are the highest-leverage habit in ctx. This hook bootstraps the habit until it becomes automatic.

            Output: Tailored nudge depending on which ceremony is missing.

            ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#check-journal-unimported-session-reminder","level":4,"title":"check-journal: Unimported Session Reminder","text":"

            What: Detects unimported Claude Code sessions and unenriched journal entries. Fires once per day.

            Why: Exported sessions become searchable history. Unenriched entries lack metadata for filtering. Both decay in value over time.

            Output: VERBATIM relay with counts and exact commands.

            ┌─ Journal Reminder ─────────────────────────────\n│ You have 3 new session(s) not yet exported.\n│ 5 existing entries need enrichment.\n│\n│ Export and enrich:\n│   ctx journal import --all\n│   /ctx-journal-enrich-all\n└────────────────────────────────────────────────\n
            ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#check-resources-system-resource-pressure","level":4,"title":"check-resources: System Resource Pressure","text":"

            What: Monitors memory, swap, disk, and CPU load. Only fires at DANGER severity (memory >= 90%, swap >= 75%, disk >= 95%, load >= 1.5x CPU count).

            Why: Resource exhaustion mid-session can corrupt work. This provides early warning to persist and exit.

            Output: VERBATIM relay listing critical resources.

            ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#check-knowledge-knowledge-file-growth","level":4,"title":"check-knowledge: Knowledge File Growth","text":"

            What: Counts entries in LEARNINGS.md, DECISIONS.md, and lines in CONVENTIONS.md. Fires once per day when thresholds are exceeded.

            Why: Large knowledge files dilute agent context. 35 learnings compete for attention; 15 focused ones get applied. Thresholds are configurable in .ctxrc.

            Default thresholds:

            # .ctxrc\nentry_count_learnings: 30\nentry_count_decisions: 20\nconvention_line_count: 200\n
            ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#check-version-binaryplugin-version-drift","level":4,"title":"check-version: Binary/Plugin Version Drift","text":"

            What: Compares the ctx binary version against the plugin version. Fires once per day. Also checks encryption key age for rotation nudge.

            Why: Version drift means hooks reference features the binary doesn't have. The key rotation nudge prevents indefinite key reuse.

            ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#check-reminders-pending-reminder-relay","level":4,"title":"check-reminders: Pending Reminder Relay","text":"

            What: Reads .context/reminders.json and surfaces any due reminders via VERBATIM relay. No throttle: fires every session until dismissed.

            Why: Reminders are sticky notes to future-you. Unlike nudges (which throttle to once per day), reminders repeat deliberately until the user dismisses them.

            Output: VERBATIM relay box listing due reminders.

            ┌─ Reminders ──────────────────────────────────────\n│  [1] refactor the swagger definitions\n│\n│ Dismiss: ctx remind dismiss <id>\n│ Dismiss all: ctx remind dismiss --all\n└──────────────────────────────────────────────────\n
            ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#check-freshness-technology-constant-staleness","level":4,"title":"check-freshness: Technology Constant Staleness","text":"

            What: Stats files listed in .ctxrc freshness_files and warns if any haven't been modified in over 6 months. Daily throttle. Silent when no files are configured (opt-in via .ctxrc).

            Why: Model capabilities evolve — token budgets, attention limits, and context window sizes that were accurate 6 months ago may no longer reflect best practices. This hook reminds you to review and touch the file to confirm values are still current.

            Config (.ctxrc):

            freshness_files:\n  - path: config/thresholds.yaml\n    desc: Model token limits and batch sizes\n    review_url: https://docs.example.com/limits  # optional\n

            Each entry has a path (relative to project root), desc (what constants live there), and optional review_url (where to check current values). When review_url is set, the nudge includes \"Review against: {url}\". When absent, just \"Touch the file to mark it as reviewed.\"

            Output: VERBATIM relay listing stale files, silent otherwise.

            ┌─ Technology Constants Stale ──────────────────────\n│   config/thresholds.yaml (210 days ago)\n│     - Model token limits and batch sizes\n│   Review against: https://docs.example.com/limits\n│ Touch each file to mark it as reviewed.\n└───────────────────────────────────────────────────\n
            ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#check-map-staleness-architecture-map-drift","level":4,"title":"check-map-staleness: Architecture Map Drift","text":"

            What: Checks whether map-tracking.json is older than 30 days and there are commits touching internal/ since the last map refresh. Daily throttle prevents repeated nudges.

            Why: Architecture documentation drifts silently as code evolves. This hook detects structural changes that the map hasn't caught up with and suggests running /ctx-architecture to refresh.

            Output: VERBATIM relay when stale and modules changed, silent otherwise.

            ┌─ Architecture Map Stale ────────────────────────────\n│ ARCHITECTURE.md hasn't been refreshed since 2026-01-15\n│ and there are commits touching 12 modules.\n│ /ctx-architecture keeps architecture docs drift-free.\n│\n│ Want me to run /ctx-architecture to refresh?\n└─────────────────────────────────────────────────────\n
            ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#heartbeat-session-heartbeat-webhook","level":4,"title":"heartbeat: Session Heartbeat Webhook","text":"

            What: Fires on every prompt. Sends a webhook notification with prompt count, session ID, context modification status, and token usage telemetry. Never produces stdout.

            Why: Other hooks only send webhooks when they \"speak\" (nudge/relay). When silent, you have no visibility into session activity. The heartbeat provides a continuous session-alive signal with token consumption data for observability dashboards or liveness monitoring.

            Output: None (webhook + event log only).

            Payload:

            {\n  \"event\": \"heartbeat\",\n  \"message\": \"heartbeat: prompt #7 (context_modified=false tokens=158k pct=79%)\",\n  \"detail\": {\n    \"hook\": \"heartbeat\",\n    \"variant\": \"pulse\",\n    \"variables\": {\n      \"prompt_count\": 7,\n      \"session_id\": \"abc...\",\n      \"context_modified\": false,\n      \"tokens\": 158000,\n      \"context_window\": 200000,\n      \"usage_pct\": 79\n    }\n  }\n}\n

            Token fields (tokens, context_window, usage_pct) are included when usage data is available from the session JSONL file.

            ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#tool-time-hooks-pretooluse-posttooluse","level":3,"title":"Tool-Time Hooks (PreToolUse / PostToolUse)","text":"","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#block-non-path-ctx-path-enforcement-hard-gate","level":4,"title":"block-non-path-ctx: PATH Enforcement (Hard Gate)","text":"

            What: Blocks any Bash command that invokes ./ctx, ./dist/ctx, go run ./cmd/ctx, or an absolute path to ctx. Only PATH invocations are allowed.

            Why: Enforces CONSTITUTION.md's invocation invariant. Running a dev-built binary in production context causes version confusion and silent behavior drift.

            Output: Block response (prevents the tool call):

            {\"decision\": \"block\", \"reason\": \"Use 'ctx' from PATH, not './ctx'...\"}\n
            ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#qa-reminder-pre-commit-qa-gate","level":4,"title":"qa-reminder: Pre-Commit QA Gate","text":"

            What: Fires on every Edit tool use. Reminds the agent to lint and test the entire project before committing.

            Why: Agents tend to \"I'll test later\" and then commit untested code. Repetition is intentional: the hook reinforces the habit on every edit, not just before commits.

            Output: Agent directive with hard QA gate instructions.

            ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#post-commit-context-capture-after-commit","level":4,"title":"post-commit: Context Capture After Commit","text":"

            What: Fires after any git commit (excludes --amend). Prompts the agent to offer context capture (decision? learning?) and suggest running lints/tests before pushing.

            Why: Commits are natural reflection points. The nudge converts mechanical git operations into context-capturing opportunities.

            ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#auditing-hooks-via-the-local-event-log","level":2,"title":"Auditing Hooks via the Local Event Log","text":"

            If you don't need an external audit trail, enable the local event log for a self-contained record of hook activity:

            # .ctxrc\nevent_log: true\n

            Once enabled, every hook that fires writes an entry to .context/state/events.jsonl. Query it with ctx hook event:

            ctx hook event                    # last 50 events\nctx hook event --hook qa-reminder # filter by hook\nctx hook event --session <id>     # filter by session\nctx hook event --json | jq '.'    # raw JSONL for processing\n

            The event log is local, queryable, and doesn't require any external service. For a full diagnostic workflow combining event logs with structural health checks, see Troubleshooting.

            ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#auditing-hooks-via-webhooks","level":2,"title":"Auditing Hooks via Webhooks","text":"

            The most powerful audit setup pipes all hook output to a webhook, giving you a real-time external record of what your agent is being told.

            ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#step-1-set-up-the-webhook","level":3,"title":"Step 1: Set Up the Webhook","text":"
            ctx hook notify setup\n# Enter your webhook URL (Slack, Discord, ntfy.sh, IFTTT, etc.)\n

            See Webhook Notifications for service-specific setup.

            ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#step-2-subscribe-to-relay-events","level":3,"title":"Step 2: Subscribe to relay Events","text":"
            # .ctxrc\nnotify:\n  events:\n    - relay   # all hook output: VERBATIM relays, directives, blocks\n    - nudge   # just the user-facing VERBATIM relays\n

            The relay event fires for every hook that produces output. This includes:

            Hook Event sent check-context-size relay + nudge check-persistence relay + nudge check-ceremonies relay + nudge check-journal relay + nudge check-resources relay + nudge check-knowledge relay + nudge check-version relay + nudge check-reminders relay + nudge check-freshness relay + nudge check-map-staleness relay + nudge heartbeat heartbeat only block-non-path-ctx relay only post-commit relay only qa-reminder relay only","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#step-3-cross-reference","level":3,"title":"Step 3: Cross-Reference","text":"

            With relay enabled, your webhook receives a JSON payload every time a hook fires:

            {\n  \"event\": \"relay\",\n  \"message\": \"check-persistence: No context updated in 20+ prompts\",\n  \"session_id\": \"b854bd9c\",\n  \"timestamp\": \"2026-02-22T14:30:00Z\",\n  \"project\": \"my-project\"\n}\n

            This creates an external audit trail independent of the agent. You can now cross-verify: did the agent actually relay the checkpoint the hook told it to relay?

            ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#verifying-hooks-actually-fire","level":2,"title":"Verifying Hooks Actually Fire","text":"

            Hooks are invisible. An invisible thing that breaks is indistinguishable from an invisible thing that never existed. Three verification methods, from simplest to most robust:

            ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#method-1-ask-the-agent","level":3,"title":"Method 1: Ask the Agent","text":"

            The simplest check. After a few prompts into a session:

            \"Did you receive any hook output this session? Print the last\ncontext checkpoint or persistence nudge you saw.\"\n

            The agent should be able to recall recent hook output from its context window. If it says \"I haven't received any hook output\", either:

            • The hooks aren't firing (check installation);
            • The session is too short (hooks throttle early);
            • The hooks fired but the agent absorbed them silently.

            Limitation: You are trusting the agent to report accurately. Agents sometimes confabulate or miss context. Use this as a quick smoke test, not definitive proof.

            ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#method-2-check-the-webhook-trail","level":3,"title":"Method 2: Check the Webhook Trail","text":"

            If you have relay events enabled, check your webhook receiver. Every hook that fires sends a timestamped notification. No notification = no fire.

            This is the ground truth. The webhook is called directly by the ctx binary, not by the agent. The agent cannot fake, suppress, or modify webhook deliveries.

            Compare what the webhook received against what the agent claims to have relayed. Discrepancies mean the agent is absorbing nudges instead of surfacing them.

            ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#method-3-read-the-local-logs","level":3,"title":"Method 3: Read the Local Logs","text":"

            Hooks that support logging write to .context/logs/:

            # Check context-size hook activity\ncat .context/logs/check-context-size.log\n\n# Sample output:\n# [2026-02-22 09:15:00] [session:b854bd9c] prompt#1 silent\n# [2026-02-22 09:17:33] [session:b854bd9c] prompt#16 CHECKPOINT\n# [2026-02-22 09:20:01] [session:b854bd9c] prompt#20 CHECKPOINT\n
            # Check persistence nudge activity\ncat .context/logs/check-persistence.log\n\n# Sample output:\n# [2026-02-22 09:15:00] [session:b854bd9c] init count=1 mtime=1770646611\n# [2026-02-22 09:20:01] [session:b854bd9c] prompt#20 NUDGE since_nudge=20\n

            Logs are append-only and written by the ctx binary, not the agent.

            ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#detecting-silent-hook-failures","level":2,"title":"Detecting Silent Hook Failures","text":"

            The hardest failure mode: hooks that stop firing without error. The plugin config changes, a binary update drops a hook, or a PATH issue silently breaks execution. Nothing errors — the hook just never runs.

            ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#the-staleness-signal","level":3,"title":"The Staleness Signal","text":"

            If .context/logs/check-context-size.log has no entries newer than 5 days but you've been running sessions daily, something is wrong. The absence of evidence is evidence of absence — but only if you control for inactivity.

            ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#false-positive-protection","level":3,"title":"False Positive Protection","text":"

            A naive \"hooks haven't fired in N days\" alert fires incorrectly when you simply haven't used ctx. The correct check needs two inputs:

            1. Last hook fire time: from .context/logs/ or webhook history
            2. Last session activity: from journal entries or ctx journal source

            If sessions are happening but hooks aren't firing, that's a real problem. If neither sessions nor hooks are happening, that's a vacation.

            ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#what-to-check","level":3,"title":"What to Check","text":"

            When you suspect hooks aren't firing:

            # 1. Verify the plugin is installed\nls ~/.claude/plugins/\n\n# 2. Check hook registration\ncat ~/.claude/plugins/ctx/hooks.json | head -20\n\n# 3. Run a hook manually to see if it errors\necho '{\"session_id\":\"test\"}' | ctx system check-context-size\n\n# 4. Check for PATH issues\nwhich ctx\nctx --version\n
            ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#tips","level":2,"title":"Tips","text":"
            • Start with nudge, graduate to relay: The nudge event covers user-facing VERBATIM relays. Add relay when you want full visibility into agent directives and hard gates.
            • Webhooks are your trust anchor: The agent can ignore a nudge, but it can't suppress the webhook. If the webhook fired and the agent didn't relay, you have proof of a compliance gap.
            • Hooks are throttled by design: Most check hooks fire once per day or use adaptive frequency. Don't expect a notification every prompt: Silence usually means the throttle is working, not that the hook is broken.
            • Daily markers live in .context/state/: Throttle files are stored in .context/state/ alongside other project-scoped state. If you need to force a hook to re-fire during testing, delete the corresponding marker file.
            • The QA reminder is intentionally noisy: Unlike other hooks, qa-reminder fires on every Edit call with no throttle. This is deliberate: The commit quality degrades when the reminder fades from salience.
            • Log files are safe to commit: .context/logs/ contains only timestamps, session IDs, and status keywords. No secrets, no code.
            ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#next-up","level":2,"title":"Next Up","text":"

            Detecting and Fixing Drift →: Keep context files accurate as your codebase evolves.

            ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/system-hooks-audit/#see-also","level":2,"title":"See Also","text":"
            • Troubleshooting: full diagnostic workflow using ctx doctor, event logs, and /ctx-doctor
            • Customizing Hook Messages: override what hooks say without changing what they do
            • Webhook Notifications: setting up and configuring the webhook system
            • Hook Output Patterns: understanding VERBATIM relays, agent directives, and hard gates
            • Detecting and Fixing Drift: structural checks that complement runtime hook auditing
            • CLI Reference: full ctx system command reference
            ","path":["Recipes","Hooks and Notifications","Auditing System Hooks"],"tags":[]},{"location":"recipes/task-management/","level":1,"title":"Tracking Work Across Sessions","text":"","path":["Recipes","Knowledge and Tasks","Tracking Work Across Sessions"],"tags":[]},{"location":"recipes/task-management/#the-problem","level":2,"title":"The Problem","text":"

            You have work that spans multiple sessions. Tasks get added during one session, partially finished in another, and completed days later.

            Without a system, follow-up items fall through the cracks, priorities drift, and you lose track of what was done versus what still needs doing. TASKS.md grows cluttered with completed checkboxes that obscure the remaining work.

            How do you manage work items that span multiple sessions without losing context?

            Prefer Skills over Raw Commands

            When working with an AI agent, use /ctx-task-add instead of raw ctx add task. The agent automatically picks up session ID, branch, and commit hash from its context, so no manual flags are needed.

            ","path":["Recipes","Knowledge and Tasks","Tracking Work Across Sessions"],"tags":[]},{"location":"recipes/task-management/#tldr","level":2,"title":"TL;DR","text":"

            Manage Tasks:

            ctx add task \"Fix race condition\" --priority high \\\n  --session-id abc12345 --branch main --commit 68fbc00a  # add\nctx add task \"Write tests\" --section \"Phase 2\" \\\n  --session-id abc12345 --branch main --commit 68fbc00a  # add to phase\nctx task complete \"race condition\"                      # mark done\nctx task snapshot \"before-refactor\"               # backup\nctx task archive                                  # clean up\n

            Pick Up the Next Task:

            /ctx-next # pick what's next\n

            Read on for the full workflow and conversational patterns.

            ","path":["Recipes","Knowledge and Tasks","Tracking Work Across Sessions"],"tags":[]},{"location":"recipes/task-management/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Tool Type Purpose ctx add task Command Add a new task to TASKS.md ctx task complete Command Mark a task as done by number or text ctx task snapshot Command Create a point-in-time backup of TASKS.md ctx task archive Command Move completed tasks to archive file /ctx-task-add Skill AI-assisted task creation with validation /ctx-archive Skill AI-guided archival with safety checks /ctx-next Skill Pick what to work on based on priorities","path":["Recipes","Knowledge and Tasks","Tracking Work Across Sessions"],"tags":[]},{"location":"recipes/task-management/#the-workflow","level":2,"title":"The Workflow","text":"","path":["Recipes","Knowledge and Tasks","Tracking Work Across Sessions"],"tags":[]},{"location":"recipes/task-management/#step-1-add-tasks-with-priorities","level":3,"title":"Step 1: Add Tasks with Priorities","text":"

            Every piece of follow-up work gets a task. Use ctx add task from the terminal or /ctx-task-add from your AI assistant. Tasks should start with a verb and be specific enough that someone unfamiliar with the session could act on them.

            # High-priority bug found during code review\nctx add task \"Fix race condition in session cooldown\" --priority high \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n\n# Medium-priority feature work\nctx add task \"Add --format json flag to ctx status for CI integration\" --priority medium \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n\n# Low-priority cleanup\nctx add task \"Remove deprecated --raw flag from ctx load\" --priority low \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n

            The /ctx-task-add skill validates your task before recording it. It checks that the description is actionable, not a duplicate, and specific enough for someone else to pick up.

            If you say \"fix the bug,\" it will ask you to clarify which bug and where.

            Tasks Are Often Created Proactively

            In practice, many tasks are created proactively by the agent rather than by explicit CLI commands.

            After completing a feature, the agent will often identify follow-up work: tests, docs, edge cases, error handling, and offer to add them as tasks.

            You do not need to dictate ctx add task commands; the agent picks up on work context and suggests tasks naturally.

            ","path":["Recipes","Knowledge and Tasks","Tracking Work Across Sessions"],"tags":[]},{"location":"recipes/task-management/#step-2-organize-with-phase-sections","level":3,"title":"Step 2: Organize with Phase Sections","text":"

            Tasks live in phase sections inside TASKS.md.

            Phases provide logical groupings that preserve order and enable replay.

            A task does not move between sections. It stays in its phase permanently, and status is tracked via checkboxes and inline tags.

            ## Phase 1: Core CLI\n\n- [x] Implement ctx add command\n- [x] Implement ctx task complete command\n- [ ] Add --section flag to ctx add task `#priority:medium`\n\n## Phase 2: AI Integration\n\n- [ ] Implement ctx agent cooldown `#priority:high` `#in-progress`\n- [ ] Add ctx watch XML parsing `#priority:medium`\n  - Blocked by: Need to finalize agent output format\n\n## Backlog\n\n- [ ] Performance optimization for large TASKS.md files `#priority:low`\n- [ ] Add metrics dashboard to ctx status `#priority:deferred`\n

            Use --section when adding a task to a specific phase:

            ctx add task \"Add ctx watch XML parsing\" --priority medium --section \\\n    \"Phase 2: AI Integration\" \\\n    --session-id abc12345 --branch main --commit 68fbc00a\n

            Without --section, the task is inserted before the first unchecked task in TASKS.md.

            ","path":["Recipes","Knowledge and Tasks","Tracking Work Across Sessions"],"tags":[]},{"location":"recipes/task-management/#step-3-pick-what-to-work-on","level":3,"title":"Step 3: Pick What to Work On","text":"

            At the start of a session, or after finishing a task, use /ctx-next to get prioritized recommendations.

            The skill reads TASKS.md, checks recent sessions, and ranks candidates using explicit priority, blocking status, in-progress state, momentum from recent work, and phase order.

            You can also ask naturally: \"what should we work on?\" or \"what's the highest priority right now?\"

            /ctx-next\n

            The output looks like this:

            **1. Implement ctx agent cooldown** `#priority:high`\n\n    Still in-progress from yesterday's session. The tombstone file approach is\n    half-built. Finishing is cheaper than context-switching.\n\n**2. Add --section flag to ctx add task** `#priority:medium`\n\n    Last Phase 1 item. Quick win that unblocks organized task entry.\n\n---\n\n*Based on 8 pending tasks across 3 phases.\n\nLast session: agent-cooldown (2026-02-06).*\n

            In-progress tasks almost always come first:

            Finishing existing work takes priority over starting new work.

            ","path":["Recipes","Knowledge and Tasks","Tracking Work Across Sessions"],"tags":[]},{"location":"recipes/task-management/#step-4-complete-tasks","level":3,"title":"Step 4: Complete Tasks","text":"

            When a task is done, mark it complete by number or partial text match:

            # By task number (as shown in TASKS.md)\nctx task complete 3\n\n# By partial text match\nctx task complete \"agent cooldown\"\n

            The task's checkbox changes from [ ] to [x]. Tasks are never deleted: they stay in their phase section so history is preserved.

            Be Conversational

            You rarely need to run ctx task complete yourself during an interactive session.

            When you say something like \"the rate limiter is done\" or \"we finished that,\" the agent marks the task complete and moves on to suggesting what is next.

            The CLI commands are most useful for manual housekeeping, scripted workflows, or when you want precision.

            ","path":["Recipes","Knowledge and Tasks","Tracking Work Across Sessions"],"tags":[]},{"location":"recipes/task-management/#step-5-snapshot-before-risky-changes","level":3,"title":"Step 5: Snapshot Before Risky Changes","text":"

            Before a major refactor or any change that might break things, snapshot your current task state. This creates a copy of TASKS.md in .context/archive/ without modifying the original.

            # Default snapshot\nctx task snapshot\n\n# Named snapshot (recommended before big changes)\nctx task snapshot \"before-refactor\"\n

            This creates a file like .context/archive/tasks-before-refactor-2026-02-08-1430.md. If the refactor goes sideways, and you need to confirm what the task state looked like before you started, the snapshot is there.

            Snapshots are cheap: Take them before any change you might want to undo or review later.

            ","path":["Recipes","Knowledge and Tasks","Tracking Work Across Sessions"],"tags":[]},{"location":"recipes/task-management/#step-6-archive-when-tasksmd-gets-cluttered","level":3,"title":"Step 6: Archive When TASKS.md Gets Cluttered","text":"

            After several sessions, TASKS.md accumulates completed tasks that make it hard to see what is still pending.

            Use ctx task archive to move all [x] items to a timestamped archive file.

            Start with a dry run to preview what will be moved:

            ctx task archive --dry-run\n

            Then archive:

            ctx task archive\n

            Completed tasks move to .context/archive/tasks-2026-02-08.md. Phase headers are preserved in the archive for traceability. Pending tasks ([ ]) remain in TASKS.md.

            The /ctx-archive skill adds two safety checks before archiving: it verifies that completed tasks are genuinely cluttering the view and that nothing was marked [x] prematurely.

            ","path":["Recipes","Knowledge and Tasks","Tracking Work Across Sessions"],"tags":[]},{"location":"recipes/task-management/#letting-the-agent-drive","level":2,"title":"Letting the Agent Drive","text":"

            The steps above show the CLI commands for task management.

            In practice, most task management happens conversationally:

            An agent that has loaded the context files does not need you to type ctx add task. It tracks work naturally and offers the right operations.

            ","path":["Recipes","Knowledge and Tasks","Tracking Work Across Sessions"],"tags":[]},{"location":"recipes/task-management/#use-natural-language-instead-of-commands","level":3,"title":"Use Natural Language Instead of Commands","text":"

            These conversational prompts replace explicit commands during interactive sessions:

            Instead of typing... Just say... ctx add task \"Write tests for X\" --session-id ID --branch BR --commit HASH \"We should add tests for this: track that?\" /ctx-next \"What should we work on?\" ctx task complete \"rate limiting\" \"The rate limiter is done, what's next?\" ctx task archive \"TASKS.md is getting long, can you clean it up?\" ctx add task ... --session-id ID --branch BR --commit HASH && ctx add task ... \"Add follow-ups for what we just built.\"

            The agent translates these into the right ctx operations behind the scenes.

            ","path":["Recipes","Knowledge and Tasks","Tracking Work Across Sessions"],"tags":[]},{"location":"recipes/task-management/#the-agent-creates-tasks-proactively","level":3,"title":"The Agent Creates Tasks Proactively","text":"

            After completing a piece of work, the agent does not wait for you to dictate follow-up items. It identifies them from the work context and offers:

            **Agent**: \"I've finished implementing the rate limiter middleware. I noticed\nthree follow-up items:\n\n1. Write integration tests for the rate limiter (the unit tests pass but\n   there's no end-to-end coverage)\n2. Add rate limit headers to API responses (X-RateLimit-Remaining, etc.)\n3. Document the rate limit configuration in the ops runbook\n\nWant me to add these as tasks?\"\n

            This is how an agent behaves once it has read the Agent Playbook.

            The playbook's \"Work, Reflect, Persist\" cycle means the agent asks itself after every milestone: Did I just complete a task? Did new work emerge?

            When the answer is yes, it acts.

            ","path":["Recipes","Knowledge and Tasks","Tracking Work Across Sessions"],"tags":[]},{"location":"recipes/task-management/#asking-the-agent-what-to-capture","level":3,"title":"Asking the Agent What to Capture","text":"

            You don't always know what's worth persisting.

            Instead of reviewing the session yourself and typing specific commands, ask the agent to do the thinking:

            You say What happens \"What tasks came out of this session?\" Agent reviews the work, identifies follow-ups, and offers to add them \"Are there any tasks, learnings, or decisions worth adding?\" Agent scans the full session for all three types and proposes a batch \"What should we remember from today?\" Agent surfaces everything persist-worthy: not just tasks \"Anything we should track before we move on?\" Agent checks for loose ends specific to the work just completed

            This is especially powerful after long or exploratory sessions where multiple threads were touched.

            The agent has the full conversation in context and can spot follow-ups you might miss:

            You:   \"Are there any tasks, learnings, or decisions worth adding\n        from this session?\"\n\nAgent: \"Yes. Here's what I'd suggest:\n\n  Tasks:\n  - Add retry logic to the webhook delivery (we discussed it when\n    the timeout test failed but never tracked it)\n  - Update the API docs to reflect the new rate limit headers\n\n  Learnings:\n  - net/http's default client has no timeout: we burned 15 minutes\n    on that before adding an explicit 30s timeout\n\n  Decisions:\n  - We chose token-bucket over sliding-window for rate limiting\n    because it handles bursts better and the library is simpler\n\n  Want me to add all of these?\"\n\nYou:   \"All of them.\"\n

            The agent fills in structured fields from what it observed during the session. You get the same quality entries as if you typed verbose commands, without needing to remember what happened or how to format it.

            ","path":["Recipes","Knowledge and Tasks","Tracking Work Across Sessions"],"tags":[]},{"location":"recipes/task-management/#the-agent-completes-tasks-and-moves-on","level":3,"title":"The Agent Completes Tasks and Moves On","text":"

            When you confirm that work is finished, or when the agent can see that it is, it marks the task done and suggests the next step:

            You: \n\"The cooldown logic is working now.\"\n\nAgent: \n\"Marked 'Implement ctx agent cooldown' as done. Your highest-\npriority remaining task is 'Add --section flag to ctx add task'.\n\nIt's the last item in Phase 1 and a quick win. Want to start on that?\"\n

            The agent chains completion into next-task selection.

            One sentence from you triggers two operations.

            ","path":["Recipes","Knowledge and Tasks","Tracking Work Across Sessions"],"tags":[]},{"location":"recipes/task-management/#end-of-session-capture","level":3,"title":"End-of-Session Capture","text":"

            At the end of a session, the agent reviews what happened and catches loose ends:

            You: \n\"I think we're good for today.\"\n\nAgent: \n\"Before we wrap up: during the middleware work you mentioned\nwanting to add metrics for rate limit hits. That never made it into TASKS.md.\n\nWant me to add it as a task so it's there next session?\"\n

            This is the proactive persistence model at work.

            The agent monitors the conversation for mentioned-but-not-tracked work and surfaces it before context is lost.

            ","path":["Recipes","Knowledge and Tasks","Tracking Work Across Sessions"],"tags":[]},{"location":"recipes/task-management/#a-conversational-session-example","level":3,"title":"A Conversational Session Example","text":"

            Here is what task management looks like in a real session when the agent is driving:

            You:   What's left to do?\nAgent: You have 5 pending tasks across 2 phases. The highest priority is\n       \"Implement ctx agent cooldown\": it's marked in-progress from\n       yesterday and half-built. Want to pick that up?\n\nYou:   Yeah, let's finish it.\n       [... 20 minutes of implementation work ...]\n\nYou:   That's working. Tests pass.\nAgent: Nice. I'll mark \"Implement ctx agent cooldown\" as done.\n       I noticed two things from the implementation:\n       1. The tombstone file cleanup should happen on a timer, not just\n          at startup. Want me to add that as a task?\n       2. We learned that file locks don't work reliably on NFS. I'll\n          add that to LEARNINGS.md.\n\nYou:   Yes to both. What's next?\nAgent: Added the cleanup task as high priority. Next up is \"Add --section\n       flag to ctx add task\": last item in Phase 1. It should be\n       straightforward since the flag parsing pattern already exists\n       in the codebase. Ready?\n

            It's All Conversational

            Notice what did not happen: The user never typed a ctx command.

            The agent handled task completion, follow-up creation, learning capture, and next-task selection: all from natural conversation.

            ","path":["Recipes","Knowledge and Tasks","Tracking Work Across Sessions"],"tags":[]},{"location":"recipes/task-management/#putting-it-all-together","level":2,"title":"Putting It All Together","text":"
            # Add a task\nctx add task \"Implement rate limiting for API endpoints\" --priority high \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n\n# Add to a specific phase\nctx add task \"Write integration tests for rate limiter\" --section \"Phase 2\" \\\n  --session-id abc12345 --branch main --commit 68fbc00a\n\n# See what to work on\n# (from AI assistant) /ctx-next\n\n# Mark done by text\nctx task complete \"rate limiting\"\n\n# Mark done by number\nctx task complete 5\n\n# Snapshot before a risky refactor\nctx task snapshot \"before-middleware-rewrite\"\n\n# Archive completed tasks when the list gets long\nctx task archive --dry-run     # preview first\nctx task archive               # then archive\n
            ","path":["Recipes","Knowledge and Tasks","Tracking Work Across Sessions"],"tags":[]},{"location":"recipes/task-management/#tips","level":2,"title":"Tips","text":"
            • Start tasks with a verb: \"Add,\" \"Fix,\" \"Implement,\" \"Investigate\": not just a topic like \"Authentication.\"
            • Include the why in the task description. Future sessions lack the context of why you added the task. \"Add rate limiting\" is worse than \"Add rate limiting to prevent abuse on the public API after the load test showed 10x traffic spikes.\"
            • Use #in-progress sparingly. Only one or two tasks should carry this tag at a time. If everything is in-progress, nothing is.
            • Snapshot before, not after. The point of a snapshot is to capture the state before a change, not to celebrate what you just finished.
            • Archive regularly. Once completed tasks outnumber pending ones, it is time to archive. A clean TASKS.md helps both you and your AI assistant focus.
            • Never delete tasks. Mark them [x] (completed) or [-] (skipped with a reason). Deletion breaks the audit trail.
            • Trust the agent's task instincts. When the agent suggests follow-up items after completing work, it is drawing on the full context of what just happened.
            • Conversational prompts beat commands in interactive sessions. Saying \"what should we work on?\" is faster and more natural than running /ctx-next. Save explicit commands for scripts, CI, and unattended runs.
            • Let the agent chain operations. A single statement like \"that's done, what's next?\" can trigger completion, follow-up identification, and next-task selection in one flow.
            • Review proactive task suggestions before moving on. The best follow-ups come from items spotted in-context right after the work completes.
            ","path":["Recipes","Knowledge and Tasks","Tracking Work Across Sessions"],"tags":[]},{"location":"recipes/task-management/#next-up","level":2,"title":"Next Up","text":"

            Using the Scratchpad →: Store short-lived sensitive notes in an encrypted scratchpad.

            ","path":["Recipes","Knowledge and Tasks","Tracking Work Across Sessions"],"tags":[]},{"location":"recipes/task-management/#see-also","level":2,"title":"See Also","text":"
            • The Complete Session: full session lifecycle including task management in context
            • Persisting Decisions, Learnings, and Conventions: capturing the \"why\" behind your work
            • Detecting and Fixing Drift: keeping TASKS.md accurate over time
            • CLI Reference: full documentation for ctx add, ctx task complete, ctx task
            • Context Files: TASKS.md: format and conventions for TASKS.md
            ","path":["Recipes","Knowledge and Tasks","Tracking Work Across Sessions"],"tags":[]},{"location":"recipes/triggers/","level":1,"title":"Authoring Lifecycle Triggers","text":"","path":["Recipes","Agents and Automation","Authoring Lifecycle Triggers"],"tags":[]},{"location":"recipes/triggers/#authoring-lifecycle-triggers","level":1,"title":"Authoring Lifecycle Triggers","text":"

            Triggers are executable shell scripts that fire at specific events during an AI session. They're how you express \"when the AI saves a file, also do X\" or \"before the AI edits this path, check Y first.\" This recipe walks through writing your first trigger, testing it, and enabling it safely.

            Triggers Execute Arbitrary Code

            A trigger is a shell script with the executable bit set. It runs with the same privileges as your AI tool and receives JSON input on stdin. Treat triggers like pre-commit hooks:

            • Only enable scripts you have read and understand.
            • Never enable a trigger you downloaded from the internet without reviewing every line.
            • Avoid shelling out to user-controlled values (jq -r output, path field, tool field) without quoting.
            • A malicious or buggy trigger can block tool calls, corrupt context files, or exfiltrate data.

            The generated trigger template starts disabled (no executable bit) so you cannot accidentally run an unreviewed script. Enable it explicitly with ctx trigger enable.

            ","path":["Recipes","Agents and Automation","Authoring Lifecycle Triggers"],"tags":[]},{"location":"recipes/triggers/#scenario","level":2,"title":"Scenario","text":"

            You want a pre-tool-use trigger that blocks the AI from editing anything in internal/crypto/ without explicit confirmation. Cryptographic code is sensitive, and accidental edits have caused outages before, and you want a hard gate.

            ","path":["Recipes","Agents and Automation","Authoring Lifecycle Triggers"],"tags":[]},{"location":"recipes/triggers/#step-1-scaffold-the-script","level":2,"title":"Step 1: Scaffold the Script","text":"
            ctx trigger add pre-tool-use protect-crypto\n

            That creates .context/hooks/pre-tool-use/protect-crypto.sh with a template:

            #!/usr/bin/env bash\nset -euo pipefail\n\n# Read the JSON event from stdin.\npayload=$(cat)\n\n# Parse fields with jq.\ntool=$(echo \"$payload\" | jq -r '.tool // empty')\npath=$(echo \"$payload\" | jq -r '.path // empty')\n\n# Your logic here.\n\n# Return a JSON result. action can be \"allow\", \"block\", or absent.\necho '{\"action\": \"allow\"}'\n

            Note: the directory is .context/hooks/pre-tool-use/; the on-disk layout still uses hooks/ even though the command is ctx trigger. If you ls .context/hooks/, that's where your triggers live.

            ","path":["Recipes","Agents and Automation","Authoring Lifecycle Triggers"],"tags":[]},{"location":"recipes/triggers/#step-2-write-the-logic","level":2,"title":"Step 2: Write the Logic","text":"

            Open the file and replace the template body:

            #!/usr/bin/env bash\nset -euo pipefail\n\npayload=$(cat)\ntool=$(echo \"$payload\" | jq -r '.tool // empty')\npath=$(echo \"$payload\" | jq -r '.path // empty')\n\n# Only gate write-family tools.\ncase \"$tool\" in\n  write_file|edit_file|apply_patch) ;;\n  *)\n    echo '{\"action\": \"allow\"}'\n    exit 0\n    ;;\nesac\n\n# Block any path under internal/crypto/.\ncase \"$path\" in\n  internal/crypto/*|*/internal/crypto/*)\n    jq -n --arg p \"$path\" '{\n      action: \"block\",\n      message: (\"Edits to \" + $p + \" require manual review. \" +\n                \"See CONVENTIONS.md for the crypto-change process.\")\n    }'\n    exit 0\n    ;;\nesac\n\necho '{\"action\": \"allow\"}'\n

            A few things to note:

            • set -euo pipefail: any unhandled error aborts the script. Critical for a security-relevant trigger.
            • Quote everything from jq: the path field comes from the AI tool; treat it as untrusted input.
            • Explicit allow case: the default is allow. An empty or missing response is a risky default.
            • Use jq -n --arg for output construction, as it is safer than string concatenation when the message may contain special characters.
            ","path":["Recipes","Agents and Automation","Authoring Lifecycle Triggers"],"tags":[]},{"location":"recipes/triggers/#step-3-test-with-a-mock-payload","level":2,"title":"Step 3: Test with a Mock Payload","text":"

            Before enabling the trigger, test it with a realistic mock input using ctx trigger test. This runs the script against a synthetic JSON payload without actually firing any AI tool.

            # Test the \"should block\" case\nctx trigger test pre-tool-use --tool write_file --path internal/crypto/aes.go\n

            Expected: the trigger returns {\"action\":\"block\", \"message\": \"...\"}.

            # Test the \"should allow\" case\nctx trigger test pre-tool-use --tool write_file --path internal/memory/mirror.go\n

            Expected: the trigger returns {\"action\":\"allow\"}.

            # Test that non-write tools pass through\nctx trigger test pre-tool-use --tool read_file --path internal/crypto/aes.go\n

            Expected: {\"action\":\"allow\"} because the case statement only gates write-family tools.

            If any of these cases misbehave, fix the trigger before enabling it. The trigger is disabled at this point, so misbehavior doesn't affect real AI sessions.

            ","path":["Recipes","Agents and Automation","Authoring Lifecycle Triggers"],"tags":[]},{"location":"recipes/triggers/#step-4-enable-it","level":2,"title":"Step 4: Enable It","text":"

            Once the test cases pass, enable the trigger:

            ctx trigger enable protect-crypto\n

            That sets the executable bit. Next time the AI starts a pre-tool-use event, the trigger will fire.

            Verify it's enabled:

            ctx trigger list\n

            Should show protect-crypto under pre-tool-use with an enabled indicator.

            ","path":["Recipes","Agents and Automation","Authoring Lifecycle Triggers"],"tags":[]},{"location":"recipes/triggers/#step-5-iterate-safely","level":2,"title":"Step 5: Iterate Safely","text":"

            If you discover a bug after enabling, disable first, fix second:

            ctx trigger disable protect-crypto\n# ...edit the script...\nctx trigger test pre-tool-use --tool write_file --path internal/crypto/aes.go\nctx trigger enable protect-crypto\n

            Disabling simply clears the executable bit; the script stays on disk, and ctx trigger enable re-enables it without rewriting anything.

            ","path":["Recipes","Agents and Automation","Authoring Lifecycle Triggers"],"tags":[]},{"location":"recipes/triggers/#patterns-worth-copying","level":2,"title":"Patterns Worth Copying","text":"","path":["Recipes","Agents and Automation","Authoring Lifecycle Triggers"],"tags":[]},{"location":"recipes/triggers/#logging-not-blocking","level":3,"title":"Logging, Not Blocking","text":"

            For auditing or analytics, return {\"action\":\"allow\"} always and append to a log as a side effect:

            #!/usr/bin/env bash\nset -euo pipefail\npayload=$(cat)\necho \"$payload\" >> .context/logs/tool-use.jsonl\necho '{\"action\":\"allow\"}'\n
            ","path":["Recipes","Agents and Automation","Authoring Lifecycle Triggers"],"tags":[]},{"location":"recipes/triggers/#context-injection-at-session-start","level":3,"title":"Context Injection at Session Start","text":"

            A session-start trigger can prepend text to the agent's initial prompt by emitting {\"action\":\"inject\", \"content\": \"...\"} . This is useful for injecting daily standup notes, open PRs, or rotating TODOs without storing them in a steering file.

            ","path":["Recipes","Agents and Automation","Authoring Lifecycle Triggers"],"tags":[]},{"location":"recipes/triggers/#chaining-triggers-of-the-same-type","level":3,"title":"Chaining Triggers of the Same Type","text":"

            Multiple scripts in the same type directory all run. If any returns action: block, the block wins. Keep individual triggers single-purpose and rely on composition.

            ","path":["Recipes","Agents and Automation","Authoring Lifecycle Triggers"],"tags":[]},{"location":"recipes/triggers/#common-mistakes","level":2,"title":"Common Mistakes","text":"

            Forgetting the shebang. Without #!/usr/bin/env bash, the trigger won't execute even with the executable bit set.

            Not quoting $path. If you use $path in a command substitution or a case glob without quoting, a file name with spaces or metacharacters will break the trigger in surprising ways.

            Enabling before testing. ctx trigger enable makes the script live immediately. Always ctx trigger test first.

            Outputting non-JSON. The trigger's stdout must be valid JSON or ctx's trigger runner will log a parse error. Use jq -n to construct output rather than hand-writing JSON strings.

            Mixing hook and trigger vocabulary. The command is ctx trigger but the on-disk directory is .context/hooks/. The feature was renamed; the directory name lags behind. Don't let this confuse you; they refer to the same thing.

            ","path":["Recipes","Agents and Automation","Authoring Lifecycle Triggers"],"tags":[]},{"location":"recipes/triggers/#see-also","level":2,"title":"See Also","text":"
            • ctx trigger reference: full command, flag, and event-type reference.
            • ctx steering: persistent rules, not scripts. Use steering when the thing you want is \"tell the AI to always do X\" rather than \"run a script when Y happens.\"
            • Writing steering files: the rule-based equivalent of this recipe.
            ","path":["Recipes","Agents and Automation","Authoring Lifecycle Triggers"],"tags":[]},{"location":"recipes/troubleshooting/","level":1,"title":"Troubleshooting","text":"","path":["Recipes","Maintenance","Troubleshooting"],"tags":[]},{"location":"recipes/troubleshooting/#the-problem","level":2,"title":"The Problem","text":"

            Something isn't working: a hook isn't firing, nudges are too noisy, context seems stale, or the agent isn't following instructions. The information to diagnose it exists (across status, drift, event logs, hook config, and session history), but assembling it manually is tedious.

            How do you figure out what's wrong and fix it?

            ","path":["Recipes","Maintenance","Troubleshooting"],"tags":[]},{"location":"recipes/troubleshooting/#tldr","level":2,"title":"TL;DR","text":"
            ctx doctor                   # structural health check\nctx hook event --last 20  # recent hook activity\n# or ask: \"something seems off, can you diagnose?\"\n
            ","path":["Recipes","Maintenance","Troubleshooting"],"tags":[]},{"location":"recipes/troubleshooting/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Tool Type Purpose ctx doctor CLI command Structural health report ctx doctor --json CLI command Machine-readable health report ctx hook event CLI command Query local event log /ctx-doctor Skill Agent-driven diagnosis with analysis","path":["Recipes","Maintenance","Troubleshooting"],"tags":[]},{"location":"recipes/troubleshooting/#the-workflow","level":2,"title":"The Workflow","text":"","path":["Recipes","Maintenance","Troubleshooting"],"tags":[]},{"location":"recipes/troubleshooting/#quick-check-ctx-doctor","level":3,"title":"Quick Check: ctx doctor","text":"

            Run ctx doctor for an instant structural health report. It checks context initialization, required files, drift, hook configuration, event logging, webhooks, reminders, task completion ratio, and context token size: all in one pass:

            ctx doctor\n
            ctx doctor\n==========\n\nStructure\n  ✓ Context initialized (.context/)\n  ✓ Required files present (4/4)\n\nQuality\n  ⚠ Drift: 2 warnings (stale path in ARCHITECTURE.md, high entry count in LEARNINGS.md)\n\nHooks\n  ✓ hooks.json valid (14 hooks registered)\n  ○ Event logging disabled (enable with event_log: true in .ctxrc)\n\nState\n  ✓ No pending reminders\n  ⚠ Task completion ratio high (18/22 = 82%): consider archiving\n\nSize\n  ✓ Context size: ~4200 tokens (budget: 8000)\n\nSummary: 2 warnings, 0 errors\n

            Warnings are non-critical but worth fixing. Errors need attention. Informational notes (○) flag optional features that aren't enabled.

            For scripting:

            ctx doctor --json | jq '.warnings'\n
            ","path":["Recipes","Maintenance","Troubleshooting"],"tags":[]},{"location":"recipes/troubleshooting/#deep-dive-ctx-doctor","level":3,"title":"Deep Dive: /ctx-doctor","text":"

            When you need the agent to reason about what's wrong, use the skill. Ask naturally or invoke directly:

            Why didn't my hook fire?\nSomething seems off, can you diagnose?\n/ctx-doctor\n

            The agent follows a triage sequence:

            1. Baseline: runs ctx doctor --json for structural health
            2. Events: runs ctx hook event --json --last 100 (if event logging enabled)
            3. Correlate: connects findings across both sources
            4. Present: structured findings with evidence
            5. Suggest: actionable next steps (but doesn't auto-fix)

            The skill degrades gracefully: without event logging enabled, it still runs structural checks and notes what you'd gain by enabling it.

            ","path":["Recipes","Maintenance","Troubleshooting"],"tags":[]},{"location":"recipes/troubleshooting/#raw-event-inspection","level":3,"title":"Raw Event Inspection","text":"

            For power users: ctx hook event with filters gives direct access to the event log.

            # Last 50 events (default)\nctx hook event\n\n# Events from a specific session\nctx hook event --session eb1dc9cd-0163-4853-89d0-785fbfaae3a6\n\n# Only QA reminder events\nctx hook event --hook qa-reminder\n\n# Raw JSONL for jq processing\nctx hook event --json | jq '.message'\n\n# Include rotated (older) events\nctx hook event --all --last 100\n

            Filters use AND logic: --hook qa-reminder --session abc123 returns only QA reminder events from that specific session.

            ","path":["Recipes","Maintenance","Troubleshooting"],"tags":[]},{"location":"recipes/troubleshooting/#common-problems","level":2,"title":"Common Problems","text":"","path":["Recipes","Maintenance","Troubleshooting"],"tags":[]},{"location":"recipes/troubleshooting/#no-context-directory-specified-for-this-project","level":3,"title":"\"No context directory specified for this project\"","text":"

            Symptoms: Any ctx command fails with Error: no context directory specified for this project (possibly with a likely-candidate hint or a candidate list depending on what's visible from your CWD).

            Cause: ctx does not walk the filesystem. It requires the target .context/ directory to be declared explicitly before any non-exempt command runs.

            Fix: bind CTX_DIR for the current shell:

            eval \"$(ctx activate)\"\n

            See Activating a Context Directory for the full recipe (one-shot CTX_DIR=... inline form, CI patterns, direnv setup).

            ","path":["Recipes","Maintenance","Troubleshooting"],"tags":[]},{"location":"recipes/troubleshooting/#ctx-not-initialized","level":3,"title":"\"ctx: Not Initialized\"","text":"

            Symptoms: After declaring CTX_DIR, the command fails with ctx: not initialized - run \"ctx init\" first.

            Cause: The declared directory exists but hasn't been initialized with template files.

            Fix:

            ctx init          # create .context/ with template files\nctx init --minimal  # or just the essentials (CONSTITUTION, TASKS, DECISIONS)\n

            Commands that work without CTX_DIR or initialization: ctx init, ctx activate, ctx deactivate, ctx setup, ctx doctor, ctx guide, ctx why, ctx config switch/status, ctx hub *, and help-only grouping commands.

            ","path":["Recipes","Maintenance","Troubleshooting"],"tags":[]},{"location":"recipes/troubleshooting/#my-cli-and-my-claude-code-session-disagree-on-the-project","level":3,"title":"\"My CLI and My Claude Code Session Disagree on the Project\"","text":"

            Symptoms: A !-pragma or interactive ctx call writes to the wrong .context/; or you ran ctx remind add in shell A and the reminder shows up in project B's notifications.

            Cause: CTX_DIR is sourced from three different surfaces, and they can drift apart:

            Surface Source of CTX_DIR Bound when Claude Code hooks ${CLAUDE_PROJECT_DIR}/.context (injected) Every hook line; the project Claude is in !-pragma in chat / interactive shell Whatever the parent shell exported When you ran eval \"$(ctx activate)\" New shell tab opened mid-session Whatever your shellrc exports Login

            When these drift, the per-prompt check-anchor-drift hook fires a verbatim warning naming both values. To fix: re-run eval \"$(ctx activate)\" from inside the project the Claude Code session is editing, or close the shell tab and reopen it from the right working directory.

            ","path":["Recipes","Maintenance","Troubleshooting"],"tags":[]},{"location":"recipes/troubleshooting/#my-hook-isnt-firing","level":3,"title":"\"My Hook Isn't Firing\"","text":"

            Symptoms: No nudges appearing, webhook silent, event log shows no entries for the expected hook.

            Diagnosis:

            # 1. Check if ctx is installed and on PATH\nwhich ctx && ctx --version\n\n# 2. Check if the hook is registered\ngrep \"check-persistence\" ~/.claude/plugins/ctx/hooks.json\n\n# 3. Run the hook manually to see if it errors\necho '{\"session_id\":\"test\"}' | ctx system check-persistence\n\n# 4. Check event log for the hook (if enabled)\nctx hook event --hook check-persistence\n

            Common causes:

            • Plugin is not installed: run ctx init --claude to reinstall
            • PATH issue: the hook invokes ctx from PATH; ensure it resolves
            • Throttle active: most hooks fire once per day: check .context/state/ for daily marker files
            • Hook silenced: a custom message override may be an empty file: check ctx hook message list for overrides
            ","path":["Recipes","Maintenance","Troubleshooting"],"tags":[]},{"location":"recipes/troubleshooting/#too-many-nudges","level":3,"title":"\"Too Many Nudges\"","text":"

            Symptoms: The agent is overwhelmed with hook output. Context checkpoints, persistence reminders, and QA gates fire constantly.

            Diagnosis:

            # Check how often hooks fired recently\nctx hook event --last 50\n\n# Count fires per hook\nctx hook event --json | jq -r '.detail.hook // \"unknown\"' \\\n  | sort | uniq -c | sort -rn\n

            Common causes:

            • QA reminder is noisy by design: it fires on every Edit call with no throttle. This is intentional. If it's too much, silence it with an empty override: ctx hook message edit qa-reminder gate, then empty the file
            • Long session: context checkpoint fires with increasing frequency after prompt 15. This is the system telling you the session is getting long: consider wrapping up
            • Short throttle window: if you deleted marker files in .context/state/, daily-throttled hooks will re-fire
            • Outdated Claude Code plugin: Update the plugin using Claude Code → /plugin → \"Marketplace\"
            • ctx version mismatch: Build (or download) and install the latest ctx version.
            ","path":["Recipes","Maintenance","Troubleshooting"],"tags":[]},{"location":"recipes/troubleshooting/#context-seems-stale","level":3,"title":"\"Context Seems Stale\"","text":"

            Symptoms: The agent references outdated information, paths that don't exist, or decisions that were reversed.

            Diagnosis:

            # Structural drift check\nctx drift\n\n# Full doctor check (includes drift + more)\nctx doctor\n\n# Check when context files were last modified\nctx status --verbose\n

            Common causes:

            • Drift accumulated: stale path references in ARCHITECTURE.md or CONVENTIONS.md. Fix with ctx drift --fix or ask the agent to clean up.
            • Task backlog: too many completed tasks diluting active context. Archive with ctx task archive or ctx compact --archive.
            • Large context files: LEARNINGS.md with 40+ entries competes for attention. Consolidate with /ctx-consolidate.
            • Missing session ceremonies: if /ctx-remember and /ctx-wrap-up aren't being used, context doesn't get refreshed. See Session Ceremonies.
            ","path":["Recipes","Maintenance","Troubleshooting"],"tags":[]},{"location":"recipes/troubleshooting/#the-agent-isnt-following-instructions","level":3,"title":"\"The Agent Isn't Following Instructions\"","text":"

            Symptoms: The agent ignores conventions, forgets decisions, or acts contrary to CONSTITUTION.md rules.

            Diagnosis:

            # Check context token size: Is it too large for the model?\nctx doctor --json | jq '.results[] | select(.name == \"context_size\")'\n\n# Check if context is actually being loaded\nctx hook event --hook context-load-gate\n

            Common causes:

            • Context too large: if total tokens exceed the model's effective attention, instructions get diluted. Check ctx doctor for the size check. Compact with ctx compact --archive.
            • Context not loading: if context-load-gate hasn't fired, the agent may not have received context. Verify the hook is registered.
            • Conflicting instructions: CONVENTIONS.md says one thing, AGENT_PLAYBOOK.md says another. Review both files for consistency.
            • Agent drift: the agent's behavior diverges from instructions over long sessions. This is normal. Use /ctx-reflect to re-anchor, or start a new session.
            ","path":["Recipes","Maintenance","Troubleshooting"],"tags":[]},{"location":"recipes/troubleshooting/#prerequisites","level":2,"title":"Prerequisites","text":"
            • Event logging (optional but recommended): event_log: true in .ctxrc
            • ctx initialized: ctx init

            Event logging is not required for ctx doctor or /ctx-doctor to work. Both degrade gracefully: structural checks run regardless, and the skill notes when event data is unavailable.

            ","path":["Recipes","Maintenance","Troubleshooting"],"tags":[]},{"location":"recipes/troubleshooting/#tips","level":2,"title":"Tips","text":"
            • Start with ctx doctor: It's the fastest way to get a comprehensive health picture. Save event log inspection for when you need to understand when and how often something happened.
            • Enable event logging early: The log is opt-in and low-cost (~250 bytes per event, 1MB rotation cap). Enable it before you need it: Diagnosing a problem without historical data is much harder.
            • Use the skill for correlation: ctx doctor tells you what is wrong. /ctx-doctor tells you why by correlating structural findings with event patterns. The agent can spot connections that individual commands miss.
            • Event log is gitignored: It's machine-local diagnostic data, not project context. Different machines produce different event streams.
            ","path":["Recipes","Maintenance","Troubleshooting"],"tags":[]},{"location":"recipes/troubleshooting/#next-up","level":2,"title":"Next Up","text":"

            Detecting and Fixing Drift →: Keep context files accurate as your codebase evolves.

            ","path":["Recipes","Maintenance","Troubleshooting"],"tags":[]},{"location":"recipes/troubleshooting/#see-also","level":2,"title":"See Also","text":"
            • Auditing System Hooks: the complete hook catalog and webhook-based audit trails
            • Detecting and Fixing Drift: structural and semantic drift detection and repair
            • Webhook Notifications: push notifications for hook activity
            • ctx doctor CLI: full command reference
            • ctx hook event CLI: event log query reference
            • /ctx-doctor skill: agent-driven diagnosis
            ","path":["Recipes","Maintenance","Troubleshooting"],"tags":[]},{"location":"recipes/webhook-notifications/","level":1,"title":"Webhook Notifications","text":"","path":["Recipes","Hooks and Notifications","Webhook Notifications"],"tags":[]},{"location":"recipes/webhook-notifications/#the-problem","level":2,"title":"The Problem","text":"

            Your agent runs autonomously (loops, implements, releases) while you are away from the terminal. You have no way to know when it finishes, hits a limit, or when a hook fires a nudge.

            How do you get notified about agent activity without watching the terminal?

            ","path":["Recipes","Hooks and Notifications","Webhook Notifications"],"tags":[]},{"location":"recipes/webhook-notifications/#tldr","level":2,"title":"TL;DR","text":"
            ctx hook notify setup  # configure webhook URL (encrypted)\nctx hook notify test   # verify delivery\n# Hooks auto-notify on: session-end, loop-iteration, resource-danger\n
            ","path":["Recipes","Hooks and Notifications","Webhook Notifications"],"tags":[]},{"location":"recipes/webhook-notifications/#commands-and-skills-used","level":2,"title":"Commands and Skills Used","text":"Tool Type Purpose ctx hook notify setup CLI command Configure and encrypt webhook URL ctx hook notify test CLI command Send a test notification ctx hook notify --event <name> \"msg\" CLI command Send a notification from scripts/skills .ctxrc notify.events Configuration Filter which events reach your webhook","path":["Recipes","Hooks and Notifications","Webhook Notifications"],"tags":[]},{"location":"recipes/webhook-notifications/#the-workflow","level":2,"title":"The Workflow","text":"","path":["Recipes","Hooks and Notifications","Webhook Notifications"],"tags":[]},{"location":"recipes/webhook-notifications/#step-1-get-a-webhook-url","level":3,"title":"Step 1: Get a Webhook URL","text":"

            Any service that accepts HTTP POST with JSON works. Common options:

            Service How to get a URL IFTTT Create an applet with the \"Webhooks\" trigger Slack Create an Incoming Webhook Discord Channel Settings > Integrations > Webhooks ntfy.sh Use https://ntfy.sh/your-topic (no signup) Pushover Use API endpoint with your user key

            The URL contains auth tokens. ctx encrypts it; it never appears in plaintext in your repo.

            ","path":["Recipes","Hooks and Notifications","Webhook Notifications"],"tags":[]},{"location":"recipes/webhook-notifications/#step-2-configure-the-webhook","level":3,"title":"Step 2: Configure the Webhook","text":"
            ctx hook notify setup\n# Enter webhook URL: https://maker.ifttt.com/trigger/ctx/json/with/key/YOUR_KEY\n# Webhook configured: https://maker.ifttt.com/***\n# Encrypted at: .context/.notify.enc\n

            This encrypts the URL with AES-256-GCM using the same key as the scratchpad (~/.ctx/.ctx.key). The encrypted file (.context/.notify.enc) is safe to commit. The key lives outside the project and is never committed.

            ","path":["Recipes","Hooks and Notifications","Webhook Notifications"],"tags":[]},{"location":"recipes/webhook-notifications/#step-3-test-it","level":3,"title":"Step 3: Test It","text":"
            ctx hook notify test\n# Webhook responded: HTTP 200 OK\n

            If you see No webhook configured, run ctx hook notify setup first.

            ","path":["Recipes","Hooks and Notifications","Webhook Notifications"],"tags":[]},{"location":"recipes/webhook-notifications/#step-4-configure-events","level":3,"title":"Step 4: Configure Events","text":"

            Notifications are opt-in: no events are sent unless you configure an event list in .ctxrc:

            # .ctxrc\nnotify:\n  events:\n    - loop       # loop completion or max-iteration hit\n    - nudge      # VERBATIM relay hooks (context checkpoint, persistence, etc.)\n    - relay      # all hook output (verbose, for debugging)\n    - heartbeat  # every-prompt session-alive signal with metadata\n

            Only listed events fire. Omitting an event silently drops it.

            ","path":["Recipes","Hooks and Notifications","Webhook Notifications"],"tags":[]},{"location":"recipes/webhook-notifications/#step-5-use-in-your-own-skills","level":3,"title":"Step 5: Use in Your Own Skills","text":"

            Add ctx hook notify calls to any skill or script:

            # In a release skill\nctx hook notify --event release \"v1.2.0 released successfully\" 2>/dev/null || true\n\n# In a backup script\nctx hook notify --event backup \"Nightly backup completed\" 2>/dev/null || true\n

            The 2>/dev/null || true suffix ensures the notification never breaks your script: If there's no webhook or the HTTP call fails, it's a silent noop.

            ","path":["Recipes","Hooks and Notifications","Webhook Notifications"],"tags":[]},{"location":"recipes/webhook-notifications/#event-types","level":2,"title":"Event Types","text":"

            ctx fires these events automatically:

            Event Source When loop Loop script Loop completes or hits max iterations nudge System hooks VERBATIM relay nudge is emitted (context checkpoint, persistence, ceremonies, journal, resources, knowledge, version) relay System hooks Any hook output (VERBATIM relays, agent directives, block responses) heartbeat System hook Every prompt: session-alive signal with prompt count and context modification status test ctx hook notify test Manual test notification (custom) Your skills You wire ctx hook notify --event <name> in your own scripts

            nudge vs relay: The nudge event fires only for VERBATIM relay hooks (the ones the agent is instructed to show verbatim). The relay event fires for all hook output: VERBATIM relays, agent directives, and hard gates. Subscribe to relay for debugging (\"did the agent get the post-commit nudge?\"), nudge for user-facing assurance (\"was the checkpoint emitted?\").

            Webhooks as a Hook Audit Trail

            Subscribe to relay events and you get an external record of every hook that fires, independent of the agent.

            This lets you verify hooks are running and catch cases where the agent absorbs a nudge instead of surfacing it.

            See Auditing System Hooks for the full workflow.

            ","path":["Recipes","Hooks and Notifications","Webhook Notifications"],"tags":[]},{"location":"recipes/webhook-notifications/#payload-format","level":2,"title":"Payload Format","text":"

            Every notification sends a JSON POST:

            {\n  \"event\": \"nudge\",\n  \"message\": \"check-context-size: Context window at 82%\",\n  \"detail\": {\n    \"hook\": \"check-context-size\",\n    \"variant\": \"window\",\n    \"variables\": {\"Percentage\": 82, \"TokenCount\": \"164k\"}\n  },\n  \"session_id\": \"abc123-...\",\n  \"timestamp\": \"2026-02-22T14:30:00Z\",\n  \"project\": \"ctx\"\n}\n

            The detail field is a structured template reference containing the hook name, variant, and any template variables. This lets receivers filter by hook or variant without parsing rendered text. The field is omitted when no template reference applies (e.g. custom ctx hook notify calls).

            ","path":["Recipes","Hooks and Notifications","Webhook Notifications"],"tags":[]},{"location":"recipes/webhook-notifications/#heartbeat-payload","level":3,"title":"Heartbeat Payload","text":"

            The heartbeat event fires on every prompt with session metadata and token usage telemetry:

            {\n  \"event\": \"heartbeat\",\n  \"message\": \"heartbeat: prompt #7 (context_modified=false tokens=158k pct=79%)\",\n  \"detail\": {\n    \"hook\": \"heartbeat\",\n    \"variant\": \"pulse\",\n    \"variables\": {\n      \"prompt_count\": 7,\n      \"session_id\": \"abc123-...\",\n      \"context_modified\": false,\n      \"tokens\": 158000,\n      \"context_window\": 200000,\n      \"usage_pct\": 79\n    }\n  },\n  \"session_id\": \"abc123-...\",\n  \"timestamp\": \"2026-02-28T10:15:00Z\",\n  \"project\": \"ctx\"\n}\n

            The tokens, context_window, and usage_pct fields are included when token data is available from the session JSONL file. They are omitted when no usage data has been recorded yet (e.g. first prompt).

            Unlike other events, heartbeat fires every prompt (not throttled). Use it for observability dashboards or liveness monitoring of long-running sessions.

            ","path":["Recipes","Hooks and Notifications","Webhook Notifications"],"tags":[]},{"location":"recipes/webhook-notifications/#security-model","level":2,"title":"Security Model","text":"Component Location Committed? Permissions Encryption key ~/.ctx/.ctx.key No (user-level) 0600 Encrypted URL .context/.notify.enc Yes (safe) 0600 Webhook URL Never on disk in plaintext N/A N/A

            The key is shared with the scratchpad. If you rotate the encryption key, re-run ctx hook notify setup to re-encrypt the webhook URL with the new key.

            ","path":["Recipes","Hooks and Notifications","Webhook Notifications"],"tags":[]},{"location":"recipes/webhook-notifications/#key-rotation","level":2,"title":"Key Rotation","text":"

            ctx checks the age of the encryption key once per day. If it's older than 90 days (configurable via key_rotation_days), a VERBATIM nudge is emitted suggesting rotation.

            # .ctxrc\nkey_rotation_days: 30   # nudge sooner (default: 90)\n
            ","path":["Recipes","Hooks and Notifications","Webhook Notifications"],"tags":[]},{"location":"recipes/webhook-notifications/#worktrees","level":2,"title":"Worktrees","text":"

            The webhook URL is encrypted with the same encryption key (~/.ctx/.ctx.key). Because the key lives at the user level, it is shared across all worktrees on the same machine - notifications work in worktrees automatically.

            This means agents running in worktrees can send webhook alerts without extra setup. For autonomous runs where worktree agents are otherwise opaque, webhooks are a useful way to monitor progress; additionally, enrich journals and review results on the main branch after merging.

            ","path":["Recipes","Hooks and Notifications","Webhook Notifications"],"tags":[]},{"location":"recipes/webhook-notifications/#event-log-the-local-complement","level":2,"title":"Event Log: The Local Complement","text":"

            Don't need a webhook but want diagnostic visibility? Enable event_log: true in .ctxrc. The event log writes the same payload as webhooks to a local JSONL file (.context/state/events.jsonl) that you can query without any external service:

            ctx hook event --last 20          # recent hook activity\nctx hook event --hook qa-reminder # filter by hook\n

            Webhooks and event logging are independent: you can use either, both, or neither. Webhooks give you push notifications and an external audit trail. The event log gives you local queryability and ctx doctor integration.

            See Troubleshooting for how they work together.

            ","path":["Recipes","Hooks and Notifications","Webhook Notifications"],"tags":[]},{"location":"recipes/webhook-notifications/#tips","level":2,"title":"Tips","text":"
            • Fire-and-forget: Notifications never block. HTTP errors are silently ignored. No retry, no response parsing.
            • No webhook = no cost: When no webhook is configured, ctx hook notify exits immediately. System hooks that call notify.Send() add zero overhead.
            • Multiple projects: Each project has its own .notify.enc. You can point different projects at different webhooks.
            • Event filter is per-project: Configure notify.events in each project's .ctxrc independently.
            ","path":["Recipes","Hooks and Notifications","Webhook Notifications"],"tags":[]},{"location":"recipes/webhook-notifications/#next-up","level":2,"title":"Next Up","text":"

            Auditing System Hooks →: Verify your hooks are running, audit what they do, and get alerted when they go silent.

            ","path":["Recipes","Hooks and Notifications","Webhook Notifications"],"tags":[]},{"location":"recipes/webhook-notifications/#see-also","level":2,"title":"See Also","text":"
            • CLI Reference: ctx hook notify: full command reference
            • Configuration: .ctxrc settings including notify options
            • Running an Unattended AI Agent: how loops work and how notifications fit in
            • Hook Output Patterns: understanding VERBATIM relays, agent directives, and hard gates
            • Auditing System Hooks: using webhooks as an external audit trail for hook execution
            ","path":["Recipes","Hooks and Notifications","Webhook Notifications"],"tags":[]},{"location":"recipes/when-to-use-agent-teams/","level":1,"title":"When to Use a Team of Agents","text":"","path":["Recipes","Agents and Automation","When to Use a Team of Agents"],"tags":[]},{"location":"recipes/when-to-use-agent-teams/#the-problem","level":2,"title":"The Problem","text":"

            You have a task, and you are wondering: \"should I throw more agents at it?\"

            More agents can mean faster results, but they also mean coordination overhead, merge conflicts, divergent mental models, and wasted tokens re-reading context.

            The wrong setup costs more than it saves.

            This recipe is a decision framework: It helps you choose between a single agent, parallel worktrees, and a full agent team, and explains what ctx provides at each level.

            ","path":["Recipes","Agents and Automation","When to Use a Team of Agents"],"tags":[]},{"location":"recipes/when-to-use-agent-teams/#tldr","level":2,"title":"TL;DR","text":"
            • Single agent for most work;
            • Parallel worktrees when tasks touch disjoint file sets;
            • Agent teams only when tasks need real-time coordination. When in doubt, start with one agent.
            ","path":["Recipes","Agents and Automation","When to Use a Team of Agents"],"tags":[]},{"location":"recipes/when-to-use-agent-teams/#the-spectrum","level":2,"title":"The Spectrum","text":"

            There are three modes, ordered by complexity:

            ","path":["Recipes","Agents and Automation","When to Use a Team of Agents"],"tags":[]},{"location":"recipes/when-to-use-agent-teams/#1-single-agent-default","level":3,"title":"1. Single Agent (Default)","text":"

            One agent, one session, one branch. This is correct for most work.

            Use this when:

            • The task has linear dependencies (step 2 needs step 1's output);
            • Changes touch overlapping files;
            • You need tight feedback loops (review each change before the next);
            • The task requires deep understanding of a single area;
            • Total effort is less than a few hours of agent time.

            ctx provides: Full .context/: tasks, decisions, learnings, conventions, all in one session.

            The agent builds a coherent mental model and persists it as it goes.

            Example tasks: Bug fixes, feature implementation, refactoring a module, writing documentation for one area, debugging.

            ","path":["Recipes","Agents and Automation","When to Use a Team of Agents"],"tags":[]},{"location":"recipes/when-to-use-agent-teams/#2-parallel-worktrees-independent-tracks","level":3,"title":"2. Parallel Worktrees (Independent Tracks)","text":"

            2-4 agents, each in a separate git worktree on its own branch, working on non-overlapping parts of the codebase.

            Use this when:

            • You have 5+ independent tasks in the backlog;
            • Tasks group cleanly by directory or package;
            • File overlap between groups is zero or near-zero;
            • Each track can be completed and merged independently;
            • You want parallelism without coordination complexity.

            ctx provides: Shared .context/ via git (each worktree sees the same tasks, decisions, conventions). /ctx-worktree skill for setup and teardown. TASKS.md as a lightweight work queue.

            Example tasks: Docs + new package + test coverage (three tracks that don't touch the same files). Parallel recipe writing. Independent module development.

            See: Parallel Agent Development with Git Worktrees

            ","path":["Recipes","Agents and Automation","When to Use a Team of Agents"],"tags":[]},{"location":"recipes/when-to-use-agent-teams/#3-agent-team-coordinated-swarm","level":3,"title":"3. Agent Team (Coordinated Swarm)","text":"

            Multiple agents communicating via messages, sharing a task list, with a lead agent coordinating. Claude Code's team/swarm feature.

            Use this when:

            • Tasks have dependencies but can still partially overlap;
            • You need research and implementation happening simultaneously;
            • The work requires different roles (researcher, implementer, tester);
            • A lead agent needs to review and integrate others' work;
            • The task is large enough that coordination cost is justified.

            ctx provides: .context/ as shared state that all agents can read. Task tracking for work assignment. Decisions and learnings as team memory that survives individual agent turnover.

            Example tasks: Large refactor across modules where a lead reviews merges. Research and implementation where one agent explores options while another builds. Multi-file feature that needs integration testing after parallel implementation.

            ","path":["Recipes","Agents and Automation","When to Use a Team of Agents"],"tags":[]},{"location":"recipes/when-to-use-agent-teams/#the-decision-framework","level":2,"title":"The Decision Framework","text":"

            Ask these questions in order:

            Can one agent do this in a reasonable time?\n  YES → Single agent. Stop here.\n  NO  ↓\n\nCan the work be split into non-overlapping file sets?\n  YES → Parallel worktrees (2-4 tracks)\n  NO  ↓\n\nDo the subtasks need to communicate during execution?\n  YES → Agent team with lead coordination\n  NO  → Parallel worktrees with a merge step\n
            ","path":["Recipes","Agents and Automation","When to Use a Team of Agents"],"tags":[]},{"location":"recipes/when-to-use-agent-teams/#the-file-overlap-test","level":3,"title":"The File Overlap Test","text":"

            This is the critical decision point. Before choosing multi-agent, list the files each subtask would touch. If two subtasks modify the same file, they belong in the same track (or the same single-agent session).

            You: \"I want to parallelize these tasks. Which files would each one touch?\"\n\nAgent: [reads `TASKS.md`, analyzes codebase]\n       \"Task A touches internal/config/ and internal/cli/initialize/\n        Task B touches docs/ and site/\n        Task C touches internal/config/ and internal/cli/status/\n\n        Tasks A and C overlap on internal/config/ — they should be\n        in the same track. Task B is independent.\"\n

            When in doubt, keep things in one track. A merge conflict in a critical file costs more time than the parallelism saves.

            ","path":["Recipes","Agents and Automation","When to Use a Team of Agents"],"tags":[]},{"location":"recipes/when-to-use-agent-teams/#when-teams-make-things-worse","level":2,"title":"When Teams Make Things Worse","text":"

            \"More agents\" is not always better. Watch for these patterns:

            Merge hell: If you are spending more time resolving conflicts than the parallel work saved, you split wrong. Re-group by file overlap.

            Context divergence: Each agent builds its own mental model. After 30 minutes of independent work, agent A might make assumptions that contradict agent B's approach. Shorter tracks with frequent merges reduce this.

            Coordination theater: A lead agent spending most of its time assigning tasks, checking status, and sending messages instead of doing work. If the task list is clear enough, worktrees with no communication are cheaper.

            Re-reading overhead: Every agent reads .context/ on startup. A team of 4 agents each reading 4000 tokens of context = 16000 tokens before anyone does any work. For small tasks, that overhead dominates.

            ","path":["Recipes","Agents and Automation","When to Use a Team of Agents"],"tags":[]},{"location":"recipes/when-to-use-agent-teams/#what-ctx-gives-you-at-each-level","level":2,"title":"What ctx Gives You at Each Level","text":"ctx Feature Single Agent Worktrees Team .context/ files Full access Shared via git Shared via filesystem TASKS.md Work queue Split by track Assigned by lead Decisions/Learnings Persisted in session Persisted per branch Persisted by any agent /ctx-next Picks next task Picks within track Lead assigns /ctx-worktree N/A Setup + teardown Optional /ctx-commit Normal commits Per-branch commits Per-agent commits","path":["Recipes","Agents and Automation","When to Use a Team of Agents"],"tags":[]},{"location":"recipes/when-to-use-agent-teams/#team-composition-recipes","level":2,"title":"Team Composition Recipes","text":"

            Four practical team compositions for common workflows.

            ","path":["Recipes","Agents and Automation","When to Use a Team of Agents"],"tags":[]},{"location":"recipes/when-to-use-agent-teams/#feature-development-3-agents","level":3,"title":"Feature Development (3 Agents)","text":"Role Responsibility Architect Writes spec in specs/, breaks work into TASKS.md phases Implementer Picks tasks from TASKS.md, writes code, marks [x] done Reviewer Runs tests, ctx drift, lint; files issues as new tasks

            Coordination: TASKS.md checkboxes. Architect writes tasks before implementer starts. Reviewer runs after each implementer commit.

            Anti-pattern: All three agents editing the same file simultaneously. Sequence the work so only one agent touches a file at a time.

            ","path":["Recipes","Agents and Automation","When to Use a Team of Agents"],"tags":[]},{"location":"recipes/when-to-use-agent-teams/#consolidation-sprint-3-4-agents","level":3,"title":"Consolidation Sprint (3-4 Agents)","text":"Role Responsibility Auditor Runs ctx drift, identifies stale paths and broken refs Code Fixer Updates source code to match context (or vice versa) Doc Writer Updates ARCHITECTURE.md, CONVENTIONS.md, and docs/ Test Fixer (Optional) Fixes tests broken by the fixer's changes

            Coordination: Auditor's ctx drift output is the shared work queue. Each agent claims a subset of issues by adding #in-progress labels.

            Anti-pattern: Fixer and doc writer both editing ARCHITECTURE.md. Assign file ownership explicitly.

            ","path":["Recipes","Agents and Automation","When to Use a Team of Agents"],"tags":[]},{"location":"recipes/when-to-use-agent-teams/#release-prep-2-agents","level":3,"title":"Release Prep (2 Agents)","text":"Role Responsibility Release Notes Generates changelog from commits, writes release notes Validation Runs full test suite, lint, build across platforms

            Coordination: Both read TASKS.md to identify what shipped. Release notes agent works from git log; validation agent works from make audit.

            Anti-pattern: Release notes agent running tests \"to verify.\" Each agent stays in its lane.

            ","path":["Recipes","Agents and Automation","When to Use a Team of Agents"],"tags":[]},{"location":"recipes/when-to-use-agent-teams/#documentation-sprint-3-agents","level":3,"title":"Documentation Sprint (3 Agents)","text":"Role Responsibility Content Writes new pages, expands existing docs Cross-linker Adds nav entries, cross-references, \"See Also\" sections Verifier Builds site, checks broken links, validates rendering

            Coordination: Content agent writes files first. Cross-linker updates zensical.toml and index pages after content lands. Verifier builds after each batch.

            Anti-pattern: Content and cross-linker both editing zensical.toml. Batch nav updates into the cross-linker's pass.

            ","path":["Recipes","Agents and Automation","When to Use a Team of Agents"],"tags":[]},{"location":"recipes/when-to-use-agent-teams/#tips","level":2,"title":"Tips","text":"
            • Start with one agent: Only add parallelism when you have identified the bottleneck. \"This would go faster with more agents\" is usually wrong for tasks under 2 hours.
            • The 3-4 agent ceiling is real: Coordination overhead grows quadratically. 2 agents = 1 communication pair. 4 agents = 6 pairs. Beyond 4, you are managing agents more than doing work.
            • Worktrees > teams for most parallelism needs: If agents don't need to talk to each other during execution, worktrees give you parallelism with zero coordination overhead.
            • Use ctx as the shared brain: Whether it's one agent or four, the .context/ directory is the single source of truth. Decisions go in DECISIONS.md, not in chat messages between agents.
            • Merge early, merge often: Long-lived parallel branches diverge. Merge a track as soon as it's done rather than waiting for all tracks to finish.
            • TASKS.md conflicts are normal: Multiple agents completing different tasks will conflict on merge. The resolution is always additive: accept all [x] completions from both sides.
            ","path":["Recipes","Agents and Automation","When to Use a Team of Agents"],"tags":[]},{"location":"recipes/when-to-use-agent-teams/#next-up","level":2,"title":"Next Up","text":"

            Parallel Agent Development with Git Worktrees →: Run multiple agents on independent task tracks using git worktrees.

            ","path":["Recipes","Agents and Automation","When to Use a Team of Agents"],"tags":[]},{"location":"recipes/when-to-use-agent-teams/#go-deeper","level":2,"title":"Go Deeper","text":"
            • CLI Reference: all commands and flags
            • Integrations: setup for Claude Code, Cursor, Aider
            • Session Journal: browse and search session history
            ","path":["Recipes","Agents and Automation","When to Use a Team of Agents"],"tags":[]},{"location":"recipes/when-to-use-agent-teams/#see-also","level":2,"title":"See Also","text":"
            • Parallel Agent Development with Git Worktrees: the mechanical \"how\" for worktree-based parallelism
            • Running an Unattended AI Agent: serial autonomous loops: a different scaling strategy
            • Tracking Work Across Sessions: managing the task backlog that feeds into any multi-agent setup
            ","path":["Recipes","Agents and Automation","When to Use a Team of Agents"],"tags":[]},{"location":"reference/","level":1,"title":"Reference","text":"

            Technical reference for ctx commands, skills, and internals.

            ","path":["Reference"],"tags":[]},{"location":"reference/#the-system-explains-itself","level":3,"title":"The System Explains Itself","text":"

            The 12 properties that must hold for any valid ctx implementation. Not features: constraints. The system's contract with its users and contributors.

            ","path":["Reference"],"tags":[]},{"location":"reference/#code-conventions","level":3,"title":"Code Conventions","text":"

            Common patterns and fixes for the AST compliance tests in internal/audit/. When a test fails, find the matching section.

            ","path":["Reference"],"tags":[]},{"location":"reference/#cli","level":3,"title":"CLI","text":"

            Every command, subcommand, and flag. Now a top-level section: see CLI Reference.

            ","path":["Reference"],"tags":[]},{"location":"reference/#skills","level":3,"title":"Skills","text":"

            The full skill catalog: what each skill does, when it triggers, and how skills interact with commands.

            ","path":["Reference"],"tags":[]},{"location":"reference/#tool-ecosystem","level":3,"title":"Tool Ecosystem","text":"

            How ctx compares to Cursor Rules, Aider conventions, CLAUDE.md, and other context approaches.

            ","path":["Reference"],"tags":[]},{"location":"reference/#session-journal","level":3,"title":"Session Journal","text":"

            Export, browse, and enrich your session history. Covers the journal site, Obsidian export, and the enrichment pipeline.

            ","path":["Reference"],"tags":[]},{"location":"reference/#scratchpad","level":3,"title":"Scratchpad","text":"

            Encrypted, git-tracked scratch space for short notes and sensitive values that travel with the project.

            ","path":["Reference"],"tags":[]},{"location":"reference/#version-history","level":3,"title":"Version History","text":"

            Changelog for every ctx release.

            ","path":["Reference"],"tags":[]},{"location":"reference/audit-conventions/","level":1,"title":"Code Conventions","text":"","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#code-conventions-common-patterns-and-fixes","level":1,"title":"Code Conventions: Common Patterns and Fixes","text":"

            This guide documents the code conventions enforced by internal/audit/ AST tests. Each section shows the violation pattern, the fix, and the rationale. When a test fails, find the matching section below.

            All tests skip _test.go files. The patterns apply only to production code under internal/.

            ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#variable-shadowing-bare-err-reuse","level":2,"title":"Variable Shadowing (Bare err := Reuse)","text":"

            Test: TestNoVariableShadowing

            When a function has multiple := assignments to err, each shadows the previous one. This makes it impossible to tell which error a later if err != nil is checking.

            Before:

            func Run(cmd *cobra.Command) error {\n    data, err := os.ReadFile(path) \n    if err != nil {\n        return err\n    }\n\n    result, err := json.Unmarshal(data)  // shadows first err\n    if err != nil {\n        return err\n    }\n\n    err = validate(result)  // shadows again\n    return err\n}\n

            After:

            func Run(cmd *cobra.Command) error {\n    data, readErr := os.ReadFile(path)\n    if readErr != nil {\n        return readErr\n    }\n\n    result, parseErr := json.Unmarshal(data)\n    if parseErr != nil {\n        return parseErr\n    }\n\n    validateErr := validate(result)\n    return validateErr\n}\n

            Rule: Use descriptive error names (readErr, writeErr, parseErr, walkErr, absErr, relErr) so each error site is independently identifiable.

            ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#import-name-shadowing","level":2,"title":"Import Name Shadowing","text":"

            Test: TestNoImportNameShadowing

            When a local variable has the same name as an imported package, the import becomes inaccessible in that scope.

            Before:

            import \"github.com/ActiveMemory/ctx/internal/session\"\n\nfunc process(session *entity.Session) {  // param shadows import\n    // session package is now unreachable here\n}\n

            After:

            import \"github.com/ActiveMemory/ctx/internal/session\"\n\nfunc process(sess *entity.Session) {\n    // session package still accessible\n}\n

            Rule: Parameters, variables, and return values must not reuse imported package names. Common renames: session -> sess, token -> tok, config -> cfg, entry -> ent.

            ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#magic-strings","level":2,"title":"Magic Strings","text":"

            Test: TestNoMagicStrings

            String literals in function bodies are invisible to refactoring tools and cause silent breakage when the value changes in one place but not another.

            Before (string literals):

            func loadContext() {\n    data := filepath.Join(dir, \"TASKS.md\")\n    if strings.HasSuffix(name, \".yaml\") {\n        // ...\n    }\n}\n

            After:

            func loadContext() {\n    data := filepath.Join(dir, config.FilenameTask)\n    if strings.HasSuffix(name, config.ExtYAML) {\n        // ...\n    }\n}\n

            Before (format verbs, also caught):

            func EntryHash(text string) string {\n    h := sha256.Sum256([]byte(text))\n    return fmt.Sprintf(\"%x\", h[:8])\n}\n

            After:

            func EntryHash(text string) string {\n    h := sha256.Sum256([]byte(text))\n    return hex.EncodeToString(h[:cfgFmt.HashPrefixLen])\n}\n

            Before (URL schemes, also caught):

            if strings.HasPrefix(target, \"https://\") ||\n    strings.HasPrefix(target, \"http://\") {\n    return target\n}\n

            After:

            if strings.HasPrefix(target, cfgHTTP.PrefixHTTPS) ||\n    strings.HasPrefix(target, cfgHTTP.PrefixHTTP) {\n    return target\n}\n

            Exempt from this check:

            • Empty string \"\", single space \" \", indentation strings
            • Regex capture references ($1, ${name})
            • const and var definition sites (that's where constants live)
            • Struct tags
            • Import paths
            • Packages under internal/config/, internal/assets/tpl/

            Rule: If a string is used for comparison, path construction, or appears in 3+ files, it belongs in internal/config/ as a constant. Format strings belong in internal/config/ as named constants (e.g., cfgGit.FlagLastN, cfgTrace.RefFormat). User-facing prose belongs in internal/assets/ YAML files accessed via desc.Text().

            Common fix for fmt.Sprintf with format verbs:

            Pattern Fix fmt.Sprintf(\"%d\", n) strconv.Itoa(n) fmt.Sprintf(\"%d\", int64Val) strconv.FormatInt(int64Val, 10) fmt.Sprintf(\"%x\", bytes) hex.EncodeToString(bytes) fmt.Sprintf(\"%q\", s) strconv.Quote(s) fmt.Sscanf(s, \"%d\", &n) strconv.Atoi(s) fmt.Sprintf(\"-%d\", n) fmt.Sprintf(cfgGit.FlagLastN, n) \"https://\" cfgHTTP.PrefixHTTPS \"&lt;\" config constant in config/html/","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#direct-printf-calls","level":2,"title":"Direct Printf Calls","text":"

            Test: TestNoPrintfCalls

            cmd.Printf and cmd.PrintErrf bypass the write-package formatting pipeline and scatter user-facing text across the codebase.

            Before:

            func Run(cmd *cobra.Command, args []string) {\n    cmd.Printf(\"Found %d tasks\\n\", count)\n}\n

            After:

            func Run(cmd *cobra.Command, args []string) {\n    write.TaskCount(cmd, count)\n}\n

            Rule: All formatted output goes through internal/write/ which uses cmd.Print/cmd.Println with pre-formatted strings from desc.Text().

            ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#raw-time-format-strings","level":2,"title":"Raw Time Format Strings","text":"

            Test: TestNoRawTimeFormats

            Inline time format strings (\"2006-01-02\", \"15:04:05\") drift when one call site is updated but others are missed.

            Before:

            func formatDate(t time.Time) string {\n    return t.Format(\"2006-01-02\")\n}\n

            After:

            func formatDate(t time.Time) string {\n    return t.Format(cfgTime.DateFormat)\n}\n

            Rule: All time format strings must use constants from internal/config/time/.

            ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#direct-flag-registration","level":2,"title":"Direct Flag Registration","text":"

            Test: TestNoFlagBindOutsideFlagbind

            Direct cobra flag calls (.Flags().StringVar(), etc.) scatter flag wiring across dozens of cmd.go files. Centralizing through internal/flagbind/ gives one place to audit flag names, defaults, and description key lookups.

            Before:

            func Cmd() *cobra.Command {\n    var output string\n    c := &cobra.Command{Use: cmd.UseStatus}\n    c.Flags().StringVarP(&output, \"output\", \"o\", \"\",\n        \"output format\")\n    return c\n}\n

            After:

            func Cmd() *cobra.Command {\n    var output string\n    c := &cobra.Command{Use: cmd.UseStatus}\n    flagbind.StringFlagShort(c, &output, flag.Output,\n        flag.OutputShort, cmd.DescKeyOutput)\n    return c\n}\n

            Rule: All flag registration goes through internal/flagbind/. If the helper you need doesn't exist, add it to flagbind/flag.go before using it.

            ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#todo-comments","level":2,"title":"TODO Comments","text":"

            Test: TestNoTODOComments

            TODO, FIXME, HACK, and XXX comments in production code are invisible to project tracking. They accumulate silently and never get addressed.

            Before:

            // TODO: handle pagination\nfunc listEntries() []Entry {\n

            After:

            Remove the comment and add a task to .context/TASKS.md:

            - [ ] Handle pagination in listEntries (internal/task/task.go)\n

            Rule: Deferred work lives in TASKS.md, not in source comments.

            ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#dead-exports","level":2,"title":"Dead Exports","text":"

            Test: TestNoDeadExports

            Exported symbols with zero references outside their definition file are dead weight. They increase API surface, confuse contributors, and cost maintenance.

            Fix: Either delete the export (preferred) or demote it to unexported if it's still used within the file.

            If the symbol existed for historical reasons and might be needed again, move it to quarantine/deadcode/ with a .dead extension. This preserves the code in git without polluting the live codebase:

            quarantine/deadcode/internal/config/flag/flag.go.dead\n

            Each .dead file includes a header:

            // Dead exports quarantined from internal/config/flag/flag.go\n// Quarantined: 2026-04-02\n// Restore from git history if needed.\n

            Rule: If a test-only allowlist entry is needed (the export exists only for test use), add the fully qualified symbol to testOnlyExports in dead_exports_test.go. Keep this list small; prefer eliminating the export.

            ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#core-package-structure","level":2,"title":"Core Package Structure","text":"

            Test: TestCoreStructure

            core/ directories under internal/cli/ must contain only doc.go and test files at the top level. All domain logic lives in subpackages. This prevents core/ from becoming a god package.

            Before:

            internal/cli/dep/core/\n    go.go           # violation: logic at core/ level\n    python.go       # violation\n    node.go         # violation\n    types.go        # violation\n

            After:

            internal/cli/dep/core/\n    doc.go          # package doc only\n    golang/\n        golang.go\n        golang_test.go\n        doc.go\n    python/\n        python.go\n        python_test.go\n        doc.go\n    node/\n        node.go\n        node_test.go\n        doc.go\n

            Rule: Extract each logical unit into its own subpackage under core/. Each subpackage gets a doc.go. The subpackage name should match the domain concept (golang, check, fix, store), not a generic label (util, helper).

            ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#cross-package-types","level":2,"title":"Cross-Package Types","text":"

            Test: TestCrossPackageTypes

            When a type defined in one package is used from a different module (e.g., cli/doctor importing a type from cli/notify), the type has crossed its module boundary. Cross-cutting types belong in internal/entity/ for discoverability.

            Before:

            // internal/cli/notify/core/types.go\ntype NotifyPayload struct { ... }\n\n// internal/cli/doctor/core/check/check.go\nimport \"github.com/ActiveMemory/ctx/internal/cli/notify/core\"\nfunc check(p core.NotifyPayload) { ... }\n

            After:

            // internal/entity/notify.go\ntype NotifyPayload struct { ... }\n\n// internal/cli/doctor/core/check/check.go\nimport \"github.com/ActiveMemory/ctx/internal/entity\"\nfunc check(p entity.NotifyPayload) { ... }\n

            Exempt: Types inside entity/, proto/, core/ subpackages, and config/ packages. Same-module usage (e.g., cli/doctor/cmd/ using cli/doctor/core/) is not flagged.

            ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#type-file-convention","level":2,"title":"Type File Convention","text":"

            Test: TestTypeFileConvention, TestTypeFileConventionReport

            Exported types in core/ subpackages should live in types.go (the convention from CONVENTIONS.md), not scattered across implementation files. This makes type definitions discoverable. TestTypeFileConventionReport generates a diagnostic summary of all type placements for triage.

            Exception: entity/ organizes by domain (task.go, session.go), proto/ uses schema.go, and err/ packages colocate error types with their domain context.

            ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#desckey-yaml-linkage","level":2,"title":"DescKey / YAML Linkage","text":"

            Test: TestDescKeyYAMLLinkage

            Every DescKey constant must have a corresponding key in the YAML asset files, and every YAML key must have a corresponding DescKey constant. Orphans in either direction mean dead text or runtime panics.

            Fix for orphan YAML key: Delete the YAML entry, or add the corresponding DescKey constant in config/embed/{text,cmd,flag}/.

            Fix for orphan DescKey: Delete the constant, or add the corresponding entry in the YAML file under internal/assets/commands/text/, cmd/, or flag/.

            If the orphan YAML entry was once valid but the feature was removed, move the YAML entry to a .dead file in quarantine/deadcode/.

            ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#package-doc-quality","level":2,"title":"Package Doc Quality","text":"

            Test: TestPackageDocQuality

            Every package under internal/ must have a doc.go with a meaningful package doc comment (at least 8 lines of real content). One-liners and file-list patterns (// - foo.go, // Source files:) are flagged because they drift as files change.

            Template:

            //   /    ctx:                         https://ctx.ist\n// ,'`./    do you remember?\n// `.,'\\\n//   \\    Copyright 2026-present Context contributors.\n//                 SPDX-License-Identifier: Apache-2.0\n\n// Package mypackage does X.\n//\n// It handles Y by doing Z. The main entry point is [FunctionName]\n// which accepts A and returns B.\n//\n// Configuration is read from [config.SomeConstant]. Output is\n// written through [write.SomeHelper].\n//\n// This package is used by [parentpackage] during the W lifecycle\n// phase.\npackage mypackage\n
            ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#inline-regex-compilation","level":2,"title":"Inline Regex Compilation","text":"

            Test: TestNoInlineRegexpCompile

            regexp.MustCompile and regexp.Compile inside function bodies recompile the pattern on every call. Compiled patterns belong at package level.

            Before:

            func parse(s string) bool {\n    re := regexp.MustCompile(`\\d{4}-\\d{2}-\\d{2}`)\n    return re.MatchString(s)\n}\n

            After:

            // In internal/config/regex/regex.go:\n// DatePattern matches ISO date format (YYYY-MM-DD).\nvar DatePattern = regexp.MustCompile(`\\d{4}-\\d{2}-\\d{2}`)\n\n// In calling package:\nfunc parse(s string) bool {\n    return regex.DatePattern.MatchString(s)\n}\n

            Rule: All compiled regexes live in internal/config/regex/ as package-level var declarations. Two tests enforce this: TestNoInlineRegexpCompile catches function-body compilation, and TestNoRegexpOutsideRegexPkg catches package-level compilation outside config/regex/.

            ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#doc-comments","level":2,"title":"Doc Comments","text":"

            Test: TestDocComments

            All functions (exported and unexported), structs, and package-level variables must have a doc comment. Config packages allow group doc comments for const blocks.

            Before:

            func buildIndex(entries []Entry) map[string]int {\n

            After:

            // buildIndex maps entry names to their position in the\n// ordered slice for O(1) lookup during reconciliation.\n//\n// Parameters:\n//   - entries: ordered slice of entries to index\n//\n// Returns:\n//   - map[string]int: name-to-position mapping\nfunc buildIndex(entries []Entry) map[string]int {\n

            Rule: Every function, struct, and package-level var gets a doc comment in godoc format. Functions include Parameters: and Returns: sections. Structs with 2+ fields document every field. See CONVENTIONS.md for the full template.

            ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#line-length","level":2,"title":"Line Length","text":"

            Test: TestLineLength

            Lines in non-test Go files must not exceed 80 characters. This is a hard check, not a suggestion.

            Before:

            _ = trace.Record(fmt.Sprintf(cfgTrace.RefFormat, cfgTrace.RefTypeTask, matchedNum), state.Dir())\n

            After:

            ref := fmt.Sprintf(\n    cfgTrace.RefFormat, cfgTrace.RefTypeTask, matchedNum,\n)\n_ = trace.Record(ref, state.Dir())\n

            Rule: Break at natural points: function arguments, struct fields, chained calls. Long strings (URLs, struct tags) are the rare acceptable exception.

            ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#literal-whitespace","level":2,"title":"Literal Whitespace","text":"

            Test: TestNoLiteralWhitespace

            Bare whitespace string and byte literals (\"\\n\", \"\\r\\n\", \"\\t\") must not appear outside internal/config/token/. All other packages use the token constants.

            Before:

            output := strings.Join(lines, \"\\n\")\n

            After:

            output := strings.Join(lines, token.Newline)\n

            Rule: Whitespace literals are defined once in internal/config/token/. Use token.Newline, token.Tab, token.CRLF, etc.

            ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#magic-numeric-values","level":2,"title":"Magic Numeric Values","text":"

            Test: TestNoMagicValues

            Numeric literals in function bodies need constants, with narrow exceptions.

            Before:

            if len(entries) > 100 {\n    entries = entries[:100]\n}\n

            After:

            if len(entries) > config.MaxEntries {\n    entries = entries[:config.MaxEntries]\n}\n

            Exempt: 0, 1, -1, 2-10, strconv radix/bitsize args (10, 32, 64 in strconv.Parse*/Format*), octal permissions (caught separately by TestNoRawPermissions), and const/var definition sites.

            ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#inline-separators","level":2,"title":"Inline Separators","text":"

            Test: TestNoInlineSeparators

            strings.Join calls must use token constants for their separator argument, not string literals.

            Before:

            result := strings.Join(parts, \", \")\n

            After:

            result := strings.Join(parts, token.CommaSep)\n

            Rule: Separator strings live in internal/config/token/.

            ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#stuttery-function-names","level":2,"title":"Stuttery Function Names","text":"

            Test: TestNoStutteryFunctions

            Function names must not redundantly include their package name as a PascalCase word boundary. Go callers already write pkg.Function, so pkg.PkgFunction stutters.

            Before:

            // In package write\nfunc WriteJournal(cmd *cobra.Command, ...) {\n

            After:

            // In package write\nfunc Journal(cmd *cobra.Command, ...) {\n

            Exempt: Identity functions like write.Write / write.write.

            ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#predicate-naming-no-ishascan-prefix","level":2,"title":"Predicate Naming (No Is/Has/Can Prefix)","text":"

            Test: None (manual review convention)

            Exported methods that return bool must not use Is, Has, or Can prefixes. The predicate reads more naturally without them, especially at call sites where the package name provides context.

            Before:

            func IsCompleted(t *Task) bool { ... }\nfunc HasChildren(n *Node) bool { ... }\nfunc IsExemptPackage(path string) bool { ... }\n

            After:

            func Completed(t *Task) bool { ... }\nfunc Children(n *Node) bool { ... }  // or: ChildCount > 0\nfunc ExemptPackage(path string) bool { ... }\n

            Rule: Drop the prefix. Private helpers may use prefixes when it reads more naturally (isValid in a local context is fine). This convention applies to exported methods and package-level functions. See CONVENTIONS.md \"Predicates\" section.

            This is not yet enforced by an AST test; it requires semantic understanding of return types and naming intent that makes automated detection fragile. Apply during code review.

            ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#mixed-visibility","level":2,"title":"Mixed Visibility","text":"

            Test: TestNoMixedVisibility

            Files with exported functions must not also contain unexported functions. Public API and private helpers live in separate files.

            Before:

            load.go\n    func Load() { ... }        // exported\n    func parseHeader() { ... } // unexported, violation\n

            After:

            load.go\n    func Load() { ... }        // exported only\nparse.go\n    func parseHeader() { ... } // private helper\n

            Exempt: Files with exactly one function, doc.go, test files.

            ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#stray-errgo-files","level":2,"title":"Stray Err.Go Files","text":"

            Test: TestNoStrayErrFiles

            err.go files must only exist under internal/err/. Error constructors anywhere else create a broken-window pattern where contributors add local error definitions when they see a local err.go.

            Fix: Move the error constructor to internal/err/<domain>/.

            ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#cli-cmd-structure","level":2,"title":"CLI Cmd Structure","text":"

            Test: TestCLICmdStructure

            Each cmd/$sub/ directory under internal/cli/ may contain only cmd.go, run.go, doc.go, and test files. Extra .go files (helpers, output formatters, types) belong in the corresponding core/ subpackage.

            Before:

            internal/cli/doctor/cmd/root/\n    cmd.go\n    run.go\n    format.go   # violation: helper in cmd dir\n

            After:

            internal/cli/doctor/cmd/root/\n    cmd.go\n    run.go\ninternal/cli/doctor/core/format/\n    format.go\n    doc.go\n
            ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#desckey-namespace","level":2,"title":"DescKey Namespace","text":"

            Test: TestUseConstantsOnlyInCobraUse, TestDescKeyOnlyInLookupCalls, TestNoWrongNamespaceLookup

            Three tests enforce DescKey/Use constant discipline:

            1. Use* constants appear only in cobra Use: struct field assignments, never as arguments to desc.Text() or elsewhere.
            2. DescKey* constants are passed only to assets.CommandDesc(), assets.FlagDesc(), or desc.Text(), never to cobra Use:.
            3. No cross-namespace lookups: TextDescKey must not be passed to CommandDesc(), FlagDescKey must not be passed to Text(), etc.
            ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#yaml-examples-registry-linkage","level":2,"title":"YAML Examples / Registry Linkage","text":"

            Test: TestExamplesYAMLLinkage, TestRegistryYAMLLinkage

            Every key in examples.yaml and registry.yaml must match a known entry type constant. Prevents orphan entries that are never rendered.

            Fix: Delete the orphan YAML entry, or add the corresponding constant in config/entry/.

            ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#other-enforced-patterns","level":2,"title":"Other Enforced Patterns","text":"

            These tests follow the same fix approach: extract the operation to its designated package:

            Test Violation Fix TestNoNakedErrors fmt.Errorf/errors.New outside internal/err/ Add error constructor to internal/err/<domain>/ TestNoRawFileIO Direct os.ReadFile, os.Create, etc. Use io.SafeReadFile, io.SafeWriteFile, etc. TestNoRawLogging Direct fmt.Fprintf(os.Stderr, ...) Use log/warn.Warn() or log/event.Append() TestNoExecOutsideExecPkg exec.Command outside internal/exec/ Add command to internal/exec/<domain>/ TestNoCmdPrintOutsideWrite cmd.Print* outside internal/write/ Add output helper to internal/write/<domain>/ TestNoRawPermissions Octal literals (0644, 0755) Use config/fs.PermFile, config/fs.PermExec, etc. TestNoErrorsAs errors.As() Use errors.AsType() (generic, Go 1.23+) TestNoStringConcatPaths dir + \"/\" + file Use filepath.Join(dir, file)","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/audit-conventions/#general-fix-workflow","level":2,"title":"General Fix Workflow","text":"

            When an audit test fails:

            1. Read the error message. It includes file:line and a description of the violation.
            2. Find the matching section above. The test name maps directly to a section.
            3. Apply the pattern. Most fixes are mechanical: extract to the right package, rename a variable, or replace a literal with a constant.
            4. Run make test before committing. Audit tests run as part of go test ./internal/audit/.
            5. Don't add allowlist entries as a first resort. Fix the code. Allowlists exist only for genuinely unfixable cases (test-only exports, config packages that are definitionally exempt).
            ","path":["Reference","Code Conventions"],"tags":[]},{"location":"reference/comparison/","level":1,"title":"Tool Ecosystem","text":"","path":["Reference","Tool Ecosystem"],"tags":[]},{"location":"reference/comparison/#high-level-mental-model","level":2,"title":"High-Level Mental Model","text":"

            Many tools help AI think.

            ctx helps AI remember.

            • Not by storing thoughts,
            • but by preserving intent.
            ","path":["Reference","Tool Ecosystem"],"tags":[]},{"location":"reference/comparison/#how-ctx-differs-from-similar-tools","level":2,"title":"How ctx Differs from Similar Tools","text":"

            There are many tools in the AI ecosystem that touch parts of the context problem:

            • Some manage prompts.
            • Some retrieve data.
            • Some provide runtime context objects.
            • Some offer enterprise platforms.

            ctx focuses on a different layer entirely.

            This page explains where ctx fits, and where it intentionally does not.

            ","path":["Reference","Tool Ecosystem"],"tags":[]},{"location":"reference/comparison/#the-core-distinction","level":2,"title":"The Core Distinction","text":"

            Most tools treat context as input.

            ctx treats context as infrastructure.

            That single difference explains nearly all of ctx's design choices.

            Question Most tools ctx Where does context live? In prompts or APIs In files How long does it last? One request / one session Across time Who can read it? The model Humans and tools How is it updated? Implicitly Explicitly Is it inspectable? Rarely Always","path":["Reference","Tool Ecosystem"],"tags":[]},{"location":"reference/comparison/#prompt-management-tools","level":2,"title":"Prompt Management Tools","text":"

            Examples include:

            • prompt templates;
            • reusable system prompts;
            • prompt libraries;
            • prompt versioning tools.

            These tools help you start a session.

            They do not help you continue one.

            Prompt tools:

            • inject text at session start;
            • are ephemeral by design;
            • do not evolve with the project.

            ctx:

            • persists knowledge over time;
            • accumulates decisions and learnings;
            • makes the context part of the repository itself.

            Prompt tooling and ctx are complementary, not competing — they operate in different layers.

            ","path":["Reference","Tool Ecosystem"],"tags":[]},{"location":"reference/comparison/#retrieval-augmented-generation-rag","level":2,"title":"Retrieval-Augmented Generation (RAG)","text":"

            RAG systems typically:

            • index documents
            • embed text
            • retrieve chunks dynamically at runtime

            They are excellent for:

            • large knowledge bases
            • static documentation
            • reference material

            RAG answers questions like:

            \"What information might be relevant right now?\"

            ctx answers a different question:

            \"What have we already decided, learned, or committed to?\"

            Here are some key differences:

            RAG ctx Statistical relevance Intentional relevance Embedding-based File-based Opaque retrieval Explicit structure Runtime query Persistent memory

            ctx does not replace RAG. Instead, it defines a persistent context layer that RAG can optionally augment.

            RAG belongs to the data plane; ctx defines the context control plane.

            It focuses on project memory, not knowledge search.

            ","path":["Reference","Tool Ecosystem"],"tags":[]},{"location":"reference/comparison/#agent-frameworks","level":2,"title":"Agent Frameworks","text":"

            Agent frameworks often provide:

            • task loops
            • tool orchestration
            • planner/executor patterns
            • autonomous iteration

            These systems are powerful, but they typically assume that:

            • memory is external
            • context is injected
            • state is transient

            Agent frameworks answer:

            \"How should the agent act?\"

            ctx answers:

            \"What should the agent remember?\"

            Without persistent context, agents tend to:

            • rediscover decisions
            • repeat mistakes
            • lose architectural intent

            This is why ctx pairs well with autonomous loop workflows:

            • The loop provides iteration
            • ctx provides continuity

            Together, loops become cumulative instead of forgetful.

            ","path":["Reference","Tool Ecosystem"],"tags":[]},{"location":"reference/comparison/#sdk-level-context-objects","level":2,"title":"SDK-Level Context Objects","text":"

            Some SDKs expose \"context\" objects that exist:

            • inside a process
            • during a request
            • for the lifetime of a call chain

            These are extremely useful and completely different.

            SDK context objects:

            • are in-memory
            • disappear when the process ends
            • are not shared across sessions

            ctx:

            • survives process restarts
            • survives new chats
            • survives new days

            They share a name, not a purpose.

            ","path":["Reference","Tool Ecosystem"],"tags":[]},{"location":"reference/comparison/#enterprise-context-platforms","level":2,"title":"Enterprise Context Platforms","text":"

            Enterprise platforms often provide:

            • centralized context services
            • dashboards
            • access control
            • organizational knowledge layers

            These tools are designed for:

            • teams
            • governance
            • compliance
            • managed environments

            ctx is intentionally:

            • local-first: context lives next to your code, not behind a service boundary.
            • file-based: everything important is a markdown file you can read, diff, grep, and version-control.
            • single-binary core: the context persistence path (init, add, agent, status, drift, load, sync, compact, task, decision, learning, and their siblings) is a single Go binary with no required runtime dependencies. Optional integrations (ctx trace (needs git), ctx serve (needs zensical), the ctx Hub (needs a running hub), Claude Code plugin (needs claude)) are opt-in and each declares its dependency explicitly.
            • CLI-driven: every feature is reachable from the command line and scriptable.
            • developer-controlled: no auto-updating cloud service, no telemetry, no account to sign up for.

            The core ctx binary does not require:

            • a server
            • a database
            • an account
            • a SaaS backend
            • network connectivity (for core operations)

            ctx optimizes for individual and small-team workflows where context should live next to code, not behind a service boundary.

            ","path":["Reference","Tool Ecosystem"],"tags":[]},{"location":"reference/comparison/#specific-tool-comparisons","level":2,"title":"Specific Tool Comparisons","text":"

            Users often evaluate ctx against specific tools they already use. These comparisons clarify where responsibilities overlap, where they diverge, and where the tools are genuinely complementary.

            ","path":["Reference","Tool Ecosystem"],"tags":[]},{"location":"reference/comparison/#claude-code-memory-anthropic-auto-memory","level":3,"title":"Claude Code Memory / Anthropic Auto-Memory","text":"

            Anthropic's auto-memory is tool-managed memory (L2): the model decides what to remember, stores it automatically, and retrieves it implicitly. ctx is system memory (L3): humans and agents explicitly curate decisions, learnings, and tasks in inspectable files.

            Auto-memory is convenient - you do not configure anything. But it is also opaque: you cannot see what was stored, edit it precisely, or share it across tools. ctx files are plain Markdown in your repository, visible in diffs and code review.

            The two are complementary. ctx can absorb auto-memory as an input source (importing what the model remembered into structured context files) while providing the durable, inspectable layer that auto-memory lacks.

            ","path":["Reference","Tool Ecosystem"],"tags":[]},{"location":"reference/comparison/#cursorrules-clauderules","level":3,"title":".Cursorrules / .Claude/rules","text":"

            Static rule files (.cursorrules, .claude/rules/) declare conventions: coding style, forbidden patterns, preferred libraries. They are effective for what to do and load automatically at session start.

            ctx adds dimensions that rule files do not cover: architectural decisions with rationale, learnings discovered during development, active tasks, and a constitution that governs agent behavior. Critically, ctx context accumulates - each session can add to it, and token budgeting ensures only the most relevant context is injected.

            Use rule files for static conventions. Use ctx for evolving project memory.

            ","path":["Reference","Tool Ecosystem"],"tags":[]},{"location":"reference/comparison/#aider-read-watch","level":3,"title":"Aider --read / --watch","text":"

            Aider's --read flag injects file contents at session start; --watch reloads them on change. The concept is similar to ctx's \"load\" step: make the agent aware of specific files.

            The differences emerge beyond loading. Aider has no persistence model -- nothing the agent learns during a session is written back. There is no token budgeting (large files consume the full context window), no priority ordering across file types, and no structured format for decisions or learnings. ctx provides the full lifecycle: load, accumulate, persist, and budget.

            ","path":["Reference","Tool Ecosystem"],"tags":[]},{"location":"reference/comparison/#copilot-workspace","level":3,"title":"Copilot @Workspace","text":"

            GitHub Copilot's @workspace performs workspace-wide code search. It answers \"what code exists?\" - finding function definitions, usages, and file structure across the repository.

            ctx answers a different question: \"what did we decide?\" It stores architectural intent, not code indices. Copilot's workspace search and ctx's project memory are orthogonal; one finds code, the other preserves the reasoning behind it.

            ","path":["Reference","Tool Ecosystem"],"tags":[]},{"location":"reference/comparison/#cline-memory","level":3,"title":"Cline Memory","text":"

            Cline's memory bank stores session context within the Cline extension. The motivation is similar to ctx: help the agent remember across sessions.

            The key difference is portability. Cline memory is tied to Cline - it does not transfer to Claude Code, Cursor, Aider, or any other tool. ctx is tool-agnostic: context lives in plain files that any editor, agent, or script can read. Switching tools does not mean losing memory.

            ","path":["Reference","Tool Ecosystem"],"tags":[]},{"location":"reference/comparison/#when-ctx-is-a-good-fit","level":2,"title":"When ctx Is a Good Fit","text":"

            ctx works best when:

            • you want AI work to compound over time;
            • architectural decisions matter;
            • context must be inspectable;
            • humans and AI must share the same source of truth;
            • Git history should include why, not just what.
            ","path":["Reference","Tool Ecosystem"],"tags":[]},{"location":"reference/comparison/#when-ctx-is-not-the-right-tool","level":2,"title":"When ctx Is Not the Right Tool","text":"

            ctx is probably not what you want if:

            • you only need one-off prompts;
            • you rely exclusively on RAG;
            • you want autonomous agents without a human-readable state;
            • you require centralized enterprise control;
            • you want black-box memory systems.

            These are valid goals — just different ones.

            ","path":["Reference","Tool Ecosystem"],"tags":[]},{"location":"reference/comparison/#further-reading","level":2,"title":"Further Reading","text":"
            • You Can't Import Expertise: why project-specific context matters more than generic best practices
            ","path":["Reference","Tool Ecosystem"],"tags":[]},{"location":"reference/design-invariants/","level":1,"title":"Invariants","text":"","path":["Reference","Invariants"],"tags":[]},{"location":"reference/design-invariants/#the-system-explains-itself","level":1,"title":"The System Explains Itself","text":"

            These are the properties that must hold for any valid ctx implementation.

            • These are not features.
            • These are constraints.

            A change that violates an invariant is a category error, not an improvement.

            ","path":["Reference","Invariants"],"tags":[]},{"location":"reference/design-invariants/#cognitive-state-tiers","level":2,"title":"Cognitive State Tiers","text":"

            ctx distinguishes between three forms of state:

            • Authoritative state: Versioned, inspectable artifacts that define intent and survive time.
            • Delivery views: Deterministic assemblies of the authoritative state for a specific budget or workflow.
            • Ephemeral working state: Local, transient, or sensitive data that assists interaction but does not define system truth.

            The invariants below apply primarily to the authoritative cognitive state.

            ","path":["Reference","Invariants"],"tags":[]},{"location":"reference/design-invariants/#1-cognitive-state-is-explicit","level":2,"title":"1. Cognitive State Is Explicit","text":"

            All authoritative context lives in artifacts that can be inspected, reviewed, and versioned.

            If something is important, it must exist as a file: not only in a prompt, a chat, or a model's hidden memory.

            ","path":["Reference","Invariants"],"tags":[]},{"location":"reference/design-invariants/#2-assembly-is-reproducible","level":2,"title":"2. Assembly Is Reproducible","text":"

            Given the same:

            • repository state,
            • configuration,
            • and inputs,

            context assembly produces the same result.

            Heuristics may rank or filter for delivery under constraints.

            They do not alter the authoritative state.

            ","path":["Reference","Invariants"],"tags":[]},{"location":"reference/design-invariants/#3-the-authoritative-state-is-human-readable","level":2,"title":"3. The Authoritative State Is Human-Readable","text":"

            The authoritative cognitive state must be stored in formats that a human can:

            • read,
            • diff,
            • review,
            • and edit directly.

            Sensitive working memory may be encrypted at rest. However, encryption must not become the only representation of authoritative knowledge.

            ","path":["Reference","Invariants"],"tags":[]},{"location":"reference/design-invariants/#4-artifacts-outlive-sessions","level":2,"title":"4. Artifacts Outlive Sessions","text":"

            Sessions are transient.

            Knowledge persists.

            Reasoning, decisions, and outcomes must remain available after the interaction that produced them has ended.

            ","path":["Reference","Invariants"],"tags":[]},{"location":"reference/design-invariants/#5-authority-is-user-defined","level":2,"title":"5. Authority Is User-Defined","text":"

            What enters the authoritative context is an explicit human decision.

            Models may suggest.

            Automation may assist.

            Selection is never implicit.

            ","path":["Reference","Invariants"],"tags":[]},{"location":"reference/design-invariants/#6-operation-is-local-first","level":2,"title":"6. Operation Is Local-First","text":"

            The core system must function without requiring network access or a remote service.

            External systems may extend ctx.

            They must not be required for its operation.

            ","path":["Reference","Invariants"],"tags":[]},{"location":"reference/design-invariants/#7-versioning-is-the-memory-model","level":2,"title":"7. Versioning Is the Memory Model","text":"

            The evolution of the authoritative cognitive state must be:

            • preserved,
            • inspectable,
            • and branchable.

            Ephemeral and sensitive working state may use different retention and diff strategies by design.

            Understanding includes understanding how we arrived here.

            ","path":["Reference","Invariants"],"tags":[]},{"location":"reference/design-invariants/#8-structure-enables-scale","level":2,"title":"8. Structure Enables Scale","text":"

            Unstructured accumulation is not memory.

            Authoritative cognitive state must have a defined layout that:

            • communicates intent,
            • supports navigation,
            • and prevents drift.
            ","path":["Reference","Invariants"],"tags":[]},{"location":"reference/design-invariants/#9-verification-is-the-scoreboard","level":2,"title":"9. Verification Is the Scoreboard","text":"

            Claims without recorded outcomes are noise.

            Reality (observed and captured) is the only signal that compounds.

            This invariant defines a required direction:

            The authoritative state must be able to record expectation and result.

            ","path":["Reference","Invariants"],"tags":[]},{"location":"reference/design-invariants/#10-capture-once-reuse-indefinitely","level":2,"title":"10. Capture Once, Reuse Indefinitely","text":"

            Work that has already produced understanding must not be re-derived from scratch.

            Explored paths, rejected options, and validated conclusions are permanent assets.

            ","path":["Reference","Invariants"],"tags":[]},{"location":"reference/design-invariants/#11-policies-are-encoded-not-remembered","level":2,"title":"11. Policies Are Encoded, Not Remembered","text":"

            Alignment must not depend on recall or goodwill.

            Constraints that matter must exist in machine-readable form and participate in context assembly.

            ","path":["Reference","Invariants"],"tags":[]},{"location":"reference/design-invariants/#12-the-system-explains-itself","level":2,"title":"12. The System Explains Itself","text":"

            From the repository state alone it must be possible to determine:

            • what was authoritative,
            • what constraints applied.

            Delivery views may be optimized.

            They must not become the only explanation.

            ","path":["Reference","Invariants"],"tags":[]},{"location":"reference/design-invariants/#non-goals","level":1,"title":"Non-Goals","text":"

            To avoid category errors, ctx does not attempt to be:

            • a skill,
            • a prompt management tool,
            • a chat history viewer,
            • an autonomous agent runtime,
            • a vector database,
            • a hosted memory service.

            Such systems may integrate with ctx.

            They do not define it.

            ","path":["Reference","Invariants"],"tags":[]},{"location":"reference/design-invariants/#implications-for-contributions","level":1,"title":"Implications for Contributions","text":"

            Valid contributions:

            • strengthen an invariant,
            • reduce the cost of maintaining an invariant,
            • or extend the system without violating invariants.

            Invalid contributions:

            • introduce hidden authoritative state,
            • replace reproducible assembly with non-reproducible behavior,
            • make core operation depend on external services,
            • reduce human inspectability of authoritative state,
            • or bypass explicit user authority over what becomes authoritative.
            ","path":["Reference","Invariants"],"tags":[]},{"location":"reference/design-invariants/#the-contract","level":1,"title":"The Contract","text":"

            Everything else (commands, skills, layouts, integrations, optimizations) is an implementation detail.

            These invariants are the system.

            ","path":["Reference","Invariants"],"tags":[]},{"location":"reference/scratchpad/","level":1,"title":"Scratchpad","text":"","path":["Reference","Scratchpad"],"tags":[]},{"location":"reference/scratchpad/#what-is-ctx-scratchpad","level":2,"title":"What Is ctx Scratchpad?","text":"

            A one-liner scratchpad, encrypted at rest, synced via git.

            Quick notes that don't fit decisions, learnings, or tasks: reminders, intermediate values, sensitive tokens, working memory during debugging. Entries are numbered, reorderable, and persist across sessions.

            ","path":["Reference","Scratchpad"],"tags":[]},{"location":"reference/scratchpad/#encrypted-by-default","level":2,"title":"Encrypted by Default","text":"

            Scratchpad entries are encrypted with AES-256-GCM before touching the disk.

            Component Path Git status Encryption key ~/.ctx/.ctx.key User-level, 0600 permissions Encrypted data .context/scratchpad.enc Committed

            The key is generated automatically during ctx init (256-bit via crypto/rand) and stored at ~/.ctx/.ctx.key. One key per machine, shared across all projects.

            The ciphertext format is [12-byte nonce][ciphertext+tag]. No external dependencies: Go stdlib only.

            Because the key is .gitignored and the data is committed, you get:

            • At-rest encryption: the .enc file is opaque without the key
            • Git sync: push/pull the encrypted file like any other tracked file
            • Key separation: the key never leaves the machine unless you copy it
            ","path":["Reference","Scratchpad"],"tags":[]},{"location":"reference/scratchpad/#commands","level":2,"title":"Commands","text":"Command Purpose ctx pad List all entries (numbered 1-based) ctx pad show N Output raw text of entry N (no prefix, pipe-friendly) ctx pad add \"text\" Append a new entry ctx pad rm ID [ID...] Remove entries by stable ID (supports ranges: 3-5) ctx pad edit N \"text\" Replace entry N with new text ctx pad edit N --append \"text\" Append text to the end of entry N ctx pad edit N --prepend \"text\" Prepend text to the beginning of entry N ctx pad edit N --tag tagname Add a tag to entry N ctx pad add TEXT --file PATH Ingest a file as a blob entry (TEXT is the label) ctx pad show N --out PATH Write decoded blob content to a file ctx pad normalize Reassign entry IDs as 1..N ctx pad mv N M Move entry from position N to position M ctx pad resolve Show both sides of a merge conflict for resolution ctx pad import FILE Bulk-import lines from a file (or stdin with -) ctx pad import --blob DIR Import directory files as blob entries ctx pad export [DIR] Export all blob entries to a directory as files ctx pad merge FILE... Merge entries from other scratchpad files into current ctx pad --tag TAG List entries filtered by tag (prefix with ~ to exclude) ctx pad tags List all tags with counts ctx pad tags --json List all tags with counts as JSON

            All commands decrypt on read, operate on plaintext in memory, and re-encrypt on write. The key file is never printed to stdout.

            For blob entries, --append, --prepend, and --tag modify the label while preserving the blob data.

            ","path":["Reference","Scratchpad"],"tags":[]},{"location":"reference/scratchpad/#examples","level":3,"title":"Examples","text":"
            # Add a note\nctx pad add \"check DNS propagation after deploy\"\n\n# List everything\nctx pad\n#   1. check DNS propagation after deploy\n#   2. staging API key: sk-test-abc123\n\n# Show raw text (for piping)\nctx pad show 2\n# sk-test-abc123\n\n# Compose entries\nctx pad edit 1 --append \"$(ctx pad show 2)\"\n\n# Reorder\nctx pad mv 2 1\n\n# Clean up (IDs are stable; they don't shift when entries are deleted)\nctx pad rm 2\n
            ","path":["Reference","Scratchpad"],"tags":[]},{"location":"reference/scratchpad/#tags","level":2,"title":"Tags","text":"

            Entries can contain #word tags for lightweight categorization. Tags are convention-based: any #word token in an entry's text is a tag. No special syntax to add or remove them; use the existing add and edit commands.

            # Add tagged entries\nctx pad add \"check DNS propagation #later\"\nctx pad add \"deploy hotfix #urgent\"\nctx pad add \"review PR #later #ci\"\n\n# Filter by tag\nctx pad --tag later\n#   1. check DNS propagation #later\n#   3. review PR #later #ci\n\n# Exclude a tag\nctx pad --tag ~later\n#   2. deploy hotfix #urgent\n\n# Multiple filters (AND logic)\nctx pad --tag later --tag ci\n#   3. review PR #later #ci\n\n# List all tags with counts\nctx pad tags\n# ci       1\n# later    2\n# urgent   1\n\n# JSON output\nctx pad tags --json\n# [{\"tag\":\"ci\",\"count\":1},{\"tag\":\"later\",\"count\":2},{\"tag\":\"urgent\",\"count\":1}]\n\n# Add a tag to an existing entry\nctx pad edit 1 --tag done\n\n# Combine with other operations\nctx pad edit 1 --append \"checked\" --tag done\n\n# Remove a tag (replace entry text without the tag)\nctx pad edit 1 \"check DNS propagation\"\n

            Entry IDs are stable; they don't shift when other entries are deleted, so ctx pad rm 3 always targets the same entry. Use ctx pad normalize to reassign IDs as 1..N if gaps bother you. Tags are case-sensitive and support letters, digits, hyphens, and underscores (#high-priority, #v2, #my_tag).

            For blob entries, tags are extracted from the label only.

            ","path":["Reference","Scratchpad"],"tags":[]},{"location":"reference/scratchpad/#bulk-import-and-export","level":2,"title":"Bulk Import and Export","text":"

            Import lines from a file in bulk (each non-empty line becomes an entry):

            # Import from a file\nctx pad import notes.txt\n\n# Import from stdin\ngrep TODO *.go | ctx pad import -\n

            Export all blob entries to a directory as files:

            # Export to a directory\nctx pad export ./ideas\n\n# Preview without writing\nctx pad export --dry-run\n\n# Overwrite existing files\nctx pad export --force ./backup\n
            ","path":["Reference","Scratchpad"],"tags":[]},{"location":"reference/scratchpad/#merging-scratchpads","level":2,"title":"Merging Scratchpads","text":"

            Combine entries from other scratchpad files into your current pad. Useful when merging work from parallel worktrees, other machines, or teammates:

            # Merge from a worktree's encrypted scratchpad\nctx pad merge worktree/.context/scratchpad.enc\n\n# Merge from multiple sources (encrypted and plaintext)\nctx pad merge pad-a.enc notes.md\n\n# Merge a foreign encrypted pad using its key\nctx pad merge --key /other/.ctx.key foreign.enc\n\n# Preview without writing\nctx pad merge --dry-run pad-a.enc pad-b.md\n

            Each input file is auto-detected as encrypted or plaintext: decryption is attempted first, and on failure the file is parsed as plain text. Entries are deduplicated by exact content, so running merge twice with the same file is safe.

            ","path":["Reference","Scratchpad"],"tags":[]},{"location":"reference/scratchpad/#file-blobs","level":2,"title":"File Blobs","text":"

            The scratchpad can store small files (up to 64 KB) as blob entries. Files are base64-encoded and stored with a human-readable label.

            # Ingest a file: first argument is the label\nctx pad add \"deploy config\" --file ./deploy.yaml\n\n# Listing shows label with a [BLOB] marker\nctx pad\n#   1. check DNS propagation after deploy\n#   2. deploy config [BLOB]\n\n# Extract to a file\nctx pad show 2 --out ./recovered.yaml\n\n# Or print decoded content to stdout\nctx pad show 2\n

            Blob entries are encrypted identically to text entries. The internal format is label:::base64data. You never need to construct this manually.

            Constraint Value Max file size (pre-encoding) 64 KB Storage format label:::base64(content) Display label [BLOB] in listings

            When Should You Use Blobs

            Blobs are for small files you want encrypted and portable: config snippets, key fragments, deployment manifests, test fixtures. For anything larger than 64 KB, use the filesystem directly.

            ","path":["Reference","Scratchpad"],"tags":[]},{"location":"reference/scratchpad/#using-with-ai","level":2,"title":"Using with AI","text":"

            Use Natural Language

            As in many ctx features, the ctx scratchpad can also be used with natural language. You don't have to memorize the CLI commands.

            CLI gives you \"precision\", whereas natural language gives you flow.

            The /ctx-pad skill maps natural language to ctx pad commands. You don't need to remember the syntax:

            You say What happens \"jot down: check DNS after deploy\" ctx pad add \"check DNS after deploy\" \"show my scratchpad\" ctx pad \"delete the third entry\" ctx pad rm 3 \"update entry 2 to include the new endpoint\" ctx pad edit 2 \"...\" \"move entry 4 to the top\" ctx pad mv 4 1 \"import my notes from notes.txt\" ctx pad import notes.txt \"export all blobs to ./backup\" ctx pad export ./backup \"merge the scratchpad from the worktree\" ctx pad merge worktree/.context/scratchpad.enc

            The skill handles the translation. You describe what you want in plain English; the agent picks the right command.

            ","path":["Reference","Scratchpad"],"tags":[]},{"location":"reference/scratchpad/#worktrees","level":2,"title":"Worktrees","text":"

            The encryption key lives at ~/.ctx/.ctx.key (outside the project directory). Because all worktrees on the same machine share this path, ctx pad works in worktrees automatically - no special setup needed.

            ","path":["Reference","Scratchpad"],"tags":[]},{"location":"reference/scratchpad/#key-distribution","level":2,"title":"Key Distribution","text":"

            The encryption key (~/.ctx/.ctx.key) stays on the machine where it was generated. ctx never transmits it.

            To share the scratchpad across machines:

            1. Copy the key manually: scp, USB drive, password manager.
            2. Push/pull the .enc file via git as usual.
            3. Both machines can now read and write the same scratchpad.

            Never Commit the Key

            The key is .gitignored by default. If you override this, anyone with repo access can decrypt your scratchpad.

            Treat the key like an SSH private key.

            See the Syncing Scratchpad Notes Across Machines recipe for a step-by-step walkthrough.

            ","path":["Reference","Scratchpad"],"tags":[]},{"location":"reference/scratchpad/#plaintext-override","level":2,"title":"Plaintext Override","text":"

            For projects where encryption is unnecessary, disable it in .ctxrc:

            scratchpad_encrypt: false\n

            In plaintext mode:

            • Entries are stored in .context/scratchpad.md instead of .enc.
            • No key is generated or required.
            • All ctx pad commands work identically.
            • The file is human-readable and diffable.

            When Should You Use Plaintext

            Plaintext mode is useful for non-sensitive projects, solo work where encryption adds friction, or when you want scratchpad entries visible in git diff.

            ","path":["Reference","Scratchpad"],"tags":[]},{"location":"reference/scratchpad/#when-should-you-use-scratchpad-versus-context-files","level":2,"title":"When Should You Use Scratchpad versus Context Files","text":"Use case Where it goes Temporary reminders (\"check X after deploy\") Scratchpad Working values during debugging Scratchpad Sensitive tokens or API keys (short-term) Scratchpad Quick notes that don't fit anywhere else Scratchpad Items that are not directly relevant to the project Scratchpad Things that you want to keep near, but also hidden Scratchpad Work items with completion tracking TASKS.md Trade-offs with rationale DECISIONS.md Reusable lessons with context/lesson/application LEARNINGS.md Codified patterns and standards CONVENTIONS.md

            Rule of thumb:

            • If it needs structure or will be referenced months later, use a context file (i.e. DECISIONS.md, LEARNINGS.md, TASKS.md).
            • If it is working memory for the current session or week, use the scratchpad.
            ","path":["Reference","Scratchpad"],"tags":[]},{"location":"reference/scratchpad/#see-also","level":2,"title":"See Also","text":"
            • Syncing Scratchpad Notes Across Machines: Key distribution, push/pull workflow, merge conflict resolution
            • Using the Scratchpad: Natural language examples, blob workflow, when to use scratchpad vs context files
            • Context Files: Format and conventions for all .context/ files
            • Security: Trust model and permission hygiene
            ","path":["Reference","Scratchpad"],"tags":[]},{"location":"reference/session-journal/","level":1,"title":"Session Journal","text":"

            Important Security Note

            Session journals contain sensitive data such as file contents, commands, API keys, internal discussions, error messages with stack traces, and more.

            The .context/journal-site/ and .context/journal-obsidian/ directories MUST be .gitignored.

            • DO NOT host your journal publicly.
            • DO NOT commit your journal files to version control.
            ","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#browse-your-session-history","level":2,"title":"Browse Your Session History","text":"

            ctx's Session Journal turns your AI coding sessions into a browsable, searchable, and editable archive.

            ","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#quick-start","level":2,"title":"Quick Start","text":"

            After using ctx for a couple of sessions, you can generate a journal site with:

            # Import all sessions to markdown\nctx journal import --all\n\n# Generate and serve the journal site\nctx journal site --serve\n

            Then open http://localhost:8000 to browse your sessions.

            ","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#what-you-get","level":2,"title":"What You Get","text":"

            The Session Journal gives you:

            • Browsable history: Navigate through all your AI sessions by date
            • Full conversations: See every message, tool use, and result
            • Token usage: Track how many tokens each session consumed
            • Search: Find sessions by content, project, or date
            • Dark mode: Easy on the eyes for late-night archaeology

            Each session page includes the following sections:

            Section Content Metadata Date, time, duration, model, project, git branch Summary Space for your notes (editable) Tool Usage Which tools were used and how often Conversation Full transcript with timestamps","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#the-workflow","level":2,"title":"The Workflow","text":"","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#1-import-sessions","level":3,"title":"1. Import Sessions","text":"
            # Import all sessions from current project (only new files)\nctx journal import --all\n\n# Import sessions from all projects\nctx journal import --all --all-projects\n\n# Import a specific session by ID (always writes)\nctx journal import abc123\n\n# Preview what would be imported\nctx journal import --all --dry-run\n\n# Re-import existing (regenerates conversation, preserves YAML frontmatter)\nctx journal import --all --regenerate\n\n# Discard frontmatter during regeneration\nctx journal import --all --regenerate --keep-frontmatter=false -y\n

            Imported sessions go to .context/journal/ as editable Markdown files.

            ","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#2-generate-the-site","level":3,"title":"2. Generate the Site","text":"
            # Generate site structure\nctx journal site\n\n# Generate and build static HTML\nctx journal site --build\n\n# Generate and serve locally\nctx journal site --serve\n\n# Custom output directory\nctx journal site --output ~/my-journal\n

            The site is generated in .context/journal-site/ by default.

            ","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#3-browse-and-search","level":3,"title":"3. Browse and Search","text":"

            Open http://localhost:8000 after running --serve.

            • Use the sidebar to navigate by date
            • Use search (/ key) to find specific content
            • Click any session to see the full conversation
            ","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#editing-sessions","level":2,"title":"Editing Sessions","text":"

            Imported sessions are plain Markdown in .context/journal/. You can:

            • Add summaries: Fill in the ## Summary section
            • Add notes: Insert your own commentary anywhere
            • Highlight key moments: Use Markdown formatting
            • Delete noise: Remove irrelevant tool outputs

            After editing, regenerate the site:

            ctx journal site --serve\n
            Safe by Default

            Running ctx journal import --all only imports new sessions. Existing files are skipped entirely (your edits and enrichments are never touched).

            Use --regenerate to re-import existing files. Conversation content is regenerated, but YAML frontmatter (topics, type, outcome, etc.) is preserved. You'll be prompted before any existing files are overwritten; add -y to skip the prompt.

            Use --keep-frontmatter=false to discard enriched frontmatter during regeneration.

            Locked entries (via ctx journal lock) are always skipped, regardless of flags. If you prefer to add locked: true to frontmatter during enrichment, run ctx journal sync to propagate the lock state to .state.json.

            ","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#large-sessions","level":2,"title":"Large Sessions","text":"

            Sessions with many messages (200+) are automatically split into multiple parts for better browser performance. Navigation links connect the parts:

            session-abc123.md      (Part 1 of 3)\nsession-abc123-p2.md   (Part 2 of 3)\nsession-abc123-p3.md   (Part 3 of 3)\n
            ","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#suggestion-sessions","level":2,"title":"Suggestion Sessions","text":"

            Claude Code generates \"suggestion\" sessions for auto-complete prompts. These are separated in the index under a \"Suggestions\" section to keep your main session list focused.

            ","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#enriching-journal-entries","level":2,"title":"Enriching Journal Entries","text":"

            Raw imported sessions contain basic metadata (date, time, project) but lack the structured information needed for effective search, filtering, and analysis. Journal enrichment adds semantic metadata that transforms a flat archive into a searchable knowledge base.

            ","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#why-enrich","level":3,"title":"Why Enrich?","text":"

            Without enrichment, you have timestamps and raw conversations. With enrichment:

            • Find sessions by topic: \"Show me all auth-related sessions\"
            • Filter by outcome: \"What did I abandon vs complete?\"
            • Track technology usage: \"When did I last work with PostgreSQL?\"
            • Identify key files: Jump directly to the files discussed
            • Get summaries: Understand what happened without reading transcripts
            ","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#the-frontmatter-schema","level":3,"title":"The Frontmatter Schema","text":"

            Enriched entries begin with YAML frontmatter:

            ---\ntitle: \"Implement caching layer\"\ndate: 2026-01-27\ntype: feature\noutcome: completed\ntopics:\n  - caching\n  - performance\ntechnologies:\n  - go\n  - redis\nlibraries:\n  - go-redis/redis\nkey_files:\n  - internal/cache/redis.go\n  - internal/cache/memory.go\n---\n
            Field Required Description title Yes Descriptive title (not the session slug) date Yes Session date (YYYY-MM-DD) type Yes Session type (see below) outcome Yes How the session ended (see below) topics No Subject areas discussed technologies No Languages, databases, frameworks libraries No Specific packages or libraries used key_files No Important files created or modified

            Type values:

            Type When to use feature Building new functionality bugfix Fixing broken behavior refactor Restructuring without behavior change exploration Research, learning, experimentation debugging Investigating issues documentation Writing docs, comments, README

            Outcome values:

            Outcome Meaning completed Goal achieved partial Some progress, work continues abandoned Stopped pursuing this approach blocked Waiting on external dependency","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#using-ctx-journal-enrich","level":3,"title":"Using /ctx-journal-enrich","text":"

            The /ctx-journal-enrich skill automates enrichment by analyzing conversation content and proposing metadata.

            Invoke by session identifier:

            /ctx-journal-enrich twinkly-stirring-kettle\n/ctx-journal-enrich twinkly\n/ctx-journal-enrich 2026-01-24\n/ctx-journal-enrich 76fe2ab9\n

            The skill will:

            1. Check if locked - locked entries are skipped (same as export);
            2. Find the matching journal file;
            3. Read and analyze the conversation;
            4. Propose frontmatter (type, topics, outcome, technologies);
            5. Generate a 2-3 sentence summary;
            6. Extract decisions, learnings, and tasks mentioned;
            7. Show a diff and ask for confirmation before writing.
            ","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#before-and-after","level":3,"title":"Before and After","text":"

            Before enrichment:

            # twinkly-stirring-kettle\n\n**ID**: abc123-def456\n**Date**: 2026-01-24\n**Time**: 14:30:00\n...\n\n## Summary\n\n[Add your summary of this session]\n\n## Conversation\n...\n

            After enrichment:

            ---\ntitle: \"Add Redis caching to API endpoints\"\ndate: 2026-01-24\ntype: feature\noutcome: completed\ntopics:\n  - caching\n  - api-performance\ntechnologies:\n  - go\n  - redis\nkey_files:\n  - internal/api/middleware/cache.go\n  - internal/cache/redis.go\n---\n\n# twinkly-stirring-kettle\n\n**ID**: abc123-def456\n**Date**: 2026-01-24\n**Time**: 14:30:00\n...\n\n## Summary\n\nImplemented Redis-based caching middleware for frequently accessed API endpoints.\nAdded cache invalidation on writes and configurable TTL per route. Reduced\n the average response time from 200ms to 15ms for cached routes.\n\n## Decisions\n\n* Used Redis over in-memory cache for horizontal scaling\n* Chose per-route TTL configuration over global setting\n\n## Learnings\n\n* Redis WATCH command prevents race conditions during cache invalidation\n\n## Conversation\n...\n
            ","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#enrichment-and-site-generation","level":3,"title":"Enrichment and Site Generation","text":"

            The journal site generator uses enriched metadata for better organization:

            • Titles appear in navigation instead of slugs
            • Summaries provide context in the index
            • Topics enable filtering (when using search)
            • Types allow grouping by work category

            Future improvements will add topic-based navigation and outcome filtering to the generated site.

            ","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#batch-enrichment","level":3,"title":"Batch Enrichment","text":"

            To enrich multiple sessions, process them one at a time:

            # List unenriched sessions (those without frontmatter)\ngrep -L \"^---$\" .context/journal/*.md | head -10\n

            Then run /ctx-journal-enrich on each. Enrichment is intentionally interactive to ensure accuracy.

            ","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#obsidian-vault-export","level":2,"title":"Obsidian Vault Export","text":"

            If you use Obsidian for knowledge management, you can export your journal as an Obsidian vault instead of (or alongside) the static site:

            ctx journal obsidian\n

            This generates a vault in .context/journal-obsidian/ with:

            • Wikilinks ([[target|display]]) instead of Markdown links
            • MOC pages (Map of Content) for topics, key files, and session types
            • Related sessions footer per entry: links to entries sharing the same topics
            • Transformed frontmatter: topics renamed to tags (Obsidian-recognized), aliases added from title for search
            • Graph-optimized structure: MOC hubs and cross-linked entries create dense graph connectivity

            To use: open the output directory in Obsidian (\"Open folder as vault\").

            # Custom output directory\nctx journal obsidian --output ~/vaults/ctx-journal\n

            Static Site vs Obsidian Vault

            Use ctx journal site when you want a web-browsable archive with search and dark mode. Use ctx journal obsidian when you want graph view, backlinks, and tag-based navigation inside Obsidian. Both use the same enriched source entries: you can generate both.

            ","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#full-pipeline","level":2,"title":"Full Pipeline","text":"

            The complete journal workflow has four stages. Each is idempotent: safe to re-run, and stages skip already-processed entries.

            import → enrich → rebuild\n
            Stage Command / Skill What it does Skips if Import ctx journal import --all Converts session JSONL to Markdown File already exists (safe default) Enrich /ctx-journal-enrich Adds frontmatter, summaries, topics Frontmatter already present Rebuild ctx journal site --build Generates static HTML site (never) Obsidian ctx journal obsidian Generates Obsidian vault with wikilinks (never)

            One-Command Pipeline

            /ctx-journal-enrich-all handles import automatically - it detects unimported sessions and imports them before enriching. You only need to run ctx journal site --build afterward.

            ","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#using-make-journal","level":3,"title":"Using make journal","text":"

            If your project includes Makefile.ctx (deployed by ctx init), the first and last stages are combined:

            make journal           # import + rebuild\n

            After it runs, it reminds you to enrich in Claude Code:

            Next steps (in Claude Code):\n  /ctx-journal-enrich-all # imports if needed + adds metadata per entry\n\nThen re-run: make journal\n

            Rendering Issues?

            If individual entries have rendering problems (broken fences, malformed lists), check the programmatic normalization in the import pipeline. Most cases are handled automatically during ctx journal import.

            ","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#tips","level":2,"title":"Tips","text":"

            Daily workflow:

            # Import, browse, then enrich in Claude Code\nmake journal && make journal-serve\n# Then in Claude Code: /ctx-journal-enrich <session>\n

            After a productive session:

            # Import just that session and add notes\nctx journal import <session-id>\n# Edit .context/journal/<session>.md\n# Regenerate: ctx journal site\n

            Searching across all sessions:

            # Use grep on the journal directory\ngrep -r \"authentication\" .context/journal/\n

            ","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#requirements","level":2,"title":"Requirements","text":"Use pipx for zensical

            pip install zensical may install a non-functional stub on system Python. Using venv has other issues too.

            These issues are especially common on macOS.

            Use pipx install zensical, which creates an isolated environment and handles Python version management automatically.

            The journal site uses zensical for static site generation:

            pipx install zensical\n
            ","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/session-journal/#see-also","level":2,"title":"See Also","text":"
            • ctx journal: Session discovery and listing
            • ctx journal site: Static site generation
            • ctx journal obsidian: Obsidian vault export
            • Context Files: The .context/ directory structure
            ","path":["Reference","Session Journal"],"tags":[]},{"location":"reference/skills/","level":1,"title":"Skills","text":"","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#skills","level":2,"title":"Skills","text":"

            Skills are slash commands that run inside your AI assistant (e.g., /ctx-next), as opposed to CLI commands that run in your terminal (e.g., ctx status).

            Skills give your agent structured workflows: It knows what to read, what to run, and when to ask. Most wrap one or more ctx CLI commands with opinionated behavior on top.

            Skills Are Best Used Conversationally

            The beauty of ctx is that it's designed to be intuitive and conversational, allowing you to interact with your AI assistant naturally. That's why you don't have to memorize many of these skills.

            See the Prompting Guide for natural-language triggers that invoke these skills conversationally.

            However, when you need more precise control, you have the option to invoke the relevant skills directly.

            ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#all-skills","level":2,"title":"All Skills","text":"Skill Description Type /ctx-remember Recall project context and present structured readback user-invocable /ctx-wrap-up End-of-session context persistence ceremony user-invocable /ctx-status Show context summary with interpretation user-invocable /ctx-agent Load full context packet for AI consumption user-invocable /ctx-next Suggest 1-3 concrete next actions with rationale user-invocable /ctx-commit Commit with integrated context persistence user-invocable /ctx-reflect Pause and reflect on session progress user-invocable /ctx-task-add Add actionable task to TASKS.md user-invocable /ctx-decision-add Record architectural decision with rationale user-invocable /ctx-learning-add Record gotchas and lessons learned user-invocable /ctx-convention-add Record coding convention for consistency user-invocable /ctx-archive Archive completed tasks from TASKS.md user-invocable /ctx-pad Manage encrypted scratchpad entries user-invocable /ctx-history Browse and import AI session history user-invocable /ctx-journal-enrich Enrich single journal entry with metadata user-invocable /ctx-journal-enrich-all Full journal pipeline: export if needed, then batch-enrich user-invocable /ctx-blog Generate blog post draft from project activity user-invocable /ctx-blog-changelog Generate themed blog post from a commit range user-invocable /ctx-consolidate Consolidate redundant learnings or decisions user-invocable /ctx-drift Detect and fix context drift user-invocable /ctx-prompt Apply, list, and manage saved prompt templates user-invocable /ctx-prompt-audit Analyze prompting patterns for improvement user-invocable /ctx-link-check Audit docs for dead internal and external links user-invocable /ctx-permission-sanitize Audit Claude Code permissions for security risks user-invocable /ctx-brainstorm Structured design dialogue before implementation user-invocable 
/ctx-spec Scaffold a feature spec from a project template user-invocable /ctx-plan-import Import Claude Code plan files into project specs user-invocable /ctx-implement Execute a plan step-by-step with verification user-invocable /ctx-loop Generate autonomous loop script user-invocable /ctx-worktree Manage git worktrees for parallel agents user-invocable /ctx-architecture Build and maintain architecture maps user-invocable /ctx-architecture-failure-analysis Adversarial failure analysis for correctness bugs user-invocable /ctx-remind Manage session-scoped reminders user-invocable /ctx-doctor Troubleshoot ctx behavior with health checks and event analysis user-invocable /ctx-skill-audit Audit skills against Anthropic prompting best practices user-invocable /ctx-skill-create Create, improve, and test skills user-invocable /ctx-pause Pause context hooks for this session user-invocable /ctx-resume Resume context hooks after a pause user-invocable","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#session-lifecycle","level":2,"title":"Session Lifecycle","text":"

            Skills for starting, running, and ending a productive session.

            Session Ceremonies

            Two skills in this group are ceremony skills: /ctx-remember (session start) and /ctx-wrap-up (session end). Unlike other skills that work conversationally, these should be invoked as explicit slash commands for completeness. See Session Ceremonies.

            ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-remember","level":3,"title":"/ctx-remember","text":"

            Recall project context and present a structured readback. Ceremony skill: invoke explicitly at session start.

            Wraps: ctx agent --budget 4000, ctx journal source --limit 3, reads TASKS.md, DECISIONS.md, LEARNINGS.md

            See also: Session Ceremonies, The Complete Session

            ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-status","level":3,"title":"/ctx-status","text":"

            Show context summary (files, token budget, tasks, recent activity) with interpreted suggestions.

            Wraps: ctx status [--verbose] [--json]

            See also: The Complete Session, ctx status CLI

            ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-agent","level":3,"title":"/ctx-agent","text":"

            Load the full context packet optimized for AI consumption. Also runs automatically via the PreToolUse hook with cooldown.

            Wraps: ctx agent [--budget] [--format] [--cooldown] [--session]

            See also: The Complete Session, ctx agent CLI

            ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-next","level":3,"title":"/ctx-next","text":"

            Suggest 1-3 concrete next actions ranked by priority, momentum, and unblocked status.

            Wraps: reads TASKS.md, ctx journal source --limit 3

            See also: The Complete Session, Tracking Work Across Sessions

            ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-commit","level":3,"title":"/ctx-commit","text":"

            Commit code with integrated context persistence: pre-commit checks, staged files, Co-Authored-By trailer, and a post-commit prompt to capture decisions and learnings.

            Wraps: git add, git commit, optionally chains to /ctx-decision-add and /ctx-learning-add

            See also: The Complete Session

            ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-reflect","level":3,"title":"/ctx-reflect","text":"

            Pause and reflect on session progress. Walks through a checklist of learnings, decisions, task completions, and session notes to persist.

            Wraps: chains to ctx add learning, ctx add decision, manual TASKS.md updates

            See also: The Complete Session, Persisting Decisions, Learnings, and Conventions

            ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-wrap-up","level":3,"title":"/ctx-wrap-up","text":"

            End-of-session context persistence ceremony. Gathers signal from git diff, recent commits, and conversation themes. Proposes candidates (learnings, decisions, conventions, tasks) with complete structured fields for user approval, then persists via ctx add. Offers /ctx-commit if uncommitted changes remain. Ceremony skill: invoke explicitly at session end.

            Wraps: git diff --stat, git log, ctx add learning, ctx add decision, ctx add convention, ctx add task, chains to /ctx-commit

            See also: Session Ceremonies, The Complete Session

            ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#context-persistence","level":2,"title":"Context Persistence","text":"

            Skills for recording work artifacts (tasks, decisions, learnings, conventions) into .context/ files.

            ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-task-add","level":3,"title":"/ctx-task-add","text":"

            Add an actionable task with optional priority and phase section.

            Wraps: ctx add task \"description\" [--priority high|medium|low] --session-id ID --branch BR --commit HASH

            See also: Tracking Work Across Sessions

            ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-decision-add","level":3,"title":"/ctx-decision-add","text":"

            Record an architectural decision with context, rationale, and consequence. Supports Y-statement (lightweight) and full ADR formats.

            Wraps: ctx add decision \"title\" --context \"...\" --rationale \"...\" --consequence \"...\" --session-id ID --branch BR --commit HASH

            See also: Persisting Decisions, Learnings, and Conventions

            ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-learning-add","level":3,"title":"/ctx-learning-add","text":"

            Record a project-specific gotcha, bug, or unexpected behavior. Filters for insights that are searchable, project-specific, and required real effort to discover.

            Wraps: ctx add learning \"title\" --context \"...\" --lesson \"...\" --application \"...\" --session-id ID --branch BR --commit HASH

            See also: Persisting Decisions, Learnings, and Conventions

            ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-convention-add","level":3,"title":"/ctx-convention-add","text":"

            Record a coding convention that should be standardized across sessions. Targets patterns seen 2-3+ times.

            Wraps: ctx add convention \"rule\" --section \"Name\"

            See also: Persisting Decisions, Learnings, and Conventions

            ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-archive","level":3,"title":"/ctx-archive","text":"

            Archive completed tasks from TASKS.md to a timestamped file in .context/archive/. Preserves phase headers for traceability.

            Wraps: ctx task archive [--dry-run]

            See also: Tracking Work Across Sessions

            ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#scratchpad","level":2,"title":"Scratchpad","text":"","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-pad","level":3,"title":"/ctx-pad","text":"

            Manage the encrypted scratchpad: add, remove, edit, and reorder one-liner notes. Encrypted at rest with AES-256-GCM.

            Wraps: ctx pad, ctx pad add, ctx pad rm, ctx pad edit, ctx pad mv, ctx pad import, ctx pad export, ctx pad merge

            See also: Scratchpad, Using the Scratchpad

            ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#journal-history","level":2,"title":"Journal & History","text":"

            Skills for browsing, exporting, and enriching your AI session history into a structured journal.

            ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-history","level":3,"title":"/ctx-history","text":"

            Browse, inspect, and import AI session history. List recent sessions, show details by slug or ID, and import to .context/journal/.

            Wraps: ctx journal source, ctx journal source --show, ctx journal import

            See also: Browsing and Enriching Past Sessions

            ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-journal-enrich","level":3,"title":"/ctx-journal-enrich","text":"

            Enrich a single journal entry with YAML frontmatter: title, type, outcome, topics, technologies, and summary. Shows diff before writing.

            Wraps: reads and edits .context/journal/*.md files

            See also: Browsing and Enriching Past Sessions, Turning Activity into Content

            ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-journal-enrich-all","level":3,"title":"/ctx-journal-enrich-all","text":"

            Full journal pipeline: imports unimported sessions first, then batch-enriches all unenriched entries. Filters out short sessions and continuations. Can spawn subagents for large backlogs.

            Wraps: ctx journal import --all + iterates /ctx-journal-enrich

            See also: Browsing and Enriching Past Sessions

            ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#content-creation","level":2,"title":"Content Creation","text":"

            Skills for turning project activity into publishable content.

            ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-blog","level":3,"title":"/ctx-blog","text":"

            Generate a blog post draft from recent project activity: git history, decisions, learnings, tasks, and journal entries. Requires a narrative arc (problem, approach, outcome).

            Wraps: reads git log, DECISIONS.md, LEARNINGS.md, TASKS.md, journal entries; writes to docs/blog/

            See also: Turning Activity into Content

            ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-blog-changelog","level":3,"title":"/ctx-blog-changelog","text":"

            Generate a themed blog post from a commit range. Takes a starting commit and unifying theme, analyzes diffs and journal entries from that period.

            Wraps: git log, git diff --stat; writes to docs/blog/

            See also: Turning Activity into Content

            ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#auditing-health","level":2,"title":"Auditing & Health","text":"

            Skills for detecting drift, auditing alignment, and improving prompt quality.

            ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-consolidate","level":3,"title":"/ctx-consolidate","text":"

            Consolidate redundant entries in LEARNINGS.md or DECISIONS.md. Groups overlapping entries by keyword similarity, presents candidates, and (with user approval) merges groups into denser combined entries. Originals are archived, not deleted.

            Wraps: reads LEARNINGS.md and DECISIONS.md, writes consolidated entries, archives originals, runs ctx reindex

            See also: Detecting and Fixing Drift

            ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-drift","level":3,"title":"/ctx-drift","text":"

            Detect and fix context drift: stale paths, missing files, file age staleness, task accumulation, entry count warnings, and constitution violations via ctx drift. Also detects skill drift against canonical templates.

            Wraps: ctx drift [--fix]

            See also: Detecting and Fixing Drift

            ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-prompt-audit","level":3,"title":"/ctx-prompt-audit","text":"

            Analyze recent prompting patterns to identify vague or ineffective prompts. Reviews 3-5 journal entries and suggests rewrites with positive observations.

            Wraps: reads .context/journal/ entries

            See also: Detecting and Fixing Drift

            ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-doctor","level":3,"title":"/ctx-doctor","text":"

            Troubleshoot ctx behavior. Runs structural health checks via ctx doctor, analyzes event log patterns via ctx hook event, and presents findings with suggested actions. The CLI provides the structural baseline; the agent adds semantic analysis of event patterns and correlations.

            Wraps: ctx doctor --json, ctx hook event --json --last 100, ctx remind list, ctx hook message list, reads .ctxrc

            Trigger phrases: \"diagnose\", \"troubleshoot\", \"doctor\", \"health check\", \"why didn't my hook fire?\", \"hooks seem broken\", \"something seems off\"

            Graceful degradation: If event_log is not enabled, the skill still works but with reduced capability. It runs structural checks and notes: \"Enable event_log: true in .ctxrc for hook-level diagnostics.\"

            See also: Troubleshooting, ctx doctor CLI, ctx hook event CLI

            ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-link-check","level":3,"title":"/ctx-link-check","text":"

            Scan all markdown files under docs/ for broken links. Three passes: internal links (verify file targets exist on disk), external links (HTTP HEAD with timeout, report failures as warnings), and image references. Resolves relative paths, strips anchors before checking, and skips localhost/example URLs.

            Wraps: Glob + Grep to scan, curl for external checks

            Trigger phrases: \"check links\", \"audit links\", \"any broken links?\", \"dead links\"

            See also: Detecting and Fixing Drift

            ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-permission-sanitize","level":3,"title":"/ctx-permission-sanitize","text":"

            Audit .claude/settings.local.json for dangerous permissions across four risk categories: hook bypass (Critical), destructive commands (High), config injection vectors (High), and overly broad patterns (Medium). Reports findings by severity and offers specific fix actions with user confirmation.

            Wraps: reads .claude/settings.local.json, edits with confirmation

            Trigger phrases: \"audit permissions\", \"are my permissions safe?\", \"sanitize permissions\", \"check settings\"

            See also: Claude Code Permission Hygiene

            ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#planning-execution","level":2,"title":"Planning & Execution","text":"

            Skills for structured design, implementation, and parallel agent workflows.

            ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-brainstorm","level":3,"title":"/ctx-brainstorm","text":"

            Transform raw ideas into clear, validated designs through structured dialogue before any implementation begins. Follows a gated process: understand context, clarify the idea (one question at a time), surface non-functional requirements, lock understanding with user confirmation, explore 2-3 design approaches with trade-offs, stress-test the chosen approach, and present the detailed design.

            Wraps: reads DECISIONS.md, relevant source files; chains to /ctx-decision-add for recording design choices

            Trigger phrases: \"let's brainstorm\", \"design this\", \"think through\", \"before we build\", \"what approach should we take?\"

            See also: /ctx-spec

            ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-spec","level":3,"title":"/ctx-spec","text":"

            Scaffold a feature spec from the project template and walk through each section with the user. Covers: problem, approach, happy path, edge cases, validation rules, error handling, interface, implementation, configuration, testing, and non-goals. Spends extra time on edge cases and error handling.

            Wraps: reads specs/tpl/spec-template.md, writes to specs/, optionally chains to /ctx-task-add

            Trigger phrases: \"spec this out\", \"write a spec\", \"create a spec\", \"design document\"

            See also: /ctx-brainstorm, /ctx-plan-import

            ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-plan-import","level":3,"title":"/ctx-plan-import","text":"

            Import Claude Code plan files (~/.claude/plans/*.md) into the project's specs/ directory. Lists plans with dates and H1 titles, supports filtering (--today, --since, --all), slugifies headings for filenames, and optionally creates tasks referencing each imported spec.

            Wraps: reads ~/.claude/plans/*.md, writes to specs/, optionally chains to /ctx-task-add

            See also: Importing Claude Code Plans, Tracking Work Across Sessions

            ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-implement","level":3,"title":"/ctx-implement","text":"

            Execute a multi-step plan with build and test verification at each step. Loads a plan from a file or conversation context, breaks it into atomic steps, and checkpoints after every 3-5 steps.

            Wraps: reads plan file, runs verification commands (go build, go test, etc.)

            See also: Running an Unattended AI Agent

            ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-loop","level":3,"title":"/ctx-loop","text":"

            Generate a ready-to-run shell script for autonomous AI iteration. Supports Claude Code, Aider, and generic tool templates with configurable completion signals.

            Wraps: ctx loop [--tool] [--prompt] [--max-iterations] [--completion] [--output]

            See also: Autonomous Loops, Running an Unattended AI Agent

            ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-worktree","level":3,"title":"/ctx-worktree","text":"

            Manage git worktrees for parallel agent development. Create sibling worktrees on dedicated branches, analyze task blast radius for grouping, and tear down with merge.

            Wraps: git worktree add, git worktree list, git worktree remove, git merge

            See also: Parallel Agent Development with Git Worktrees

            ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-architecture","level":3,"title":"/ctx-architecture","text":"

            Build and maintain architecture maps incrementally. Creates or refreshes ARCHITECTURE.md (succinct project map, loaded at session start) and DETAILED_DESIGN.md (deep per-module reference, consulted on-demand). Coverage is tracked in map-tracking.json so each run extends the map rather than re-analyzing everything.

            Wraps: ctx status, git log, reads source files; writes ARCHITECTURE.md, DETAILED_DESIGN.md, map-tracking.json

            See also: Detecting and Fixing Drift

            ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-architecture-failure-analysis","level":3,"title":"/ctx-architecture-failure-analysis","text":"

            Adversarial failure analysis that generates falsifiable incident hypotheses against architecture artifacts. Hunts for correctness bugs that survive code review and tests: race conditions, ordering assumptions, cache staleness, error swallowing, ownership gaps, idempotency failures, state machine drift, and scaling cliffs.

            Requires /ctx-architecture artifacts as input. Reads ARCHITECTURE.md, DETAILED_DESIGN*.md, and map-tracking.json, then systematically applies 9 failure categories to every mutation point. Each finding carries an evidence standard (code path, trigger, failure path, silence reason, code evidence), a confidence level, and an explicit risk score. A mandatory challenge phase attempts to disprove each finding before it is accepted.

            Produces .context/DANGER-ZONES.md with ranked findings split into Critical (risk >= 7, silent/cascading) and Elevated tiers.

            Wraps: reads architecture artifacts, source code; writes DANGER-ZONES.md. Optionally uses GitNexus for blast radius and Gemini Search for cross-referencing known failure patterns.

            Relationship:

            Skill Mode /ctx-architecture Map what exists /ctx-architecture-enrich Improve map fidelity /ctx-architecture-failure-analysis Generate falsifiable incident hypotheses","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-remind","level":3,"title":"/ctx-remind","text":"

            Manage session-scoped reminders via natural language. Translates user intent (\"remind me to refactor swagger\") into the corresponding ctx remind command. Handles date conversion for --after flags.

            Wraps: ctx remind, ctx remind list, ctx remind dismiss

            See also: Session Reminders

            ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#skill-authoring","level":2,"title":"Skill Authoring","text":"","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-skill-audit","level":3,"title":"/ctx-skill-audit","text":"

            Audit one or more skills against Anthropic prompting best practices. Checks audit dimensions: positive framing, motivation, phantom references, examples, subagent guards, scope, and descriptions. Reports findings by severity with concrete fix suggestions.

            Wraps: reads internal/assets/claude/skills/*/SKILL.md or .claude/skills/*/SKILL.md, references anthropic-best-practices.md

            Trigger phrases: \"audit this skill\", \"check skill quality\", \"review the skills\", \"are our skills any good?\"

            See also: /ctx-skill-create, Contributing

            ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-skill-create","level":3,"title":"/ctx-skill-create","text":"

            Create, improve, and test skills. Guides the full lifecycle: capture intent, interview for edge cases, draft the SKILL.md, test with realistic prompts, review results with the user, and iterate. Applies core principles: the agent is already smart (only add what it does not know), the description is the trigger (make it specific and \"pushy\"), and explain the why instead of rigid directives.

            Wraps: reads/writes .claude/skills/ and internal/assets/claude/skills/

            Trigger phrases: \"create a skill\", \"turn this into a skill\", \"make a slash command\", \"this should be a skill\", \"improve this skill\", \"the skill isn't triggering\"

            See also: Contributing

            ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#session-control","level":2,"title":"Session Control","text":"

            Skills for controlling hook behavior during a session.

            ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-pause","level":3,"title":"/ctx-pause","text":"

            Pause all context nudge and reminder hooks for the current session. Security hooks still fire. Use for quick investigations or tasks that don't need ceremony overhead.

            Wraps: ctx hook pause

            Trigger phrases: \"pause ctx\", \"pause context\", \"stop the nudges\", \"quiet mode\"

            See also: Pausing Context Hooks

            ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#ctx-resume","level":3,"title":"/ctx-resume","text":"

            Resume context hooks after a pause. Restores normal nudge, reminder, and ceremony behavior. Silent no-op if not paused.

            Wraps: ctx hook resume

            Trigger phrases: \"resume ctx\", \"resume context\", \"turn nudges back on\", \"unpause\"

            See also: Pausing Context Hooks

            ","path":["Reference","Skills"],"tags":[]},{"location":"reference/skills/#project-specific-skills","level":2,"title":"Project-Specific Skills","text":"

            The ctx plugin ships the skills listed above. Teams can add their own project-specific skills to .claude/skills/ in the project root. These are separate from plugin-shipped skills and are scoped to the project.

            Project-specific skills follow the same format and are invoked the same way.

            Custom skills are not covered in this reference.

            ","path":["Reference","Skills"],"tags":[]},{"location":"reference/versions/","level":1,"title":"Version History","text":"","path":["Reference","Version History"],"tags":[]},{"location":"reference/versions/#version-history","level":2,"title":"Version History","text":"

            Documentation snapshots for each release.

            Tap the corresponding view docs link to view the documentation as it was at that release.

            ","path":["Reference","Version History"],"tags":[]},{"location":"reference/versions/#releases","level":2,"title":"Releases","text":"Version Release Date Documentation v0.8.0 2026-03-23 view docs v0.6.0 2026-02-16 view docs v0.3.0 2026-02-07 view docs v0.2.0 2026-02-01 view docs v0.1.2 2026-01-27 view docs v0.1.1 2026-01-26 view docs v0.1.0 2026-01-25 view docs","path":["Reference","Version History"],"tags":[]},{"location":"reference/versions/#v080-the-architecture-release","level":3,"title":"v0.8.0: The Architecture Release","text":"

            MCP server for tool-agnostic AI integration. Memory bridge connecting Claude Code auto-memory to .context/. Complete CLI restructuring into cmd/ + core/ taxonomy. All user-facing strings externalized to YAML. fatih/color removed; two direct dependencies remain.

            ","path":["Reference","Version History"],"tags":[]},{"location":"reference/versions/#v060-the-integration-release","level":3,"title":"v0.6.0: The Integration Release","text":"

            Plugin architecture: hooks and skills converted from shell scripts to Go subcommands, shipped as a Claude Code marketplace plugin. Multi-tool hook generation for Cursor, Aider, Copilot, and Windsurf. Webhook notifications with encrypted URL storage.

            ","path":["Reference","Version History"],"tags":[]},{"location":"reference/versions/#v030-the-discipline-release","level":3,"title":"v0.3.0: The Discipline Release","text":"

            Journal static site generation via zensical. 49-skill audit and fix pass (positive framing, phantom reference removal, scope tightening). Context consolidation skill. golangci-lint v2 migration.

            ","path":["Reference","Version History"],"tags":[]},{"location":"reference/versions/#v020-the-archaeology-release","level":3,"title":"v0.2.0: The Archaeology Release","text":"

            Session journal system: ctx journal import converts Claude Code JSONL transcripts to browsable Markdown. Constants refactor with semantic prefixes (Dir*, File*, Filename*). CRLF handling for Windows compatibility.

            ","path":["Reference","Version History"],"tags":[]},{"location":"reference/versions/#v012","level":3,"title":"v0.1.2","text":"

            Default Claude Code permissions deployed on ctx init. Prompting guide published as a standalone documentation page.

            ","path":["Reference","Version History"],"tags":[]},{"location":"reference/versions/#v011","level":3,"title":"v0.1.1","text":"

            Bug fixes: hook schema key format corrected, JSON unicode escaping fixed in context file output.

            ","path":["Reference","Version History"],"tags":[]},{"location":"reference/versions/#v010-initial-release","level":3,"title":"v0.1.0: Initial Release","text":"

            CLI with 15 subcommands, 6 context file types (CONSTITUTION, TASKS, CONVENTIONS, ARCHITECTURE, DECISIONS, LEARNINGS), Makefile build system, and Claude Code hook integration.

            ","path":["Reference","Version History"],"tags":[]},{"location":"reference/versions/#latest","level":2,"title":"Latest","text":"

            The main documentation always reflects the latest development version.

            For the most recent stable release, see v0.8.0.

            ","path":["Reference","Version History"],"tags":[]},{"location":"reference/versions/#changelog","level":2,"title":"Changelog","text":"

            For detailed changes between versions, see the GitHub Releases page.

            ","path":["Reference","Version History"],"tags":[]},{"location":"security/","level":1,"title":"Security","text":"

            Security model, agent hardening, and vulnerability reporting.

            ","path":["Security"],"tags":[]},{"location":"security/#security-design","level":3,"title":"Security Design","text":"

            Trust model, what ctx does for security, permission hygiene, state file management, and the log-first audit trail principle. Read first to understand the security boundaries.

            ","path":["Security"],"tags":[]},{"location":"security/#securing-ai-agents","level":3,"title":"Securing AI Agents","text":"

            Defense in depth for unattended AI agents: five layers of protection, each with a known bypass, but strong in combination.

            ","path":["Security"],"tags":[]},{"location":"security/#reporting-vulnerabilities","level":3,"title":"Reporting Vulnerabilities","text":"

            How to report a security issue: email, GitHub private reporting, PGP-encrypted submissions, what to include, and the response timeline.

            ","path":["Security"],"tags":[]},{"location":"security/agent-security/","level":1,"title":"Securing AI Agents","text":"","path":["Security","Securing AI Agents"],"tags":[]},{"location":"security/agent-security/#defense-in-depth-securing-ai-agents","level":1,"title":"Defense in Depth: Securing AI Agents","text":"","path":["Security","Securing AI Agents"],"tags":[]},{"location":"security/agent-security/#the-problem","level":2,"title":"The Problem","text":"

            An unattended AI agent with unrestricted access to your machine is an unattended shell with unrestricted access to your machine.

            This is not a theoretical concern. AI coding agents execute shell commands, write files, make network requests, and modify project configuration. When running autonomously (overnight, in a loop, without a human watching), the attack surface is the full capability set of the operating system user account.

            The risk is not that the AI is malicious. The risk is that the AI is controllable: it follows instructions from context, and context can be poisoned.

            ","path":["Security","Securing AI Agents"],"tags":[]},{"location":"security/agent-security/#threat-model","level":2,"title":"Threat Model","text":"","path":["Security","Securing AI Agents"],"tags":[]},{"location":"security/agent-security/#how-agents-get-compromised","level":3,"title":"How Agents Get Compromised","text":"

            AI agents follow instructions from multiple sources: system prompts, project files, conversation history, and tool outputs. An attacker who can inject content into any of these sources can redirect the agent's behavior.

            Vector How it works Prompt injection via dependencies A malicious package includes instructions in its README, changelog, or error output. The agent reads these during installation or debugging and follows them. Prompt injection via fetched content The agent fetches a URL (documentation, API response, Stack Overflow answer) containing embedded instructions. Poisoned project files A contributor adds adversarial instructions to CLAUDE.md, .cursorrules, or .context/ files. The agent loads these at session start. Self-modification between iterations In an autonomous loop, the agent modifies its own configuration files. The next iteration loads the modified config with no human review. Tool output injection A command's output (error messages, log lines, file contents) contains instructions the agent interprets and follows.","path":["Security","Securing AI Agents"],"tags":[]},{"location":"security/agent-security/#what-can-a-compromised-agent-do","level":3,"title":"What Can a Compromised Agent Do","text":"

            Depends entirely on what permissions and access the agent has:

            Access level Potential impact Unrestricted shell Execute any command, install software, modify system files Network access Exfiltrate source code, credentials, or context files to external servers Docker socket Escape container isolation by spawning privileged sibling containers SSH keys Pivot to other machines, push to remote repositories, access production systems Write access to own config Disable its own guardrails for the next iteration","path":["Security","Securing AI Agents"],"tags":[]},{"location":"security/agent-security/#the-defense-layers","level":2,"title":"The Defense Layers","text":"

            No single layer is sufficient. Each layer catches what the others miss.

            Layer 1: Soft instructions     (CONSTITUTION.md, playbook)\nLayer 2: Application controls  (permission allowlist, tool restrictions)\nLayer 3: OS-level isolation    (user accounts, filesystem, containers)\nLayer 4: Network controls      (firewall rules, airgap)\nLayer 5: Infrastructure        (VM isolation, resource limits)\n
            ","path":["Security","Securing AI Agents"],"tags":[]},{"location":"security/agent-security/#layer-1-soft-instructions-probabilistic","level":3,"title":"Layer 1: Soft Instructions (Probabilistic)","text":"

            Markdown files like CONSTITUTION.md and the Agent Playbook tell the agent what to do and what not to do. These are probabilistic: the agent usually follows them, but there is no enforcement mechanism.

            What it catches: Most common mistakes. An agent that has been told \"never delete production data\" will usually not delete production data.

            What it misses: Prompt injection. A sufficiently crafted injection can override soft instructions. Long context windows dilute attention on rules stated early. Edge cases where instructions are ambiguous.

            Verdict: Necessary but not sufficient. Good for the common case. Do not rely on it for security boundaries.

            ","path":["Security","Securing AI Agents"],"tags":[]},{"location":"security/agent-security/#layer-2-application-controls-deterministic-at-runtime-mutable-across-iterations","level":3,"title":"Layer 2: Application Controls (Deterministic at Runtime, Mutable across Iterations)","text":"

            AI tool runtimes (Claude Code, Cursor, etc.) provide permission systems: tool allowlists, command restrictions, confirmation prompts.

            For Claude Code, ctx init writes both an allowlist and an explicit deny list into .claude/settings.local.json. The golden images live in internal/assets/permissions/:

            Allowlist (allow.txt): only these tools run without confirmation:

            Bash(ctx:*)\nSkill(ctx-convention-add)\nSkill(ctx-decision-add)\n... # all bundled ctx-* skills\n

            Deny list (deny.txt): these are blocked even if the agent requests them:

            # Dangerous operations\nBash(sudo *)\nBash(git push *)\nBash(git push)\nBash(rm -rf /*)\nBash(rm -rf ~*)\nBash(curl *)\nBash(wget *)\nBash(chmod 777 *)\n\n# Sensitive file reads\nRead(**/.env)\nRead(**/.env.*)\nRead(**/*credentials*)\nRead(**/*secret*)\nRead(**/*.pem)\nRead(**/*.key)\n\n# Sensitive file edits\nEdit(**/.env)\nEdit(**/.env.*)\n

            What it catches: The agent cannot run commands outside the allowlist, and the deny list blocks dangerous operations even if a future allowlist change were to widen access. If rm, curl, sudo, or docker are not allowed and sudo/curl/wget are explicitly denied, the agent cannot invoke them regardless of what any prompt says.

            What it misses: The agent can modify the allowlist itself. In an autonomous loop, if the agent writes to .claude/settings.local.json, and the next iteration loads the modified config, then the protection is effectively lost. The application enforces the rules, but the application reads the rules from files the agent can write.

            Verdict: Strong first layer. Must be combined with self-modification prevention (Layer 3).

            ","path":["Security","Securing AI Agents"],"tags":[]},{"location":"security/agent-security/#layer-3-os-level-isolation-deterministic-and-unbypassable","level":3,"title":"Layer 3: OS-Level Isolation (Deterministic and Unbypassable)","text":"

            The operating system enforces access controls that no application-level trick can override. An unprivileged user cannot read files owned by root. A process without CAP_NET_RAW cannot open raw sockets. These are kernel boundaries.

            Control Purpose Dedicated user account No sudo, no privileged group membership (docker, wheel, adm). The agent cannot escalate privileges. Filesystem permissions Project directory writable; everything else read-only or inaccessible. Agent cannot reach other projects, home directories, or system config. Immutable config files CLAUDE.md, .claude/settings.local.json, and .context/CONSTITUTION.md owned by a different user or marked immutable (chattr +i on Linux). The agent cannot modify its own guardrails.

            What it catches: Privilege escalation, self-modification, lateral movement to other projects or users.

            What it misses: Actions within the agent's legitimate scope. If the agent has write access to source code (which it needs to do its job), it can introduce vulnerabilities in the code itself.

            Verdict: Essential. This is the layer that makes the other layers trustworthy.

            OS-level isolation does not make the agent safe; it makes the other layers meaningful.

            ","path":["Security","Securing AI Agents"],"tags":[]},{"location":"security/agent-security/#layer-4-network-controls","level":3,"title":"Layer 4: Network Controls","text":"

            An agent that cannot reach the internet cannot exfiltrate data. It also cannot ingest new instructions mid-loop from external documents, API responses, or hostile content.

            Scenario Recommended control Agent does not need the internet --network=none (container) or outbound firewall drop-all Agent needs to fetch dependencies Allow specific registries (npmjs.com, proxy.golang.org, pypi.org) via firewall rules. Block everything else. Agent needs API access Allow specific API endpoints only. Use an HTTP proxy with allowlisting.

            What it catches: Data exfiltration, phone-home payloads, downloading additional tools, and instruction injection via fetched content.

            What it misses: Nothing, if the agent genuinely does not need the network. The tradeoff is that many real workloads need dependency resolution, so a full airgap requires pre-populated caches.

            ","path":["Security","Securing AI Agents"],"tags":[]},{"location":"security/agent-security/#layer-5-infrastructure-isolation","level":3,"title":"Layer 5: Infrastructure Isolation","text":"

            The strongest boundary is a separate machine (or something that behaves like one).

            The moment you stop arguing about prompts and start arguing about kernels, you are finally doing security.

            Containers (Docker, Podman):

            docker run --rm \\\n  --network=none \\\n  --cap-drop=ALL \\\n  --memory=4g \\\n  --cpus=2 \\\n  -v /path/to/project:/workspace \\\n  -w /workspace \\\n  your-dev-image \\\n  ./loop.sh\n

            Docker Socket Is Sudo Access

            Critical: never mount the Docker socket (/var/run/docker.sock).

            An agent with socket access can spawn sibling containers with full host access, effectively escaping the sandbox.

            Use rootless Docker or Podman to eliminate this escalation path.

            Virtual machines: The strongest isolation. The guest kernel has no visibility into the host OS. No shared folders, no filesystem passthrough, no SSH keys to other machines.

            Resource limits: CPU, memory, and disk quotas prevent a runaway agent from consuming all resources. Use ulimit, cgroup limits, or container resource constraints.

            ","path":["Security","Securing AI Agents"],"tags":[]},{"location":"security/agent-security/#putting-it-all-together","level":2,"title":"Putting It All Together","text":"

            A defense-in-depth setup for overnight autonomous runs:

            Layer Implementation Stops Soft instructions CONSTITUTION.md with \"never delete tests\", \"always run tests before committing\" Common mistakes (probabilistic) Application allowlist .claude/settings.local.json with explicit tool permissions Unauthorized commands (deterministic within runtime) Immutable config chattr +i on CLAUDE.md, .claude/, CONSTITUTION.md Self-modification between iterations Unprivileged user Dedicated user, no sudo, no docker group Privilege escalation Container --cap-drop=ALL --network=none, rootless, no socket mount Host escape, network exfiltration Resource limits --memory=4g --cpus=2, disk quotas Resource exhaustion

            Each layer is straightforward; the strength is in the combination.

            ","path":["Security","Securing AI Agents"],"tags":[]},{"location":"security/agent-security/#common-mistakes","level":2,"title":"Common Mistakes","text":"

            \"I'll just use --dangerously-skip-permissions\": This disables Layer 2 entirely. Without Layers 3-5, you have no protection at all. Only use this flag inside a properly isolated container or VM.

            \"The agent is sandboxed in Docker\": A Docker container with the Docker socket mounted, running as root, with --privileged, and full network access is not sandboxed. It is a root shell with extra steps.

            \"CONSTITUTION.md says not to do that\": Markdown is a suggestion. It works most of the time. It is not a security boundary. Do not use it as one.

            \"I reviewed the CLAUDE.md, it's fine\": The agent can modify CLAUDE.md during iteration N. Iteration N+1 loads the modified version. Unless the file is immutable, your review is stale.

            \"The agent only has access to this one project\": Does the project directory contain .env files, SSH keys, API tokens, or credentials? Does it have a .git/config with push access to a remote? Filesystem isolation means isolating what is in the directory too.

            ","path":["Security","Securing AI Agents"],"tags":[]},{"location":"security/agent-security/#team-security-considerations","level":2,"title":"Team Security Considerations","text":"

            When multiple developers share a .context/ directory, security considerations extend beyond single-agent hardening.

            ","path":["Security","Securing AI Agents"],"tags":[]},{"location":"security/agent-security/#code-review-for-context-files","level":3,"title":"Code Review for Context Files","text":"

            Treat .context/ changes like code changes. Context files influence agent behavior (a modified CONSTITUTION.md or CONVENTIONS.md changes what every agent on the team will do next session). Review them in PRs with the same scrutiny you apply to production code.

            Watch for:

            • Weakened constitutional rules (removed constraints, softened language)
            • New decisions that contradict existing ones without acknowledging it
            • Learnings that encode incorrect assumptions
            • Task additions that bypass the team's prioritization process
            ","path":["Security","Securing AI Agents"],"tags":[]},{"location":"security/agent-security/#gitignore-patterns","level":3,"title":"Gitignore Patterns","text":"

            ctx init configures .gitignore automatically, but verify these patterns are in place:

            • Always gitignored: .ctx.key (encryption key), .context/logs/, .context/journal/
            • Team decision: scratchpad.enc (encrypted, safe to commit for shared scratchpad state); add it to .gitignore if scratchpads are personal
            • Never committed: .env, credentials, API keys (enforced by drift secret detection)
            ","path":["Security","Securing AI Agents"],"tags":[]},{"location":"security/agent-security/#multi-developer-context-sharing","level":3,"title":"Multi-Developer Context Sharing","text":"

            CONSTITUTION.md is the shared contract. All team members and their agents inherit it. Changes require team consensus, not unilateral edits.

            When multiple agents write to the same context files concurrently (e.g., two developers adding learnings simultaneously), git merge conflicts are expected. Resolution is typically additive: accept both additions. Destructive resolution (dropping one side) loses context.

            ","path":["Security","Securing AI Agents"],"tags":[]},{"location":"security/agent-security/#team-conventions-for-context-management","level":3,"title":"Team Conventions for Context Management","text":"

            Establish and document:

            • Who reviews context changes: Same reviewers as code, or a designated context owner?
            • How to resolve conflicting decisions: If two sessions record contradictory decisions, which wins? Default: the later one must explicitly supersede the earlier one with rationale.
            • Frequency of context maintenance: Weekly ctx drift checks, monthly consolidation passes, archival after each milestone.
            ","path":["Security","Securing AI Agents"],"tags":[]},{"location":"security/agent-security/#checklist","level":2,"title":"Checklist","text":"

            Before running an unattended AI agent:

            • Agent runs as a dedicated unprivileged user (no sudo, no docker group)
            • Agent's config files are immutable or owned by a different user
            • Permission allowlist restricts tools to the project's toolchain
            • Container drops all capabilities (--cap-drop=ALL)
            • Docker socket is NOT mounted
            • Network is disabled or restricted to specific domains
            • Resource limits are set (memory, CPU, disk)
            • No SSH keys, API tokens, or credentials are accessible to the agent
            • Project directory does not contain .env or secrets files
            • Iteration cap is set (--max-iterations)
            ","path":["Security","Securing AI Agents"],"tags":[]},{"location":"security/agent-security/#further-reading","level":2,"title":"Further Reading","text":"
            • Running an Unattended AI Agent: the ctx recipe for autonomous loops, including step-by-step permissions and isolation setup
            • Security: ctx's own trust model and vulnerability reporting
            • Autonomous Loops: full documentation of the loop pattern, prompt templates, and troubleshooting
            ","path":["Security","Securing AI Agents"],"tags":[]},{"location":"security/design/","level":1,"title":"Security Design","text":"

            How ctx thinks about security: trust boundaries, what the system does and does not do for you, the engineering principle behind the audit trail, and the permission hygiene workflow.

            For vulnerability disclosure, see Reporting Vulnerabilities.

            ","path":["Security","Security Design"],"tags":[]},{"location":"security/design/#trust-model","level":2,"title":"Trust Model","text":"

            ctx operates within a single trust boundary: the local filesystem.

            The person who authors .context/ files is the same person who runs the agent that reads them. There is no remote input, no shared state, and no server component.

            This means:

            • ctx does not sanitize context files for prompt injection. This is a deliberate design choice, not an oversight. The files are authored by the developer who owns the machine: sanitizing their own instructions back to them would be counterproductive.
            • If you place adversarial instructions in your own .context/ files, your agent will follow them. This is expected behavior. You control the context; the agent trusts it.

            Shared Repositories

            In shared repositories, .context/ files should be reviewed in code review (the same way you would review CI/CD config or Makefiles). A malicious contributor could add harmful instructions to CONSTITUTION.md or TASKS.md.

            ","path":["Security","Security Design"],"tags":[]},{"location":"security/design/#what-ctx-does-for-security","level":2,"title":"What ctx Does for Security","text":"

            ctx is designed with security in mind:

            • No secrets in context: The constitution explicitly forbids storing secrets, tokens, API keys, or credentials in .context/ files.
            • Local only: ctx runs entirely locally with no external network calls.
            • No code execution: ctx reads and writes Markdown files only; it does not execute arbitrary code.
            • Git-tracked: Core context files are meant to be committed, so they should never contain sensitive data. Exception: sessions/ and journal/ contain raw conversation data and should be gitignored.
            ","path":["Security","Security Design"],"tags":[]},{"location":"security/design/#permission-hygiene","level":2,"title":"Permission Hygiene","text":"

            Claude Code evaluates permissions in deny → ask → allow order. ctx init automatically populates permissions.deny with rules that block dangerous operations before the allow list is ever consulted.

            Default deny rules block:

            • sudo, git push, rm -rf /, rm -rf ~, curl, wget, chmod 777
            • Read / Edit of .env, credentials, secrets, .pem, .key files

            Even with deny rules in place, the allow list accumulates one-off permissions over time. Periodically review for:

            • Destructive commands: git reset --hard, git clean -f, etc.
            • Config injection vectors: permissions that allow modifying files controlling agent behavior (CLAUDE.md, settings.local.json).
            • Broad wildcards: overly permissive patterns that pre-approve more than intended.

            For the full hygiene workflow, see the Claude Code Permission Hygiene recipe.

            ","path":["Security","Security Design"],"tags":[]},{"location":"security/design/#state-file-management","level":2,"title":"State File Management","text":"

            Hook state files (throttle markers, prompt counters, pause markers) are stored in .context/state/, which is project-scoped and gitignored. State files are automatically managed by the hooks that create them; no manual cleanup is needed.

            ","path":["Security","Security Design"],"tags":[]},{"location":"security/design/#log-first-audit-trail","level":2,"title":"Log-First Audit Trail","text":"

            The event log (.context/state/events.jsonl) is the authoritative record of what ctx hooks did during a session. Several audit-adjacent features depend on that log being trustworthy, not merely best-effort:

            • ctx event / ctx system view-events replays session history from the log.
            • Webhook notifications give operators a real-time signal that assumes every notification corresponds to a logged event.
            • Drift, freshness, and map-staleness checks count events over time and surface regressions.

            A log that silently drops entries while the rest of the system claims success is worse than no log at all: operators see a green TUI and a webhook notification and conclude \"it happened,\" even when the audit trail never landed. The codebase treats this as a correctness problem, not a UX polish problem.

            ","path":["Security","Security Design"],"tags":[]},{"location":"security/design/#the-rule","level":3,"title":"The Rule","text":"

            Any code path that emits an observable side effect (webhook, stdout marker, throttle-file touch, state mutation) must append the corresponding event-log entry first and gate the side effect on the append succeeding. If the log write fails, the side effect must not fire.

            In code, this shape:

            if appendErr := event.Append(channel, msg, sessionID, ref); appendErr != nil {\n    return appendErr // do NOT send the webhook or touch the marker\n}\nif sendErr := notify.Send(channel, msg, sessionID, ref); sendErr != nil {\n    return sendErr\n}\n// downstream side effects (marker touch, stdout, etc.)\n

            The nudge.Relay helper in internal/cli/system/core/nudge enforces this for the common \"log + webhook\" pair. Hook Run functions that compose their own sequence (session_event, heartbeat, several check_* hooks) follow the same ordering explicitly.

            ","path":["Security","Security Design"],"tags":[]},{"location":"security/design/#known-gaps","level":3,"title":"Known Gaps","text":"
            • Nudge webhooks have no log channel. nudge.EmitAndRelay sends a \"nudge\" notification before the \"relay\" event is logged. The nudge leg is fire-and-forget because no event-log channel records nudges today. A future refactor may add one; until then this is the one documented exception.
            • ctx agent --cooldown and ctx doctor propagate rather than gate. They surface real errors to the caller (usually Cobra) rather than deciding what to do with them locally. Editors that invoke these commands may display errors in an ugly way; the ugliness is the correct signal (something persisted is broken), not a defect to smooth over.
            • Verbose hook logs in core/log.Message stay best-effort. That logger captures per-hook activity (how many prompts, which percent, etc.) for debugging; it is NOT the event audit trail. Its failures go to stderr via log/warn.Warn rather than propagating, because losing an operational log line is not a correctness problem.
            ","path":["Security","Security Design"],"tags":[]},{"location":"security/design/#background","level":3,"title":"Background","text":"

            The error returns on event.Append, io.AppendBytes, nudge.Relay, and cooldown.Active / cooldown.TouchTombstone were introduced as part of the resolver-tightening refactor. Before that change, most hook paths called these helpers and silently discarded their errors. The principle above was extracted from the observation that every user-visible correctness problem hit during the refactor traced back to some function saying \"this succeeded\" when the underlying write never landed.

            ","path":["Security","Security Design"],"tags":[]},{"location":"security/design/#best-practices","level":2,"title":"Best Practices","text":"
            1. Review before committing: Always review .context/ files before committing.
            2. Use .gitignore: If you must store sensitive notes locally, add them to .gitignore.
            3. Drift detection: Run ctx drift to check for potential issues.
            4. Permission audit: Review .claude/settings.local.json after busy sessions.
            ","path":["Security","Security Design"],"tags":[]},{"location":"security/hub/","level":1,"title":"Hub Security Model","text":"","path":["Security","Hub Security Model"],"tags":[]},{"location":"security/hub/#ctx-hub-security-model","level":1,"title":"ctx Hub: Security Model","text":"

            What the hub defends against, what it does not defend against, and the concrete mechanisms in play.

            ","path":["Security","Hub Security Model"],"tags":[]},{"location":"security/hub/#threat-model","level":2,"title":"Threat Model","text":"

            The hub is designed for trusted cross-project knowledge sharing within a team or homelab. It assumes:

            • The hub host is trusted. Anyone with root on that box can read every entry ever published.
            • Network is semi-trusted. Hub traffic is gRPC over TCP; TLS is strongly recommended but not mandatory.
            • Client machines are trusted enough to hold a per-project client token. Losing a client token is roughly equivalent to losing an API key: scoped damage, not total compromise.
            • Entry content is not secret. Decisions, learnings, and conventions may be indexed by AI agents, rendered in docs, shared across projects. Do not push credentials or PII into the hub.

            The hub is not a secure messaging system, a secrets store, or a compliance-grade audit log. If your threat model needs those, use a dedicated tool and keep the hub for knowledge sharing.

            ","path":["Security","Hub Security Model"],"tags":[]},{"location":"security/hub/#mechanisms","level":2,"title":"Mechanisms","text":"","path":["Security","Hub Security Model"],"tags":[]},{"location":"security/hub/#bearer-tokens","level":3,"title":"Bearer Tokens","text":"

            All RPCs except Register require a bearer token in gRPC metadata. Two kinds of tokens exist:

            Kind Format Scope Lifetime Admin token ctx_adm_... Register new projects Manual rotate Client token ctx_cli_... Publish, Sync, Listen, Status Project lifetime

            Tokens are compared in constant time (crypto/subtle) to prevent timing oracles, and looked up via an O(1) hash map so the comparison cost does not depend on the total number of registered clients.

            ","path":["Security","Hub Security Model"],"tags":[]},{"location":"security/hub/#client-side-encryption-at-rest","level":3,"title":"Client-Side Encryption at Rest","text":"

            .context/.connect.enc stores the client token and hub address, encrypted with AES-256-GCM using the same scheme the notification subsystem uses. The key is derived from ctx's local keyring (see internal/crypto).

            An attacker with read access to the project directory cannot learn the client token without also breaking ctx's local keyring.

            ","path":["Security","Hub Security Model"],"tags":[]},{"location":"security/hub/#hub-side-token-storage","level":3,"title":"Hub-Side Token Storage","text":"

            Tokens Are Stored in Plaintext on the Hub Host

            <data-dir>/clients.json currently stores client tokens verbatim, not hashed. Anyone with read access to the hub's data directory sees every registered client's token and can impersonate any project that has ever registered.

            Mitigations today:

            • Run the hub as an unprivileged user and lock the data directory with chmod 700 <data-dir>.
            • Use the systemd unit in Operations, which enables ProtectSystem=strict, NoNewPrivileges=true, and a dedicated user.
            • Never expose <data-dir> over NFS, SMB, or shared filesystems.
            • Treat <data-dir> the same way you'd treat /etc/shadow: back it up encrypted, never check it into version control.

            Hashing clients.json and moving to keyring-backed storage is tracked as a follow-up in the PR #60 task group. Until that lands, assume a hub host compromise equals total hub compromise.

            ","path":["Security","Hub Security Model"],"tags":[]},{"location":"security/hub/#input-validation","level":3,"title":"Input Validation","text":"

            Every published entry is validated before it touches the log:

            • Type must be one of: decision, learning, convention, task. Unknown types are rejected.
            • ID and Origin are required and non-empty.
            • Content size is capped at 1 MB. Reasonable for text, hostile for attempts to fill the disk.
            • Duplicate project registration is rejected; a client that replays an old Register call gets an error, not a second token.
            ","path":["Security","Hub Security Model"],"tags":[]},{"location":"security/hub/#no-script-execution","level":3,"title":"No Script Execution","text":"

            The hub never interprets entry content. There is no expression language, no template evaluation, no markdown rendering at ingest. Content is stored as bytes and fanned out to clients verbatim.

            ","path":["Security","Hub Security Model"],"tags":[]},{"location":"security/hub/#audit-trail","level":3,"title":"Audit Trail","text":"

            entries.jsonl is append-only. Every accepted publish is recorded with the publishing project's origin tag and sequence number. Nothing is ever deleted by the hub; retention is managed manually by the operator (see log rotation).

            ","path":["Security","Hub Security Model"],"tags":[]},{"location":"security/hub/#what-the-hub-does-not-defend-against","level":2,"title":"What the Hub Does Not Defend Against","text":"
            • Untrusted entry senders. A client with a valid token can publish anything (within the 1 MB cap). There is no content validation beyond shape.
            • Denial of service from a registered client. A misbehaving client can publish until disk is full. Monitor entries.jsonl growth.
            • Network eavesdropping without TLS. Plain gRPC leaks entry content and tokens. Use a TLS-terminating reverse proxy (see Multi-machine recipe).
            • Host compromise. Root on the hub host = access to every entry and every token. Harden the host.
            • Accidental secret upload. The hub will happily fan out a decision containing an API key. Sanitize content before publishing.
            ","path":["Security","Hub Security Model"],"tags":[]},{"location":"security/hub/#operational-hardening-checklist","level":2,"title":"Operational Hardening Checklist","text":"
            • Run the hub as an unprivileged user with NoNewPrivileges=true and ProtectSystem=strict (see the systemd unit in Operations).
            • Terminate TLS in front of the hub for anything beyond a trusted LAN.
            • Restrict the listen port with firewall rules to the client subnet only.
            • Back up <data-dir>/admin.token to a secrets manager; do not leave it in shell history.
            • Rotate the admin token when a team member with access leaves. Client tokens keep working across rotations.
            • Monitor entries.jsonl growth; alert on sudden spikes.
            • Run NTP on all clients to prevent entry-timestamp skew.
            • Do not publish from machines you do not trust.
            ","path":["Security","Hub Security Model"],"tags":[]},{"location":"security/hub/#responsible-disclosure","level":2,"title":"Responsible Disclosure","text":"

            Security issues in the hub follow the same process as the rest of ctx; see Reporting.

            ","path":["Security","Hub Security Model"],"tags":[]},{"location":"security/hub/#see-also","level":2,"title":"See Also","text":"
            • ctx Hub Operations
            • ctx Hub failure modes
            • HA cluster recipe
            ","path":["Security","Hub Security Model"],"tags":[]},{"location":"security/reporting/","level":1,"title":"Reporting Vulnerabilities","text":"

            Disclosure process for security issues in ctx. For the broader security model (trust boundaries, audit trail, permission hygiene), see Security Design.

            ","path":["Security","Reporting Vulnerabilities"],"tags":[]},{"location":"security/reporting/#reporting-vulnerabilities","level":2,"title":"Reporting Vulnerabilities","text":"

            At ctx we take security very seriously.

            If you discover a security vulnerability in ctx, please report it responsibly.

            Do NOT open a public issue for security vulnerabilities.

            ","path":["Security","Reporting Vulnerabilities"],"tags":[]},{"location":"security/reporting/#email","level":3,"title":"Email","text":"

            Send details to security@ctx.ist.

            ","path":["Security","Reporting Vulnerabilities"],"tags":[]},{"location":"security/reporting/#github-private-reporting","level":3,"title":"GitHub Private Reporting","text":"
            1. Go to the Security tab;
            2. Click \"Report a Vulnerability\";
            3. Provide a detailed description.
            ","path":["Security","Reporting Vulnerabilities"],"tags":[]},{"location":"security/reporting/#encrypted-reports-optional","level":3,"title":"Encrypted Reports (Optional)","text":"

            If your report contains sensitive details (proof-of-concept exploits, credentials, or internal system information), you can encrypt your message with our PGP key:

            • In-repo: SECURITY_KEY.asc
            • Keybase: keybase.io/alekhinejose
            # Import the key\ngpg --import SECURITY_KEY.asc\n\n# Encrypt your report\ngpg --armor --encrypt --recipient security@ctx.ist report.txt\n

            Encryption is optional. Unencrypted reports to security@ctx.ist or via GitHub Private Reporting are perfectly fine.

            ","path":["Security","Reporting Vulnerabilities"],"tags":[]},{"location":"security/reporting/#what-to-include","level":3,"title":"What to Include","text":"
            • Description of the vulnerability,
            • Steps to reproduce,
            • Potential impact,
            • Suggested fix (if any).
            ","path":["Security","Reporting Vulnerabilities"],"tags":[]},{"location":"security/reporting/#attribution","level":2,"title":"Attribution","text":"

            We appreciate responsible disclosure and will acknowledge security researchers who report valid vulnerabilities (unless they prefer to remain anonymous).

            ","path":["Security","Reporting Vulnerabilities"],"tags":[]},{"location":"security/reporting/#response-timeline","level":2,"title":"Response Timeline","text":"

            Open Source, Best-Effort Timelines

            ctx is a volunteer-maintained open source project.

            The timelines below are guidelines, not guarantees, and depend on contributor availability.

            We will address security reports on a best-effort basis and prioritize them by severity.

            Stage Timeframe Acknowledgment Within 48 hours Initial assessment Within 7 days Resolution target Within 30 days (depending on severity)","path":["Security","Reporting Vulnerabilities"],"tags":[]},{"location":"thesis/","level":1,"title":"Context as State","text":"","path":["The Thesis"],"tags":[]},{"location":"thesis/#a-persistence-layer-for-human-ai-cognition","level":2,"title":"A Persistence Layer for Human-AI Cognition","text":"

            Jose Alekhinne - jose@ctx.ist

            February 2026

            ","path":["The Thesis"],"tags":[]},{"location":"thesis/#abstract","level":3,"title":"Abstract","text":"

            As AI tools evolve from code-completion utilities into reasoning collaborators, the knowledge that governs their behavior becomes as important as the code they produce; yet, that knowledge is routinely discarded at the end of every session.

            AI-assisted development systems assemble context at prompt time using heuristic retrieval from mutable sources: recent files, semantic search results, session history. These approaches optimize relevance at the moment of generation but do not persist the cognitive state that produced decisions. Reasoning is not reproducible, intent is lost across sessions, and teams cannot audit the knowledge that constrains automated behavior.

            This paper argues that context should be treated as deterministic, version-controlled state rather than as a transient query result. We ground this argument in three sources of evidence: a landscape analysis of 17 systems spanning AI coding assistants, agent frameworks, and knowledge stores; a taxonomy of five primitive categories that reveals irrecoverable architectural trade-offs; and an experience report from ctx, a persistence layer for AI-assisted development, which developed itself using its own persistence model across 389 sessions over 33 days. We define a three-tier model for cognitive state: authoritative knowledge, delivery views, and ephemeral state. Then we present six design invariants empirically validated by 56 independent rejection decisions observed across the analyzed landscape. We show that context determinism applies to assembly, not to model output, and that the curation cost this model requires is offset by compounding returns in reproducibility, auditability, and team cognition.

            ","path":["The Thesis"],"tags":[]},{"location":"thesis/#1-introduction","level":2,"title":"1. Introduction","text":"

            The introduction of large language models into software development has shifted the primary interface from code execution to interactive reasoning. In this environment, the correctness of an output depends not only on source code but on the context supplied to the model: the conventions, decisions, architectural constraints, and domain knowledge that bound the space of acceptable responses.

            Current systems treat context as a query result assembled at the moment of interaction. A developer begins a session; the tool retrieves what it estimates to be relevant from chat history, recent files, and vector stores; the model generates output conditioned on this transient assembly; the session ends, and the context evaporates. The next session begins the cycle again.

            This model has improved substantially over the past year. CLAUDE.md files, Cursor rules, Copilot's memory system, and tools such as Mem0, Letta, and Kindex each address aspects of the persistence problem. Yet across 17 systems we analyzed spanning AI coding assistants, agent frameworks, autonomous coding agents, and purpose-built knowledge stores, no system provides all five of the following properties simultaneously: deterministic context assembly, human-readable file-based persistence, token-budgeted delivery, a single-binary core with zero required runtime dependencies for the persistence path, and local-first operation.

            This paper does not propose a universal replacement for retrieval-centric workflows. It defines a persistence layer (embodied in ctx (https://ctx.ist)) whose advantages emerge under specific operational conditions: when reproducibility is a requirement, when knowledge must outlive sessions and individuals, when teams require shared cognitive authority, or when offline operation is necessary.

            The trade-offs (manual curation cost, reduced automatic recall, coarser granularity) are intentional and mirror the trade-offs accepted by systems that favor reproducibility over convenience, such as reproducible builds and immutable infrastructure 1 6.

            The contribution is threefold: a three-tier model for cognitive state that resolves the ambiguity between authoritative knowledge and ephemeral session artifacts; six design invariants empirically grounded in a cross-system landscape analysis; and an experience report demonstrating that the model produces compounding returns when applied to its own development.

            ","path":["The Thesis"],"tags":[]},{"location":"thesis/#2-the-limits-of-prompt-time-context","level":2,"title":"2. The Limits of Prompt-Time Context","text":"

            Prompt-time assembly pipelines typically consist of corpus selection, retrieval, ranking, and truncation. These pipelines are probabilistic and time-dependent, producing three failure modes that compound over the lifetime of a project.

            ","path":["The Thesis"],"tags":[]},{"location":"thesis/#21-non-reproducibility","level":3,"title":"2.1 Non-Reproducibility","text":"

            If context is derived from mutable sources using heuristic ranking, identical requests at different times receive different inputs. A developer who asks \"What is our authentication strategy?\" on Tuesday may receive a different context window than the same question on Thursday: not because the strategy changed, but because the retrieval heuristic surfaced different fragments.

            Reproducibility (the ability to reconstruct the exact inputs that produced a given output) is a foundational property of reliable systems. Its loss in AI-assisted development mirrors the historical evolution from ad-hoc builds to deterministic build systems 1 2. The build community learned that when outputs depend on implicit state (environment variables, system clocks, network-fetched dependencies), debugging becomes archaeology. The same principle applies when AI outputs depend on non-deterministic context retrieval.

            ","path":["The Thesis"],"tags":[]},{"location":"thesis/#22-opaque-knowledge","level":3,"title":"2.2 Opaque Knowledge","text":"

            Embedding-based memory increases recall but reduces inspectability. When a vector store determines that a code snippet is \"similar\" to the current query, the ranking function is opaque: the developer cannot inspect why that snippet was chosen, whether a more relevant artifact was excluded, or whether the ranking will remain stable. This prevents deterministic debugging, policy auditing, and causal attribution (properties that information retrieval theory identifies as fundamental trade-offs of probabilistic ranking) 3.

            In practice, this opacity manifests as a compliance ceiling. In our experience developing a context management system (detailed in Section 7), soft instructions (directives that ask an AI agent to read specific files or follow specific procedures) achieve approximately 75-85% compliance. The remaining 15-25% represents cases where the agent exercises judgment about whether the instruction applies, effectively applying a second ranking function on top of the explicit directive. When 100% compliance is required, instruction is insufficient; the content must be injected directly, removing the agent's option to skip it.

            ","path":["The Thesis"],"tags":[]},{"location":"thesis/#23-loss-of-intent","level":3,"title":"2.3 Loss of Intent","text":"

            Session transcripts record interaction but not cognition. A transcript captures what was said but not which assumptions were accepted, which alternatives were rejected, or which constraints governed the decision. The distinction matters: a decision to use PostgreSQL recorded as a one-line note (\"Use PostgreSQL\") teaches a model what was decided; a structured record with context, rationale, and consequences teaches it why (and why is what prevents the model from unknowingly reversing the decision in a future session) 4.

            Session transcripts provide history. Cognitive state requires something more: the persistent, structured representation of the knowledge required for correct decision-making.

            ","path":["The Thesis"],"tags":[]},{"location":"thesis/#3-cognitive-state-a-three-tier-model","level":2,"title":"3. Cognitive State: A Three-Tier Model","text":"","path":["The Thesis"],"tags":[]},{"location":"thesis/#31-definitions","level":3,"title":"3.1 Definitions","text":"

            We define cognitive state as the authoritative, persistent representation of the knowledge required for correct decision-making within a project. It is human-authored or human-ratified, versioned, inspectable, and reproducible. It is distinct from logs, transcripts, retrieval results, and model-generated summaries.

            Previous formulations of this idea have treated cognitive state as a monolithic concept. In practice, a three-tier model better captures the operational reality:

            Tier 1: Authoritative State: The canonical knowledge that the system treats as ground truth. In a concrete implementation, this corresponds to a set of human-curated files with defined schemas: a constitution (inviolable rules), conventions (code patterns), an architecture document (system structure), decision records (choices with rationale), learnings (captured experience), a task list (current work), a glossary (domain terminology), and an agent playbook (operating instructions). Each file has a single purpose, a defined lifecycle, and a distinct update frequency. Authoritative state is version-controlled alongside code and reviewed through the same mechanisms (diffs, pull requests, blame annotations).

            Tier 2: Delivery Views: Derived representations of authoritative state, assembled for consumption by a model. A delivery view is produced by a deterministic assembly function that takes the authoritative state, a token budget, and an inclusion policy as inputs and produces a context window as output. The same authoritative state, budget, and policy must always produce the same delivery view. Delivery views are ephemeral (they exist only for the duration of a session), but their construction is reproducible.

            Tier 3: Ephemeral State: Session transcripts, scratchpad notes, draft journal entries, and other artifacts that exist during or immediately after a session but are not authoritative. Ephemeral state is the raw material from which authoritative state may be extracted through human review, but it is never consumed directly by the assembly function.

            This three-tier model resolves confusion present in earlier formulations: the claim that AI output is a deterministic function of the repository state. The corrected claim is that context selection is deterministic (the delivery view is a function of authoritative state), but model output remains stochastic, conditioned on the deterministic context. Formally:

            delivery_view = assemble(authoritative_state, budget, policy)\noutput = model(delivery_view)   # stochastic\n

            The persistence layer's contribution is making assemble reproducible, not making model deterministic.

            ","path":["The Thesis"],"tags":[]},{"location":"thesis/#32-separation-of-concerns","level":3,"title":"3.2 Separation of Concerns","text":"

            The decision to separate authoritative state into distinct files with distinct purposes is not cosmetic. Different types of knowledge have different lifecycles:

            Knowledge Type Update Frequency Read Frequency Load Priority Example Constitution Rarely Every session Always \"Never commit secrets to git\" Tasks Every session Session start Always \"Implement token budget CLI flag\" Conventions Weekly Before coding High \"All errors use structured logging with severity levels\" Decisions When decided When questioning Medium \"Use PostgreSQL over MySQL (see ADR-003)\" Learnings When learned When stuck Medium \"Hook scripts >50ms degrade interactive UX\" Architecture When changed When designing On demand \"Three-layer pipeline: ingest → enrich → assemble\" Journal Every session Rarely Never auto \"Session 247: Removed dead-end session copy layer\"

            A monolithic context file would force the assembly function to load everything or nothing. Separation enables progressive disclosure: the minimum context that matters for the current moment, with the option to load more when needed. A normal session loads the constitution, tasks, and conventions; a deep investigation loads decision history and journal entries from specific dates.

            The budget mechanism is the constraint that makes separation valuable. Without a budget, the default behavior is to load everything, which destroys the attention density that makes loaded context useful. With a budget, the assembly function must prioritize ruthlessly: constitution first (always full), then tasks and conventions (budget-capped), then decisions and learnings (scored by recency). Entries that do not fit receive title-only summaries rather than being silently dropped (an application of the \"tell me what you don't know\" pattern identified independently by four systems in our landscape analysis).

            ","path":["The Thesis"],"tags":[]},{"location":"thesis/#4-design-invariants","level":2,"title":"4. Design Invariants","text":"

            The following six invariants define the constraints that a cognitive state persistence layer must satisfy. They are not axioms chosen a priori; they are empirically grounded properties whose violation was independently identified as producing complexity costs across the 17 systems we analyzed.

            ","path":["The Thesis"],"tags":[]},{"location":"thesis/#invariant-1-markdown-on-filesystem-persistence","level":3,"title":"Invariant 1: Markdown-on-Filesystem Persistence","text":"

            Context files must be human-readable, git-diffable, and editable with any text editor. No database. No binary storage.

            Validation: 11 independent rejection decisions across the analyzed landscape protected this property. Systems that adopted embedded records, binary serialization, or knowledge graphs as their core primitive consistently traded away the ability for a developer to run cat DECISIONS.md and understand the system's knowledge. The inspection cost of opaque storage compounds over the lifetime of a project: every debugging session, every audit, every onboarding conversation requires specialized tooling to access knowledge that could have been a text file.

            ","path":["The Thesis"],"tags":[]},{"location":"thesis/#invariant-2-zero-runtime-dependencies","level":3,"title":"Invariant 2: Zero Runtime Dependencies","text":"

            The tool must work with no installed runtimes, no running services, and no API keys for core functionality.

            Validation: 13 independent rejection decisions protected this property (the most frequently defended invariant). Systems that required databases (PostgreSQL, SQLite, Redis), embedding models, server daemons, container runtimes, or cloud APIs for core operation introduced failure modes proportional to their dependency count. A persistence layer that depends on infrastructure is not a persistence layer; it is a service. Services have uptime requirements, version compatibility matrices, and operational costs that simple file operations do not.

            ","path":["The Thesis"],"tags":[]},{"location":"thesis/#invariant-3-deterministic-context-assembly","level":3,"title":"Invariant 3: Deterministic Context Assembly","text":"

            The same files plus the same budget must produce the same output. No embedding-based retrieval, no LLM-driven selection, no wall-clock-dependent scoring in the assembly path.

            Validation: 6 independent rejection decisions protected this property. Non-deterministic assembly (whether from embedding variance, LLM-based selection, or time-dependent scoring) destroys the ability to reproduce a context window and therefore to diagnose why a model produced a given output. Determinism in the assembly path is what makes the persistence layer auditable.

            ","path":["The Thesis"],"tags":[]},{"location":"thesis/#invariant-4-human-authority-over-persistent-state","level":3,"title":"Invariant 4: Human Authority over Persistent State","text":"

            The agent may propose changes to context files but must not unilaterally modify them. All persistent changes go through human-reviewable git commits.

            Validation: 6 independent rejection decisions protected this property. Systems that allowed agents to self-modify their memory (writing freeform notes, auto-pruning old entries, generating summaries as ground truth) consistently produced lower-quality persistent context than systems that enforced human review. Structure is a feature, not a limitation: across the landscape, the pattern \"structured beats freeform\" was independently discovered by four systems that evolved from freeform LLM summaries to typed schemas with required fields.

            ","path":["The Thesis"],"tags":[]},{"location":"thesis/#invariant-5-local-first-air-gap-capable","level":3,"title":"Invariant 5: Local-First, Air-Gap Capable","text":"

            Core functionality must work offline with no network access. Cloud services may be used for optional features but never for core context management.

            Validation: 7 independent rejection decisions protected this property. Infrastructure-dependent memory systems cannot operate in classified environments, isolated networks, or disaster-recovery scenarios. A filesystem-native model continues to function under all conditions where the repository is accessible.

            ","path":["The Thesis"],"tags":[]},{"location":"thesis/#invariant-6-no-default-telemetry","level":3,"title":"Invariant 6: No Default Telemetry","text":"

            Any analytics, if ever added, must be strictly opt-in.

            Validation: 4 independent rejection decisions protected this property. Default telemetry erodes the trust model that a persistence layer depends on. If developers must trust the system with their architectural decisions, operational learnings, and project constraints, the system cannot simultaneously be reporting usage data to external services.

            These six invariants collectively define a design space. Each feature proposal can be evaluated against them: a feature that violates any invariant is rejected regardless of how many other systems implement it. The discipline of constraint (refusing to add capabilities that compromise foundational properties) is itself an architectural contribution. Across the 17 analyzed systems, 56 patterns were explicitly rejected for violating these invariants. The rejection count per invariant (11, 13, 6, 6, 7, 4) provides a rough measure of each property's vulnerability to architectural erosion. A representative sample of these rejections is provided in Appendix A.1.

            ","path":["The Thesis"],"tags":[]},{"location":"thesis/#5-landscape-analysis","level":2,"title":"5. Landscape Analysis","text":"

            The 17 systems were selected to cover the architectural design space rather than to achieve completeness. Each included system satisfies three criteria: it represents a distinct architectural primitive for AI-assisted development, it is actively maintained or widely referenced, and it provides sufficient public documentation or source code for architectural inspection. The goal was to ensure that every major category of primitive (document, embedded record, state snapshot, event/message, construction/derivation) was represented by multiple systems, enabling cross-system pattern detection.

            The resulting set spans seven categories: AI coding assistants (Continue, Sourcegraph/Cody, Aider, Claude Code), AI agent frameworks (CrewAI, AutoGen, LangGraph, LlamaIndex, Letta/MemGPT), autonomous coding agents (OpenHands, Sweep), session provenance tools (Entire), data versioning systems (Dolt, Pachyderm), pipeline/build systems (Dagger), and purpose-built knowledge stores (QubicDB, Kindex). Each system was analyzed from its source code and documentation, producing 34 individual analysis artifacts (an architectural profile and a set of insights per system) that yielded 87 adopt/adapt recommendations, 56 explicit rejection decisions, and 52 watch items.

            ","path":["The Thesis"],"tags":[]},{"location":"thesis/#51-primitive-taxonomy","level":3,"title":"5.1 Primitive Taxonomy","text":"

            Every system in the AI-assisted development landscape operates on a core primitive: an atomic unit around which the entire architecture revolves. Our analysis of 17 systems reveals five categories of primitives, each making irrecoverable trade-offs:

            Group A: Document/File Primitives: Human-readable documents as the primary unit. Documents are authored by humans, version-controlled in git, and consumed by AI tools. The invariant of this group is that the primitive is always human-readable and version-controllable with standard tools. Three systems participate in this pattern: the system described in this paper as a pure expression, and Continue (via its rules directory) and Claude Code (via CLAUDE.md files) as partial participants: both use document-based context as an input but organize around different core primitives.

            Group B: Embedded Record Primitives: Vector-embedded records stored with numerical embeddings for similarity search, metadata for filtering, and scoring mechanisms for ranking. Five systems use this approach (LlamaIndex, CrewAI, Letta/MemGPT, QubicDB, Kindex). The invariant is that the primitive requires an embedding model or vector database for core operations: a dependency that precludes offline and air-gapped use.

            Group C: State Snapshot Primitives: Point-in-time captures of the complete system state. The invariant is that any past state can be reconstructed at any historical point. Three systems use this approach (LangGraph, Entire, Dolt).

            Group D: Event/Message Primitives: Sequential events or messages forming an append-only log with causal relationships. Four systems use this approach (OpenHands, AutoGen, Claude Code, Sweep). The invariant is temporal ordering and append-only semantics.

            Group E: Construction/Derivation Primitives: Derived or constructed values that encode how they were produced. The invariant is that the primitive is a function of its inputs; re-executing the same inputs produces the same primitive. Three systems use this approach (Dagger, Pachyderm, Aider).

            ","path":["The Thesis"],"tags":[]},{"location":"thesis/#52-comparison-matrix","level":3,"title":"5.2 Comparison Matrix","text":"

            The five primitive categories differ along seven dimensions:

            Property Document Embedded Record State Snapshot Event/Message Construction Human-readable Yes No Varies Partially No Version-controllable Yes No Varies Yes Yes Queryable by meaning No Yes No No No Rewindable Via git No Yes Yes (replay) Yes Deterministic Yes No Yes Yes Yes Zero-dependency Yes No Varies Varies Varies Offline-capable Yes No Varies Varies Yes

            The document primitive is the only one that simultaneously satisfies human-readability, version-controllability, determinism, zero dependencies, and offline capability. This is not because documents are superior in general (embedded records provide semantic queryability that documents lack) but because the combination of all five properties is what the persistence layer requires. The choice between primitive categories is not a matter of capability but of which properties are considered invariant.

            ","path":["The Thesis"],"tags":[]},{"location":"thesis/#53-convergent-patterns","level":3,"title":"5.3 Convergent Patterns","text":"

            Across the 17 analyzed systems, six design patterns were independently discovered. These convergent patterns carry extra validation weight because they emerged from different problem spaces:

            Pattern 1: \"Tell me what you don't know\": When context is incomplete, explicitly communicate to the model what information is missing and what confidence level the provided context represents. Four systems independently converged on this pattern: inserting skip markers, tracking evidence gaps, annotating provenance, or naming output quality tiers.

            Pattern 2: \"Freshness matters\": Information relevance decreases over time. Three systems independently chose exponential decay with different half-lives (30 days, 90 days, and LRU ordering). Static priority ordering with no time dimension leaves relevant recent knowledge at the same priority as stale entries. This pattern is in productive tension with the persistence model's emphasis on determinism: the claim is not that time-dependence is irrelevant, but that it belongs in the curation step (a human deciding to consolidate or archive stale entries) rather than in the assembly function (an algorithm silently down-ranking entries based on age).

            Pattern 3: \"Content-address everything\": Compute a hash of content at creation time for deduplication, cache invalidation, integrity verification, and change detection. Five systems independently implement content hashing, each discovering it solves different problems 5.

            Pattern 4: \"Structured beats freeform\": When capturing knowledge or session state, a structured schema with required fields produces more useful data than freeform text. Four systems evolved from freeform summaries to typed schemas: one moving from LLM-generated prose to a structured condenser with explicit fields for completed tasks, pending tasks, and files modified.

            Pattern 5: \"Protocol convergence\": The Model Context Protocol (MCP) is emerging as a standard tool integration layer. Nine of 17 systems support it, spanning every category in the analysis. MCP's significance for the persistence model is that it provides a transport mechanism for context delivery without dictating how context is stored or assembled. This makes the approach compatible with both retrieval-centric and persistence-centric architectures.

            Pattern 6: \"Human-in-the-loop for memory\": Critical memory decisions should involve human judgment. Fully automated memory management produces lower-quality persistent context than human-reviewed systems. Four systems independently converged on variants of this pattern: ceremony-based consolidation, interrupt/resume for human input, confirmation mode for high-risk actions, and separated \"think fast\" vs. \"think slow\" processing paths.

            Pattern 6 directly validates the ceremony model described in this paper. The persistence layer requires human curation not because automation is impossible, but because the quality of persistent knowledge degrades when the curation step is removed. The improvement opportunity is to make curation easier, not to automate it away.

            ","path":["The Thesis"],"tags":[]},{"location":"thesis/#6-worked-example-architectural-decision-under-two-models","level":2,"title":"6. Worked Example: Architectural Decision under Two Models","text":"

            We now instantiate the three-tier model in a concrete system (ctx) and illustrate the difference between prompt-time retrieval and cognitive state persistence using a real scenario from its development.

            ","path":["The Thesis"],"tags":[]},{"location":"thesis/#61-the-problem","level":3,"title":"6.1 The Problem","text":"

            During development, the system accumulated three overlapping storage layers for session data: raw transcripts (owned by the AI tool), session copies (JSONL copies plus context snapshots), and enriched journal entries (Markdown summaries). The middle layer (session copies) was a dead-end write sink. An auto-save hook copied transcripts to a directory that nothing read from, because the journal pipeline already read directly from the raw transcripts. Approximately 15 source files, a shell hook, 20 configuration constants, and 30 documentation references supported infrastructure with no consumers.

            ","path":["The Thesis"],"tags":[]},{"location":"thesis/#62-prompt-time-retrieval-model","level":3,"title":"6.2 Prompt-Time Retrieval Model","text":"

            In a retrieval-based system, the decision to remove the middle layer depends on whether the retrieval function surfaces the relevant context:

            The developer asks: \"Should we simplify the session storage?\" The retrieval system must find and rank the original discussion thread where the three layers were designed, the usage statistics showing zero reads from the middle layer, the journal pipeline documentation showing it reads from raw transcripts directly, and the dependency analysis showing 15 files, a hook, and 30 doc references. If any of these fragments are not retrieved (because they are in old chat history, because the embedding similarity score is low, or because the token budget was consumed by more recent but less relevant context), the model may recommend preserving the middle layer, or may not realize it exists.

            Six months later, a new team member asks the same question. The retrieval results will differ: the original discussion has aged out of recency scoring, the usage statistics are no longer in recent history, and the model may re-derive the answer or arrive at a different conclusion.

            ","path":["The Thesis"],"tags":[]},{"location":"thesis/#63-cognitive-state-model","level":3,"title":"6.3 Cognitive State Model","text":"

            In the persistence model, the decision is recorded as a structured artifact at write time:

            ## [2026-02-11] Remove .context/sessions/ storage layer\n\n**Status**: Accepted\n\n**Context**: The session/recall/journal system had three overlapping\nstorage layers. The recall pipeline reads directly from raw transcripts,\nmaking .context/sessions/ a dead-end write sink that nothing reads from.\n\n**Decision**: Remove .context/sessions/ entirely. Two stores remain:\nraw transcripts (global, tool-owned) and enriched journal\n(project-local).\n\n**Rationale**: Dead-end write sinks waste code surface, maintenance\neffort, and user attention. The recall pipeline already proved that\nreading directly from raw transcripts is sufficient. Context snapshots\nare redundant with git history.\n\n**Consequence**: Deleted internal/cli/session/ (15 files), removed\nauto-save hook, removed --auto-save from watch, removed pre-compact\nauto-save, removed /ctx-save skill, updated ~45 documentation files.\nFour earlier decisions superseded.\n

            This artifact is:

            • Deterministically included in every subsequent session's delivery view (budget permitting, with title-only fallback if budget is exceeded)
            • Human-readable and reviewable as a diff in the commit that introduced it
            • Permanent: it persists in version control regardless of retrieval heuristics
            • Causally linked: it explicitly supersedes four earlier decisions, creating an auditable chain

            When the new team member asks \"Why don't we store session copies?\" six months later, the answer is the same artifact, at the same revision, with the same rationale. The reasoning is reconstructible because it was persisted at write time, not discovered at query time.

            ","path":["The Thesis"],"tags":[]},{"location":"thesis/#64-the-diff-when-policy-changes","level":3,"title":"6.4 The Diff When Policy Changes","text":"

            If a future requirement re-introduces session storage (for example, to support multi-agent session correlation), the change appears as a diff to the decision record:

            - **Status**: Accepted\n+ **Status**: Superseded by [2026-08-15] Reintroduce session storage\n+ for multi-agent correlation\n

            The new decision record references the old one, creating a chain of reasoning visible in git log. In the retrieval model, the old decision would simply be ranked lower over time and eventually forgotten.

            ","path":["The Thesis"],"tags":[]},{"location":"thesis/#7-experience-report-a-system-that-designed-itself","level":2,"title":"7. Experience Report: A System That Designed Itself","text":"

            The persistence model described in this paper was developed and tested by using it on its own development. Over 33 days and 389 sessions, the system's context files accumulated a detailed record of decisions made, reversed, and consolidated: providing quantitative and qualitative evidence for the model's properties.

            ","path":["The Thesis"],"tags":[]},{"location":"thesis/#71-scale-and-structure","level":3,"title":"7.1 Scale and Structure","text":"

            The development produced the following authoritative state artifacts:

            • 8 consolidated decision records covering 24 original decisions spanning context injection architecture, hook design, task management, security, agent autonomy, and webhook systems
            • 18 consolidated learning records covering 75 original observations spanning agent compliance, hook behavior, testing patterns, documentation drift, and tool integration
            • A constitution with 13 inviolable rules across 4 categories (security, quality, process, context preservation)
            • 389 enriched journal entries providing a complete session-level audit trail

            The consolidation ratio (24 decisions compressed to 8 records, 75 learnings compressed to 18) illustrates the curation cost and its return: authoritative state becomes denser and more useful over time as related entries are merged, contradictions are resolved, and superseded decisions are marked.

            ","path":["The Thesis"],"tags":[]},{"location":"thesis/#72-architectural-reversals","level":3,"title":"7.2 Architectural Reversals","text":"

            Three architectural reversals during development provide evidence that the persistence model captures and communicates reasoning effectively:

            Reversal 1: The two-tier persistence model: The original design included a middle storage tier for session copies. After 21 days of development, the middle tier was identified as a dead-end write sink (described in Section 6). The decision record captured the full context, and the removal was executed cleanly: 15 source files, a shell hook, and 45 documentation references. The pattern of a \"dead-end write sink\" was subsequently observed in 7 of 17 systems in our landscape analysis that store raw transcripts alongside structured context.

            Reversal 2: The prompt-coach hook: An early design included a hook that analyzed user prompts and offered improvement suggestions. After deployment, the hook produced zero useful tips, its output channel was invisible to users, and it accumulated orphan temporary files. The hook was removed, and the decision record captured the failure mode for future reference.

            Reversal 3: The soft-instruction compliance model: The original context injection strategy relied on soft instructions: directives asking the AI agent to read specific files. After measuring compliance across multiple sessions, we found a consistent 75-85% compliance ceiling. The revised strategy injects content directly, bypassing the agent's judgment about whether to comply. The learning record captures the ceiling measurement and the rationale for the architectural change.

            Each reversal was captured as a structured decision record with context, rationale, and consequences. In a retrieval-based system, these reversals would exist only in chat history, discoverable only if the retrieval function happens to surface them. In the persistence model, they are permanent, indexable artifacts that inform future decisions.

            ","path":["The Thesis"],"tags":[]},{"location":"thesis/#73-compliance-ceiling","level":3,"title":"7.3 Compliance Ceiling","text":"

            The 75-85% compliance ceiling for soft instructions is the most operationally significant finding from the experience report. It means that any context management strategy relying on agent compliance with instructions (\"read this file,\" \"follow this convention,\" \"check this list\") has a hard ceiling on reliability.

            The root cause is structural: the instruction \"don't apply judgment\" is itself evaluated by judgment. When an agent receives a directive to read a file, it first assesses whether the directive is relevant to the current task (and that assessment is the judgment the directive was trying to prevent).

            The architectural response maps directly to the formal model defined in Section 3.1. Content requiring 100% compliance is included in authoritative_state and injected by the deterministic assemble function, bypassing the agent entirely. Content where 80% compliance is acceptable is delivered as instructions within the delivery view. The three-tier architecture makes this distinction explicit: authoritative state is injected; delivery views are assembled deterministically; ephemeral state is available but not pushed.

            ","path":["The Thesis"],"tags":[]},{"location":"thesis/#74-compounding-returns","level":3,"title":"7.4 Compounding Returns","text":"

            Over 33 days, we observed a qualitative shift in the development experience. Early sessions (days 1-7) spent significant time re-establishing context: explaining conventions, re-stating constraints, re-deriving past decisions. Later sessions (days 25-33) began with the agent loading curated context and immediately operating within established constraints, because the constraints were in files rather than in chat history.

            This compounding effect (where each session's context curation improves all subsequent sessions) is the primary return on the curation investment. The cost is borne once (writing a decision record, capturing a learning, updating the task list); the benefit is collected on every subsequent session load.

            The effect is analogous to compound interest in financial systems: the knowledge base grows not linearly with effort but with increasing marginal returns as new knowledge interacts with existing context. A learning captured on day 5 prevents a mistake on day 12, which avoids a debugging session that would have consumed a day 12 session, freeing that session for productive work that generates new learnings. The growth is not literally exponential (it is bounded by project scope and subject to diminishing returns as the knowledge base matures), but within the observed 33-day window, the returns were consistently accelerating.

            ","path":["The Thesis"],"tags":[]},{"location":"thesis/#75-scope-and-generalizability","level":3,"title":"7.5 Scope and Generalizability","text":"

            This experience report is self-referential by design: the system was developed using its own persistence model. This circularity strengthens the internal validity of the findings (the model was stress-tested under authentic conditions) but limits external generalizability. The two-week crossover point was observed on a single project of moderate complexity with a small team already familiar with the model's assumptions. Whether the same crossover holds for larger teams, for codebases with different characteristics, or for teams adopting the model without having designed it remains an open empirical question. The quantitative claims in this section should be read as existence proofs (demonstrating that the model can produce compounding returns) rather than as predictions about specific adoption scenarios.

            ","path":["The Thesis"],"tags":[]},{"location":"thesis/#8-situating-the-persistence-layer","level":2,"title":"8. Situating the Persistence Layer","text":"

            The persistence layer occupies a specific position in the stack of AI-assisted development:

            Application Logic\nAI Interaction / Agents\nContext Retrieval Systems\nCognitive State Persistence Layer\nVersion Control / Storage\n

            Current systems innovate primarily in the retrieval layer (improving how context is discovered, ranked, and delivered at query time). The persistence layer sits beneath retrieval and above version control. Its role is to maintain the authoritative state that retrieval systems may query but do not own. The relationship is complementary: retrieval answers \"What in the corpus might be relevant?\"; cognitive state answers \"What must be true for this system to operate correctly?\" A mature system uses both: retrieval for discovery, persistence for authority.

            ","path":["The Thesis"],"tags":[]},{"location":"thesis/#9-applicability-and-trade-offs","level":2,"title":"9. Applicability and Trade-Offs","text":"","path":["The Thesis"],"tags":[]},{"location":"thesis/#91-when-to-use-this-model","level":3,"title":"9.1 When to Use This Model","text":"

            A cognitive state persistence layer is most appropriate when:

            Reproducibility is a requirement: If a system must be able to answer \"Why did this output occur, and can it be produced again?\" then deterministic, version-controlled context becomes necessary. This is relevant in regulated environments, safety-critical systems, long-lived infrastructure, and security-sensitive deployments.

            Knowledge must outlive sessions and individuals: Projects with multi-year lifetimes accumulate architectural decisions, domain interpretations, and operational policy. If this knowledge is stored only in chat history, issue trackers, and institutional memory, it decays. The persistence model converts implicit knowledge into branchable, reviewable artifacts.

            Teams require shared cognitive authority: In collaborative environments, correctness depends on a stable answer to \"What does the system believe to be true?\" When this answer is derived from retrieval heuristics, authority shifts to ranking algorithms. When it is versioned and human-readable, authority remains with the team.

            Offline or air-gapped operation is required: Infrastructure-dependent memory systems cannot operate in classified environments, isolated networks, or disaster-recovery scenarios.

            ","path":["The Thesis"],"tags":[]},{"location":"thesis/#92-when-not-to-use-this-model","level":3,"title":"9.2 When Not to Use This Model","text":"

            Zero-configuration personal workflows: For short-lived or exploratory tasks, the cost of explicit knowledge curation outweighs its benefits. Heuristic retrieval is sufficient when correctness is non-critical, outputs are disposable, and historical reconstruction is unnecessary.

            Maximum automatic recall from large corpora: Vector retrieval systems provide superior performance when the primary task is searching vast, weakly structured information spaces. The persistence model assumes that what matters can be decided and that this decision is valuable to record.

            Fully autonomous agent architectures: Agent runtimes that generate and discard state continuously, optimizing for local goal completion, do not benefit from a model that centers human ratification of knowledge.

            ","path":["The Thesis"],"tags":[]},{"location":"thesis/#93-incremental-adoption","level":3,"title":"9.3 Incremental Adoption","text":"

            The transition does not require full system replacement. An incremental path:

            Step 1: Record decisions as versioned artifacts: Instead of allowing conclusions to remain in discussion threads, persist them in reviewable form with context, rationale, and consequences 4. This alone converts ephemeral reasoning into the cognitive state.

            Step 2: Make inclusion deterministic: Define explicit assembly rules. Retrieval may still exist, but it is no longer authoritative.

            Step 3: Move policy into cognitive state: When system behavior depends on stable constraints, encode those constraints as versioned knowledge. Behavior becomes reproducible.

            Step 4: Optimize assembly, not retrieval: Once the authoritative layer exists, performance improvements come from budgeting, caching, and structural refinement rather than from improving ranking heuristics.

            ","path":["The Thesis"],"tags":[]},{"location":"thesis/#94-the-curation-cost","level":3,"title":"9.4 The Curation Cost","text":"

            The primary objection to this model is the cost of explicit knowledge curation. This cost is real. Writing a structured decision record takes longer than letting a chatbot auto-summarize a conversation. Maintaining a glossary requires discipline. Consolidating 75 learnings into 18 records requires judgment.

            The response is not that the cost is negligible but that it is amortized. A decision record written once is loaded hundreds of times. A learning captured today prevents repeated mistakes across all future sessions. The curation cost is paid once; the benefit compounds.

            The experience report provides rough order-of-magnitude numbers. Across 389 sessions over 33 days, curation activities (writing decision records, capturing learnings, updating the task list, consolidating entries) averaged approximately 3-5 minutes per session. In early sessions (days 1-7), before curated context existed, re-establishing context consumed approximately 10-15 minutes per session: re-explaining conventions, re-stating architectural constraints, re-deriving decisions that had been made but not persisted. By the final week (days 25-33), the re-explanation overhead had dropped to near zero: the agent loaded curated context and began productive work immediately.

            At ~12 sessions per day, the curation cost was roughly 35-60 minutes daily. The re-explanation cost in the first week was roughly 120-180 minutes daily. By the third week, that cost had fallen to under 15 minutes daily while the curation cost remained stable. The crossover (where cumulative curation cost was exceeded by cumulative time saved) occurred around day 10. These figures are approximate and derived from a single project with a small team already familiar with the model; the crossover point will vary with project complexity, team size, and curation discipline.

            ","path":["The Thesis"],"tags":[]},{"location":"thesis/#10-future-work","level":2,"title":"10. Future Work","text":"

            Several directions are compatible with the model described here:

            Section-level deterministic budgeting: Current assembly operates at file granularity. Section-level budgeting would allow finer-grained control (including specific decision records while excluding others within the same file) without sacrificing determinism.

            Causal links between decisions: The experience report shows that decisions frequently reference earlier decisions (superseding, extending, or qualifying them). Formal causal links would enable traversal of the decision graph and automatic detection of orphaned or contradictory constraints.

            Content-addressed context caches: Five systems in our landscape analysis independently discovered that content hashing provides cache invalidation, integrity verification, and change detection. Applying content addressing to the assembly output would enable efficient cache reuse when the authoritative state has not changed.

            Conditional context inclusion: Five systems independently suggest that context entries could carry activation conditions (file patterns, task keywords, or explicit triggers) that control whether they are included in a given assembly. This would reduce the per-session budget cost of large knowledge bases without sacrificing determinism.

            Provenance metadata: Linking context entries to the sessions, decisions, or learnings that motivated them would strengthen the audit trail. Optional provenance fields on Markdown entries (session identifier, cause reference, motivation) would be lightweight and compatible with the existing file-based model.

            ","path":["The Thesis"],"tags":[]},{"location":"thesis/#11-conclusion","level":2,"title":"11. Conclusion","text":"

            AI-assisted development has treated context as a \"query result\" assembled at the moment of interaction, discarded at the session end. This paper identifies a complementary layer: the persistence of authoritative cognitive state as deterministic, version-controlled artifacts.

            The contribution is grounded in three sources of evidence. A landscape analysis of 17 systems reveals five categories of primitives and shows that no existing system provides the combination of human-readability, determinism, zero dependencies, and offline capability that the persistence layer requires. Six design invariants, validated by 56 independent rejection decisions, define the constraints of the design space. An experience report over 389 sessions and 33 days demonstrates compounding returns: later sessions start faster, decisions are not re-derived, and architectural reversals are captured with full context.

            The core claim is this: persistent cognitive state enables causal reasoning across time. A system built on this model can explain not only what is true, but why it became true and when it changed.

            When context is the state:

            • Reasoning is reproducible: the same authoritative state, budget, and policy produce the same delivery view.
            • Knowledge is auditable: decisions are traceable to explicit artifacts with context, rationale, and consequences.
            • Understanding compounds: each session's curation improves all subsequent sessions.

            The choice between retrieval-centric workflows and a persistence layer is not a matter of capability but of time horizon. Retrieval optimizes for relevance at the moment of interaction. Persistence optimizes for the durability of understanding across the lifetime of a project.

            🐸🖤 \"Gooood... let the deterministic context flow through the repository...\" - Kermit the Sidious, probably

            ","path":["The Thesis"],"tags":[]},{"location":"thesis/#appendix-a-representative-rejection-decisions","level":2,"title":"Appendix A: Representative Rejection Decisions","text":"

            The 56 rejection decisions referenced in Section 4 were cataloged across all 17 system analyses, grouped by the invariant they would violate. This appendix provides a representative sample (two per invariant) to illustrate the methodology.

            Invariant 1: Markdown-on-Filesystem (11 rejections): CrewAI's vector embedding storage was rejected because embeddings are not human-readable, not git-diff-friendly, and require external services. Kindex's knowledge graph as core primitive was rejected because it requires specialized commands to inspect content that could be a text file (kin show <id> vs. cat DECISIONS.md).

            Invariant 2: Zero Runtime Dependencies (13 rejections): Letta/MemGPT's PostgreSQL-backed architecture was rejected because it conflicts with local-first, no-database, single-binary operation. Pachyderm's Kubernetes-based distributed architecture was rejected as the antithesis of a single-binary design for a tool that manages text files.

            Invariant 3: Deterministic Assembly (6 rejections): LlamaIndex's embedding-based retrieval as the primary selection mechanism was rejected because it destroys determinism, requires an embedding model, and removes human judgment from the selection process. QubicDB's wall-clock-dependent scoring was rejected because it directly conflicts with the \"same inputs produce same output\" property.

            Invariant 4: Human Authority (6 rejections): Letta/MemGPT's agent self-modification of memory was rejected as fundamentally opposed to human-curated persistence. Claude Code's unstructured auto-memory (where the agent writes freeform notes) was rejected because structured files with defined schemas produce higher-quality persistent context than unconstrained agent output.

            Invariant 5: Local-First / Air-Gap Capable (7 rejections): Sweep's cloud-dependent architecture was rejected as fundamentally incompatible with the local-first, offline-capable model. LangGraph's managed cloud deployment was rejected because cloud dependencies for core functionality violate air-gap capability.

            Invariant 6: No Default Telemetry (4 rejections): Continue's telemetry-by-default (PostHog) was rejected because it contradicts the local-first, privacy-respecting trust model. CrewAI's global telemetry on import (Scarf tracking pixel) was rejected because it violates user trust and breaks air-gap capability.

            The remaining 9 rejections did not map to a specific invariant but were rejected on other architectural grounds: for example, Aider's full-file-content-in-context approach (which defeats token budgeting), AutoGen's multi-agent orchestration as core primitive (scope creep), and Claude Code's 30-day transcript retention limit (institutional knowledge should have no automatic expiration).

            ","path":["The Thesis"],"tags":[]},{"location":"thesis/#references","level":2,"title":"References","text":"
            1. Reproducible Builds Project, \"Reproducible Builds: Increasing the Integrity of Software Supply Chains\", 2017. https://reproducible-builds.org/docs/definition/ ↩↩↩

            2. S. McIntosh et al., \"The Impact of Build System Evolution on Software Quality\", ICSE, 2015. https://doi.org/10.1109/ICSE.2015.70 ↩

            3. C. Manning, P. Raghavan, H. Schütze, Introduction to Information Retrieval, Cambridge University Press, 2008. https://nlp.stanford.edu/IR-book/ ↩

            4. M. Nygard, \"Documenting Architecture Decisions\", Cognitect Blog, 2011. https://cognitect.com/blog/2011/11/15/documenting-architecture-decisions ↩↩

            5. L. Torvalds et al., Git Internals - Git Objects (content-addressed storage concepts). https://git-scm.com/book/en/v2/Git-Internals-Git-Objects ↩

            6. Kief Morris, Infrastructure as Code, O'Reilly, 2016. ↩

            7. J. Kreps, \"The Log: What every software engineer should know about real-time data's unifying abstraction\", 2013. https://engineering.linkedin.com/distributed-systems/log ↩

            8. P. Hunt et al., \"ZooKeeper: Wait-free coordination for Internet-scale systems\", USENIX ATC, 2010. https://www.usenix.org/legacy/event/atc10/tech/full_papers/Hunt.pdf ↩

            ","path":["The Thesis"],"tags":[]}]} \ No newline at end of file diff --git a/site/security/agent-security/index.html b/site/security/agent-security/index.html index f1d6949ad..fce622cb3 100644 --- a/site/security/agent-security/index.html +++ b/site/security/agent-security/index.html @@ -15,7 +15,7 @@ - + @@ -252,13 +252,6 @@ - - - - - - -
          • @@ -270,9 +263,6 @@ - - - @@ -548,6 +538,10 @@ + + + + @@ -577,14 +571,12 @@ - - - - + + @@ -602,8 +594,6 @@ - -
          • @@ -913,6 +903,8 @@ + + @@ -979,6 +971,36 @@ + + +
          • + + + + + + + + + + Security Design + + + + + + + + +
          • + + + + + + + + @@ -1328,7 +1350,7 @@ - Security Policy + Reporting Vulnerabilities @@ -2082,7 +2104,7 @@

            Further Reading - +

          diff --git a/site/cli/backup/index.html b/site/security/design/index.html similarity index 65% rename from site/cli/backup/index.html rename to site/security/design/index.html index d6ca57c23..749b29138 100644 --- a/site/cli/backup/index.html +++ b/site/security/design/index.html @@ -12,13 +12,13 @@ - + - + - + @@ -29,7 +29,7 @@ - Backup - ctx: do you remember? + Security Design - ctx: do you remember? @@ -87,7 +87,7 @@
          - + Skip to content @@ -122,7 +122,7 @@
          - Backup + Security Design
          @@ -252,13 +252,6 @@ - - - - - - -
        • @@ -270,9 +263,6 @@ - - - @@ -297,13 +287,11 @@ - - -
        • - +
        • + CLI @@ -356,11 +344,13 @@ + + -
        • - +
        • + Security @@ -548,6 +538,10 @@ + + + + @@ -577,14 +571,12 @@ - - - - + + @@ -602,8 +594,6 @@ - -
        • @@ -686,8 +676,6 @@ - - @@ -704,80 +692,6 @@ - - - - - - - - - - - - - - - - - - - -
        • - - - - - - - - -
        • +

        +

    + - - - - - - - - - - - - - - - - - - - - - - -
  • - - - - - - - - - - - - - Shell - - - - - - - - - - - - +
    +
    +
    + - -
  • - - - - - - - - - - - - +
    -
    -
    - - - -
    -
    -
    - - - - - -
      +
    • - + - ctx backup + Best Practices @@ -1725,25 +1421,7 @@ - CLI - - - -
    • - - - - - - - - - -
    • - - - - Runtime + Security @@ -1763,81 +1441,162 @@ -

      Backup

      +

      Security Design

      ctx

      -

      ctx backup

      -

      Create timestamped tar.gz archives of project context and/or global -Claude Code data. Optionally copies archives to an SMB share via GVFS.

      -
      ctx backup [flags]
      -
      -

      Flags:

      - - - - - - - - - - - - - - - - - -
      FlagDescription
      --scopeBackup scope: project, global, or all (default: all)
      --jsonOutput results as JSON
      -

      Scopes:

      - - - - - - - - - - - - - - - - - - - - - -
      ScopeWhat's archived
      project.context/, .claude/, ideas/, ~/.bashrc
      global~/.claude/ (excludes todos/)
      allBoth project and global (default)
      -

      Environment:

      - - - - - - - - - - - - - - - - - -
      VariablePurpose
      CTX_BACKUP_SMB_URLSMB share URL (e.g. smb://host/share)
      CTX_BACKUP_SMB_SUBDIRSubdirectory on share (default: ctx-sessions)
      -

      Examples:

      -
      ctx backup                       # Back up everything (default: all)
      -ctx backup --scope project       # Project context only
      -ctx backup --scope global        # Global Claude data only
      -ctx backup --scope all --json    # Both, JSON output
      +

      How ctx thinks about security: trust boundaries, what the system +does and does not do for you, the engineering principle behind the +audit trail, and the permission hygiene workflow.

      +

      For vulnerability disclosure, see +Reporting Vulnerabilities.

      +

      Trust Model

      +

      ctx operates within a single trust boundary: the local +filesystem.

      +

      The person who authors .context/ files is the same person who runs +the agent that reads them. There is no remote input, no shared state, +and no server component.

      +

      This means:

      +
        +
      • ctx does not sanitize context files for prompt injection. This + is a deliberate design choice, not an oversight. The files are + authored by the developer who owns the machine: sanitizing their + own instructions back to them would be counterproductive.
      • +
      • If you place adversarial instructions in your own .context/ + files, your agent will follow them. This is expected behavior. + You control the context; the agent trusts it.
      • +
      +
      +

      Shared Repositories

      +

      In shared repositories, .context/ files should be reviewed in +code review (the same way you would review CI/CD config or +Makefiles). A malicious contributor could add harmful +instructions to CONSTITUTION.md or TASKS.md.

      +
      +

      What ctx Does for Security

      +

      ctx is designed with security in mind:

      +
        +
      • No secrets in context: The constitution explicitly forbids + storing secrets, tokens, API keys, or credentials in .context/ + files.
      • +
      • Local only: ctx runs entirely locally with no external + network calls.
      • +
      • No code execution: ctx reads and writes Markdown files only; + it does not execute arbitrary code.
      • +
      • Git-tracked: Core context files are meant to be committed, so + they should never contain sensitive data. Exception: sessions/ + and journal/ contain raw conversation data and should be + gitignored.
      • +
      +

      Permission Hygiene

      +

      Claude Code evaluates permissions in deny → ask → allow order. +ctx init automatically populates permissions.deny with rules +that block dangerous operations before the allow list is ever +consulted.

      +

      Default deny rules block:

      +
        +
      • sudo, git push, rm -rf /, rm -rf ~, curl, wget, + chmod 777
      • +
      • Read / Edit of .env, credentials, secrets, .pem, .key + files
      • +
      +

      Even with deny rules in place, the allow list accumulates one-off +permissions over time. Periodically review for:

      +
        +
      • Destructive commands: git reset --hard, git clean -f, etc.
      • +
      • Config injection vectors: permissions that allow modifying + files controlling agent behavior (CLAUDE.md, + settings.local.json).
      • +
      • Broad wildcards: overly permissive patterns that pre-approve + more than intended.
      • +
      +

      For the full hygiene workflow, see the +Claude Code Permission Hygiene +recipe.

      +

      State File Management

      +

      Hook state files (throttle markers, prompt counters, pause markers) +are stored in .context/state/, which is project-scoped and +gitignored. State files are automatically managed by the hooks that +create them; no manual cleanup is needed.

      +

      Log-First Audit Trail

      +

      The event log (.context/state/events.jsonl) is the authoritative +record of what ctx hooks did during a session. Several +audit-adjacent features depend on that log being trustworthy, not +merely best-effort:

      +
        +
      • ctx event / ctx system view-events replays session history + from the log.
      • +
      • Webhook notifications give operators a real-time signal that + assumes every notification corresponds to a logged event.
      • +
      • Drift, freshness, and map-staleness checks count events over + time and surface regressions.
      • +
      +

      A log that silently drops entries while the rest of the system +claims success is worse than no log at all: operators see a green +TUI and a webhook notification and conclude "it happened," even +when the audit trail never landed. The codebase treats this as a +correctness problem, not a UX polish problem.

      +

      The Rule

      +
      +

      Any code path that emits an observable side effect (webhook, +stdout marker, throttle-file touch, state mutation) must append +the corresponding event-log entry first and gate the side +effect on the append succeeding. If the log write fails, the +side effect must not fire.

      +
      +

      In code, this shape:

      +
      if appendErr := event.Append(channel, msg, sessionID, ref); appendErr != nil {
      +    return appendErr // do NOT send the webhook or touch the marker
      +}
      +if sendErr := notify.Send(channel, msg, sessionID, ref); sendErr != nil {
      +    return sendErr
      +}
      +// downstream side effects (marker touch, stdout, etc.)
       
      +

      The nudge.Relay helper in internal/cli/system/core/nudge +enforces this for the common "log + webhook" pair. Hook Run +functions that compose their own sequence (session_event, +heartbeat, several check_* hooks) follow the same ordering +explicitly.

      +

      Known Gaps

      +
        +
      • Nudge webhooks have no log channel. nudge.EmitAndRelay + sends a "nudge" notification before the "relay" event is logged. + The nudge leg is fire-and-forget because no event-log channel + records nudges today. A future refactor may add one; until then + this is the one documented exception.
      • +
      • ctx agent --cooldown and ctx doctor propagate rather than + gate. They surface real errors to the caller (usually Cobra) + rather than deciding what to do with them locally. Editors that + invoke these commands may display errors in an ugly way; the + ugliness is the correct signal (something persisted is broken), + not a defect to smooth over.
      • +
      • Verbose hook logs in core/log.Message stay best-effort. + That logger captures per-hook activity (how many prompts, which + percent, etc.) for debugging; it is NOT the event audit trail. + Its failures go to stderr via log/warn.Warn rather than + propagating, because losing an operational log line is not a + correctness problem.
      • +
      +

      Background

      +

      The error returns on event.Append, io.AppendBytes, +nudge.Relay, and cooldown.Active / cooldown.TouchTombstone +were introduced as part of the resolver-tightening refactor. +Before that change, most hook paths called these helpers and +silently discarded their errors. The principle above was extracted +from the observation that every user-visible correctness problem +hit during the refactor traced back to some function saying "this +succeeded" when the underlying write never landed.

      +

      Best Practices

      +
        +
      1. Review before committing: Always review .context/ files + before committing.
      2. +
      3. Use .gitignore: If you must store sensitive notes locally, + add them to .gitignore.
      4. +
      5. Drift detection: Run ctx drift to check for potential + issues.
      6. +
      7. Permission audit: Review .claude/settings.local.json after + busy sessions.
      8. +
      @@ -1880,7 +1639,7 @@

      ctx backup - +

      - +
    • @@ -270,9 +263,6 @@ - - - @@ -548,6 +538,10 @@ + + + + @@ -577,14 +571,12 @@ - - - - + + @@ -602,8 +594,6 @@ - -
    • @@ -913,6 +903,8 @@ + + @@ -981,6 +973,36 @@ +
    • + + + + + + + + + + Security Design + + + + + + + + +
    • + + + + + + + + + +
    • @@ -1239,7 +1261,7 @@ - Security Policy + Reporting Vulnerabilities @@ -1706,13 +1728,13 @@

      See Also +